Cycles: camera motion blur enabled.

Still more work needed to get object motion blur ready.
Brecht Van Lommel 2012-10-09 18:37:14 +00:00
parent 2a3c65169f
commit 94f869a256
12 changed files with 53 additions and 34 deletions
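
With __CAMERA_MOTION__ now defined, camera motion blur is driven by Blender's standard render toggle: sync_integrator() reads r.use_motion_blur() and sets integrator->motion_blur for final (non-preview) renders. A minimal sketch of flipping that toggle from Blender's Python console, assuming the usual bpy scene context:

    import bpy

    # Standard Blender render setting; BlenderSync::sync_integrator() reads it
    # via r.use_motion_blur(), so it only takes effect for final renders
    # (the !preview check in the diff below skips it for viewport previews).
    bpy.context.scene.render.use_motion_blur = True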

@@ -131,13 +131,9 @@ class CyclesRender_PT_light_paths(CyclesButtonsPanel, Panel):
 class CyclesRender_PT_motion_blur(CyclesButtonsPanel, Panel):
-    bl_label = "Motion Blur"
+    bl_label = "Camera Motion Blur"
     bl_options = {'DEFAULT_CLOSED'}

-    @classmethod
-    def poll(cls, context):
-        return False
-
     def draw_header(self, context):
         rd = context.scene.render

@@ -17,6 +17,7 @@
  */

 #include "camera.h"
+#include "integrator.h"
 #include "graph.h"
 #include "light.h"
 #include "mesh.h"
@@ -227,7 +228,9 @@ void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::DupliObject
 				object->use_motion = true;
 			}

-			sync_mesh_motion(b_ob, object->mesh, motion);
+			/* mesh deformation blur not supported yet */
+			if(!scene->integrator->motion_blur)
+				sync_mesh_motion(b_ob, object->mesh, motion);
 		}

 		return;

@@ -149,6 +149,9 @@ void BlenderSync::sync_data(BL::SpaceView3D b_v3d, BL::Object b_override, const
 void BlenderSync::sync_integrator()
 {
+#ifdef __CAMERA_MOTION__
+	BL::RenderSettings r = b_scene.render();
+#endif
 	PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");

 	experimental = (RNA_enum_get(&cscene, "feature_set") != 0);
@@ -175,7 +178,7 @@ void BlenderSync::sync_integrator()
 	integrator->layer_flag = render_layer.layer;

 	integrator->sample_clamp = get_float(cscene, "sample_clamp");
-#ifdef __MOTION__
+#ifdef __CAMERA_MOTION__
 	integrator->motion_blur = (!preview && r.use_motion_blur());
 #endif

@@ -349,7 +349,7 @@ __device_inline float3 bvh_triangle_refine(KernelGlobals *kg, ShaderData *sd, co
 #ifdef __INTERSECTION_REFINE__
 	if(isect->object != ~0) {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 		Transform tfm = sd->ob_itfm;
 #else
 		Transform tfm = object_fetch_transform(kg, isect->object, ray->time, OBJECT_INVERSE_TRANSFORM);
@@ -370,7 +370,7 @@ __device_inline float3 bvh_triangle_refine(KernelGlobals *kg, ShaderData *sd, co
 	P = P + D*rt;

 	if(isect->object != ~0) {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 		Transform tfm = sd->ob_tfm;
 #else
 		Transform tfm = object_fetch_transform(kg, isect->object, ray->time, OBJECT_TRANSFORM);

@@ -63,7 +63,7 @@ __device void camera_sample_perspective(KernelGlobals *kg, float raster_x, float
 	/* transform ray from camera to world */
 	Transform cameratoworld = kernel_data.cam.cameratoworld;

-#ifdef __MOTION__
+#ifdef __CAMERA_MOTION__
 	if(kernel_data.cam.have_motion)
 		transform_motion_interpolate(&cameratoworld, &kernel_data.cam.motion, ray->time);
 #endif
@@ -106,7 +106,7 @@ __device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, floa
 	/* transform ray from camera to world */
 	Transform cameratoworld = kernel_data.cam.cameratoworld;

-#ifdef __MOTION__
+#ifdef __CAMERA_MOTION__
 	if(kernel_data.cam.have_motion)
 		transform_motion_interpolate(&cameratoworld, &kernel_data.cam.motion, ray->time);
 #endif
@@ -180,7 +180,7 @@ __device void camera_sample_panorama(KernelGlobals *kg, float raster_x, float ra
 	/* transform ray from camera to world */
 	Transform cameratoworld = kernel_data.cam.cameratoworld;

-#ifdef __MOTION__
+#ifdef __CAMERA_MOTION__
 	if(kernel_data.cam.have_motion)
 		transform_motion_interpolate(&cameratoworld, &kernel_data.cam.motion, ray->time);
 #endif
@@ -212,7 +212,7 @@ __device void camera_sample(KernelGlobals *kg, int x, int y, float filter_u, flo
 	float raster_x = x + kernel_tex_interp(__filter_table, filter_u, FILTER_TABLE_SIZE);
 	float raster_y = y + kernel_tex_interp(__filter_table, filter_v, FILTER_TABLE_SIZE);

-#ifdef __MOTION__
+#ifdef __CAMERA_MOTION__
 	/* motion blur */
 	if(kernel_data.cam.shuttertime == 0.0f)
 		ray->time = TIME_INVALID;

@@ -31,7 +31,7 @@ __device_inline Transform object_fetch_transform(KernelGlobals *kg, int object,
 {
 	Transform tfm;

-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	/* if we do motion blur */
 	if(sd->flag & SD_OBJECT_MOTION) {
 		/* fetch motion transforms */
@@ -70,7 +70,7 @@ __device_inline Transform object_fetch_transform(KernelGlobals *kg, int object,
 __device_inline void object_position_transform(KernelGlobals *kg, ShaderData *sd, float3 *P)
 {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	*P = transform_point(&sd->ob_tfm, *P);
 #else
 	Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_TRANSFORM);
@@ -80,7 +80,7 @@ __device_inline void object_position_transform(KernelGlobals *kg, ShaderData *sd
 __device_inline void object_inverse_position_transform(KernelGlobals *kg, ShaderData *sd, float3 *P)
 {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	*P = transform_point(&sd->ob_itfm, *P);
 #else
 	Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_INVERSE_TRANSFORM);
@@ -90,7 +90,7 @@ __device_inline void object_inverse_position_transform(KernelGlobals *kg, Shader
 __device_inline void object_inverse_normal_transform(KernelGlobals *kg, ShaderData *sd, float3 *N)
 {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	*N = normalize(transform_direction_transposed(&sd->ob_tfm, *N));
 #else
 	Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_TRANSFORM);
@@ -100,7 +100,7 @@ __device_inline void object_inverse_normal_transform(KernelGlobals *kg, ShaderDa
 __device_inline void object_normal_transform(KernelGlobals *kg, ShaderData *sd, float3 *N)
 {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	*N = normalize(transform_direction_transposed(&sd->ob_itfm, *N));
 #else
 	Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_INVERSE_TRANSFORM);
@@ -110,7 +110,7 @@ __device_inline void object_normal_transform(KernelGlobals *kg, ShaderData *sd,
 __device_inline void object_dir_transform(KernelGlobals *kg, ShaderData *sd, float3 *D)
 {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	*D = transform_direction(&sd->ob_tfm, *D);
 #else
 	Transform tfm = object_fetch_transform(kg, sd->object, 0.0f, OBJECT_TRANSFORM);
@@ -120,7 +120,7 @@ __device_inline void object_dir_transform(KernelGlobals *kg, ShaderData *sd, flo
 __device_inline float3 object_location(KernelGlobals *kg, ShaderData *sd)
 {
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	return make_float3(sd->ob_tfm.x.w, sd->ob_tfm.y.w, sd->ob_tfm.z.w);
 #else
 	Transform tfm = object_fetch_transform(kg, sd->object, 0.0f, OBJECT_TRANSFORM);

@@ -343,7 +343,7 @@ __device float4 kernel_path_progressive(KernelGlobals *kg, RNG *rng, int sample,
 				light_ray.P = ray_offset(sd.P, sd.Ng);
 				light_ray.D = ao_D;
 				light_ray.t = kernel_data.background.ao_distance;
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 				light_ray.time = sd.time;
 #endif
@@ -368,7 +368,7 @@ __device float4 kernel_path_progressive(KernelGlobals *kg, RNG *rng, int sample,
 				BsdfEval L_light;
 				bool is_lamp;

-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 				light_ray.time = sd.time;
 #endif
@@ -520,7 +520,7 @@ __device void kernel_path_indirect(KernelGlobals *kg, RNG *rng, int sample, Ray
 			light_ray.P = ray_offset(sd.P, sd.Ng);
 			light_ray.D = ao_D;
 			light_ray.t = kernel_data.background.ao_distance;
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 			light_ray.time = sd.time;
 #endif
@@ -545,7 +545,7 @@ __device void kernel_path_indirect(KernelGlobals *kg, RNG *rng, int sample, Ray
 			BsdfEval L_light;
 			bool is_lamp;

-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 			light_ray.time = sd.time;
 #endif
@@ -728,7 +728,7 @@ __device float4 kernel_path_non_progressive(KernelGlobals *kg, RNG *rng, int sam
 				light_ray.P = ray_offset(sd.P, sd.Ng);
 				light_ray.D = ao_D;
 				light_ray.t = kernel_data.background.ao_distance;
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 				light_ray.time = sd.time;
 #endif
@@ -748,7 +748,7 @@ __device float4 kernel_path_non_progressive(KernelGlobals *kg, RNG *rng, int sam
 					BsdfEval L_light;
 					bool is_lamp;

-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 					light_ray.time = sd.time;
 #endif
@@ -867,7 +867,7 @@ __device float4 kernel_path_non_progressive(KernelGlobals *kg, RNG *rng, int sam
 			bsdf_ray.dP = sd.dP;
 			bsdf_ray.dD = bsdf_domega_in;
 #endif
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 			bsdf_ray.time = sd.time;
 #endif
@@ -925,7 +925,7 @@ __device void kernel_path_trace(KernelGlobals *kg,
 	float lens_u = path_rng(kg, &rng, sample, PRNG_LENS_U);
 	float lens_v = path_rng(kg, &rng, sample, PRNG_LENS_V);

-#ifdef __MOTION__
+#ifdef __CAMERA_MOTION__
 	float time = path_rng(kg, &rng, sample, PRNG_TIME);
 #else
 	float time = 0.0f;

@@ -68,7 +68,7 @@ __device_inline void shader_setup_from_ray(KernelGlobals *kg, ShaderData *sd,
 #endif

 	/* matrices and time */
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	sd->ob_tfm = object_fetch_transform(kg, sd->object, ray->time, OBJECT_TRANSFORM);
 	sd->ob_itfm = object_fetch_transform(kg, sd->object, ray->time, OBJECT_INVERSE_TRANSFORM);
@@ -171,7 +171,7 @@ __device void shader_setup_from_sample(KernelGlobals *kg, ShaderData *sd,
 	}
 #endif

-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	sd->time = time;
 	sd->ob_tfm = object_fetch_transform(kg, sd->object, time, OBJECT_TRANSFORM);
@@ -275,7 +275,7 @@ __device_inline void shader_setup_from_background(KernelGlobals *kg, ShaderData
 	sd->I = -sd->P;
 	sd->shader = kernel_data.background.shader;
 	sd->flag = kernel_tex_fetch(__shader_flag, (sd->shader & SHADER_MASK)*2);
-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	sd->time = ray->time;
 #endif
 	sd->ray_length = 0.0f;

@@ -108,7 +108,8 @@ CCL_NAMESPACE_BEGIN
 #define __PASSES__
 #define __BACKGROUND_MIS__
 #define __AO__
-//#define __MOTION__
+#define __CAMERA_MOTION__
+//#define __OBJECT_MOTION__
 #endif

 //#define __SOBOL_FULL_SCREEN__
@@ -129,7 +130,7 @@ enum PathTraceDimension {
 	PRNG_FILTER_V = 1,
 	PRNG_LENS_U = 2,
 	PRNG_LENS_V = 3,
-#ifdef __MOTION__
+#ifdef __CAMERA_MOTION__
 	PRNG_TIME = 4,
 	PRNG_UNUSED = 5,
 	PRNG_BASE_NUM = 6,
@@ -426,7 +427,7 @@ typedef struct ShaderData {
 	/* length of the ray being shaded */
 	float ray_length;

-#ifdef __MOTION__
+#ifdef __OBJECT_MOTION__
 	/* object <-> world space transformations, cached to avoid
 	 * re-interpolating them constantly for shading */
 	Transform ob_tfm;

@@ -193,6 +193,7 @@ void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
 			}
 		}
 	}
+#ifdef __CAMERA_MOTION__
 	else if(need_motion == Scene::MOTION_BLUR) {
 		/* todo: exact camera position will not be hit this way */
 		if(use_motion) {
@@ -200,6 +201,7 @@ void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
 			kcam->have_motion = 1;
 		}
 	}
+#endif

 	/* depth of field */
 	kcam->aperturesize = aperturesize;
@@ -208,7 +210,11 @@ void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
 	kcam->bladesrotation = bladesrotation;

 	/* motion blur */
+#ifdef __CAMERA_MOTION__
 	kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime: 0.0f;
+#else
+	kcam->shuttertime = 0.0f;
+#endif

 	/* type */
 	kcam->type = type;

@@ -722,7 +722,11 @@ void MeshManager::device_update(Device *device, DeviceScene *dscene, Scene *scen
 	foreach(Shader *shader, scene->shaders)
 		shader->need_update_attributes = false;

+#ifdef __OBJECT_MOTION__
 	bool motion_blur = scene->need_motion() == Scene::MOTION_BLUR;
+#else
+	bool motion_blur = false;
+#endif

 	foreach(Object *object, scene->objects)
 		object->compute_bounds(motion_blur);

@@ -220,6 +220,7 @@ void ObjectManager::device_update_transforms(Device *device, DeviceScene *dscene
 			memcpy(&objects[offset+8], &mtfm_pre, sizeof(float4)*4);
 			memcpy(&objects[offset+12], &mtfm_post, sizeof(float4)*4);
 		}
+#ifdef __OBJECT_MOTION__
 		else if(need_motion == Scene::MOTION_BLUR) {
 			if(ob->use_motion) {
 				/* decompose transformations for interpolation */
@@ -234,6 +235,7 @@ void ObjectManager::device_update_transforms(Device *device, DeviceScene *dscene
 				memcpy(&objects[offset+8], &no_motion, sizeof(float4));
 			}
 		}
+#endif

 		/* dupli object coords */
 		objects[offset+16] = make_float4(ob->dupli_generated[0], ob->dupli_generated[1], ob->dupli_generated[2], 0.0f);
@@ -297,7 +299,11 @@ void ObjectManager::apply_static_transforms(Scene *scene, Progress& progress)
 	/* counter mesh users */
 	map<Mesh*, int> mesh_users;

+#ifdef __OBJECT_MOTION__
 	bool motion_blur = scene->need_motion() == Scene::MOTION_BLUR;
+#else
+	bool motion_blur = false;
+#endif

 	foreach(Object *object, scene->objects) {
 		map<Mesh*, int>::iterator it = mesh_users.find(object->mesh);