/* blender/intern/cycles/kernel/geom/geom_object.h */

/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Object Primitive
*
* All mesh and curve primitives are part of an object. The same mesh and curves
* may be instanced multiple times by different objects.
*
* If the mesh is not instanced multiple times, the object will not be explicitly
* stored as a primitive in the BVH; instead the bare triangles or curves are
* directly primitives in the BVH with world space locations applied, and the object
* ID is looked up afterwards. */
CCL_NAMESPACE_BEGIN
/* Object attributes, for now a fixed size and contents */
enum ObjectTransform {
OBJECT_TRANSFORM = 0,
OBJECT_TRANSFORM_MOTION_PRE = 0,
OBJECT_INVERSE_TRANSFORM = 4,
OBJECT_TRANSFORM_MOTION_POST = 4,
OBJECT_PROPERTIES = 8,
OBJECT_DUPLI = 9
};
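/* Per-object data is an array of float4 slots, indexed as
 * object*OBJECT_SIZE + offset. As the offsets above and the fetch functions
 * below imply: slots 0-2 hold the affine object-to-world transform (the
 * fourth row is implicit as (0, 0, 0, 1)) and slots 4-6 its inverse, while
 * for motion blurred objects slots 0-7 instead hold a decomposed motion
 * transform. Slot 8 holds the object properties and slots 9-10 the dupli
 * coordinates and motion info. */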
enum ObjectVectorTransform {
OBJECT_VECTOR_MOTION_PRE = 0,
OBJECT_VECTOR_MOTION_POST = 3
};
/* Object to world space transformation */
ccl_device_inline Transform object_fetch_transform(KernelGlobals *kg, int object, enum ObjectTransform type)
{
int offset = object*OBJECT_SIZE + (int)type;
Transform tfm;
tfm.x = kernel_tex_fetch(__objects, offset + 0);
tfm.y = kernel_tex_fetch(__objects, offset + 1);
tfm.z = kernel_tex_fetch(__objects, offset + 2);
tfm.w = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
return tfm;
}
/* Object to world space transformation for motion vectors */
ccl_device_inline Transform object_fetch_vector_transform(KernelGlobals *kg, int object, enum ObjectVectorTransform type)
{
int offset = object*OBJECT_VECTOR_SIZE + (int)type;
Transform tfm;
tfm.x = kernel_tex_fetch(__objects_vector, offset + 0);
tfm.y = kernel_tex_fetch(__objects_vector, offset + 1);
tfm.z = kernel_tex_fetch(__objects_vector, offset + 2);
tfm.w = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
return tfm;
}
/* Motion blurred object transformations */
#ifdef __OBJECT_MOTION__
ccl_device_inline Transform object_fetch_transform_motion(KernelGlobals *kg, int object, float time)
{
DecompMotionTransform motion;
int offset = object*OBJECT_SIZE + (int)OBJECT_TRANSFORM_MOTION_PRE;
motion.mid.x = kernel_tex_fetch(__objects, offset + 0);
motion.mid.y = kernel_tex_fetch(__objects, offset + 1);
motion.mid.z = kernel_tex_fetch(__objects, offset + 2);
motion.mid.w = kernel_tex_fetch(__objects, offset + 3);
motion.pre_x = kernel_tex_fetch(__objects, offset + 4);
motion.pre_y = kernel_tex_fetch(__objects, offset + 5);
motion.post_x = kernel_tex_fetch(__objects, offset + 6);
motion.post_y = kernel_tex_fetch(__objects, offset + 7);
Transform tfm;
transform_motion_interpolate(&tfm, &motion, time);
return tfm;
}
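/* The eight float4s fetched above form a DecompMotionTransform: four for the
 * mid-shutter transform and two each for the decomposed pre- and post-shutter
 * data, which transform_motion_interpolate() blends for the ray time within
 * the shutter interval. */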
ccl_device_inline Transform object_fetch_transform_motion_test(KernelGlobals *kg, int object, float time, Transform *itfm)
{
int object_flag = kernel_tex_fetch(__object_flag, object);
if(object_flag & SD_OBJECT_MOTION) {
/* if we do motion blur */
Transform tfm = object_fetch_transform_motion(kg, object, time);
if(itfm)
*itfm = transform_quick_inverse(tfm);
return tfm;
}
else {
Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
if(itfm)
*itfm = object_fetch_transform(kg, object, OBJECT_INVERSE_TRANSFORM);
return tfm;
}
}
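/* The _test variant checks the per-object motion flag, so objects without
 * motion fall back to the static transform and its precomputed inverse, while
 * moving objects interpolate and invert on the fly (transform_quick_inverse
 * assumes an affine transform). */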
#endif
/* Transform position from object to world space */
ccl_device_inline void object_position_transform(KernelGlobals *kg, const ShaderData *sd, float3 *P)
{
#ifdef __OBJECT_MOTION__
*P = transform_point_auto(&ccl_fetch(sd, ob_tfm), *P);
#else
Transform tfm = object_fetch_transform(kg, ccl_fetch(sd, object), OBJECT_TRANSFORM);
*P = transform_point(&tfm, *P);
#endif
}
/* Transform position from world to object space */
ccl_device_inline void object_inverse_position_transform(KernelGlobals *kg, const ShaderData *sd, float3 *P)
{
#ifdef __OBJECT_MOTION__
*P = transform_point_auto(&ccl_fetch(sd, ob_itfm), *P);
#else
Transform tfm = object_fetch_transform(kg, ccl_fetch(sd, object), OBJECT_INVERSE_TRANSFORM);
*P = transform_point(&tfm, *P);
#endif
}
/* Transform normal from world to object space */
ccl_device_inline void object_inverse_normal_transform(KernelGlobals *kg, const ShaderData *sd, float3 *N)
{
#ifdef __OBJECT_MOTION__
*N = normalize(transform_direction_transposed_auto(&ccl_fetch(sd, ob_tfm), *N));
#else
Transform tfm = object_fetch_transform(kg, ccl_fetch(sd, object), OBJECT_TRANSFORM);
*N = normalize(transform_direction_transposed(&tfm, *N));
#endif
}
/* Transform normal from object to world space */
ccl_device_inline void object_normal_transform(KernelGlobals *kg, const ShaderData *sd, float3 *N)
{
#ifdef __OBJECT_MOTION__
*N = normalize(transform_direction_transposed_auto(&ccl_fetch(sd, ob_itfm), *N));
#else
Transform tfm = object_fetch_transform(kg, ccl_fetch(sd, object), OBJECT_INVERSE_TRANSFORM);
*N = normalize(transform_direction_transposed(&tfm, *N));
#endif
}
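/* Note for the two normal transforms above: normals transform with the
 * transpose of the inverse matrix. Object-to-world therefore uses the
 * transposed inverse transform and world-to-object the transposed forward
 * transform, which is why each function fetches the opposite transform of
 * what its name might suggest. */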
/* Transform direction vector from object to world space */
ccl_device_inline void object_dir_transform(KernelGlobals *kg, const ShaderData *sd, float3 *D)
{
#ifdef __OBJECT_MOTION__
*D = transform_direction_auto(&ccl_fetch(sd, ob_tfm), *D);
#else
Transform tfm = object_fetch_transform(kg, ccl_fetch(sd, object), OBJECT_TRANSFORM);
*D = transform_direction(&tfm, *D);
#endif
}
/* Transform direction vector from world to object space */
ccl_device_inline void object_inverse_dir_transform(KernelGlobals *kg, const ShaderData *sd, float3 *D)
{
#ifdef __OBJECT_MOTION__
*D = transform_direction_auto(&ccl_fetch(sd, ob_itfm), *D);
#else
Transform tfm = object_fetch_transform(kg, ccl_fetch(sd, object), OBJECT_INVERSE_TRANSFORM);
*D = transform_direction(&tfm, *D);
#endif
}
/* Object center position */
ccl_device_inline float3 object_location(KernelGlobals *kg, const ShaderData *sd)
{
if(ccl_fetch(sd, object) == OBJECT_NONE)
return make_float3(0.0f, 0.0f, 0.0f);
#ifdef __OBJECT_MOTION__
return make_float3(ccl_fetch(sd, ob_tfm).x.w, ccl_fetch(sd, ob_tfm).y.w, ccl_fetch(sd, ob_tfm).z.w);
#else
Transform tfm = object_fetch_transform(kg, ccl_fetch(sd, object), OBJECT_TRANSFORM);
return make_float3(tfm.x.w, tfm.y.w, tfm.z.w);
#endif
}
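/* The translation lives in the w components of the three transform rows,
 * i.e. the fourth column of the affine matrix. */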
/* Total surface area of object */
ccl_device_inline float object_surface_area(KernelGlobals *kg, int object)
{
int offset = object*OBJECT_SIZE + OBJECT_PROPERTIES;
float4 f = kernel_tex_fetch(__objects, offset);
return f.x;
}
/* Pass ID number of object */
ccl_device_inline float object_pass_id(KernelGlobals *kg, int object)
{
if(object == OBJECT_NONE)
return 0.0f;
int offset = object*OBJECT_SIZE + OBJECT_PROPERTIES;
float4 f = kernel_tex_fetch(__objects, offset);
return f.y;
}
/* Per object random number for shader variation */
ccl_device_inline float object_random_number(KernelGlobals *kg, int object)
{
if(object == OBJECT_NONE)
return 0.0f;
int offset = object*OBJECT_SIZE + OBJECT_PROPERTIES;
float4 f = kernel_tex_fetch(__objects, offset);
return f.z;
}
/* Particle ID from which this object was generated */
ccl_device_inline int object_particle_id(KernelGlobals *kg, int object)
{
if(object == OBJECT_NONE)
return 0;
int offset = object*OBJECT_SIZE + OBJECT_PROPERTIES;
float4 f = kernel_tex_fetch(__objects, offset);
return __float_as_uint(f.w);
}
/* Generated texture coordinate on surface from where object was instanced */
ccl_device_inline float3 object_dupli_generated(KernelGlobals *kg, int object)
{
if(object == OBJECT_NONE)
return make_float3(0.0f, 0.0f, 0.0f);
int offset = object*OBJECT_SIZE + OBJECT_DUPLI;
float4 f = kernel_tex_fetch(__objects, offset);
return make_float3(f.x, f.y, f.z);
}
/* UV texture coordinate on surface from where object was instanced */
ccl_device_inline float3 object_dupli_uv(KernelGlobals *kg, int object)
{
if(object == OBJECT_NONE)
return make_float3(0.0f, 0.0f, 0.0f);
int offset = object*OBJECT_SIZE + OBJECT_DUPLI;
float4 f = kernel_tex_fetch(__objects, offset + 1);
return make_float3(f.x, f.y, 0.0f);
}
/* Information about mesh for motion blurred triangles and curves */
ccl_device_inline void object_motion_info(KernelGlobals *kg, int object, int *numsteps, int *numverts, int *numkeys)
{
int offset = object*OBJECT_SIZE + OBJECT_DUPLI;
if(numkeys) {
float4 f = kernel_tex_fetch(__objects, offset);
*numkeys = __float_as_int(f.w);
}
float4 f = kernel_tex_fetch(__objects, offset + 1);
if(numsteps)
*numsteps = __float_as_int(f.z);
if(numverts)
*numverts = __float_as_int(f.w);
}
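/* Note that the motion info above is packed into otherwise unused components
 * of the dupli slots: numkeys in the w of the generated coordinate, and
 * numsteps/numverts in the z/w of the UV coordinate. */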
/* Pass ID for shader */
ccl_device int shader_pass_id(KernelGlobals *kg, const ShaderData *sd)
{
return kernel_tex_fetch(__shader_flag, (ccl_fetch(sd, shader) & SHADER_MASK)*2 + 1);
}
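/* __shader_flag stores two ints per shader (hence the *2 stride); the first
 * entry presumably holds the shader flags, and the second, fetched here, the
 * pass ID. */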
/* Particle data from which object was instanced */
ccl_device_inline float particle_index(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f = kernel_tex_fetch(__particles, offset + 0);
return f.x;
}
ccl_device float particle_age(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f = kernel_tex_fetch(__particles, offset + 0);
return f.y;
}
ccl_device float particle_lifetime(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f = kernel_tex_fetch(__particles, offset + 0);
return f.z;
}
ccl_device float particle_size(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f = kernel_tex_fetch(__particles, offset + 0);
return f.w;
}
ccl_device float4 particle_rotation(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f = kernel_tex_fetch(__particles, offset + 1);
return f;
}
ccl_device float3 particle_location(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f = kernel_tex_fetch(__particles, offset + 2);
return make_float3(f.x, f.y, f.z);
}
ccl_device float3 particle_velocity(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f2 = kernel_tex_fetch(__particles, offset + 2);
float4 f3 = kernel_tex_fetch(__particles, offset + 3);
return make_float3(f2.w, f3.x, f3.y);
}
ccl_device float3 particle_angular_velocity(KernelGlobals *kg, int particle)
{
int offset = particle*PARTICLE_SIZE;
float4 f3 = kernel_tex_fetch(__particles, offset + 3);
float4 f4 = kernel_tex_fetch(__particles, offset + 4);
return make_float3(f3.z, f3.w, f4.x);
}
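/* Particle data layout as implied by the accessors above, with PARTICLE_SIZE
 * float4 slots per particle:
 *
 *   slot 0: index, age, lifetime, size
 *   slot 1: rotation quaternion
 *   slot 2: location.xyz, velocity.x
 *   slot 3: velocity.yz, angular_velocity.xy
 *   slot 4: angular_velocity.z */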
/* Object intersection in BVH */
ccl_device_inline float3 bvh_clamp_direction(float3 dir)
{
/* clamp absolute values by exp2f(-80.0f) to avoid division by zero when calculating inverse direction */
float ooeps = 8.271806E-25f;
return make_float3((fabsf(dir.x) > ooeps)? dir.x: copysignf(ooeps, dir.x),
(fabsf(dir.y) > ooeps)? dir.y: copysignf(ooeps, dir.y),
(fabsf(dir.z) > ooeps)? dir.z: copysignf(ooeps, dir.z));
}
ccl_device_inline float3 bvh_inverse_direction(float3 dir)
{
return 1.0f / dir;
}
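/* The literal 8.271806E-25f is exp2(-80). With the clamping above, each
 * component of 1.0f/dir stays below 2^80 (about 1.2e24), well within float
 * range, so the inverse direction used for BVH slab tests never becomes
 * infinite, and copysignf() preserves the direction sign for traversal. */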
/* Transform ray into object space to enter static object in BVH */
ccl_device_inline void bvh_instance_push(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *dir, float3 *idir, ccl_addr_space float *t)
{
Transform tfm = object_fetch_transform(kg, object, OBJECT_INVERSE_TRANSFORM);
*P = transform_point(&tfm, ray->P);
float len;
*dir = bvh_clamp_direction(normalize_len(transform_direction(&tfm, ray->D), &len));
*idir = bvh_inverse_direction(*dir);
if(*t != FLT_MAX)
*t *= len;
}
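/* Because the object-space direction is renormalized, distances along the ray
 * scale by len, so multiplying t converts the world-space hit distance into
 * object space; FLT_MAX is left untouched as the unbounded-ray sentinel. */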
#ifdef __QBVH__
/* Same as above, but optimized for QBVH scene intersection,
* which needs to modify two max distances.
*
* TODO(sergey): Investigate if passing NULL instead of t1 gets optimized
* so we can avoid having this duplication.
*/
ccl_device_inline void qbvh_instance_push(KernelGlobals *kg,
int object,
const Ray *ray,
float3 *P,
float3 *dir,
float3 *idir,
float *t,
float *t1)
{
Transform tfm = object_fetch_transform(kg, object, OBJECT_INVERSE_TRANSFORM);
*P = transform_point(&tfm, ray->P);
float len;
*dir = bvh_clamp_direction(normalize_len(transform_direction(&tfm, ray->D), &len));
*idir = bvh_inverse_direction(*dir);
if(*t != FLT_MAX)
*t *= len;
if(*t1 != -FLT_MAX)
*t1 *= len;
}
#endif
/* Transform ray to exit static object in BVH */
ccl_device_inline void bvh_instance_pop(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *dir, float3 *idir, ccl_addr_space float *t)
{
if(*t != FLT_MAX) {
Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
*t *= len(transform_direction(&tfm, 1.0f/(*idir)));
}
*P = ray->P;
*dir = bvh_clamp_direction(ray->D);
*idir = bvh_inverse_direction(*dir);
}
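/* A typical traversal pattern around an instance node looks roughly like the
 * sketch below (the isect variable is hypothetical, for illustration only):
 *
 *   bvh_instance_push(kg, object, ray, &P, &dir, &idir, &isect->t);
 *   ... intersect against the instanced object's BVH ...
 *   bvh_instance_pop(kg, object, ray, &P, &dir, &idir, &isect->t);
 *
 * pop reconstructs the object-space direction as 1.0f/(*idir) and rescales t
 * back to world space before restoring the world-space ray. */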
/* Same as above, but returns scale factor to apply to multiple intersection distances */
ccl_device_inline void bvh_instance_pop_factor(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *dir, float3 *idir, float *t_fac)
{
Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
*t_fac = len(transform_direction(&tfm, 1.0f/(*idir)));
*P = ray->P;
*dir = bvh_clamp_direction(ray->D);
*idir = bvh_inverse_direction(*dir);
}
#ifdef __OBJECT_MOTION__
/* Transform ray into object space to enter motion blurred object in BVH */
ccl_device_inline void bvh_instance_motion_push(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *dir, float3 *idir, float *t, Transform *tfm)
{
Transform itfm;
*tfm = object_fetch_transform_motion_test(kg, object, ray->time, &itfm);
*P = transform_point(&itfm, ray->P);
float len;
*dir = bvh_clamp_direction(normalize_len(transform_direction(&itfm, ray->D), &len));
*idir = bvh_inverse_direction(*dir);
if(*t != FLT_MAX)
*t *= len;
}
#ifdef __QBVH__
/* Same as above, but optimized for QBVH scene intersection,
* which needs to modify two max distances.
*
* TODO(sergey): Investigate if passing NULL instead of t1 gets optimized
* so we can avoid having this duplication.
*/
ccl_device_inline void qbvh_instance_motion_push(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *dir, float3 *idir, float *t, float *t1, Transform *tfm)
{
Transform itfm;
*tfm = object_fetch_transform_motion_test(kg, object, ray->time, &itfm);
*P = transform_point(&itfm, ray->P);
float len;
*dir = bvh_clamp_direction(normalize_len(transform_direction(&itfm, ray->D), &len));
*idir = bvh_inverse_direction(*dir);
if(*t != FLT_MAX)
*t *= len;
if(*t1 != -FLT_MAX)
*t1 *= len;
}
#endif
/* Transform ray to exit motion blurred object in BVH */
ccl_device_inline void bvh_instance_motion_pop(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *dir, float3 *idir, float *t, Transform *tfm)
{
if(*t != FLT_MAX)
*t *= len(transform_direction(tfm, 1.0f/(*idir)));
*P = ray->P;
*dir = bvh_clamp_direction(ray->D);
*idir = bvh_inverse_direction(*dir);
}
/* Same as above, but returns scale factor to apply to multiple intersection distances */
ccl_device_inline void bvh_instance_motion_pop_factor(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *dir, float3 *idir, float *t_fac, Transform *tfm)
{
*t_fac = len(transform_direction(tfm, 1.0f/(*idir)));
*P = ray->P;
*dir = bvh_clamp_direction(ray->D);
*idir = bvh_inverse_direction(*dir);
}
#endif
/* TODO(sergey): This is only needed until we've got OpenCL 2.0
* on all devices we consider supported. It'll be replaced with
* generic address space.
*/
#ifdef __KERNEL_OPENCL__
ccl_device_inline void object_dir_transform_addrspace(KernelGlobals *kg,
const ShaderData *sd,
ccl_addr_space float3 *D)
{
float3 private_D = *D;
object_dir_transform(kg, sd, &private_D);
*D = private_D;
}
ccl_device_inline void object_normal_transform_addrspace(KernelGlobals *kg,
const ShaderData *sd,
ccl_addr_space float3 *N)
{
float3 private_N = *N;
object_normal_transform(kg, sd, &private_N);
*N = private_N;
}
#endif
#ifndef __KERNEL_OPENCL__
# define object_dir_transform_auto object_dir_transform
# define object_normal_transform_auto object_normal_transform
#else
# define object_dir_transform_auto object_dir_transform_addrspace
# define object_normal_transform_auto object_normal_transform_addrspace
#endif
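/* The _auto macros select the right variant per device: on OpenCL the
 * wrappers above copy through a private temporary, since the caller's pointer
 * may live in a non-private address space; elsewhere the plain functions are
 * used directly. */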
CCL_NAMESPACE_END