2011-04-27 11:58:34 +00:00
|
|
|
/*
|
2013-08-18 14:16:15 +00:00
|
|
|
* Copyright 2011-2013 Blender Foundation
|
2011-04-27 11:58:34 +00:00
|
|
|
*
|
2013-08-18 14:16:15 +00:00
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
2011-04-27 11:58:34 +00:00
|
|
|
*
|
2013-08-18 14:16:15 +00:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2011-04-27 11:58:34 +00:00
|
|
|
*
|
2013-08-18 14:16:15 +00:00
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
2014-12-25 01:50:24 +00:00
|
|
|
* limitations under the License.
|
2011-04-27 11:58:34 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
CCL_NAMESPACE_BEGIN
|
|
|
|
|
|
|
|
/* Perspective Camera */
|
|
|
|
|
2013-11-15 23:17:10 +00:00
|
|
|
ccl_device float2 camera_sample_aperture(KernelGlobals *kg, float u, float v)
{
	/* Sample a 2D point on the camera aperture from random numbers (u, v).
	 * A blade count of zero means a perfectly circular aperture; otherwise
	 * the aperture is the regular polygon formed by the iris blades. */
	const float num_blades = kernel_data.cam.blades;
	float2 bokeh;

	if(num_blades == 0.0f) {
		/* Circular aperture: uniform sample on the unit disk. */
		bokeh = concentric_sample_disk(u, v);
	}
	else {
		/* Polygonal aperture, rotated by the blade rotation setting. */
		const float rotation = kernel_data.cam.bladesrotation;
		bokeh = regular_polygon_sample(num_blades, rotation, u, v);
	}

	/* Squash horizontally to emulate an anamorphic lens bokeh. */
	bokeh.x *= kernel_data.cam.inv_aperture_ratio;

	return bokeh;
}
|
|
|
|
|
Cycles: OpenCL kernel split
This commit contains all the work related on the AMD megakernel split work
which was mainly done by Varun Sundar, George Kyriazis and Lenny Wang, plus
some help from Sergey Sharybin, Martijn Berger, Thomas Dinges and likely
someone else which we're forgetting to mention.
Currently only AMD cards are enabled for the new split kernel, but it is
possible to force split opencl kernel to be used by setting the following
environment variable: CYCLES_OPENCL_SPLIT_KERNEL_TEST=1.
Not all the features are supported yet, and that being said no motion blur,
camera blur, SSS and volumetrics for now. Also transparent shadows are
disabled on AMD device because of some compiler bug.
This kernel is also only implements regular path tracing and supporting
branched one will take a bit. Branched path tracing is exposed to the
interface still, which is a bit misleading and will be hidden there soon.
More feature will be enabled once they're ported to the split kernel and
tested.
Neither regular CPU nor CUDA has any difference, they're generating the
same exact code, which means no regressions/improvements there.
Based on the research paper:
https://research.nvidia.com/sites/default/files/publications/laine2013hpg_paper.pdf
Here's the documentation:
https://docs.google.com/document/d/1LuXW-CV-sVJkQaEGZlMJ86jZ8FmoPfecaMdR-oiWbUY/edit
Design discussion of the patch:
https://developer.blender.org/T44197
Differential Revision: https://developer.blender.org/D1200
2015-05-09 14:34:30 +00:00
|
|
|
ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
{
	/* Create ray from raster position: unproject the pixel coordinate into
	 * camera space. */
	Transform rastertocamera = kernel_data.cam.rastertocamera;
	float3 P_raster = make_float3(raster_x, raster_y, 0.0f);
	float3 Pcamera = transform_perspective(&rastertocamera, P_raster);

#ifdef __CAMERA_MOTION__
	if(kernel_data.cam.have_perspective_motion) {
		/* TODO(sergey): Currently we interpolate projected coordinate which
		 * gives nice looking result and which is simple, but is in fact a bit
		 * different comparing to constructing projective matrix from an
		 * interpolated field of view.
		 */
		if(ray->time < 0.5f) {
			/* First half of the shutter: blend from the pre-motion projection. */
			Transform rastertocamera_pre = kernel_data.cam.perspective_motion.pre;
			float3 Pcamera_pre =
			        transform_perspective(&rastertocamera_pre, P_raster);
			Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
		}
		else {
			/* Second half of the shutter: blend towards the post-motion projection. */
			Transform rastertocamera_post = kernel_data.cam.perspective_motion.post;
			float3 Pcamera_post =
			        transform_perspective(&rastertocamera_post, P_raster);
			Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
		}
	}
#endif

	/* Ray starts at the camera-space origin, aimed at the unprojected
	 * raster position. */
	float3 P = make_float3(0.0f, 0.0f, 0.0f);
	float3 D = Pcamera;

	/* Modify ray for depth of field. */
	float aperture_size = kernel_data.cam.aperturesize;

	if(aperture_size > 0.0f) {
		/* Sample point on aperture. */
		float2 lens_uv = camera_sample_aperture(kg, lens_u, lens_v)*aperture_size;

		/* Compute point on plane of focus. */
		float t_focus = kernel_data.cam.focaldistance/D.z;
		float3 Pfocus = D*t_focus;

		/* Update ray for effect of lens: origin on the lens, direction
		 * through the in-focus point. */
		P = make_float3(lens_uv.x, lens_uv.y, 0.0f);
		D = normalize(Pfocus - P);
	}

	/* Transform ray from camera to world space. */
	Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
	if(kernel_data.cam.have_motion) {
#  ifdef __KERNEL_OPENCL__
		/* For OpenCL, copy to a local variable before the cast. */
		const MotionTransform tfm = kernel_data.cam.motion;
		transform_motion_interpolate(&cameratoworld,
		                             ((const DecompMotionTransform*)&tfm),
		                             ray->time);
#  else
		transform_motion_interpolate(&cameratoworld,
		                             ((const DecompMotionTransform*)&kernel_data.cam.motion),
		                             ray->time);
#  endif
	}
#endif

	P = transform_point(&cameratoworld, P);
	D = normalize(transform_direction(&cameratoworld, D));

	bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
	if(!use_stereo) {
		/* No stereo. */
		ray->P = P;
		ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
		float3 Dcenter = transform_direction(&cameratoworld, Pcamera);

		ray->dP = differential3_zero();
		ray->dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - normalize(Dcenter);
		ray->dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - normalize(Dcenter);
#endif
	}
	else {
		/* Spherical stereo. */
		spherical_stereo_transform(kg, &P, &D);
		ray->P = P;
		ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
		/* Ray differentials, computed from scratch using the raster coordinates
		 * because we don't want to be affected by depth of field. We compute
		 * ray origin and direction for the center and two neighbouring pixels
		 * and simply take their differences. */
		float3 Pnostereo = transform_point(&cameratoworld, make_float3(0.0f, 0.0f, 0.0f));

		float3 Pcenter = Pnostereo;
		float3 Dcenter = Pcamera;
		Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
		spherical_stereo_transform(kg, &Pcenter, &Dcenter);

		float3 Px = Pnostereo;
		float3 Dx = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
		Dx = normalize(transform_direction(&cameratoworld, Dx));
		spherical_stereo_transform(kg, &Px, &Dx);

		ray->dP.dx = Px - Pcenter;
		ray->dD.dx = Dx - Dcenter;

		float3 Py = Pnostereo;
		float3 Dy = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
		Dy = normalize(transform_direction(&cameratoworld, Dy));
		spherical_stereo_transform(kg, &Py, &Dy);

		ray->dP.dy = Py - Pcenter;
		ray->dD.dy = Dy - Dcenter;
#endif
	}

#ifdef __CAMERA_CLIPPING__
	/* Clipping: push the ray origin to the near clip plane and bound its
	 * length, both scaled by 1/z of the view direction. */
	float inv_z = 1.0f / normalize(Pcamera).z;
	float near_clip = kernel_data.cam.nearclip * inv_z;
	ray->P += near_clip * ray->D;
	ray->dP.dx += near_clip * ray->dD.dx;
	ray->dP.dy += near_clip * ray->dD.dy;
	ray->t = kernel_data.cam.cliplength * inv_z;
#else
	ray->t = FLT_MAX;
#endif
}
|
|
|
|
|
|
|
|
/* Orthographic Camera */
|
Cycles: OpenCL kernel split
This commit contains all the work related on the AMD megakernel split work
which was mainly done by Varun Sundar, George Kyriazis and Lenny Wang, plus
some help from Sergey Sharybin, Martijn Berger, Thomas Dinges and likely
someone else which we're forgetting to mention.
Currently only AMD cards are enabled for the new split kernel, but it is
possible to force split opencl kernel to be used by setting the following
environment variable: CYCLES_OPENCL_SPLIT_KERNEL_TEST=1.
Not all the features are supported yet, and that being said no motion blur,
camera blur, SSS and volumetrics for now. Also transparent shadows are
disabled on AMD device because of some compiler bug.
This kernel is also only implements regular path tracing and supporting
branched one will take a bit. Branched path tracing is exposed to the
interface still, which is a bit misleading and will be hidden there soon.
More feature will be enabled once they're ported to the split kernel and
tested.
Neither regular CPU nor CUDA has any difference, they're generating the
same exact code, which means no regressions/improvements there.
Based on the research paper:
https://research.nvidia.com/sites/default/files/publications/laine2013hpg_paper.pdf
Here's the documentation:
https://docs.google.com/document/d/1LuXW-CV-sVJkQaEGZlMJ86jZ8FmoPfecaMdR-oiWbUY/edit
Design discussion of the patch:
https://developer.blender.org/T44197
Differential Revision: https://developer.blender.org/D1200
2015-05-09 14:34:30 +00:00
|
|
|
ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
{
	/* Create ray from raster position. */
	Transform rastertocamera = kernel_data.cam.rastertocamera;
	float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));

	/* Orthographic rays all travel along the camera-space Z axis. */
	float3 P;
	float3 D = make_float3(0.0f, 0.0f, 1.0f);

	/* Modify ray for depth of field. */
	float aperture_size = kernel_data.cam.aperturesize;

	if(aperture_size > 0.0f) {
		/* Sample point on aperture. */
		float2 lens_uv = camera_sample_aperture(kg, lens_u, lens_v)*aperture_size;

		/* Compute point on plane of focus. */
		float3 Pfocus = D * kernel_data.cam.focaldistance;

		/* Update ray for effect of lens. */
		float3 lens_offset = make_float3(lens_uv.x, lens_uv.y, 0.0f);
		P = Pcamera + lens_offset;
		D = normalize(Pfocus - lens_offset);
	}
	else {
		P = Pcamera;
	}

	/* Transform ray from camera to world space. */
	Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
	if(kernel_data.cam.have_motion) {
#  ifdef __KERNEL_OPENCL__
		/* For OpenCL, copy to a local variable before the cast. */
		const MotionTransform tfm = kernel_data.cam.motion;
		transform_motion_interpolate(&cameratoworld,
		                             (const DecompMotionTransform*)&tfm,
		                             ray->time);
#  else
		transform_motion_interpolate(&cameratoworld,
		                             (const DecompMotionTransform*)&kernel_data.cam.motion,
		                             ray->time);
#  endif
	}
#endif

	ray->P = transform_point(&cameratoworld, P);
	ray->D = normalize(transform_direction(&cameratoworld, D));

#ifdef __RAY_DIFFERENTIALS__
	/* Ray differential: constant origin offsets, no direction spread. */
	ray->dP.dx = float4_to_float3(kernel_data.cam.dx);
	ray->dP.dy = float4_to_float3(kernel_data.cam.dy);

	ray->dD = differential3_zero();
#endif

#ifdef __CAMERA_CLIPPING__
	/* Clipping. */
	ray->t = kernel_data.cam.cliplength;
#else
	ray->t = FLT_MAX;
#endif
}
|
|
|
|
|
2012-05-04 16:20:51 +00:00
|
|
|
/* Panorama Camera */
|
2012-02-28 16:44:54 +00:00
|
|
|
|
2016-08-01 13:40:46 +00:00
|
|
|
ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
|
|
|
|
float raster_x, float raster_y,
|
|
|
|
float lens_u, float lens_v,
|
|
|
|
ccl_addr_space Ray *ray)
|
2012-02-28 16:44:54 +00:00
|
|
|
{
|
|
|
|
Transform rastertocamera = kernel_data.cam.rastertocamera;
|
2012-04-16 08:35:21 +00:00
|
|
|
float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
|
2012-02-28 16:44:54 +00:00
|
|
|
|
|
|
|
/* create ray form raster position */
|
2016-10-22 21:25:39 +00:00
|
|
|
float3 P = make_float3(0.0f, 0.0f, 0.0f);
|
|
|
|
float3 D = panorama_to_direction(kg, Pcamera.x, Pcamera.y);
|
2012-05-05 19:44:35 +00:00
|
|
|
|
2013-07-10 17:25:52 +00:00
|
|
|
/* indicates ray should not receive any light, outside of the lens */
|
2016-10-22 21:25:39 +00:00
|
|
|
if(is_zero(D)) {
|
2013-07-10 17:25:52 +00:00
|
|
|
ray->t = 0.0f;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-09-17 11:25:29 +00:00
|
|
|
/* modify ray for depth of field */
|
|
|
|
float aperturesize = kernel_data.cam.aperturesize;
|
|
|
|
|
|
|
|
if(aperturesize > 0.0f) {
|
|
|
|
/* sample point on aperture */
|
|
|
|
float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
|
|
|
|
|
|
|
|
/* compute point on plane of focus */
|
2016-10-24 12:04:31 +00:00
|
|
|
float3 Dfocus = normalize(D);
|
|
|
|
float3 Pfocus = Dfocus * kernel_data.cam.focaldistance;
|
2012-09-17 11:25:29 +00:00
|
|
|
|
2016-10-24 12:04:31 +00:00
|
|
|
/* calculate orthonormal coordinates perpendicular to Dfocus */
|
2012-09-17 11:25:29 +00:00
|
|
|
float3 U, V;
|
2016-10-24 12:04:31 +00:00
|
|
|
U = normalize(make_float3(1.0f, 0.0f, 0.0f) - Dfocus.x * Dfocus);
|
|
|
|
V = normalize(cross(Dfocus, U));
|
2012-09-17 11:25:29 +00:00
|
|
|
|
|
|
|
/* update ray for effect of lens */
|
2016-10-22 21:25:39 +00:00
|
|
|
P = U * lensuv.x + V * lensuv.y;
|
|
|
|
D = normalize(Pfocus - P);
|
2012-09-17 11:25:29 +00:00
|
|
|
}
|
|
|
|
|
2012-02-28 16:44:54 +00:00
|
|
|
/* transform ray from camera to world */
|
|
|
|
Transform cameratoworld = kernel_data.cam.cameratoworld;
|
|
|
|
|
2012-10-09 18:37:14 +00:00
|
|
|
#ifdef __CAMERA_MOTION__
|
2015-05-09 13:57:51 +00:00
|
|
|
if(kernel_data.cam.have_motion) {
|
2016-02-12 17:33:43 +00:00
|
|
|
# ifdef __KERNEL_OPENCL__
|
2015-05-09 13:57:51 +00:00
|
|
|
const MotionTransform tfm = kernel_data.cam.motion;
|
|
|
|
transform_motion_interpolate(&cameratoworld,
|
|
|
|
(const DecompMotionTransform*)&tfm,
|
|
|
|
ray->time);
|
2016-02-12 17:33:43 +00:00
|
|
|
# else
|
2015-02-21 07:33:21 +00:00
|
|
|
transform_motion_interpolate(&cameratoworld,
|
2015-05-09 13:57:51 +00:00
|
|
|
(const DecompMotionTransform*)&kernel_data.cam.motion,
|
2015-02-21 07:33:21 +00:00
|
|
|
ray->time);
|
2016-02-12 17:33:43 +00:00
|
|
|
# endif
|
2015-05-09 13:57:51 +00:00
|
|
|
}
|
|
|
|
#endif
|
2012-04-30 12:49:26 +00:00
|
|
|
|
2016-10-22 21:25:39 +00:00
|
|
|
P = transform_point(&cameratoworld, P);
|
|
|
|
D = normalize(transform_direction(&cameratoworld, D));
|
2016-10-22 13:59:23 +00:00
|
|
|
|
|
|
|
/* Stereo transform */
|
|
|
|
bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
|
2016-10-24 10:26:12 +00:00
|
|
|
if(use_stereo) {
|
2016-10-22 21:25:39 +00:00
|
|
|
spherical_stereo_transform(kg, &P, &D);
|
2016-10-22 13:59:23 +00:00
|
|
|
}
|
2012-02-28 16:44:54 +00:00
|
|
|
|
2016-10-22 21:25:39 +00:00
|
|
|
ray->P = P;
|
|
|
|
ray->D = D;
|
|
|
|
|
2012-02-28 16:44:54 +00:00
|
|
|
#ifdef __RAY_DIFFERENTIALS__
|
2016-10-22 13:59:23 +00:00
|
|
|
/* Ray differentials, computed from scratch using the raster coordinates
|
|
|
|
* because we don't want to be affected by depth of field. We compute
|
|
|
|
* ray origin and direction for the center and two neighbouring pixels
|
|
|
|
* and simply take their differences. */
|
|
|
|
float3 Pcenter = Pcamera;
|
|
|
|
float3 Dcenter = panorama_to_direction(kg, Pcenter.x, Pcenter.y);
|
|
|
|
Pcenter = transform_point(&cameratoworld, Pcenter);
|
|
|
|
Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
|
2016-10-24 10:26:12 +00:00
|
|
|
if(use_stereo) {
|
2016-10-22 13:59:23 +00:00
|
|
|
spherical_stereo_transform(kg, &Pcenter, &Dcenter);
|
|
|
|
}
|
|
|
|
|
|
|
|
float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
|
|
|
|
float3 Dx = panorama_to_direction(kg, Px.x, Px.y);
|
|
|
|
Px = transform_point(&cameratoworld, Px);
|
|
|
|
Dx = normalize(transform_direction(&cameratoworld, Dx));
|
2016-10-24 10:26:12 +00:00
|
|
|
if(use_stereo) {
|
2016-10-22 13:59:23 +00:00
|
|
|
spherical_stereo_transform(kg, &Px, &Dx);
|
|
|
|
}
|
|
|
|
|
|
|
|
ray->dP.dx = Px - Pcenter;
|
|
|
|
ray->dD.dx = Dx - Dcenter;
|
|
|
|
|
|
|
|
float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
|
|
|
|
float3 Dy = panorama_to_direction(kg, Py.x, Py.y);
|
|
|
|
Py = transform_point(&cameratoworld, Py);
|
|
|
|
Dy = normalize(transform_direction(&cameratoworld, Dy));
|
2016-10-24 10:26:12 +00:00
|
|
|
if(use_stereo) {
|
2016-10-22 13:59:23 +00:00
|
|
|
spherical_stereo_transform(kg, &Py, &Dy);
|
|
|
|
}
|
|
|
|
|
|
|
|
ray->dP.dy = Py - Pcenter;
|
|
|
|
ray->dD.dy = Dy - Dcenter;
|
2012-02-28 16:44:54 +00:00
|
|
|
#endif
|
2016-10-14 22:11:42 +00:00
|
|
|
|
|
|
|
#ifdef __CAMERA_CLIPPING__
|
|
|
|
/* clipping */
|
2016-10-22 13:59:23 +00:00
|
|
|
float nearclip = kernel_data.cam.nearclip;
|
|
|
|
ray->P += nearclip * ray->D;
|
|
|
|
ray->dP.dx += nearclip * ray->dD.dx;
|
|
|
|
ray->dP.dy += nearclip * ray->dD.dy;
|
2016-10-14 22:11:42 +00:00
|
|
|
ray->t = kernel_data.cam.cliplength;
|
|
|
|
#else
|
|
|
|
ray->t = FLT_MAX;
|
|
|
|
#endif
|
2012-02-28 16:44:54 +00:00
|
|
|
}
|
|
|
|
|
2011-04-27 11:58:34 +00:00
|
|
|
/* Common */
|
|
|
|
|
2016-08-01 13:40:46 +00:00
|
|
|
ccl_device_inline void camera_sample(KernelGlobals *kg,
|
|
|
|
int x, int y,
|
|
|
|
float filter_u, float filter_v,
|
|
|
|
float lens_u, float lens_v,
|
|
|
|
float time,
|
|
|
|
ccl_addr_space Ray *ray)
|
2011-04-27 11:58:34 +00:00
|
|
|
{
|
|
|
|
/* pixel filter */
|
2013-04-01 20:26:43 +00:00
|
|
|
int filter_table_offset = kernel_data.film.filter_table_offset;
|
2013-04-01 20:26:52 +00:00
|
|
|
float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
|
|
|
|
float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);
|
2011-04-27 11:58:34 +00:00
|
|
|
|
2015-02-26 12:27:02 +00:00
|
|
|
#ifdef __CAMERA_MOTION__
|
|
|
|
/* motion blur */
|
2015-10-27 08:16:04 +00:00
|
|
|
if(kernel_data.cam.shuttertime == -1.0f) {
|
2016-09-03 01:37:17 +00:00
|
|
|
ray->time = 0.5f;
|
2015-10-27 08:16:04 +00:00
|
|
|
}
|
|
|
|
else {
|
2015-11-20 09:42:34 +00:00
|
|
|
/* TODO(sergey): Such lookup is unneeded when there's rolling shutter
|
2016-01-14 12:01:39 +00:00
|
|
|
* effect in use but rolling shutter duration is set to 0.0.
|
2015-11-20 09:42:34 +00:00
|
|
|
*/
|
2015-10-27 08:16:04 +00:00
|
|
|
const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
|
|
|
|
ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
|
2015-11-20 09:42:34 +00:00
|
|
|
/* TODO(sergey): Currently single rolling shutter effect type only
|
|
|
|
* where scanlines are acquired from top to bottom and whole scanline
|
|
|
|
* is acquired at once (no delay in acquisition happens between pixels
|
2016-01-09 11:56:28 +00:00
|
|
|
* of single scanline).
|
2015-11-20 09:42:34 +00:00
|
|
|
*
|
|
|
|
* Might want to support more models in the future.
|
|
|
|
*/
|
|
|
|
if(kernel_data.cam.rolling_shutter_type) {
|
|
|
|
/* Time corresponding to a fully rolling shutter only effect:
|
|
|
|
* top of the frame is time 0.0, bottom of the frame is time 1.0.
|
|
|
|
*/
|
|
|
|
const float time = 1.0f - (float)y / kernel_data.cam.height;
|
|
|
|
const float duration = kernel_data.cam.rolling_shutter_duration;
|
|
|
|
if(duration != 0.0f) {
|
|
|
|
/* This isn't fully physical correct, but lets us to have simple
|
|
|
|
* controls in the interface. The idea here is basically sort of
|
|
|
|
* linear interpolation between how much rolling shutter effect
|
|
|
|
* exist on the frame and how much of it is a motion blur effect.
|
|
|
|
*/
|
|
|
|
ray->time = (ray->time - 0.5f) * duration;
|
|
|
|
ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
ray->time = time;
|
|
|
|
}
|
|
|
|
}
|
2015-10-27 08:16:04 +00:00
|
|
|
}
|
2015-02-26 12:27:02 +00:00
|
|
|
#endif
|
|
|
|
|
2011-04-27 11:58:34 +00:00
|
|
|
/* sample */
|
2012-02-28 16:44:54 +00:00
|
|
|
if(kernel_data.cam.type == CAMERA_PERSPECTIVE)
|
|
|
|
camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
|
|
|
|
else if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
|
2013-03-21 02:38:11 +00:00
|
|
|
camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
|
2011-04-27 11:58:34 +00:00
|
|
|
else
|
2012-09-17 11:25:29 +00:00
|
|
|
camera_sample_panorama(kg, raster_x, raster_y, lens_u, lens_v, ray);
|
2011-04-27 11:58:34 +00:00
|
|
|
}
|
|
|
|
|
2012-11-21 13:00:57 +00:00
|
|
|
/* Utilities */
|
|
|
|
|
2013-11-15 23:17:10 +00:00
|
|
|
ccl_device_inline float3 camera_position(KernelGlobals *kg)
{
	/* World-space camera location: the translation column of the
	 * camera-to-world transform. */
	Transform tfm = kernel_data.cam.cameratoworld;
	return make_float3(tfm.x.w, tfm.y.w, tfm.z.w);
}
|
|
|
|
|
2013-11-15 23:17:10 +00:00
|
|
|
ccl_device_inline float camera_distance(KernelGlobals *kg, float3 P)
{
	/* Distance from world-space point P to the camera. */
	Transform tfm = kernel_data.cam.cameratoworld;
	float3 cam_pos = make_float3(tfm.x.w, tfm.y.w, tfm.z.w);

	if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
		/* Orthographic: perpendicular distance along the view axis. */
		float3 cam_dir = make_float3(tfm.x.z, tfm.y.z, tfm.z.z);
		return fabsf(dot((P - cam_pos), cam_dir));
	}
	else {
		/* Otherwise euclidean distance to the camera position. */
		return len(P - cam_pos);
	}
}
|
|
|
|
|
2014-05-27 13:56:59 +00:00
|
|
|
ccl_device_inline float3 camera_direction_from_point(KernelGlobals *kg, float3 P)
{
	/* Direction from world-space point P towards the camera. */
	Transform tfm = kernel_data.cam.cameratoworld;

	if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
		/* Orthographic: every point looks back along the view axis. */
		float3 cam_dir = make_float3(tfm.x.z, tfm.y.z, tfm.z.z);
		return -cam_dir;
	}
	else {
		/* Otherwise point towards the camera position. */
		float3 cam_pos = make_float3(tfm.x.w, tfm.y.w, tfm.z.w);
		return normalize(cam_pos - P);
	}
}
|
|
|
|
|
2013-11-15 23:17:10 +00:00
|
|
|
ccl_device_inline float3 camera_world_to_ndc(KernelGlobals *kg, ShaderData *sd, float3 P)
{
	/* Map a world-space position to normalized device coordinates. */
	if(kernel_data.cam.type == CAMERA_PANORAMA) {
		/* Panorama: convert to a camera-space direction, then to panorama UV. */
		Transform tfm = kernel_data.cam.worldtocamera;

		if(ccl_fetch(sd, object) != OBJECT_NONE) {
			P = normalize(transform_point(&tfm, P));
		}
		else {
			P = normalize(transform_direction(&tfm, P));
		}

		float2 uv = direction_to_panorama(kg, P);

		return make_float3(uv.x, uv.y, 0.0f);
	}
	else {
		/* Perspective / orthographic. */
		if(ccl_fetch(sd, object) == PRIM_NONE &&
		   kernel_data.cam.type == CAMERA_PERSPECTIVE)
		{
			/* No primitive hit: offset by the camera position before
			 * projecting (P presumably holds a direction here). */
			P += camera_position(kg);
		}

		Transform tfm = kernel_data.cam.worldtondc;
		return transform_perspective(&tfm, P);
	}
}
|
|
|
|
|
2011-04-27 11:58:34 +00:00
|
|
|
CCL_NAMESPACE_END
|