/* Blender 2.93 — intern/cycles/kernel/kernel_camera.h
 * (recovered from the generated documentation page for this file) */
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
18 
19 /* Perspective Camera */
20 
22 {
23  float blades = cam->blades;
24  float2 bokeh;
25 
26  if (blades == 0.0f) {
27  /* sample disk */
28  bokeh = concentric_sample_disk(u, v);
29  }
30  else {
31  /* sample polygon */
32  float rotation = cam->bladesrotation;
33  bokeh = regular_polygon_sample(blades, rotation, u, v);
34  }
35 
36  /* anamorphic lens bokeh */
37  bokeh.x *= cam->inv_aperture_ratio;
38 
39  return bokeh;
40 }
41 
43  float raster_x,
44  float raster_y,
45  float lens_u,
46  float lens_v,
47  ccl_addr_space Ray *ray)
48 {
49  /* create ray form raster position */
50  ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
51  float3 raster = make_float3(raster_x, raster_y, 0.0f);
52  float3 Pcamera = transform_perspective(&rastertocamera, raster);
53 
54 #ifdef __CAMERA_MOTION__
55  if (kernel_data.cam.have_perspective_motion) {
56  /* TODO(sergey): Currently we interpolate projected coordinate which
57  * gives nice looking result and which is simple, but is in fact a bit
58  * different comparing to constructing projective matrix from an
59  * interpolated field of view.
60  */
61  if (ray->time < 0.5f) {
62  ProjectionTransform rastertocamera_pre = kernel_data.cam.perspective_pre;
63  float3 Pcamera_pre = transform_perspective(&rastertocamera_pre, raster);
64  Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
65  }
66  else {
67  ProjectionTransform rastertocamera_post = kernel_data.cam.perspective_post;
68  float3 Pcamera_post = transform_perspective(&rastertocamera_post, raster);
69  Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
70  }
71  }
72 #endif
73 
74  float3 P = zero_float3();
75  float3 D = Pcamera;
76 
77  /* modify ray for depth of field */
78  float aperturesize = kernel_data.cam.aperturesize;
79 
80  if (aperturesize > 0.0f) {
81  /* sample point on aperture */
82  float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;
83 
84  /* compute point on plane of focus */
85  float ft = kernel_data.cam.focaldistance / D.z;
86  float3 Pfocus = D * ft;
87 
88  /* update ray for effect of lens */
89  P = make_float3(lensuv.x, lensuv.y, 0.0f);
90  D = normalize(Pfocus - P);
91  }
92 
93  /* transform ray from camera to world */
94  Transform cameratoworld = kernel_data.cam.cameratoworld;
95 
96 #ifdef __CAMERA_MOTION__
97  if (kernel_data.cam.num_motion_steps) {
99  kernel_tex_array(__camera_motion),
100  kernel_data.cam.num_motion_steps,
101  ray->time);
102  }
103 #endif
104 
105  P = transform_point(&cameratoworld, P);
106  D = normalize(transform_direction(&cameratoworld, D));
107 
108  bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
109  if (!use_stereo) {
110  /* No stereo */
111  ray->P = P;
112  ray->D = D;
113 
114 #ifdef __RAY_DIFFERENTIALS__
115  float3 Dcenter = transform_direction(&cameratoworld, Pcamera);
116 
117  ray->dP = differential3_zero();
118  ray->dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - normalize(Dcenter);
119  ray->dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - normalize(Dcenter);
120 #endif
121  }
122  else {
123  /* Spherical stereo */
125  ray->P = P;
126  ray->D = D;
127 
128 #ifdef __RAY_DIFFERENTIALS__
129  /* Ray differentials, computed from scratch using the raster coordinates
130  * because we don't want to be affected by depth of field. We compute
131  * ray origin and direction for the center and two neighboring pixels
132  * and simply take their differences. */
133  float3 Pnostereo = transform_point(&cameratoworld, zero_float3());
134 
135  float3 Pcenter = Pnostereo;
136  float3 Dcenter = Pcamera;
137  Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
138  spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);
139 
140  float3 Px = Pnostereo;
141  float3 Dx = transform_perspective(&rastertocamera,
142  make_float3(raster_x + 1.0f, raster_y, 0.0f));
143  Dx = normalize(transform_direction(&cameratoworld, Dx));
144  spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);
145 
146  ray->dP.dx = Px - Pcenter;
147  ray->dD.dx = Dx - Dcenter;
148 
149  float3 Py = Pnostereo;
150  float3 Dy = transform_perspective(&rastertocamera,
151  make_float3(raster_x, raster_y + 1.0f, 0.0f));
152  Dy = normalize(transform_direction(&cameratoworld, Dy));
153  spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);
154 
155  ray->dP.dy = Py - Pcenter;
156  ray->dD.dy = Dy - Dcenter;
157 #endif
158  }
159 
160 #ifdef __CAMERA_CLIPPING__
161  /* clipping */
162  float z_inv = 1.0f / normalize(Pcamera).z;
163  float nearclip = kernel_data.cam.nearclip * z_inv;
164  ray->P += nearclip * ray->D;
165  ray->dP.dx += nearclip * ray->dD.dx;
166  ray->dP.dy += nearclip * ray->dD.dy;
167  ray->t = kernel_data.cam.cliplength * z_inv;
168 #else
169  ray->t = FLT_MAX;
170 #endif
171 }
172 
173 /* Orthographic Camera */
175  float raster_x,
176  float raster_y,
177  float lens_u,
178  float lens_v,
179  ccl_addr_space Ray *ray)
180 {
181  /* create ray form raster position */
182  ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
183  float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
184 
185  float3 P;
186  float3 D = make_float3(0.0f, 0.0f, 1.0f);
187 
188  /* modify ray for depth of field */
189  float aperturesize = kernel_data.cam.aperturesize;
190 
191  if (aperturesize > 0.0f) {
192  /* sample point on aperture */
193  float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;
194 
195  /* compute point on plane of focus */
196  float3 Pfocus = D * kernel_data.cam.focaldistance;
197 
198  /* update ray for effect of lens */
199  float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
200  P = Pcamera + lensuvw;
201  D = normalize(Pfocus - lensuvw);
202  }
203  else {
204  P = Pcamera;
205  }
206  /* transform ray from camera to world */
207  Transform cameratoworld = kernel_data.cam.cameratoworld;
208 
209 #ifdef __CAMERA_MOTION__
210  if (kernel_data.cam.num_motion_steps) {
211  transform_motion_array_interpolate(&cameratoworld,
212  kernel_tex_array(__camera_motion),
213  kernel_data.cam.num_motion_steps,
214  ray->time);
215  }
216 #endif
217 
218  ray->P = transform_point(&cameratoworld, P);
219  ray->D = normalize(transform_direction(&cameratoworld, D));
220 
221 #ifdef __RAY_DIFFERENTIALS__
222  /* ray differential */
223  ray->dP.dx = float4_to_float3(kernel_data.cam.dx);
224  ray->dP.dy = float4_to_float3(kernel_data.cam.dy);
225 
226  ray->dD = differential3_zero();
227 #endif
228 
229 #ifdef __CAMERA_CLIPPING__
230  /* clipping */
231  ray->t = kernel_data.cam.cliplength;
232 #else
233  ray->t = FLT_MAX;
234 #endif
235 }
236 
237 /* Panorama Camera */
238 
240 #ifdef __CAMERA_MOTION__
241  const ccl_global DecomposedTransform *cam_motion,
242 #endif
243  float raster_x,
244  float raster_y,
245  float lens_u,
246  float lens_v,
247  ccl_addr_space Ray *ray)
248 {
249  ProjectionTransform rastertocamera = cam->rastertocamera;
250  float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
251 
252  /* create ray form raster position */
253  float3 P = zero_float3();
254  float3 D = panorama_to_direction(cam, Pcamera.x, Pcamera.y);
255 
256  /* indicates ray should not receive any light, outside of the lens */
257  if (is_zero(D)) {
258  ray->t = 0.0f;
259  return;
260  }
261 
262  /* modify ray for depth of field */
263  float aperturesize = cam->aperturesize;
264 
265  if (aperturesize > 0.0f) {
266  /* sample point on aperture */
267  float2 lensuv = camera_sample_aperture(cam, lens_u, lens_v) * aperturesize;
268 
269  /* compute point on plane of focus */
270  float3 Dfocus = normalize(D);
271  float3 Pfocus = Dfocus * cam->focaldistance;
272 
273  /* calculate orthonormal coordinates perpendicular to Dfocus */
274  float3 U, V;
275  U = normalize(make_float3(1.0f, 0.0f, 0.0f) - Dfocus.x * Dfocus);
276  V = normalize(cross(Dfocus, U));
277 
278  /* update ray for effect of lens */
279  P = U * lensuv.x + V * lensuv.y;
280  D = normalize(Pfocus - P);
281  }
282 
283  /* transform ray from camera to world */
284  Transform cameratoworld = cam->cameratoworld;
285 
286 #ifdef __CAMERA_MOTION__
287  if (cam->num_motion_steps) {
289  &cameratoworld, cam_motion, cam->num_motion_steps, ray->time);
290  }
291 #endif
292 
293  P = transform_point(&cameratoworld, P);
294  D = normalize(transform_direction(&cameratoworld, D));
295 
296  /* Stereo transform */
297  bool use_stereo = cam->interocular_offset != 0.0f;
298  if (use_stereo) {
299  spherical_stereo_transform(cam, &P, &D);
300  }
301 
302  ray->P = P;
303  ray->D = D;
304 
305 #ifdef __RAY_DIFFERENTIALS__
306  /* Ray differentials, computed from scratch using the raster coordinates
307  * because we don't want to be affected by depth of field. We compute
308  * ray origin and direction for the center and two neighboring pixels
309  * and simply take their differences. */
310  float3 Pcenter = Pcamera;
311  float3 Dcenter = panorama_to_direction(cam, Pcenter.x, Pcenter.y);
312  Pcenter = transform_point(&cameratoworld, Pcenter);
313  Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
314  if (use_stereo) {
315  spherical_stereo_transform(cam, &Pcenter, &Dcenter);
316  }
317 
318  float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
319  float3 Dx = panorama_to_direction(cam, Px.x, Px.y);
320  Px = transform_point(&cameratoworld, Px);
321  Dx = normalize(transform_direction(&cameratoworld, Dx));
322  if (use_stereo) {
323  spherical_stereo_transform(cam, &Px, &Dx);
324  }
325 
326  ray->dP.dx = Px - Pcenter;
327  ray->dD.dx = Dx - Dcenter;
328 
329  float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
330  float3 Dy = panorama_to_direction(cam, Py.x, Py.y);
331  Py = transform_point(&cameratoworld, Py);
332  Dy = normalize(transform_direction(&cameratoworld, Dy));
333  if (use_stereo) {
334  spherical_stereo_transform(cam, &Py, &Dy);
335  }
336 
337  ray->dP.dy = Py - Pcenter;
338  ray->dD.dy = Dy - Dcenter;
339 #endif
340 
341 #ifdef __CAMERA_CLIPPING__
342  /* clipping */
343  float nearclip = cam->nearclip;
344  ray->P += nearclip * ray->D;
345  ray->dP.dx += nearclip * ray->dD.dx;
346  ray->dP.dy += nearclip * ray->dD.dy;
347  ray->t = cam->cliplength;
348 #else
349  ray->t = FLT_MAX;
350 #endif
351 }
352 
353 /* Common */
354 
355 ccl_device_inline void camera_sample(KernelGlobals *kg,
356  int x,
357  int y,
358  float filter_u,
359  float filter_v,
360  float lens_u,
361  float lens_v,
362  float time,
363  ccl_addr_space Ray *ray)
364 {
365  /* pixel filter */
366  int filter_table_offset = kernel_data.film.filter_table_offset;
367  float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
368  float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);
369 
370 #ifdef __CAMERA_MOTION__
371  /* motion blur */
372  if (kernel_data.cam.shuttertime == -1.0f) {
373  ray->time = 0.5f;
374  }
375  else {
376  /* TODO(sergey): Such lookup is unneeded when there's rolling shutter
377  * effect in use but rolling shutter duration is set to 0.0.
378  */
379  const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
380  ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
381  /* TODO(sergey): Currently single rolling shutter effect type only
382  * where scan-lines are acquired from top to bottom and whole scan-line
383  * is acquired at once (no delay in acquisition happens between pixels
384  * of single scan-line).
385  *
386  * Might want to support more models in the future.
387  */
388  if (kernel_data.cam.rolling_shutter_type) {
389  /* Time corresponding to a fully rolling shutter only effect:
390  * top of the frame is time 0.0, bottom of the frame is time 1.0.
391  */
392  const float time = 1.0f - (float)y / kernel_data.cam.height;
393  const float duration = kernel_data.cam.rolling_shutter_duration;
394  if (duration != 0.0f) {
395  /* This isn't fully physical correct, but lets us to have simple
396  * controls in the interface. The idea here is basically sort of
397  * linear interpolation between how much rolling shutter effect
398  * exist on the frame and how much of it is a motion blur effect.
399  */
400  ray->time = (ray->time - 0.5f) * duration;
401  ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
402  }
403  else {
404  ray->time = time;
405  }
406  }
407  }
408 #endif
409 
410  /* sample */
411  if (kernel_data.cam.type == CAMERA_PERSPECTIVE) {
412  camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
413  }
414  else if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
415  camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
416  }
417  else {
418 #ifdef __CAMERA_MOTION__
419  const ccl_global DecomposedTransform *cam_motion = kernel_tex_array(__camera_motion);
420  camera_sample_panorama(&kernel_data.cam, cam_motion, raster_x, raster_y, lens_u, lens_v, ray);
421 #else
422  camera_sample_panorama(&kernel_data.cam, raster_x, raster_y, lens_u, lens_v, ray);
423 #endif
424  }
425 }
426 
427 /* Utilities */
428 
430 {
431  Transform cameratoworld = kernel_data.cam.cameratoworld;
432  return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
433 }
434 
436 {
437  Transform cameratoworld = kernel_data.cam.cameratoworld;
438  float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
439 
440  if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
441  float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
442  return fabsf(dot((P - camP), camD));
443  }
444  else {
445  return len(P - camP);
446  }
447 }
448 
450 {
451  if (kernel_data.cam.type != CAMERA_PANORAMA) {
452  Transform worldtocamera = kernel_data.cam.worldtocamera;
453  return transform_point(&worldtocamera, P).z;
454  }
455  else {
456  Transform cameratoworld = kernel_data.cam.cameratoworld;
457  float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
458  return len(P - camP);
459  }
460 }
461 
463 {
464  Transform cameratoworld = kernel_data.cam.cameratoworld;
465 
466  if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
467  float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
468  return -camD;
469  }
470  else {
471  float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
472  return normalize(camP - P);
473  }
474 }
475 
477 {
478  if (kernel_data.cam.type != CAMERA_PANORAMA) {
479  /* perspective / ortho */
480  if (sd->object == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE)
481  P += camera_position(kg);
482 
483  ProjectionTransform tfm = kernel_data.cam.worldtondc;
484  return transform_perspective(&tfm, P);
485  }
486  else {
487  /* panorama */
488  Transform tfm = kernel_data.cam.worldtocamera;
489 
490  if (sd->object != OBJECT_NONE)
491  P = normalize(transform_point(&tfm, P));
492  else
493  P = normalize(transform_direction(&tfm, P));
494 
496 
497  return make_float3(uv.x, uv.y, 0.0f);
498  }
499 }
500 
typedef float(TangentPoint)[2]
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
#define U
ATTR_WARN_UNUSED_RESULT const BMVert * v
unsigned int U
Definition: btGjkEpa3.h:78
double time
ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
CCL_NAMESPACE_BEGIN ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
Definition: kernel_camera.h:21
ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
ccl_device_inline float camera_distance(KernelGlobals *kg, float3 P)
ccl_device_inline float3 camera_world_to_ndc(KernelGlobals *kg, ShaderData *sd, float3 P)
ccl_device_inline float camera_z_depth(KernelGlobals *kg, float3 P)
ccl_device_inline void camera_sample(KernelGlobals *kg, int x, int y, float filter_u, float filter_v, float lens_u, float lens_v, float time, ccl_addr_space Ray *ray)
ccl_device_inline float3 camera_direction_from_point(KernelGlobals *kg, float3 P)
ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
Definition: kernel_camera.h:42
ccl_device_inline float3 camera_position(KernelGlobals *kg)
#define kernel_data
#define kernel_tex_array(tex)
#define ccl_addr_space
#define ccl_device
#define ccl_constant
#define ccl_device_inline
#define ccl_global
#define CCL_NAMESPACE_END
#define fabsf(x)
#define make_float3(x, y, z)
ccl_device differential3 differential3_zero()
CCL_NAMESPACE_BEGIN ccl_device float lookup_table_read(KernelGlobals *kg, float x, int offset, int size)
ccl_device float2 regular_polygon_sample(float corners, float rotation, float u, float v)
ccl_device float2 concentric_sample_disk(float u1, float u2)
ccl_device_inline void spherical_stereo_transform(ccl_constant KernelCamera *cam, float3 *P, float3 *D)
ccl_device_inline float3 panorama_to_direction(ccl_constant KernelCamera *cam, float u, float v)
ccl_device_inline float2 direction_to_panorama(ccl_constant KernelCamera *cam, float3 dir)
#define __CAMERA_MOTION__
Definition: kernel_types.h:109
#define FILTER_TABLE_SIZE
Definition: kernel_types.h:45
#define PRIM_NONE
Definition: kernel_types.h:60
#define OBJECT_NONE
Definition: kernel_types.h:59
ShaderData
#define SHUTTER_TABLE_SIZE
Definition: kernel_types.h:47
@ CAMERA_PERSPECTIVE
Definition: kernel_types.h:610
@ CAMERA_PANORAMA
Definition: kernel_types.h:610
@ CAMERA_ORTHOGRAPHIC
Definition: kernel_types.h:610
static float P(float k)
Definition: math_interp.c:41
float z
Definition: sky_float3.h:35
float y
Definition: sky_float3.h:35
float x
Definition: sky_float3.h:35
__forceinline avxf cross(const avxf &a, const avxf &b)
Definition: util_avxf.h:119
ccl_device_inline float3 float4_to_float3(const float4 a)
Definition: util_math.h:415
ccl_device_inline float2 normalize(const float2 &a)
ccl_device_inline float dot(const float2 &a, const float2 &b)
ccl_device_inline float2 interp(const float2 &a, const float2 &b, float t)
ccl_device_inline bool is_zero(const float2 &a)
ccl_device_inline float3 zero_float3()
ccl_device_inline float3 transform_perspective(const ProjectionTransform *t, const float3 a)
ccl_device_inline float3 transform_direction(const Transform *t, const float3 a)
ccl_device_inline float3 transform_point(const Transform *t, const float3 a)
ccl_device void transform_motion_array_interpolate(Transform *tfm, const ccl_global DecomposedTransform *motion, uint numsteps, float time)
CCL_NAMESPACE_BEGIN struct View V
uint len
BLI_INLINE float D(const float *data, const int res[3], int x, int y, int z)
Definition: voxel.c:29