Blender V4.5
optix/device_impl.cpp
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2019 NVIDIA Corporation
2 * SPDX-FileCopyrightText: 2019-2022 Blender Foundation
3 *
4 * SPDX-License-Identifier: Apache-2.0 */
5
6#ifdef WITH_OPTIX
7
9# include "device/optix/queue.h"
10
11# include "bvh/bvh.h"
12# include "bvh/optix.h"
13
14# include "scene/hair.h"
15# include "scene/mesh.h"
16# include "scene/object.h"
17# include "scene/pointcloud.h"
18# include "scene/scene.h"
19
20# include "util/debug.h"
21# include "util/log.h"
22# include "util/path.h"
23# include "util/progress.h"
24# include "util/task.h"
25
26# define __KERNEL_OPTIX__
28
30
31static void execute_optix_task(TaskPool &pool, OptixTask task, OptixResult &failure_reason)
32{
33 OptixTask additional_tasks[16];
34 unsigned int num_additional_tasks = 0;
35
36 const OptixResult result = optixTaskExecute(task, additional_tasks, 16, &num_additional_tasks);
37 if (result == OPTIX_SUCCESS) {
38 for (unsigned int i = 0; i < num_additional_tasks; ++i) {
39 pool.push([&pool, additional_task = additional_tasks[i], &failure_reason] {
40 execute_optix_task(pool, additional_task, failure_reason);
41 });
42 }
43 }
44 else {
45 failure_reason = result;
46 }
47}
48
/* Construct an OptiX device on top of the CUDA device: create the OptiX
 * device context, optionally hook up log forwarding, and allocate the
 * launch-parameter buffer on the device. */
OptiXDevice::OptiXDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler, bool headless)
    : CUDADevice(info, stats, profiler, headless),
# ifdef WITH_OSL
      osl_colorsystem(this, "osl_colorsystem", MEM_READ_ONLY),
# endif
      sbt_data(this, "__sbt", MEM_READ_ONLY),
      launch_params(this, "kernel_params", false)
{
  /* Make the CUDA context current. */
  if (!cuContext) {
    /* Do not initialize if CUDA context creation failed already. */
    return;
  }
  const CUDAContextScope scope(this);

  /* Create OptiX context for this device. */
  OptixDeviceContextOptions options = {};
# ifdef WITH_CYCLES_LOGGING
  options.logCallbackLevel = 4; /* Fatal = 1, Error = 2, Warning = 3, Print = 4. */
  /* Forward OptiX messages into the Cycles log, mapping the OptiX severity
   * level onto the matching log severity. */
  options.logCallbackFunction = [](unsigned int level, const char *, const char *message, void *) {
    switch (level) {
      case 1:
        LOG_IF(FATAL, VLOG_IS_ON(1)) << message;
        break;
      case 2:
        LOG_IF(ERROR, VLOG_IS_ON(1)) << message;
        break;
      case 3:
        LOG_IF(WARNING, VLOG_IS_ON(1)) << message;
        break;
      case 4:
        LOG_IF(INFO, VLOG_IS_ON(1)) << message;
        break;
      default:
        break;
    }
  };
# endif
  if (DebugFlags().optix.use_debug) {
    VLOG_INFO << "Using OptiX debug mode.";
    options.validationMode = OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL;
  }
  optix_assert(optixDeviceContextCreate(cuContext, &options, &context));
# ifdef WITH_CYCLES_LOGGING
  /* Re-register the callback explicitly, so logging is active on the created context. */
  optix_assert(optixDeviceContextSetLogCallback(
      context, options.logCallbackFunction, options.logCallbackData, options.logCallbackLevel));
# endif

  /* Fix weird compiler bug that assigns wrong size. */
  launch_params.data_elements = sizeof(KernelParamsOptiX);

  /* Allocate launch parameter buffer memory on device. */
  launch_params.alloc_to_device(1);
}
103
/* Release all OptiX resources owned by the device: delayed BVH memory, device
 * buffers, modules, pipelines, program groups, and finally the OptiX device
 * context itself. */
OptiXDevice::~OptiXDevice()
{
  /* Make CUDA context current. */
  const CUDAContextScope scope(this);

  free_bvh_memory_delayed();

  /* Free device-side buffers before the context goes away. */
  sbt_data.free();
  texture_info.free();
  launch_params.free();

  /* Unload modules. */
  if (optix_module != nullptr) {
    optixModuleDestroy(optix_module);
  }
  for (int i = 0; i < 2; ++i) {
    if (builtin_modules[i] != nullptr) {
      optixModuleDestroy(builtin_modules[i]);
    }
  }
  /* Destroy the pipelines and program groups created by load_kernels(). */
  for (int i = 0; i < NUM_PIPELINES; ++i) {
    if (pipelines[i] != nullptr) {
      optixPipelineDestroy(pipelines[i]);
    }
  }
  for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
    if (groups[i] != nullptr) {
      optixProgramGroupDestroy(groups[i]);
    }
  }

# ifdef WITH_OSL
  /* OSL camera module, per-group modules and program groups are tracked
   * separately from the base kernel module. */
  if (osl_camera_module != nullptr) {
    optixModuleDestroy(osl_camera_module);
  }
  for (const OptixModule &module : osl_modules) {
    if (module != nullptr) {
      optixModuleDestroy(module);
    }
  }
  for (const OptixProgramGroup &group : osl_groups) {
    if (group != nullptr) {
      optixProgramGroupDestroy(group);
    }
  }
  osl_colorsystem.free();
# endif

  /* Everything above was created from this context, so destroy it last. */
  optixDeviceContextDestroy(context);
}
154
155unique_ptr<DeviceQueue> OptiXDevice::gpu_queue_create()
156{
157 return make_unique<OptiXDeviceQueue>(this);
158}
159
160BVHLayoutMask OptiXDevice::get_bvh_layout_mask(uint /*kernel_features*/) const
161{
162 /* OptiX has its own internal acceleration structure format. */
163 return BVH_LAYOUT_OPTIX;
164}
165
166static string get_optix_include_dir()
167{
168 const char *env_dir = getenv("OPTIX_ROOT_DIR");
169 const char *default_dir = CYCLES_RUNTIME_OPTIX_ROOT_DIR;
170
171 if (env_dir && env_dir[0]) {
172 const string env_include_dir = path_join(env_dir, "include");
173 return env_include_dir;
174 }
175 if (default_dir[0]) {
176 const string default_include_dir = path_join(default_dir, "include");
177 return default_include_dir;
178 }
179
180 return string();
181}
182
183string OptiXDevice::compile_kernel_get_common_cflags(const uint kernel_features)
184{
185 string common_cflags = CUDADevice::compile_kernel_get_common_cflags(kernel_features);
186
187 /* Add OptiX SDK include directory to include paths. */
188 common_cflags += string_printf(" -I\"%s\"", get_optix_include_dir().c_str());
189
190 /* Specialization for shader ray-tracing. */
191 if (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) {
192 common_cflags += " --keep-device-functions";
193 }
194
195 return common_cflags;
196}
197
198void OptiXDevice::create_optix_module(TaskPool &pool,
199 OptixModuleCompileOptions &module_options,
200 string &ptx_data,
201 OptixModule &module,
202 OptixResult &result)
203{
204 OptixTask task = nullptr;
205 result = optixModuleCreateWithTasks(context,
206 &module_options,
207 &pipeline_options,
208 ptx_data.data(),
209 ptx_data.size(),
210 nullptr,
211 nullptr,
212 &module,
213 &task);
214 if (result == OPTIX_SUCCESS) {
215 execute_optix_task(pool, task, result);
216 }
217}
218
219bool OptiXDevice::load_kernels(const uint kernel_features)
220{
221 if (have_error()) {
222 /* Abort early if context creation failed already. */
223 return false;
224 }
225
226# ifdef WITH_OSL
227 /* TODO: Consider splitting kernels into an OSL-camera-only and a full-OSL variant. */
228 const bool use_osl_shading = (kernel_features & KERNEL_FEATURE_OSL_SHADING);
229 const bool use_osl_camera = (kernel_features & KERNEL_FEATURE_OSL_CAMERA);
230# else
231 const bool use_osl_shading = false;
232 const bool use_osl_camera = false;
233# endif
234
235 /* Skip creating OptiX module if only doing denoising. */
236 const bool need_optix_kernels = (kernel_features &
238
239 /* Detect existence of OptiX kernel and SDK here early. So we can error out
240 * before compiling the CUDA kernels, to avoid failing right after when
241 * compiling the OptiX kernel. */
242 string suffix = use_osl_shading ? "_osl" :
243 (kernel_features & (KERNEL_FEATURE_NODE_RAYTRACE | KERNEL_FEATURE_MNEE)) ?
244 "_shader_raytrace" :
245 "";
246 string ptx_filename;
247 if (need_optix_kernels) {
248 ptx_filename = path_get("lib/kernel_optix" + suffix + ".ptx.zst");
249 if (use_adaptive_compilation() || path_file_size(ptx_filename) == -1) {
250 std::string optix_include_dir = get_optix_include_dir();
251 if (optix_include_dir.empty()) {
252 set_error(
253 "Unable to compile OptiX kernels at runtime. Set OPTIX_ROOT_DIR environment variable "
254 "to a directory containing the OptiX SDK.");
255 return false;
256 }
257 if (!path_is_directory(optix_include_dir)) {
258 set_error(string_printf(
259 "OptiX headers not found at %s, unable to compile OptiX kernels at runtime. Install "
260 "OptiX SDK in the specified location, or set OPTIX_ROOT_DIR environment variable to a "
261 "directory containing the OptiX SDK.",
262 optix_include_dir.c_str()));
263 return false;
264 }
265 }
266 }
267
268 /* Load CUDA modules because we need some of the utility kernels. */
269 if (!CUDADevice::load_kernels(kernel_features)) {
270 return false;
271 }
272
273 if (!need_optix_kernels) {
274 return true;
275 }
276
277 const CUDAContextScope scope(this);
278
279 /* Unload existing OptiX module and pipelines first. */
280 if (optix_module != nullptr) {
281 optixModuleDestroy(optix_module);
282 optix_module = nullptr;
283 }
284 for (int i = 0; i < 2; ++i) {
285 if (builtin_modules[i] != nullptr) {
286 optixModuleDestroy(builtin_modules[i]);
287 builtin_modules[i] = nullptr;
288 }
289 }
290 for (int i = 0; i < NUM_PIPELINES; ++i) {
291 if (pipelines[i] != nullptr) {
292 optixPipelineDestroy(pipelines[i]);
293 pipelines[i] = nullptr;
294 }
295 }
296 for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
297 if (groups[i] != nullptr) {
298 optixProgramGroupDestroy(groups[i]);
299 groups[i] = nullptr;
300 }
301 }
302
303# ifdef WITH_OSL
304 if (osl_camera_module != nullptr) {
305 optixModuleDestroy(osl_camera_module);
306 osl_camera_module = nullptr;
307 }
308
309 /* Recreating base OptiX module invalidates all OSL modules too, since they link against it. */
310 for (const OptixModule &module : osl_modules) {
311 if (module != nullptr) {
312 optixModuleDestroy(module);
313 }
314 }
315 osl_modules.clear();
316
317 for (const OptixProgramGroup &group : osl_groups) {
318 if (group != nullptr) {
319 optixProgramGroupDestroy(group);
320 }
321 }
322 osl_groups.clear();
323# endif
324
325 OptixModuleCompileOptions module_options = {};
326 module_options.maxRegisterCount = 0; /* Do not set an explicit register limit. */
327
328 if (DebugFlags().optix.use_debug) {
329 module_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_LEVEL_0;
330 module_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
331 }
332 else {
333 module_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_LEVEL_3;
334 module_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_NONE;
335 }
336
337 module_options.boundValues = nullptr;
338 module_options.numBoundValues = 0;
339 module_options.payloadTypes = nullptr;
340 module_options.numPayloadTypes = 0;
341
342 /* Default to no motion blur and two-level graph, since it is the fastest option. */
343 pipeline_options.usesMotionBlur = false;
344 pipeline_options.traversableGraphFlags =
345 OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING;
346 pipeline_options.numPayloadValues = 8;
347 pipeline_options.numAttributeValues = 2; /* u, v */
348 pipeline_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
349 pipeline_options.pipelineLaunchParamsVariableName = "kernel_params"; /* See globals.h */
350
351 pipeline_options.usesPrimitiveTypeFlags = OPTIX_PRIMITIVE_TYPE_FLAGS_TRIANGLE;
352 if (kernel_features & KERNEL_FEATURE_HAIR) {
353 if (kernel_features & KERNEL_FEATURE_HAIR_THICK) {
354 pipeline_options.usesPrimitiveTypeFlags |= OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_CATMULLROM;
355 }
356 else {
357 pipeline_options.usesPrimitiveTypeFlags |= OPTIX_PRIMITIVE_TYPE_FLAGS_CUSTOM;
358 }
359 }
360 if (kernel_features & KERNEL_FEATURE_POINTCLOUD) {
361 pipeline_options.usesPrimitiveTypeFlags |= OPTIX_PRIMITIVE_TYPE_FLAGS_CUSTOM;
362 }
363
364 /* Keep track of whether motion blur is enabled, so to enable/disable motion in BVH builds
365 * This is necessary since objects may be reported to have motion if the Vector pass is
366 * active, but may still need to be rendered without motion blur if that isn't active as well. */
367 if (kernel_features & KERNEL_FEATURE_OBJECT_MOTION) {
368 pipeline_options.usesMotionBlur = true;
369 /* Motion blur can insert motion transforms into the traversal graph.
370 * It is no longer a two-level graph then, so need to set flags to allow any configuration. */
371 pipeline_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_ANY;
372 }
373
374 { /* Load and compile PTX module with OptiX kernels. */
375 string ptx_data;
376 if (use_adaptive_compilation() || path_file_size(ptx_filename) == -1) {
377 string cflags = compile_kernel_get_common_cflags(kernel_features);
378 ptx_filename = compile_kernel(cflags, ("kernel" + suffix).c_str(), "optix", true);
379 }
380 if (ptx_filename.empty() || !path_read_compressed_text(ptx_filename, ptx_data)) {
381 set_error(string_printf("Failed to load OptiX kernel from '%s'", ptx_filename.c_str()));
382 return false;
383 }
384
385 TaskPool pool;
386 OptixResult result;
387 create_optix_module(pool, module_options, ptx_data, optix_module, result);
388 pool.wait_work();
389 if (result != OPTIX_SUCCESS) {
390 set_error(string_printf("Failed to load OptiX kernel from '%s' (%s)",
391 ptx_filename.c_str(),
392 optixGetErrorName(result)));
393 return false;
394 }
395 }
396
397 /* Create program groups. */
398 OptixProgramGroupDesc group_descs[NUM_PROGRAM_GROUPS] = {};
399 OptixProgramGroupOptions group_options = {}; /* There are no options currently. */
400 group_descs[PG_RGEN_INTERSECT_CLOSEST].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
401 group_descs[PG_RGEN_INTERSECT_CLOSEST].raygen.module = optix_module;
402 group_descs[PG_RGEN_INTERSECT_CLOSEST].raygen.entryFunctionName =
403 "__raygen__kernel_optix_integrator_intersect_closest";
404 group_descs[PG_RGEN_INTERSECT_SHADOW].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
405 group_descs[PG_RGEN_INTERSECT_SHADOW].raygen.module = optix_module;
406 group_descs[PG_RGEN_INTERSECT_SHADOW].raygen.entryFunctionName =
407 "__raygen__kernel_optix_integrator_intersect_shadow";
408 group_descs[PG_RGEN_INTERSECT_SUBSURFACE].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
409 group_descs[PG_RGEN_INTERSECT_SUBSURFACE].raygen.module = optix_module;
410 group_descs[PG_RGEN_INTERSECT_SUBSURFACE].raygen.entryFunctionName =
411 "__raygen__kernel_optix_integrator_intersect_subsurface";
412 group_descs[PG_RGEN_INTERSECT_VOLUME_STACK].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
413 group_descs[PG_RGEN_INTERSECT_VOLUME_STACK].raygen.module = optix_module;
414 group_descs[PG_RGEN_INTERSECT_VOLUME_STACK].raygen.entryFunctionName =
415 "__raygen__kernel_optix_integrator_intersect_volume_stack";
416 group_descs[PG_RGEN_INTERSECT_DEDICATED_LIGHT].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
417 group_descs[PG_RGEN_INTERSECT_DEDICATED_LIGHT].raygen.module = optix_module;
418 group_descs[PG_RGEN_INTERSECT_DEDICATED_LIGHT].raygen.entryFunctionName =
419 "__raygen__kernel_optix_integrator_intersect_dedicated_light";
420 group_descs[PG_MISS].kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
421 group_descs[PG_MISS].miss.module = optix_module;
422 group_descs[PG_MISS].miss.entryFunctionName = "__miss__kernel_optix_miss";
423 group_descs[PG_HITD].kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
424 group_descs[PG_HITD].hitgroup.moduleCH = optix_module;
425 group_descs[PG_HITD].hitgroup.entryFunctionNameCH = "__closesthit__kernel_optix_hit";
426 group_descs[PG_HITD].hitgroup.moduleAH = optix_module;
427 group_descs[PG_HITD].hitgroup.entryFunctionNameAH = "__anyhit__kernel_optix_visibility_test";
428 group_descs[PG_HITS].kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
429 group_descs[PG_HITS].hitgroup.moduleAH = optix_module;
430 group_descs[PG_HITS].hitgroup.entryFunctionNameAH = "__anyhit__kernel_optix_shadow_all_hit";
431 group_descs[PG_HITV].kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
432 group_descs[PG_HITV].hitgroup.moduleCH = optix_module;
433 group_descs[PG_HITV].hitgroup.entryFunctionNameCH = "__closesthit__kernel_optix_hit";
434 group_descs[PG_HITV].hitgroup.moduleAH = optix_module;
435 group_descs[PG_HITV].hitgroup.entryFunctionNameAH = "__anyhit__kernel_optix_volume_test";
436
437 OptixProgramGroupDesc ignore_desc = {};
438 ignore_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
439 ignore_desc.hitgroup.moduleCH = optix_module;
440 ignore_desc.hitgroup.entryFunctionNameCH = "__closesthit__kernel_optix_ignore";
441 ignore_desc.hitgroup.moduleAH = optix_module;
442 ignore_desc.hitgroup.entryFunctionNameAH = "__anyhit__kernel_optix_ignore";
443
444 if (kernel_features & KERNEL_FEATURE_HAIR) {
445 if (kernel_features & KERNEL_FEATURE_HAIR_THICK) {
446 /* Built-in thick curve intersection. */
447 OptixBuiltinISOptions builtin_options = {};
448 builtin_options.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_CATMULLROM;
449 builtin_options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE |
450 OPTIX_BUILD_FLAG_ALLOW_COMPACTION |
451 OPTIX_BUILD_FLAG_ALLOW_UPDATE;
452 builtin_options.curveEndcapFlags = OPTIX_CURVE_ENDCAP_DEFAULT; /* Disable end-caps. */
453 builtin_options.usesMotionBlur = false;
454
455 optix_assert(optixBuiltinISModuleGet(
456 context, &module_options, &pipeline_options, &builtin_options, &builtin_modules[0]));
457
458 group_descs[PG_HITD].hitgroup.moduleIS = builtin_modules[0];
459 group_descs[PG_HITD].hitgroup.entryFunctionNameIS = nullptr;
460 group_descs[PG_HITS].hitgroup.moduleIS = builtin_modules[0];
461 group_descs[PG_HITS].hitgroup.entryFunctionNameIS = nullptr;
462
463 if (pipeline_options.usesMotionBlur) {
464 builtin_options.usesMotionBlur = true;
465
466 optix_assert(optixBuiltinISModuleGet(
467 context, &module_options, &pipeline_options, &builtin_options, &builtin_modules[1]));
468
469 group_descs[PG_HITD_MOTION] = group_descs[PG_HITD];
470 group_descs[PG_HITD_MOTION].hitgroup.moduleIS = builtin_modules[1];
471 group_descs[PG_HITS_MOTION] = group_descs[PG_HITS];
472 group_descs[PG_HITS_MOTION].hitgroup.moduleIS = builtin_modules[1];
473 group_descs[PG_HITV_MOTION] = ignore_desc;
474 group_descs[PG_HITL_MOTION] = ignore_desc;
475 }
476 }
477 else {
478 /* Custom ribbon intersection. */
479 group_descs[PG_HITD].hitgroup.moduleIS = optix_module;
480 group_descs[PG_HITS].hitgroup.moduleIS = optix_module;
481 group_descs[PG_HITD].hitgroup.entryFunctionNameIS = "__intersection__curve_ribbon";
482 group_descs[PG_HITS].hitgroup.entryFunctionNameIS = "__intersection__curve_ribbon";
483 }
484 }
485
486 if (kernel_features & KERNEL_FEATURE_POINTCLOUD) {
487 group_descs[PG_HITD_POINTCLOUD] = group_descs[PG_HITD];
488 group_descs[PG_HITD_POINTCLOUD].kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
489 group_descs[PG_HITD_POINTCLOUD].hitgroup.moduleIS = optix_module;
490 group_descs[PG_HITD_POINTCLOUD].hitgroup.entryFunctionNameIS = "__intersection__point";
491 group_descs[PG_HITS_POINTCLOUD] = group_descs[PG_HITS];
492 group_descs[PG_HITS_POINTCLOUD].kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
493 group_descs[PG_HITS_POINTCLOUD].hitgroup.moduleIS = optix_module;
494 group_descs[PG_HITS_POINTCLOUD].hitgroup.entryFunctionNameIS = "__intersection__point";
495 group_descs[PG_HITV_POINTCLOUD] = ignore_desc;
496 group_descs[PG_HITL_POINTCLOUD] = ignore_desc;
497 }
498
499 /* Add hit group for local intersections. */
501 group_descs[PG_HITL].kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
502 group_descs[PG_HITL].hitgroup.moduleAH = optix_module;
503 group_descs[PG_HITL].hitgroup.entryFunctionNameAH = "__anyhit__kernel_optix_local_hit";
504 }
505
506 /* Shader ray-tracing replaces some functions with direct callables. */
507 if (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) {
508 group_descs[PG_RGEN_SHADE_SURFACE_RAYTRACE].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
509 group_descs[PG_RGEN_SHADE_SURFACE_RAYTRACE].raygen.module = optix_module;
510 group_descs[PG_RGEN_SHADE_SURFACE_RAYTRACE].raygen.entryFunctionName =
511 "__raygen__kernel_optix_integrator_shade_surface_raytrace";
512
513 /* Kernels with OSL shading support are built without SVM, so can skip those direct callables
514 * there. */
515 if (!use_osl_shading) {
516 group_descs[PG_CALL_SVM_AO].kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
517 group_descs[PG_CALL_SVM_AO].callables.moduleDC = optix_module;
518 group_descs[PG_CALL_SVM_AO].callables.entryFunctionNameDC = "__direct_callable__svm_node_ao";
519 group_descs[PG_CALL_SVM_BEVEL].kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
520 group_descs[PG_CALL_SVM_BEVEL].callables.moduleDC = optix_module;
521 group_descs[PG_CALL_SVM_BEVEL].callables.entryFunctionNameDC =
522 "__direct_callable__svm_node_bevel";
523 }
524 }
525
526 if (kernel_features & KERNEL_FEATURE_MNEE) {
527 group_descs[PG_RGEN_SHADE_SURFACE_MNEE].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
528 group_descs[PG_RGEN_SHADE_SURFACE_MNEE].raygen.module = optix_module;
529 group_descs[PG_RGEN_SHADE_SURFACE_MNEE].raygen.entryFunctionName =
530 "__raygen__kernel_optix_integrator_shade_surface_mnee";
531 }
532
533 /* OSL uses direct callables to execute, so shading needs to be done in OptiX if OSL is used. */
534 if (use_osl_shading) {
535 group_descs[PG_RGEN_SHADE_BACKGROUND].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
536 group_descs[PG_RGEN_SHADE_BACKGROUND].raygen.module = optix_module;
537 group_descs[PG_RGEN_SHADE_BACKGROUND].raygen.entryFunctionName =
538 "__raygen__kernel_optix_integrator_shade_background";
539 group_descs[PG_RGEN_SHADE_LIGHT].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
540 group_descs[PG_RGEN_SHADE_LIGHT].raygen.module = optix_module;
541 group_descs[PG_RGEN_SHADE_LIGHT].raygen.entryFunctionName =
542 "__raygen__kernel_optix_integrator_shade_light";
543 group_descs[PG_RGEN_SHADE_SURFACE].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
544 group_descs[PG_RGEN_SHADE_SURFACE].raygen.module = optix_module;
545 group_descs[PG_RGEN_SHADE_SURFACE].raygen.entryFunctionName =
546 "__raygen__kernel_optix_integrator_shade_surface";
547 group_descs[PG_RGEN_SHADE_VOLUME].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
548 group_descs[PG_RGEN_SHADE_VOLUME].raygen.module = optix_module;
549 group_descs[PG_RGEN_SHADE_VOLUME].raygen.entryFunctionName =
550 "__raygen__kernel_optix_integrator_shade_volume";
551 group_descs[PG_RGEN_SHADE_SHADOW].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
552 group_descs[PG_RGEN_SHADE_SHADOW].raygen.module = optix_module;
553 group_descs[PG_RGEN_SHADE_SHADOW].raygen.entryFunctionName =
554 "__raygen__kernel_optix_integrator_shade_shadow";
555 group_descs[PG_RGEN_SHADE_DEDICATED_LIGHT].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
556 group_descs[PG_RGEN_SHADE_DEDICATED_LIGHT].raygen.module = optix_module;
557 group_descs[PG_RGEN_SHADE_DEDICATED_LIGHT].raygen.entryFunctionName =
558 "__raygen__kernel_optix_integrator_shade_dedicated_light";
559 group_descs[PG_RGEN_EVAL_DISPLACE].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
560 group_descs[PG_RGEN_EVAL_DISPLACE].raygen.module = optix_module;
561 group_descs[PG_RGEN_EVAL_DISPLACE].raygen.entryFunctionName =
562 "__raygen__kernel_optix_shader_eval_displace";
563 group_descs[PG_RGEN_EVAL_BACKGROUND].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
564 group_descs[PG_RGEN_EVAL_BACKGROUND].raygen.module = optix_module;
565 group_descs[PG_RGEN_EVAL_BACKGROUND].raygen.entryFunctionName =
566 "__raygen__kernel_optix_shader_eval_background";
567 group_descs[PG_RGEN_EVAL_CURVE_SHADOW_TRANSPARENCY].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
568 group_descs[PG_RGEN_EVAL_CURVE_SHADOW_TRANSPARENCY].raygen.module = optix_module;
569 group_descs[PG_RGEN_EVAL_CURVE_SHADOW_TRANSPARENCY].raygen.entryFunctionName =
570 "__raygen__kernel_optix_shader_eval_curve_shadow_transparency";
571 }
572
573# ifdef WITH_OSL
574 /* When using custom OSL cameras, integrator_init_from_camera is its own specialized module. */
575 if (use_osl_camera) {
576 /* Load and compile the OSL camera PTX module. */
577 string ptx_data, ptx_filename = path_get("lib/kernel_optix_osl_camera.ptx.zst");
578 if (!path_read_compressed_text(ptx_filename, ptx_data)) {
579 set_error(
580 string_printf("Failed to load OptiX OSL camera kernel from '%s'", ptx_filename.c_str()));
581 return false;
582 }
583
584 TaskPool pool;
585 OptixResult result;
586 create_optix_module(pool, module_options, ptx_data, osl_camera_module, result);
587 pool.wait_work();
588 if (result != OPTIX_SUCCESS) {
589 set_error(string_printf("Failed to load OptiX kernel from '%s' (%s)",
590 ptx_filename.c_str(),
591 optixGetErrorName(result)));
592 return false;
593 }
594
595 group_descs[PG_RGEN_INIT_FROM_CAMERA].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
596 group_descs[PG_RGEN_INIT_FROM_CAMERA].raygen.module = osl_camera_module;
597 group_descs[PG_RGEN_INIT_FROM_CAMERA].raygen.entryFunctionName =
598 "__raygen__kernel_optix_integrator_init_from_camera";
599 }
600# endif
601
602 optix_assert(optixProgramGroupCreate(
603 context, group_descs, NUM_PROGRAM_GROUPS, &group_options, nullptr, nullptr, groups));
604
605 /* Get program stack sizes. */
606 OptixStackSizes stack_size[NUM_PROGRAM_GROUPS] = {};
607 /* Set up SBT, which in this case is used only to select between different programs. */
608 sbt_data.alloc(NUM_PROGRAM_GROUPS);
609 memset(sbt_data.host_pointer, 0, sizeof(SbtRecord) * NUM_PROGRAM_GROUPS);
610 for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
611 optix_assert(optixSbtRecordPackHeader(groups[i], &sbt_data[i]));
612 optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i], nullptr));
613 }
614 sbt_data.copy_to_device(); /* Upload SBT to device. */
615
616 /* Calculate maximum trace continuation stack size. */
617 unsigned int trace_css = stack_size[PG_HITD].cssCH;
618 /* This is based on the maximum of closest-hit and any-hit/intersection programs. */
619 trace_css = std::max(trace_css, stack_size[PG_HITD].cssIS + stack_size[PG_HITD].cssAH);
620 trace_css = std::max(trace_css, stack_size[PG_HITS].cssIS + stack_size[PG_HITS].cssAH);
621 trace_css = std::max(trace_css, stack_size[PG_HITL].cssIS + stack_size[PG_HITL].cssAH);
622 trace_css = std::max(trace_css, stack_size[PG_HITV].cssIS + stack_size[PG_HITV].cssAH);
623 trace_css = std::max(trace_css,
624 stack_size[PG_HITD_MOTION].cssIS + stack_size[PG_HITD_MOTION].cssAH);
625 trace_css = std::max(trace_css,
626 stack_size[PG_HITS_MOTION].cssIS + stack_size[PG_HITS_MOTION].cssAH);
627 trace_css = std::max(
628 trace_css, stack_size[PG_HITD_POINTCLOUD].cssIS + stack_size[PG_HITD_POINTCLOUD].cssAH);
629 trace_css = std::max(
630 trace_css, stack_size[PG_HITS_POINTCLOUD].cssIS + stack_size[PG_HITS_POINTCLOUD].cssAH);
631
632 OptixPipelineLinkOptions link_options = {};
633 link_options.maxTraceDepth = 1;
634
635 if (use_osl_shading || use_osl_camera) {
636 /* OSL kernels will be (re)created on by OSL manager. */
637 }
638 else if (kernel_features & (KERNEL_FEATURE_NODE_RAYTRACE | KERNEL_FEATURE_MNEE)) {
639 /* Create shader ray-tracing and MNEE pipeline. */
640 vector<OptixProgramGroup> pipeline_groups;
641 pipeline_groups.reserve(NUM_PROGRAM_GROUPS);
642 if (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) {
643 pipeline_groups.push_back(groups[PG_RGEN_SHADE_SURFACE_RAYTRACE]);
644 pipeline_groups.push_back(groups[PG_CALL_SVM_AO]);
645 pipeline_groups.push_back(groups[PG_CALL_SVM_BEVEL]);
646 }
647 if (kernel_features & KERNEL_FEATURE_MNEE) {
648 pipeline_groups.push_back(groups[PG_RGEN_SHADE_SURFACE_MNEE]);
649 }
650 pipeline_groups.push_back(groups[PG_MISS]);
651 pipeline_groups.push_back(groups[PG_HITD]);
652 pipeline_groups.push_back(groups[PG_HITS]);
653 pipeline_groups.push_back(groups[PG_HITL]);
654 pipeline_groups.push_back(groups[PG_HITV]);
655 if (pipeline_options.usesMotionBlur) {
656 pipeline_groups.push_back(groups[PG_HITD_MOTION]);
657 pipeline_groups.push_back(groups[PG_HITS_MOTION]);
658 pipeline_groups.push_back(groups[PG_HITV_MOTION]);
659 pipeline_groups.push_back(groups[PG_HITL_MOTION]);
660 }
661 if (kernel_features & KERNEL_FEATURE_POINTCLOUD) {
662 pipeline_groups.push_back(groups[PG_HITD_POINTCLOUD]);
663 pipeline_groups.push_back(groups[PG_HITS_POINTCLOUD]);
664 pipeline_groups.push_back(groups[PG_HITV_POINTCLOUD]);
665 pipeline_groups.push_back(groups[PG_HITL_POINTCLOUD]);
666 }
667
668 optix_assert(optixPipelineCreate(context,
669 &pipeline_options,
670 &link_options,
671 pipeline_groups.data(),
672 pipeline_groups.size(),
673 nullptr,
674 nullptr,
675 &pipelines[PIP_SHADE]));
676
677 /* Combine ray generation and trace continuation stack size. */
678 const unsigned int css = std::max(stack_size[PG_RGEN_SHADE_SURFACE_RAYTRACE].cssRG,
679 stack_size[PG_RGEN_SHADE_SURFACE_MNEE].cssRG) +
680 link_options.maxTraceDepth * trace_css;
681 const unsigned int dss = std::max(stack_size[PG_CALL_SVM_AO].dssDC,
682 stack_size[PG_CALL_SVM_BEVEL].dssDC);
683
684 /* Set stack size depending on pipeline options. */
685 optix_assert(optixPipelineSetStackSize(
686 pipelines[PIP_SHADE], 0, dss, css, pipeline_options.usesMotionBlur ? 3 : 2));
687 }
688
689 { /* Create intersection-only pipeline. */
690 vector<OptixProgramGroup> pipeline_groups;
691 pipeline_groups.reserve(NUM_PROGRAM_GROUPS);
692 pipeline_groups.push_back(groups[PG_RGEN_INTERSECT_CLOSEST]);
693 pipeline_groups.push_back(groups[PG_RGEN_INTERSECT_SHADOW]);
694 pipeline_groups.push_back(groups[PG_RGEN_INTERSECT_SUBSURFACE]);
695 pipeline_groups.push_back(groups[PG_RGEN_INTERSECT_VOLUME_STACK]);
696 pipeline_groups.push_back(groups[PG_RGEN_INTERSECT_DEDICATED_LIGHT]);
697 pipeline_groups.push_back(groups[PG_MISS]);
698 pipeline_groups.push_back(groups[PG_HITD]);
699 pipeline_groups.push_back(groups[PG_HITS]);
700 pipeline_groups.push_back(groups[PG_HITL]);
701 pipeline_groups.push_back(groups[PG_HITV]);
702 if (pipeline_options.usesMotionBlur) {
703 pipeline_groups.push_back(groups[PG_HITD_MOTION]);
704 pipeline_groups.push_back(groups[PG_HITS_MOTION]);
705 }
706 if (kernel_features & KERNEL_FEATURE_POINTCLOUD) {
707 pipeline_groups.push_back(groups[PG_HITD_POINTCLOUD]);
708 pipeline_groups.push_back(groups[PG_HITS_POINTCLOUD]);
709 }
710
711 optix_assert(optixPipelineCreate(context,
712 &pipeline_options,
713 &link_options,
714 pipeline_groups.data(),
715 pipeline_groups.size(),
716 nullptr,
717 nullptr,
718 &pipelines[PIP_INTERSECT]));
719
720 /* Calculate continuation stack size based on the maximum of all ray generation stack sizes. */
721 const unsigned int css =
722 std::max(stack_size[PG_RGEN_INTERSECT_CLOSEST].cssRG,
723 std::max(stack_size[PG_RGEN_INTERSECT_SHADOW].cssRG,
724 std::max(stack_size[PG_RGEN_INTERSECT_SUBSURFACE].cssRG,
725 stack_size[PG_RGEN_INTERSECT_VOLUME_STACK].cssRG))) +
726 link_options.maxTraceDepth * trace_css;
727
728 optix_assert(optixPipelineSetStackSize(
729 pipelines[PIP_INTERSECT], 0, 0, css, pipeline_options.usesMotionBlur ? 3 : 2));
730 }
731
732 return !have_error();
733}
734
735bool OptiXDevice::load_osl_kernels()
736{
737# ifdef WITH_OSL
738 if (have_error()) {
739 return false;
740 }
741
742 struct OSLKernel {
743 string ptx;
744 string fused_entry;
745 };
746
747 auto get_osl_kernel = [&](const OSL::ShaderGroupRef &group) {
748 if (!group) {
749 return OSLKernel{};
750 }
751 string osl_ptx, fused_name;
752 osl_globals.ss->getattribute(group.get(), "group_fused_name", fused_name);
753 osl_globals.ss->getattribute(
754 group.get(), "ptx_compiled_version", OSL::TypeDesc::PTR, &osl_ptx);
755
756 int groupdata_size = 0;
757 osl_globals.ss->getattribute(group.get(), "llvm_groupdata_size", groupdata_size);
758 if (groupdata_size == 0) {
759 // Old attribute name from our patched OSL version as fallback.
760 osl_globals.ss->getattribute(group.get(), "groupdata_size", groupdata_size);
761 }
762 if (groupdata_size > 2048) { /* See 'group_data' array in kernel/osl/osl.h */
763 set_error(
764 string_printf("Requested OSL group data size (%d) is greater than the maximum "
765 "supported with OptiX (2048)",
766 groupdata_size));
767 return OSLKernel{};
768 }
769
770 return OSLKernel{std::move(osl_ptx), std::move(fused_name)};
771 };
772
773 /* This has to be in the same order as the ShaderType enum, so that the index calculation in
774 * osl_eval_nodes checks out */
775 vector<OSLKernel> osl_kernels;
776 osl_kernels.emplace_back(get_osl_kernel(osl_globals.camera_state));
777 for (const OSL::ShaderGroupRef &group : osl_globals.surface_state) {
778 osl_kernels.emplace_back(get_osl_kernel(group));
779 }
780 for (const OSL::ShaderGroupRef &group : osl_globals.volume_state) {
781 osl_kernels.emplace_back(get_osl_kernel(group));
782 }
783 for (const OSL::ShaderGroupRef &group : osl_globals.displacement_state) {
784 osl_kernels.emplace_back(get_osl_kernel(group));
785 }
786 for (const OSL::ShaderGroupRef &group : osl_globals.bump_state) {
787 osl_kernels.emplace_back(get_osl_kernel(group));
788 }
789
790 if (have_error()) {
791 return false;
792 }
793
794 const CUDAContextScope scope(this);
795
796 if (pipelines[PIP_SHADE]) {
797 optixPipelineDestroy(pipelines[PIP_SHADE]);
798 }
799
800 for (OptixModule &module : osl_modules) {
801 if (module != nullptr) {
802 optixModuleDestroy(module);
803 module = nullptr;
804 }
805 }
806 for (OptixProgramGroup &group : osl_groups) {
807 if (group != nullptr) {
808 optixProgramGroupDestroy(group);
809 group = nullptr;
810 }
811 }
812
813 /* We always need to reserve a spot for the camera shader group, but if it's unused
814 * and there are no other shader groups, we can skip creating the pipeline. */
815 if (osl_kernels.size() == 1 && osl_kernels[0].ptx.empty()) {
816 return true;
817 }
818
819 OptixProgramGroupOptions group_options = {}; /* There are no options currently. */
820 OptixModuleCompileOptions module_options = {};
821 module_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_LEVEL_3;
822 module_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_NONE;
823
824 /* In addition to the modules for each OSL group, we need to load our own osl_services.ptx
825 * as well as the shadeops.ptx that's embedded in OSL. */
826 size_t id_osl_services = osl_kernels.size();
827 size_t id_osl_shadeops = osl_kernels.size() + 1;
828 osl_groups.resize(osl_kernels.size() + 2);
829 osl_modules.resize(osl_kernels.size() + 2);
830
831 { /* Load and compile PTX module with OSL services. */
832 string osl_services_ptx, ptx_filename = path_get("lib/kernel_optix_osl_services.ptx.zst");
833 if (!path_read_compressed_text(ptx_filename, osl_services_ptx)) {
834 set_error(string_printf("Failed to load OptiX OSL services kernel from '%s'",
835 ptx_filename.c_str()));
836 return false;
837 }
838
839 const char *shadeops_ptx_ptr = nullptr;
840 osl_globals.ss->getattribute("shadeops_cuda_ptx", OSL::TypeDesc::PTR, &shadeops_ptx_ptr);
841 int shadeops_ptx_size = 0;
842 osl_globals.ss->getattribute("shadeops_cuda_ptx_size", OSL::TypeDesc::INT, &shadeops_ptx_size);
843 string shadeops_ptx(shadeops_ptx_ptr, shadeops_ptx_size);
844
845 TaskPool pool;
846 OptixResult services_result, shadeops_result;
847 create_optix_module(
848 pool, module_options, osl_services_ptx, osl_modules[id_osl_services], services_result);
849 create_optix_module(
850 pool, module_options, shadeops_ptx, osl_modules[id_osl_shadeops], shadeops_result);
851 pool.wait_work();
852
853 {
854 if (services_result != OPTIX_SUCCESS) {
855 set_error(string_printf("Failed to load OptiX OSL services kernel from '%s' (%s)",
856 ptx_filename.c_str(),
857 optixGetErrorName(services_result)));
858 return false;
859 }
860 OptixProgramGroupDesc group_desc = {};
861 group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
862 group_desc.callables.entryFunctionNameDC = "__direct_callable__dummy_services";
863 group_desc.callables.moduleDC = osl_modules[id_osl_services];
864
865 optix_assert(optixProgramGroupCreate(context,
866 &group_desc,
867 1,
868 &group_options,
869 nullptr,
870 nullptr,
871 &osl_groups[id_osl_services]));
872 }
873
874 {
875 if (shadeops_result != OPTIX_SUCCESS) {
876 set_error(string_printf("Failed to load OptiX OSL shadeops kernel (%s)",
877 optixGetErrorName(shadeops_result)));
878 return false;
879 }
880 OptixProgramGroupDesc group_desc = {};
881 group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
882 group_desc.callables.entryFunctionNameDC = "__direct_callable__dummy_shadeops";
883 group_desc.callables.moduleDC = osl_modules[id_osl_shadeops];
884
885 optix_assert(optixProgramGroupCreate(context,
886 &group_desc,
887 1,
888 &group_options,
889 nullptr,
890 nullptr,
891 &osl_groups[id_osl_shadeops]));
892 }
893 }
894
895 TaskPool pool;
896 vector<OptixResult> results(osl_kernels.size(), OPTIX_SUCCESS);
897
898 for (size_t i = 0; i < osl_kernels.size(); ++i) {
899 if (osl_kernels[i].ptx.empty()) {
900 continue;
901 }
902
903 create_optix_module(pool, module_options, osl_kernels[i].ptx, osl_modules[i], results[i]);
904 }
905
906 pool.wait_work();
907
908 for (size_t i = 0; i < osl_kernels.size(); ++i) {
909 if (osl_kernels[i].ptx.empty()) {
910 continue;
911 }
912
913 if (results[i] != OPTIX_SUCCESS) {
914 set_error(string_printf("Failed to load OptiX OSL kernel for %s (%s)",
915 osl_kernels[i].fused_entry.c_str(),
916 optixGetErrorName(results[i])));
917 return false;
918 }
919
920 OptixProgramGroupDesc group_desc = {};
921 group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
922 group_desc.callables.entryFunctionNameDC = osl_kernels[i].fused_entry.c_str();
923 group_desc.callables.moduleDC = osl_modules[i];
924
925 optix_assert(optixProgramGroupCreate(
926 context, &group_desc, 1, &group_options, nullptr, nullptr, &osl_groups[i]));
927 }
928
929 /* Update SBT with new entries. */
930 sbt_data.alloc(NUM_PROGRAM_GROUPS + osl_groups.size());
931 for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
932 optix_assert(optixSbtRecordPackHeader(groups[i], &sbt_data[i]));
933 }
934 for (size_t i = 0; i < osl_groups.size(); ++i) {
935 if (osl_groups[i] != nullptr) {
936 optix_assert(optixSbtRecordPackHeader(osl_groups[i], &sbt_data[NUM_PROGRAM_GROUPS + i]));
937 }
938 else {
939 /* Default to "__direct_callable__dummy_services", so that OSL evaluation for empty
940 * materials has direct callables to call and does not crash. */
941 optix_assert(optixSbtRecordPackHeader(osl_groups[id_osl_services],
942 &sbt_data[NUM_PROGRAM_GROUPS + i]));
943 }
944 }
945 sbt_data.copy_to_device(); /* Upload updated SBT to device. */
946
947 OptixPipelineLinkOptions link_options = {};
948 link_options.maxTraceDepth = 0;
949
950 {
951 vector<OptixProgramGroup> pipeline_groups;
952 pipeline_groups.reserve(NUM_PROGRAM_GROUPS);
953 pipeline_groups.push_back(groups[PG_RGEN_SHADE_BACKGROUND]);
954 pipeline_groups.push_back(groups[PG_RGEN_SHADE_LIGHT]);
955 pipeline_groups.push_back(groups[PG_RGEN_SHADE_SURFACE]);
956 pipeline_groups.push_back(groups[PG_RGEN_SHADE_SURFACE_RAYTRACE]);
957 pipeline_groups.push_back(groups[PG_CALL_SVM_AO]);
958 pipeline_groups.push_back(groups[PG_CALL_SVM_BEVEL]);
959 pipeline_groups.push_back(groups[PG_RGEN_SHADE_SURFACE_MNEE]);
960 pipeline_groups.push_back(groups[PG_RGEN_SHADE_VOLUME]);
961 pipeline_groups.push_back(groups[PG_RGEN_SHADE_SHADOW]);
962 pipeline_groups.push_back(groups[PG_RGEN_SHADE_DEDICATED_LIGHT]);
963 pipeline_groups.push_back(groups[PG_RGEN_EVAL_DISPLACE]);
964 pipeline_groups.push_back(groups[PG_RGEN_EVAL_BACKGROUND]);
965 pipeline_groups.push_back(groups[PG_RGEN_EVAL_CURVE_SHADOW_TRANSPARENCY]);
966 pipeline_groups.push_back(groups[PG_RGEN_INIT_FROM_CAMERA]);
967
968 for (const OptixProgramGroup &group : osl_groups) {
969 if (group != nullptr) {
970 pipeline_groups.push_back(group);
971 }
972 }
973
974 optix_assert(optixPipelineCreate(context,
975 &pipeline_options,
976 &link_options,
977 pipeline_groups.data(),
978 pipeline_groups.size(),
979 nullptr,
980 nullptr,
981 &pipelines[PIP_SHADE]));
982
983 /* Get program stack sizes. */
984 OptixStackSizes stack_size[NUM_PROGRAM_GROUPS] = {};
985 vector<OptixStackSizes> osl_stack_size(osl_groups.size());
986
987 for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
988 optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i], nullptr));
989 }
990 for (size_t i = 0; i < osl_groups.size(); ++i) {
991 if (osl_groups[i] != nullptr) {
992 optix_assert(optixProgramGroupGetStackSize(
993 osl_groups[i], &osl_stack_size[i], pipelines[PIP_SHADE]));
994 }
995 }
996
997 const unsigned int css = std::max(stack_size[PG_RGEN_SHADE_SURFACE_RAYTRACE].cssRG,
998 stack_size[PG_RGEN_SHADE_SURFACE_MNEE].cssRG);
999 unsigned int dss = std::max(stack_size[PG_CALL_SVM_AO].dssDC,
1000 stack_size[PG_CALL_SVM_BEVEL].dssDC);
1001 for (unsigned int i = 0; i < osl_stack_size.size(); ++i) {
1002 dss = std::max(dss, osl_stack_size[i].dssDC);
1003 }
1004
1005 optix_assert(optixPipelineSetStackSize(
1006 pipelines[PIP_SHADE], 0, dss, css, pipeline_options.usesMotionBlur ? 3 : 2));
1007 }
1008
1009 /* Copy colorsystem data from OSL to the device. */
1010 {
1011 /* The interface here is somewhat complex, since the colorsystem contains strings whose
1012 * representation is different between CPU and GPU.
1013 * OSL's ColorSystem type therefore consists of two parts: First the "fixed data" (e.g. floats)
1014 * that is identical between both, and then the strings.
1015 * To perform this conversion, in addition to the pointer to the CPU data, we query two sizes:
1016 * The total size of the CPU data and the number of strings. */
1017 uint8_t *cpu_data = nullptr;
1018 size_t cpu_data_sizes[2] = {0, 0};
1019 osl_globals.ss->getattribute("colorsystem", OSL::TypeDesc::PTR, &cpu_data);
1020 osl_globals.ss->getattribute(
1021 "colorsystem:sizes", TypeDesc(TypeDesc::LONGLONG, 2), (void *)cpu_data_sizes);
1022
1023 size_t cpu_full_size = cpu_data_sizes[0];
1024 size_t num_strings = cpu_data_sizes[1];
1025 size_t fixed_data_size = cpu_full_size - sizeof(ustringhash) * num_strings;
1026
1027 /* Allocate a buffer to fit the fixed data, as well as all the strings in GPU form. */
1028 uint8_t *gpu_data = osl_colorsystem.alloc(fixed_data_size + sizeof(size_t) * num_strings);
1029
1030 /* Copy the fixed data as-is. */
1031 memcpy(gpu_data, cpu_data, fixed_data_size);
1032
1033 /* Convert each string to GPU format. */
1034 ustringhash *cpu_strings = reinterpret_cast<ustringhash *>(cpu_data + fixed_data_size);
1035 size_t *gpu_strings = reinterpret_cast<size_t *>(gpu_data + fixed_data_size);
1036 for (int i = 0; i < num_strings; i++) {
1037 gpu_strings[i] = cpu_strings[i].hash();
1038 }
1039
1040 /* Copy GPU form of the data to the device. */
1041 osl_colorsystem.copy_to_device();
1042
1043 update_launch_params(offsetof(KernelParamsOptiX, osl_colorsystem),
1044 &osl_colorsystem.device_pointer,
1045 sizeof(device_ptr));
1046 }
1047
1048 return !have_error();
1049# else
1050 return false;
1051# endif
1052}
1053
1054OSLGlobals *OptiXDevice::get_cpu_osl_memory()
1055{
1056# ifdef WITH_OSL
1057 return &osl_globals;
1058# else
1059 return nullptr;
1060# endif
1061}
1062
1063bool OptiXDevice::build_optix_bvh(BVHOptiX *bvh,
1064 OptixBuildOperation operation,
1065 const OptixBuildInput &build_input,
1066 const uint16_t num_motion_steps)
1067{
1068 /* Allocate and build acceleration structures only one at a time, to prevent parallel builds
1069 * from running out of memory (since both original and compacted acceleration structure memory
1070 * may be allocated at the same time for the duration of this function). The builds would
1071 * otherwise happen on the same CUDA stream anyway. */
1072 static thread_mutex mutex;
1074
1075 const CUDAContextScope scope(this);
1076
1077 bool use_fast_trace_bvh = (bvh->params.bvh_type == BVH_TYPE_STATIC);
1078
1079 /* Compute memory usage. */
1080 OptixAccelBufferSizes sizes = {};
1081 OptixAccelBuildOptions options = {};
1082 options.operation = operation;
1083 if (build_input.type == OPTIX_BUILD_INPUT_TYPE_CURVES) {
1084 /* The build flags have to match the ones used to query the built-in curve intersection
1085 * program (see optixBuiltinISModuleGet above) */
1086 options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION |
1087 OPTIX_BUILD_FLAG_ALLOW_UPDATE;
1088 use_fast_trace_bvh = true;
1089 }
1090 else if (use_fast_trace_bvh) {
1091 VLOG_INFO << "Using fast to trace OptiX BVH";
1092 options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
1093 }
1094 else {
1095 VLOG_INFO << "Using fast to update OptiX BVH";
1096 options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_BUILD | OPTIX_BUILD_FLAG_ALLOW_UPDATE;
1097 }
1098
1099 options.motionOptions.numKeys = num_motion_steps;
1100 options.motionOptions.flags = OPTIX_MOTION_FLAG_START_VANISH | OPTIX_MOTION_FLAG_END_VANISH;
1101 options.motionOptions.timeBegin = 0.0f;
1102 options.motionOptions.timeEnd = 1.0f;
1103
1104 optix_assert(optixAccelComputeMemoryUsage(context, &options, &build_input, 1, &sizes));
1105
1106 /* Allocate required output buffers. */
1107 device_only_memory<char> temp_mem(this, "optix temp as build mem", true);
1108 temp_mem.alloc_to_device(align_up(sizes.tempSizeInBytes, 8) + 8);
1109 if (!temp_mem.device_pointer) {
1110 /* Make sure temporary memory allocation succeeded. */
1111 return false;
1112 }
1113
1114 /* Acceleration structure memory has to be allocated on the device (not allowed on the host). */
1115 device_only_memory<char> &out_data = *bvh->as_data;
1116 if (operation == OPTIX_BUILD_OPERATION_BUILD) {
1117 assert(out_data.device == this);
1118 out_data.alloc_to_device(sizes.outputSizeInBytes);
1119 if (!out_data.device_pointer) {
1120 return false;
1121 }
1122 }
1123 else {
1124 assert(out_data.device_pointer && out_data.device_size >= sizes.outputSizeInBytes);
1125 }
1126
1127 /* Finally build the acceleration structure. */
1128 OptixAccelEmitDesc compacted_size_prop = {};
1129 compacted_size_prop.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
1130 /* A tiny space was allocated for this property at the end of the temporary buffer above.
1131 * Make sure this pointer is 8-byte aligned. */
1132 compacted_size_prop.result = align_up(temp_mem.device_pointer + sizes.tempSizeInBytes, 8);
1133
1134 OptixTraversableHandle out_handle = 0;
1135 optix_assert(optixAccelBuild(context,
1136 nullptr,
1137 &options,
1138 &build_input,
1139 1,
1140 temp_mem.device_pointer,
1141 sizes.tempSizeInBytes,
1142 out_data.device_pointer,
1143 sizes.outputSizeInBytes,
1144 &out_handle,
1145 use_fast_trace_bvh ? &compacted_size_prop : nullptr,
1146 use_fast_trace_bvh ? 1 : 0));
1147 bvh->traversable_handle = static_cast<uint64_t>(out_handle);
1148
1149 /* Wait for all operations to finish. */
1150 cuda_assert(cuStreamSynchronize(nullptr));
1151
1152 /* Compact acceleration structure to save memory (do not do this in viewport for faster builds).
1153 */
1154 if (use_fast_trace_bvh) {
1155 uint64_t compacted_size = sizes.outputSizeInBytes;
1156 cuda_assert(cuMemcpyDtoH(&compacted_size, compacted_size_prop.result, sizeof(compacted_size)));
1157
1158 /* Temporary memory is no longer needed, so free it now to make space. */
1159 temp_mem.free();
1160
1161 /* There is no point compacting if the size does not change. */
1162 if (compacted_size < sizes.outputSizeInBytes) {
1163 device_only_memory<char> compacted_data(this, "optix compacted as", false);
1164 compacted_data.alloc_to_device(compacted_size);
1165 if (!compacted_data.device_pointer) {
1166 /* Do not compact if memory allocation for compacted acceleration structure fails.
1167 * Can just use the uncompacted one then, so succeed here regardless. */
1168 return !have_error();
1169 }
1170
1171 optix_assert(optixAccelCompact(context,
1172 nullptr,
1173 out_handle,
1174 compacted_data.device_pointer,
1175 compacted_size,
1176 &out_handle));
1177 bvh->traversable_handle = static_cast<uint64_t>(out_handle);
1178
1179 /* Wait for compaction to finish. */
1180 cuda_assert(cuStreamSynchronize(nullptr));
1181
1182 std::swap(out_data.device_size, compacted_data.device_size);
1183 std::swap(out_data.device_pointer, compacted_data.device_pointer);
1184 /* Original acceleration structure memory is freed when 'compacted_data' goes out of scope.
1185 */
1186 }
1187 }
1188
1189 return !have_error();
1190}
1191
1192void OptiXDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
1193{
1194 const bool use_fast_trace_bvh = (bvh->params.bvh_type == BVH_TYPE_STATIC);
1195
1196 free_bvh_memory_delayed();
1197
1198 BVHOptiX *const bvh_optix = static_cast<BVHOptiX *>(bvh);
1199
1200 progress.set_substatus("Building OptiX acceleration structure");
1201
1202 if (!bvh->params.top_level) {
1203 assert(bvh->objects.size() == 1 && bvh->geometry.size() == 1);
1204
1205 /* Refit is only possible in viewport for now (because AS is built with
1206 * OPTIX_BUILD_FLAG_ALLOW_UPDATE only there, see above). */
1207 OptixBuildOperation operation = OPTIX_BUILD_OPERATION_BUILD;
1208 if (refit && !use_fast_trace_bvh) {
1209 assert(bvh_optix->traversable_handle != 0);
1210 operation = OPTIX_BUILD_OPERATION_UPDATE;
1211 }
1212 else {
1213 bvh_optix->as_data->free();
1214 bvh_optix->traversable_handle = 0;
1215 }
1216
1217 /* Build bottom level acceleration structures (BLAS). */
1218 Geometry *const geom = bvh->geometry[0];
1219 if (geom->is_hair()) {
1220 /* Build BLAS for curve primitives. */
1221 Hair *const hair = static_cast<Hair *const>(geom);
1222 if (hair->num_segments() == 0) {
1223 return;
1224 }
1225
1226 const size_t num_segments = hair->num_segments();
1227
1228 size_t num_motion_steps = 1;
1230 if (pipeline_options.usesMotionBlur && hair->get_use_motion_blur() && motion_keys) {
1231 num_motion_steps = hair->get_motion_steps();
1232 }
1233
1234 device_vector<OptixAabb> aabb_data(this, "optix temp aabb data", MEM_READ_ONLY);
1235 device_vector<int> index_data(this, "optix temp index data", MEM_READ_ONLY);
1236 device_vector<float4> vertex_data(this, "optix temp vertex data", MEM_READ_ONLY);
1237 /* Four control points for each curve segment. */
1238 size_t num_vertices = num_segments * 4;
1239 if (hair->curve_shape == CURVE_THICK) {
1240 num_vertices = hair->num_keys() + 2 * hair->num_curves();
1241 index_data.alloc(num_segments);
1242 vertex_data.alloc(num_vertices * num_motion_steps);
1243 }
1244 else {
1245 aabb_data.alloc(num_segments * num_motion_steps);
1246 }
1247
1248 /* Get AABBs for each motion step. */
1249 for (size_t step = 0; step < num_motion_steps; ++step) {
1250 /* The center step for motion vertices is not stored in the attribute. */
1251 const float3 *keys = hair->get_curve_keys().data();
1252 size_t center_step = (num_motion_steps - 1) / 2;
1253 if (step != center_step) {
1254 size_t attr_offset = (step > center_step) ? step - 1 : step;
1255 /* Technically this is a float4 array, but sizeof(float3) == sizeof(float4). */
1256 keys = motion_keys->data_float3() + attr_offset * hair->get_curve_keys().size();
1257 }
1258
1259 if (hair->curve_shape == CURVE_THICK) {
1260 for (size_t curve_index = 0, segment_index = 0, vertex_index = step * num_vertices;
1261 curve_index < hair->num_curves();
1262 ++curve_index)
1263 {
1264 const Hair::Curve curve = hair->get_curve(curve_index);
1265 const array<float> &curve_radius = hair->get_curve_radius();
1266
1267 const int first_key_index = curve.first_key;
1268 {
1269 vertex_data[vertex_index++] = make_float4(keys[first_key_index].x,
1270 keys[first_key_index].y,
1271 keys[first_key_index].z,
1272 curve_radius[first_key_index]);
1273 }
1274
1275 for (int k = 0; k < curve.num_segments(); ++k) {
1276 if (step == 0) {
1277 index_data[segment_index++] = vertex_index - 1;
1278 }
1279 vertex_data[vertex_index++] = make_float4(keys[first_key_index + k].x,
1280 keys[first_key_index + k].y,
1281 keys[first_key_index + k].z,
1282 curve_radius[first_key_index + k]);
1283 }
1284
1285 const int last_key_index = first_key_index + curve.num_keys - 1;
1286 {
1287 vertex_data[vertex_index++] = make_float4(keys[last_key_index].x,
1288 keys[last_key_index].y,
1289 keys[last_key_index].z,
1290 curve_radius[last_key_index]);
1291 vertex_data[vertex_index++] = make_float4(keys[last_key_index].x,
1292 keys[last_key_index].y,
1293 keys[last_key_index].z,
1294 curve_radius[last_key_index]);
1295 }
1296 }
1297 }
1298 else {
1299 for (size_t curve_index = 0, i = 0; curve_index < hair->num_curves(); ++curve_index) {
1300 const Hair::Curve curve = hair->get_curve(curve_index);
1301
1302 for (int segment = 0; segment < curve.num_segments(); ++segment, ++i) {
1304 curve.bounds_grow(segment, keys, hair->get_curve_radius().data(), bounds);
1305
1306 const size_t index = step * num_segments + i;
1307 aabb_data[index].minX = bounds.min.x;
1308 aabb_data[index].minY = bounds.min.y;
1309 aabb_data[index].minZ = bounds.min.z;
1310 aabb_data[index].maxX = bounds.max.x;
1311 aabb_data[index].maxY = bounds.max.y;
1312 aabb_data[index].maxZ = bounds.max.z;
1313 }
1314 }
1315 }
1316 }
1317
1318 /* Upload AABB data to GPU. */
1319 aabb_data.copy_to_device();
1320 index_data.copy_to_device();
1321 vertex_data.copy_to_device();
1322
1323 vector<device_ptr> aabb_ptrs;
1324 aabb_ptrs.reserve(num_motion_steps);
1325 vector<device_ptr> width_ptrs;
1326 vector<device_ptr> vertex_ptrs;
1327 width_ptrs.reserve(num_motion_steps);
1328 vertex_ptrs.reserve(num_motion_steps);
1329 for (size_t step = 0; step < num_motion_steps; ++step) {
1330 aabb_ptrs.push_back(aabb_data.device_pointer + step * num_segments * sizeof(OptixAabb));
1331 const device_ptr base_ptr = vertex_data.device_pointer +
1332 step * num_vertices * sizeof(float4);
1333 width_ptrs.push_back(base_ptr + 3 * sizeof(float)); /* Offset by vertex size. */
1334 vertex_ptrs.push_back(base_ptr);
1335 }
1336
1337 /* Force a single any-hit call, so shadow record-all behavior works correctly. */
1338 unsigned int build_flags = OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;
1339 OptixBuildInput build_input = {};
1340 if (hair->curve_shape == CURVE_THICK) {
1341 build_input.type = OPTIX_BUILD_INPUT_TYPE_CURVES;
1342 build_input.curveArray.curveType = OPTIX_PRIMITIVE_TYPE_ROUND_CATMULLROM;
1343 build_input.curveArray.numPrimitives = num_segments;
1344 build_input.curveArray.vertexBuffers = (CUdeviceptr *)vertex_ptrs.data();
1345 build_input.curveArray.numVertices = num_vertices;
1346 build_input.curveArray.vertexStrideInBytes = sizeof(float4);
1347 build_input.curveArray.widthBuffers = (CUdeviceptr *)width_ptrs.data();
1348 build_input.curveArray.widthStrideInBytes = sizeof(float4);
1349 build_input.curveArray.indexBuffer = (CUdeviceptr)index_data.device_pointer;
1350 build_input.curveArray.indexStrideInBytes = sizeof(int);
1351 build_input.curveArray.flag = build_flags;
1352 build_input.curveArray.primitiveIndexOffset = hair->curve_segment_offset;
1353 }
1354 else {
1355 /* Disable visibility test any-hit program, since it is already checked during
1356 * intersection. Those trace calls that require any-hit can force it with a ray flag. */
1357 build_flags |= OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT;
1358
1359 build_input.type = OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES;
1360 build_input.customPrimitiveArray.aabbBuffers = (CUdeviceptr *)aabb_ptrs.data();
1361 build_input.customPrimitiveArray.numPrimitives = num_segments;
1362 build_input.customPrimitiveArray.strideInBytes = sizeof(OptixAabb);
1363 build_input.customPrimitiveArray.flags = &build_flags;
1364 build_input.customPrimitiveArray.numSbtRecords = 1;
1365 build_input.customPrimitiveArray.primitiveIndexOffset = hair->curve_segment_offset;
1366 }
1367
1368 if (!build_optix_bvh(bvh_optix, operation, build_input, num_motion_steps)) {
1369 progress.set_error("Failed to build OptiX acceleration structure");
1370 }
1371 }
1372 else if (geom->is_mesh() || geom->is_volume()) {
1373 /* Build BLAS for triangle primitives. */
1374 Mesh *const mesh = static_cast<Mesh *const>(geom);
1375 if (mesh->num_triangles() == 0) {
1376 return;
1377 }
1378
1379 const size_t num_verts = mesh->get_verts().size();
1380
1381 size_t num_motion_steps = 1;
1383 if (pipeline_options.usesMotionBlur && mesh->get_use_motion_blur() && motion_keys) {
1384 num_motion_steps = mesh->get_motion_steps();
1385 }
1386
1387 device_vector<int> index_data(this, "optix temp index data", MEM_READ_ONLY);
1388 index_data.alloc(mesh->get_triangles().size());
1389 memcpy(index_data.data(),
1390 mesh->get_triangles().data(),
1391 mesh->get_triangles().size() * sizeof(int));
1392 device_vector<float4> vertex_data(this, "optix temp vertex data", MEM_READ_ONLY);
1393 vertex_data.alloc(num_verts * num_motion_steps);
1394
1395 for (size_t step = 0; step < num_motion_steps; ++step) {
1396 const float3 *verts = mesh->get_verts().data();
1397
1398 size_t center_step = (num_motion_steps - 1) / 2;
1399 /* The center step for motion vertices is not stored in the attribute. */
1400 if (step != center_step) {
1401 verts = motion_keys->data_float3() + (step > center_step ? step - 1 : step) * num_verts;
1402 }
1403
1404 memcpy(vertex_data.data() + num_verts * step, verts, num_verts * sizeof(float3));
1405 }
1406
1407 /* Upload triangle data to GPU. */
1408 index_data.copy_to_device();
1409 vertex_data.copy_to_device();
1410
1411 vector<device_ptr> vertex_ptrs;
1412 vertex_ptrs.reserve(num_motion_steps);
1413 for (size_t step = 0; step < num_motion_steps; ++step) {
1414 vertex_ptrs.push_back(vertex_data.device_pointer + num_verts * step * sizeof(float3));
1415 }
1416
1417 /* Force a single any-hit call, so shadow record-all behavior works correctly. */
1418 unsigned int build_flags = OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;
1419 OptixBuildInput build_input = {};
1420 build_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
1421 build_input.triangleArray.vertexBuffers = (CUdeviceptr *)vertex_ptrs.data();
1422 build_input.triangleArray.numVertices = num_verts;
1423 build_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
1424 build_input.triangleArray.vertexStrideInBytes = sizeof(float4);
1425 build_input.triangleArray.indexBuffer = index_data.device_pointer;
1426 build_input.triangleArray.numIndexTriplets = mesh->num_triangles();
1427 build_input.triangleArray.indexFormat = OPTIX_INDICES_FORMAT_UNSIGNED_INT3;
1428 build_input.triangleArray.indexStrideInBytes = 3 * sizeof(int);
1429 build_input.triangleArray.flags = &build_flags;
1430 /* The SBT does not store per primitive data since Cycles already allocates separate
1431 * buffers for that purpose. OptiX does not allow this to be zero though, so just pass in
1432 * one and rely on that having the same meaning in this case. */
1433 build_input.triangleArray.numSbtRecords = 1;
1434 build_input.triangleArray.primitiveIndexOffset = mesh->prim_offset;
1435
1436 if (!build_optix_bvh(bvh_optix, operation, build_input, num_motion_steps)) {
1437 progress.set_error("Failed to build OptiX acceleration structure");
1438 }
1439 }
1440 else if (geom->is_pointcloud()) {
1441 /* Build BLAS for points primitives. */
1442 PointCloud *const pointcloud = static_cast<PointCloud *const>(geom);
1443 const size_t num_points = pointcloud->num_points();
1444 if (num_points == 0) {
1445 return;
1446 }
1447
1448 size_t num_motion_steps = 1;
1449 Attribute *motion_points = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
1450 if (pipeline_options.usesMotionBlur && pointcloud->get_use_motion_blur() && motion_points) {
1451 num_motion_steps = pointcloud->get_motion_steps();
1452 }
1453
1454 device_vector<OptixAabb> aabb_data(this, "optix temp aabb data", MEM_READ_ONLY);
1455 aabb_data.alloc(num_points * num_motion_steps);
1456
1457 /* Get AABBs for each motion step. */
1458 for (size_t step = 0; step < num_motion_steps; ++step) {
1459 /* The center step for motion vertices is not stored in the attribute. */
1460 size_t center_step = (num_motion_steps - 1) / 2;
1461
1462 if (step == center_step) {
1463 const float3 *points = pointcloud->get_points().data();
1464 const float *radius = pointcloud->get_radius().data();
1465
1466 for (size_t i = 0; i < num_points; ++i) {
1467 const PointCloud::Point point = pointcloud->get_point(i);
1469 point.bounds_grow(points, radius, bounds);
1470
1471 const size_t index = step * num_points + i;
1472 aabb_data[index].minX = bounds.min.x;
1473 aabb_data[index].minY = bounds.min.y;
1474 aabb_data[index].minZ = bounds.min.z;
1475 aabb_data[index].maxX = bounds.max.x;
1476 aabb_data[index].maxY = bounds.max.y;
1477 aabb_data[index].maxZ = bounds.max.z;
1478 }
1479 }
1480 else {
1481 size_t attr_offset = (step > center_step) ? step - 1 : step;
1482 const float4 *points = motion_points->data_float4() + attr_offset * num_points;
1483
1484 for (size_t i = 0; i < num_points; ++i) {
1485 const PointCloud::Point point = pointcloud->get_point(i);
1487 point.bounds_grow(points[i], bounds);
1488
1489 const size_t index = step * num_points + i;
1490 aabb_data[index].minX = bounds.min.x;
1491 aabb_data[index].minY = bounds.min.y;
1492 aabb_data[index].minZ = bounds.min.z;
1493 aabb_data[index].maxX = bounds.max.x;
1494 aabb_data[index].maxY = bounds.max.y;
1495 aabb_data[index].maxZ = bounds.max.z;
1496 }
1497 }
1498 }
1499
1500 /* Upload AABB data to GPU. */
1501 aabb_data.copy_to_device();
1502
1503 vector<device_ptr> aabb_ptrs;
1504 aabb_ptrs.reserve(num_motion_steps);
1505 for (size_t step = 0; step < num_motion_steps; ++step) {
1506 aabb_ptrs.push_back(aabb_data.device_pointer + step * num_points * sizeof(OptixAabb));
1507 }
1508
1509 /* Disable visibility test any-hit program, since it is already checked during
1510 * intersection. Those trace calls that require anyhit can force it with a ray flag.
1511 * For those, force a single any-hit call, so shadow record-all behavior works correctly. */
1512 unsigned int build_flags = OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT |
1513 OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL;
1514 OptixBuildInput build_input = {};
1515 build_input.type = OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES;
1516 build_input.customPrimitiveArray.aabbBuffers = (CUdeviceptr *)aabb_ptrs.data();
1517 build_input.customPrimitiveArray.numPrimitives = num_points;
1518 build_input.customPrimitiveArray.strideInBytes = sizeof(OptixAabb);
1519 build_input.customPrimitiveArray.flags = &build_flags;
1520 build_input.customPrimitiveArray.numSbtRecords = 1;
1521 build_input.customPrimitiveArray.primitiveIndexOffset = pointcloud->prim_offset;
1522
1523 if (!build_optix_bvh(bvh_optix, operation, build_input, num_motion_steps)) {
1524 progress.set_error("Failed to build OptiX acceleration structure");
1525 }
1526 }
1527 }
1528 else {
1529 unsigned int num_instances = 0;
1530 unsigned int max_num_instances = 0xFFFFFFFF;
1531
1532 bvh_optix->as_data->free();
1533 bvh_optix->traversable_handle = 0;
1534 bvh_optix->motion_transform_data->free();
1535
1536 optixDeviceContextGetProperty(context,
1537 OPTIX_DEVICE_PROPERTY_LIMIT_MAX_INSTANCE_ID,
1538 &max_num_instances,
1539 sizeof(max_num_instances));
1540 /* Do not count first bit, which is used to distinguish instanced and non-instanced objects. */
1541 max_num_instances >>= 1;
1542 if (bvh->objects.size() > max_num_instances) {
1543 progress.set_error(
1544 "Failed to build OptiX acceleration structure because there are too many instances");
1545 return;
1546 }
1547
1548 /* Fill instance descriptions. */
1549 device_vector<OptixInstance> instances(this, "optix tlas instances", MEM_READ_ONLY);
1550 instances.alloc(bvh->objects.size());
1551
1552 /* Calculate total motion transform size and allocate memory for them. */
1553 size_t motion_transform_offset = 0;
1554 if (pipeline_options.usesMotionBlur) {
1555 size_t total_motion_transform_size = 0;
1556 for (Object *const ob : bvh->objects) {
1557 if (ob->is_traceable() && ob->use_motion()) {
1558 total_motion_transform_size = align_up(total_motion_transform_size,
1559 OPTIX_TRANSFORM_BYTE_ALIGNMENT);
1560 const size_t motion_keys = max(ob->get_motion().size(), (size_t)2) - 2;
1561 total_motion_transform_size = total_motion_transform_size +
1562 sizeof(OptixSRTMotionTransform) +
1563 motion_keys * sizeof(OptixSRTData);
1564 }
1565 }
1566
1567 assert(bvh_optix->motion_transform_data->device == this);
1568 bvh_optix->motion_transform_data->alloc_to_device(total_motion_transform_size);
1569 }
1570
1571 for (Object *ob : bvh->objects) {
1572 /* Skip non-traceable objects. */
1573 if (!ob->is_traceable()) {
1574 continue;
1575 }
1576
1577 BVHOptiX *const blas = static_cast<BVHOptiX *>(ob->get_geometry()->bvh.get());
1578 OptixTraversableHandle handle = blas->traversable_handle;
1579 if (handle == 0) {
1580 continue;
1581 }
1582
1583 OptixInstance &instance = instances[num_instances++];
1584 memset(&instance, 0, sizeof(instance));
1585
1586 /* Clear transform to identity matrix. */
1587 instance.transform[0] = 1.0f;
1588 instance.transform[5] = 1.0f;
1589 instance.transform[10] = 1.0f;
1590
1591 /* Set user instance ID to object index. */
1592 instance.instanceId = ob->get_device_index();
1593
1594 /* Add some of the object visibility bits to the mask.
1595 * __prim_visibility contains the combined visibility bits of all instances, so is not
1596 * reliable if they differ between instances. But the OptiX visibility mask can only contain
1597 * 8 bits, so have to trade-off here and select just a few important ones.
1598 */
1599 instance.visibilityMask = ob->visibility_for_tracing() & 0xFF;
1600
1601 /* Have to have at least one bit in the mask, or else instance would always be culled. */
1602 if (0 == instance.visibilityMask) {
1603 instance.visibilityMask = 0xFF;
1604 }
1605
1606 if (ob->get_geometry()->is_hair() &&
1607 static_cast<const Hair *>(ob->get_geometry())->curve_shape == CURVE_THICK)
1608 {
1609 if (pipeline_options.usesMotionBlur && ob->get_geometry()->has_motion_blur()) {
1610 /* Select between motion blur and non-motion blur built-in intersection module. */
1611 instance.sbtOffset = PG_HITD_MOTION - PG_HITD;
1612 }
1613 }
1614 else if (ob->get_geometry()->is_pointcloud()) {
1615 /* Use the hit group that has an intersection program for point clouds. */
1616 instance.sbtOffset = PG_HITD_POINTCLOUD - PG_HITD;
1617
1618 /* Also skip point clouds in local trace calls. */
1619 instance.visibilityMask |= 4;
1620 }
1621 {
1622 /* Can disable __anyhit__kernel_optix_visibility_test by default (except for thick curves,
1623 * since it needs to filter out end-caps there).
1624 *
1625 * It is enabled where necessary (visibility mask exceeds 8 bits or the other any-hit
1626 * programs like __anyhit__kernel_optix_shadow_all_hit) via OPTIX_RAY_FLAG_ENFORCE_ANYHIT.
1627 */
1628 instance.flags = OPTIX_INSTANCE_FLAG_DISABLE_ANYHIT;
1629 }
1630
1631 /* Insert motion traversable if object has motion. */
1632 if (pipeline_options.usesMotionBlur && ob->use_motion()) {
1633 size_t motion_keys = max(ob->get_motion().size(), (size_t)2) - 2;
1634 size_t motion_transform_size = sizeof(OptixSRTMotionTransform) +
1635 motion_keys * sizeof(OptixSRTData);
1636
1637 const CUDAContextScope scope(this);
1638
1639 motion_transform_offset = align_up(motion_transform_offset,
1640 OPTIX_TRANSFORM_BYTE_ALIGNMENT);
1641 CUdeviceptr motion_transform_gpu = bvh_optix->motion_transform_data->device_pointer +
1642 motion_transform_offset;
1643 motion_transform_offset += motion_transform_size;
1644
1645 /* Allocate host side memory for motion transform and fill it with transform data. */
1646 array<uint8_t> motion_transform_storage(motion_transform_size);
1647 OptixSRTMotionTransform *motion_transform = reinterpret_cast<OptixSRTMotionTransform *>(
1648 motion_transform_storage.data());
1649 motion_transform->child = handle;
1650 motion_transform->motionOptions.numKeys = ob->get_motion().size();
1651 motion_transform->motionOptions.flags = OPTIX_MOTION_FLAG_NONE;
1652 motion_transform->motionOptions.timeBegin = 0.0f;
1653 motion_transform->motionOptions.timeEnd = 1.0f;
1654
1655 OptixSRTData *const srt_data = motion_transform->srtData;
1656 array<DecomposedTransform> decomp(ob->get_motion().size());
1658 decomp.data(), ob->get_motion().data(), ob->get_motion().size());
1659
1660 for (size_t i = 0; i < ob->get_motion().size(); ++i) {
1661 /* Scale. */
1662 srt_data[i].sx = decomp[i].y.w; /* scale.x.x */
1663 srt_data[i].sy = decomp[i].z.w; /* scale.y.y */
1664 srt_data[i].sz = decomp[i].w.w; /* scale.z.z */
1665
1666 /* Shear. */
1667 srt_data[i].a = decomp[i].z.x; /* scale.x.y */
1668 srt_data[i].b = decomp[i].z.y; /* scale.x.z */
1669 srt_data[i].c = decomp[i].w.x; /* scale.y.z */
1670 assert(decomp[i].z.z == 0.0f); /* scale.y.x */
1671 assert(decomp[i].w.y == 0.0f); /* scale.z.x */
1672 assert(decomp[i].w.z == 0.0f); /* scale.z.y */
1673
1674 /* Pivot point. */
1675 srt_data[i].pvx = 0.0f;
1676 srt_data[i].pvy = 0.0f;
1677 srt_data[i].pvz = 0.0f;
1678
1679 /* Rotation. */
1680 srt_data[i].qx = decomp[i].x.x;
1681 srt_data[i].qy = decomp[i].x.y;
1682 srt_data[i].qz = decomp[i].x.z;
1683 srt_data[i].qw = decomp[i].x.w;
1684
1685 /* Translation. */
1686 srt_data[i].tx = decomp[i].y.x;
1687 srt_data[i].ty = decomp[i].y.y;
1688 srt_data[i].tz = decomp[i].y.z;
1689 }
1690
1691 /* Upload motion transform to GPU. */
1692 cuMemcpyHtoD(motion_transform_gpu, motion_transform, motion_transform_size);
1693 motion_transform = nullptr;
1694 motion_transform_storage.clear();
1695
1696 /* Get traversable handle to motion transform. */
1697 optixConvertPointerToTraversableHandle(context,
1698 motion_transform_gpu,
1699 OPTIX_TRAVERSABLE_TYPE_SRT_MOTION_TRANSFORM,
1700 &instance.traversableHandle);
1701 }
1702 else {
1703 instance.traversableHandle = handle;
1704
1705 if (ob->get_geometry()->is_instanced()) {
1706 /* Set transform matrix. */
1707 memcpy(instance.transform, &ob->get_tfm(), sizeof(instance.transform));
1708 }
1709 }
1710 }
1711
1712 /* Upload instance descriptions. */
1713 instances.resize(num_instances);
1714 instances.copy_to_device();
1715
1716 /* Build top-level acceleration structure (TLAS) */
1717 OptixBuildInput build_input = {};
1718 build_input.type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
1719 build_input.instanceArray.instances = instances.device_pointer;
1720 build_input.instanceArray.numInstances = num_instances;
1721
1722 if (!build_optix_bvh(bvh_optix, OPTIX_BUILD_OPERATION_BUILD, build_input, 0)) {
1723 progress.set_error("Failed to build OptiX acceleration structure");
1724 }
1725 tlas_handle = bvh_optix->traversable_handle;
1726 }
1727}
1728
1729void OptiXDevice::release_bvh(BVH *bvh)
1730{
1731 thread_scoped_lock lock(delayed_free_bvh_mutex);
1732 /* Do delayed free of BVH memory, since geometry holding BVH might be deleted
1733 * while GPU is still rendering. */
1734 BVHOptiX *const bvh_optix = static_cast<BVHOptiX *>(bvh);
1735
1736 delayed_free_bvh_memory.emplace_back(std::move(bvh_optix->as_data));
1737 delayed_free_bvh_memory.emplace_back(std::move(bvh_optix->motion_transform_data));
1738 bvh_optix->traversable_handle = 0;
1739}
1740
1741void OptiXDevice::free_bvh_memory_delayed()
1742{
1743 thread_scoped_lock lock(delayed_free_bvh_mutex);
1744 delayed_free_bvh_memory.free_memory();
1745}
1746
/* Copy a named constant into device memory. Handles both the CUDA module's
 * constant memory (via the base class) and the OptiX launch parameters buffer,
 * which mirrors the same data for the OptiX pipeline. */
void OptiXDevice::const_copy_to(const char *name, void *host, const size_t size)
{
  /* Set constant memory for CUDA module. */
  CUDADevice::const_copy_to(name, host, size);

  if (strcmp(name, "data") == 0) {
    assert(size <= sizeof(KernelData));

    /* Update traversable handle (since it is different for each device on multi devices). */
    KernelData *const data = (KernelData *)host;
    *(OptixTraversableHandle *)&data->device_bvh = tlas_handle;

    update_launch_params(offsetof(KernelParamsOptiX, data), host, size);
    return;
  }

  /* Update data storage pointers in launch parameters. */
  /* X-macro: expands to one strcmp branch per data array declared in
   * kernel/data_arrays.h, each copying into the matching KernelParamsOptiX
   * field at its offsetof() position. */
# define KERNEL_DATA_ARRAY(data_type, data_name) \
  if (strcmp(name, #data_name) == 0) { \
    update_launch_params(offsetof(KernelParamsOptiX, data_name), host, size); \
    return; \
  }
  KERNEL_DATA_ARRAY(IntegratorStateGPU, integrator_state)
# include "kernel/data_arrays.h"
# undef KERNEL_DATA_ARRAY
}
1773
1774void OptiXDevice::update_launch_params(const size_t offset, void *data, const size_t data_size)
1775{
1776 const CUDAContextScope scope(this);
1777
1778 cuda_assert(cuMemcpyHtoD(launch_params.device_pointer + offset, data, data_size));
1779}
1780
1782
1783#endif /* WITH_OPTIX */
unsigned int uint
float progress
Definition WM_types.hh:1019
volatile int lock
BMesh const char void * data
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
Definition btDbvt.cpp:299
void refit(btStridingMeshInterface *triangles, const btVector3 &aabbMin, const btVector3 &aabbMax)
SIMD_FORCE_INLINE const btScalar & z() const
Return the z value.
Definition btQuadWord.h:117
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
Attribute * find(ustring name) const
bool top_level
Definition params.h:80
int bvh_type
Definition params.h:105
Definition bvh/bvh.h:67
vector< Geometry * > geometry
Definition bvh/bvh.h:70
BVHParams params
Definition bvh/bvh.h:69
vector< Object * > objects
Definition bvh/bvh.h:71
bool is_volume() const
bool is_pointcloud() const
bool is_hair() const
size_t prim_offset
AttributeSet attributes
bool is_mesh() const
Definition hair.h:13
Curve get_curve(const size_t i) const
Definition hair.h:111
size_t curve_segment_offset
Definition hair.h:90
size_t num_curves() const
Definition hair.h:126
size_t num_segments() const
Definition hair.h:131
CurveShapeType curve_shape
Definition hair.h:91
size_t num_keys() const
Definition hair.h:121
void alloc_to_device(const size_t num, bool shrink_to_fit=true)
@ MEM_READ_ONLY
CCL_NAMESPACE_BEGIN struct Options options
#define KERNEL_DATA_ARRAY(type, name)
Definition data_arrays.h:8
DebugFlags & DebugFlags()
Definition debug.h:145
#define KERNEL_FEATURE_OBJECT_MOTION
#define KERNEL_FEATURE_OSL_SHADING
#define KERNEL_FEATURE_SUBSURFACE
#define KERNEL_FEATURE_HAIR_THICK
#define KERNEL_FEATURE_PATH_TRACING
#define KERNEL_FEATURE_OSL_CAMERA
#define KERNEL_FEATURE_HAIR
#define KERNEL_FEATURE_NODE_RAYTRACE
#define KERNEL_FEATURE_BAKING
#define KERNEL_FEATURE_MNEE
#define KERNEL_FEATURE_POINTCLOUD
#define CCL_NAMESPACE_END
ccl_device_forceinline float4 make_float4(const float x, const float y, const float z, const float w)
#define offsetof(t, d)
static float verts[][3]
ThreadMutex mutex
#define this
VecBase< float, 4 > float4
#define assert(assertion)
VecBase< float, D > step(VecOp< float, D >, VecOp< float, D >) RET
@ ATTR_STD_MOTION_VERTEX_POSITION
@ CURVE_THICK
@ BVH_LAYOUT_OPTIX
#define VLOG_INFO
Definition log.h:71
#define VLOG_IS_ON(severity)
Definition log.h:35
Segment< FEdge *, Vec3r > segment
int BVHLayoutMask
Definition params.h:50
@ BVH_TYPE_STATIC
Definition params.h:40
size_t path_file_size(const string &path)
Definition path.cpp:554
bool path_is_directory(const string &path)
Definition path.cpp:582
string path_get(const string &sub)
Definition path.cpp:337
string path_join(const string &dir, const string &file)
Definition path.cpp:415
bool path_read_compressed_text(const string &path, string &text)
Definition path.cpp:754
static struct PyModuleDef module
Definition python.cpp:796
long long TypeDesc
CCL_NAMESPACE_BEGIN string string_printf(const char *format,...)
Definition string.cpp:23
float3 * data_float3()
float4 * data_float4()
void bounds_grow(const int k, const float3 *curve_keys, const float *curve_radius, BoundBox &bounds) const
Definition hair.cpp:44
int first_key
Definition hair.h:19
int num_segments() const
Definition hair.h:22
int num_keys
Definition hair.h:20
size_t num_triangles() const
Definition scene/mesh.h:77
bool use_motion() const
int get_device_index() const
bool is_traceable() const
uint visibility_for_tracing() const
void bounds_grow(const float3 *points, const float *radius, BoundBox &bounds) const
Point get_point(const int i) const
size_t num_points() const
void push(TaskRunFunction &&task)
Definition task.cpp:21
void wait_work(Summary *stats=nullptr)
Definition task.cpp:27
i
Definition text_draw.cc:230
max
Definition text_draw.cc:251
std::mutex thread_mutex
Definition thread.h:27
std::unique_lock< std::mutex > thread_scoped_lock
Definition thread.h:28
void transform_motion_decompose(DecomposedTransform *decomp, const Transform *motion, const size_t size)
uint64_t device_ptr
Definition types_base.h:44
ccl_device_inline size_t align_up(const size_t offset, const size_t alignment)
Definition types_base.h:47