Blender V4.5
hip/device_impl.cpp
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#ifdef WITH_HIP

#  include <cstdio>
#  include <cstdlib>
#  include <cstring>

#  include "device/hip/device_impl.h"

#  include "util/debug.h"
#  include "util/log.h"
#  include "util/md5.h"
#  include "util/path.h"
#  include "util/string.h"
#  include "util/system.h"
#  include "util/time.h"
#  include "util/types.h"

#  ifdef _WIN32
#    include "util/windows.h"
#  endif

#  include "kernel/device/hip/globals.h"

CCL_NAMESPACE_BEGIN

class HIPDevice;

bool HIPDevice::have_precompiled_kernels()
{
  string fatbins_path = path_get("lib");
  return path_exists(fatbins_path);
}

BVHLayoutMask HIPDevice::get_bvh_layout_mask(uint /*kernel_features*/) const
{
  return BVH_LAYOUT_BVH2;
}

void HIPDevice::set_error(const string &error)
{
  Device::set_error(error);

  if (first_error) {
    fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
    fprintf(stderr,
            "https://docs.blender.org/manual/en/latest/render/cycles/gpu_rendering.html\n\n");
    first_error = false;
  }
}

HIPDevice::HIPDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler, bool headless)
    : GPUDevice(info, stats, profiler, headless)
{
  /* Verify that base class types can be used with specific backend types. */
  static_assert(sizeof(texMemObject) == sizeof(hipTextureObject_t));
  static_assert(sizeof(arrayMemObject) == sizeof(hArray));

  first_error = true;

  hipDevId = info.num;
  hipDevice = 0;
  hipContext = nullptr;

  hipModule = nullptr;

  need_texture_info = false;

  pitch_alignment = 0;

  /* Initialize HIP. */
  hipError_t result = hipInit(0);
  if (result != hipSuccess) {
    set_error(string_printf("Failed to initialize HIP runtime (%s)", hipewErrorString(result)));
    return;
  }

  /* Setup device and context. */
  result = hipDeviceGet(&hipDevice, hipDevId);
  if (result != hipSuccess) {
    set_error(string_printf("Failed to get HIP device handle from ordinal (%s)",
                            hipewErrorString(result)));
    return;
  }

  /* hipDeviceMapHost for mapping host memory when out of device memory.
   * hipDeviceLmemResizeToMax for reserving local memory ahead of render,
   * so we can predict which memory to map to host. */
  int value;
  hip_assert(hipDeviceGetAttribute(&value, hipDeviceAttributeCanMapHostMemory, hipDevice));
  can_map_host = value != 0;

  hip_assert(
      hipDeviceGetAttribute(&pitch_alignment, hipDeviceAttributeTexturePitchAlignment, hipDevice));

  unsigned int ctx_flags = hipDeviceLmemResizeToMax;
  if (can_map_host) {
    ctx_flags |= hipDeviceMapHost;
    init_host_memory();
  }

  /* Create context. */
  result = hipCtxCreate(&hipContext, ctx_flags, hipDevice);

  if (result != hipSuccess) {
    set_error(string_printf("Failed to create HIP context (%s)", hipewErrorString(result)));
    return;
  }

  int major, minor;
  hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId);
  hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId);
  hipDevArchitecture = major * 100 + minor * 10;

  /* Get the HIP runtime version, needed for memory types. */
  hip_assert(hipRuntimeGetVersion(&hipRuntimeVersion));

  /* Pop context set by hipCtxCreate. */
  hipCtxPopCurrent(nullptr);
}
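
/* Note added for clarity (not part of the original source): hipDevArchitecture packs
 * the compute capability queried above as major * 100 + minor * 10, so a device
 * reporting major 10, minor 3 is stored as 1030. */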

HIPDevice::~HIPDevice()
{
  texture_info.free();
  if (hipModule) {
    hip_assert(hipModuleUnload(hipModule));
  }
  hip_assert(hipCtxDestroy(hipContext));
}

bool HIPDevice::support_device(const uint /*kernel_features*/)
{
  if (hipSupportsDevice(hipDevId)) {
    return true;
  }
  /* We only support Navi and above. */
  hipDeviceProp_t props;
  hipGetDeviceProperties(&props, hipDevId);

  set_error(string_printf("HIP backend requires an AMD RDNA graphics card or newer, but found %s.",
                          props.name));
  return false;
}

bool HIPDevice::check_peer_access(Device *peer_device)
{
  if (peer_device == this) {
    return false;
  }
  if (peer_device->info.type != DEVICE_HIP && peer_device->info.type != DEVICE_OPTIX) {
    return false;
  }

  HIPDevice *const peer_device_hip = static_cast<HIPDevice *>(peer_device);

  int can_access = 0;
  hip_assert(hipDeviceCanAccessPeer(&can_access, hipDevice, peer_device_hip->hipDevice));
  if (can_access == 0) {
    return false;
  }

  // Ensure array access over the link is possible as well (for 3D textures)
  hip_assert(hipDeviceGetP2PAttribute(
      &can_access, hipDevP2PAttrHipArrayAccessSupported, hipDevice, peer_device_hip->hipDevice));
  if (can_access == 0) {
    return false;
  }

  // Enable peer access in both directions
  {
    const HIPContextScope scope(this);
    hipError_t result = hipCtxEnablePeerAccess(peer_device_hip->hipContext, 0);
    if (result != hipSuccess) {
      set_error(string_printf("Failed to enable peer access on HIP context (%s)",
                              hipewErrorString(result)));
      return false;
    }
  }
  {
    const HIPContextScope scope(peer_device_hip);
    hipError_t result = hipCtxEnablePeerAccess(hipContext, 0);
    if (result != hipSuccess) {
      set_error(string_printf("Failed to enable peer access on HIP context (%s)",
                              hipewErrorString(result)));
      return false;
    }
  }

  return true;
}
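
/* Note added for clarity (not part of the original source): hipCtxEnablePeerAccess()
 * only enables access from the current context to the given peer context, which is
 * why the function above enters each context in turn and enables access towards the
 * other one. */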

bool HIPDevice::use_adaptive_compilation()
{
  return DebugFlags().hip.adaptive_compile;
}

/* Common HIPCC flags which stay the same regardless of shading model and kernel
 * sources md5, and depend only on the compiler and compilation settings. */
string HIPDevice::compile_kernel_get_common_cflags(const uint kernel_features)
{
  const int machine = system_cpu_bits();
  const string source_path = path_get("source");
  const string include_path = source_path;
  string cflags = string_printf(
      "-m%d "
      "-DHIPCC "
      "-I\"%s\"",
      machine,
      include_path.c_str());
  if (use_adaptive_compilation()) {
    cflags += " -D__KERNEL_FEATURES__=" + to_string(kernel_features);
  }

  const char *extra_cflags = getenv("CYCLES_HIP_EXTRA_CFLAGS");
  if (extra_cflags) {
    cflags += string(" ") + string(extra_cflags);
  }

#  ifdef WITH_NANOVDB
  cflags += " -DWITH_NANOVDB";
#  endif

#  ifdef WITH_CYCLES_DEBUG
  cflags += " -DWITH_CYCLES_DEBUG";
#  endif

  return cflags;
}
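
/* Illustrative example (not part of the original source): on a 64-bit build with
 * adaptive compilation disabled and no CYCLES_HIP_EXTRA_CFLAGS set, the returned
 * flags look roughly like
 *
 *   -m64 -DHIPCC -I"<source>"
 *
 * where <source> is a placeholder for the Cycles source directory returned by
 * path_get("source"); -DWITH_NANOVDB and -DWITH_CYCLES_DEBUG are appended when the
 * corresponding build options are enabled. */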

string HIPDevice::compile_kernel(const uint kernel_features, const char *name, const char *base)
{
  /* Compute kernel name. */
  int major, minor;
  hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId);
  hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId);
  const std::string arch = hipDeviceArch(hipDevId);

  /* Attempt to use kernel provided with Blender. */
  if (!use_adaptive_compilation()) {
    const string fatbin = path_get(string_printf("lib/%s_%s.fatbin.zst", name, arch.c_str()));
    VLOG_INFO << "Testing for pre-compiled kernel " << fatbin << ".";
    if (path_exists(fatbin)) {
      VLOG_INFO << "Using precompiled kernel.";
      return fatbin;
    }
  }

  /* Try to use locally compiled kernel. */
  string source_path = path_get("source");
  const string source_md5 = path_files_md5_hash(source_path);

  /* We include cflags in the md5, so that changing the HIP toolkit or other compiler
   * command line arguments makes sure the fatbin gets re-built. */
  string common_cflags = compile_kernel_get_common_cflags(kernel_features);
  const string kernel_md5 = util_md5_string(source_md5 + common_cflags);

  const char *const kernel_ext = "genco";
  std::string options = "-Wno-parentheses-equality -Wno-unused-value -ffast-math";
  if (hipNeedPreciseMath(arch)) {
    options.append(
        " -fhip-fp32-correctly-rounded-divide-sqrt -fno-gpu-approx-transcendentals "
        "-fgpu-flush-denormals-to-zero -ffp-contract=off");
  }

#  ifndef NDEBUG
  options.append(" -save-temps");
#  endif
  if (major == 9 && minor == 0) {
    /* Reduce optimization level on Vega GPUs to avoid some rendering artifacts. */
    options.append(" -O1");
  }
  options.append(" --offload-arch=").append(arch);

  const string include_path = source_path;
  const string fatbin_file = string_printf(
      "cycles_%s_%s_%s", name, arch.c_str(), kernel_md5.c_str());
  const string fatbin = path_cache_get(path_join("kernels", fatbin_file));
  VLOG_INFO << "Testing for locally compiled kernel " << fatbin << ".";
  if (path_exists(fatbin)) {
    VLOG_INFO << "Using locally compiled kernel.";
    return fatbin;
  }

#  ifdef _WIN32
  if (!use_adaptive_compilation() && have_precompiled_kernels()) {
    if (!hipSupportsDevice(hipDevId)) {
      set_error(
          string_printf("HIP backend requires compute capability 10.1 or higher, but found %d.%d. "
                        "Your GPU is not supported.",
                        major,
                        minor));
    }
    else {
      set_error(
          string_printf("HIP binary kernel for this graphics card compute "
                        "capability (%d.%d) not found.",
                        major,
                        minor));
    }
    return string();
  }
#  endif

  /* Compile. */
  const char *const hipcc = hipewCompilerPath();
  if (hipcc == nullptr) {
    set_error(
        "HIP hipcc compiler not found. "
        "Install the HIP toolkit in the default location.");
    return string();
  }

#  ifdef WITH_HIP_SDK_5
  int hip_major_ver = hipRuntimeVersion / 10000000;
  if (hip_major_ver > 5) {
    set_error(string_printf(
        "HIP Runtime version %d does not work with kernels compiled with HIP SDK 5\n",
        hip_major_ver));
    return string();
  }
#  endif
  const int hipcc_hip_version = hipewCompilerVersion();
  VLOG_INFO << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";

  double starttime = time_dt();

  path_create_directories(fatbin);

  source_path = path_join(path_join(source_path, "kernel"),
                          path_join("device", path_join(base, string_printf("%s.cpp", name))));

  string command = string_printf("%s %s -I \"%s\" --%s \"%s\" -o \"%s\" %s",
                                 hipcc,
                                 options.c_str(),
                                 include_path.c_str(),
                                 kernel_ext,
                                 source_path.c_str(),
                                 fatbin.c_str(),
                                 common_cflags.c_str());

  printf("Compiling %sHIP kernel ...\n%s\n",
         (use_adaptive_compilation()) ? "adaptive " : "",
         command.c_str());

#  ifdef _WIN32
  command = "call " + command;
#  endif
  if (system(command.c_str()) != 0) {
    set_error(
        "Failed to execute compilation command, "
        "see console for details.");
    return string();
  }

  /* Verify that compilation succeeded. */
  if (!path_exists(fatbin)) {
    set_error(
        "HIP kernel compilation failed, "
        "see console for details.");
    return string();
  }

  printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

  return fatbin;
}
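
/* Illustrative sketch (not part of the original source): for the "kernel" entry point
 * on a gfx1030 device, and assuming the default base directory "hip", the command
 * composed above looks roughly like
 *
 *   hipcc -Wno-parentheses-equality -Wno-unused-value -ffast-math
 *       --offload-arch=gfx1030 -I "<source>" --genco
 *       "<source>/kernel/device/hip/kernel.cpp"
 *       -o "<cache>/kernels/cycles_kernel_gfx1030_<md5>" -m64 -DHIPCC -I"<source>"
 *
 * <source>, <cache> and <md5> are placeholders; the precise flags depend on the build
 * configuration and on the hipNeedPreciseMath() check above. */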

bool HIPDevice::load_kernels(const uint kernel_features)
{
  /* TODO(sergey): Support kernels re-load for HIP devices adaptive compile.
   *
   * Currently re-loading kernels will invalidate memory pointers.
   */
  if (hipModule) {
    if (use_adaptive_compilation()) {
      VLOG_INFO << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
    }
    return true;
  }

  /* Check if HIP initialization succeeded. */
  if (hipContext == nullptr) {
    return false;
  }

  /* Check if the GPU is supported. */
  if (!support_device(kernel_features)) {
    return false;
  }

  /* Get kernel. */
  const char *kernel_name = "kernel";
  string fatbin = compile_kernel(kernel_features, kernel_name);
  if (fatbin.empty()) {
    return false;
  }

  /* Open module. */
  HIPContextScope scope(this);

  string fatbin_data;
  hipError_t result;

  if (path_read_compressed_text(fatbin, fatbin_data)) {
    result = hipModuleLoadData(&hipModule, fatbin_data.c_str());
  }
  else {
    result = hipErrorFileNotFound;
  }

  if (result != hipSuccess) {
    set_error(string_printf(
        "Failed to load HIP kernel from '%s' (%s)", fatbin.c_str(), hipewErrorString(result)));
  }

  if (result == hipSuccess) {
    kernels.load(this);
    reserve_local_memory(kernel_features);
  }

  return (result == hipSuccess);
}
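
/* Note added for clarity (not part of the original source): the fatbin returned by
 * compile_kernel() is read into memory with path_read_compressed_text() (precompiled
 * kernels ship zstd-compressed as lib/*.fatbin.zst), and the resulting buffer is
 * handed to hipModuleLoadData() rather than loading the module from a file path. */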

void HIPDevice::reserve_local_memory(const uint kernel_features)
{
  /* Together with hipDeviceLmemResizeToMax, this reserves local memory
   * needed for kernel launches, so that we can reliably figure out when
   * to allocate scene data in mapped host memory. */
  size_t total = 0, free_before = 0, free_after = 0;

  {
    HIPContextScope scope(this);
    hipMemGetInfo(&free_before, &total);
  }

  {
    /* Use the biggest kernel for estimation. */
    const DeviceKernel test_kernel = (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) ?
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE :
                                     (kernel_features & KERNEL_FEATURE_MNEE) ?
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE :
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE;

    /* Launch kernel, using just 1 block appears sufficient to reserve memory for all
     * multiprocessors. It would still be good to do this in parallel for the multi-GPU
     * case to make it faster. */
    HIPDeviceQueue queue(this);

    device_ptr d_path_index = 0;
    device_ptr d_render_buffer = 0;
    int d_work_size = 0;
    DeviceKernelArguments args(&d_path_index, &d_render_buffer, &d_work_size);

    queue.init_execution();
    queue.enqueue(test_kernel, 1, args);
    queue.synchronize();
  }

  {
    HIPContextScope scope(this);
    hipMemGetInfo(&free_after, &total);
  }

  VLOG_INFO << "Local memory reserved " << string_human_readable_number(free_before - free_after)
            << " bytes. (" << string_human_readable_size(free_before - free_after) << ")";

#  if 0
  /* For testing mapped host memory, fill up device memory. */
  const size_t keep_mb = 1024;

  while (free_after > keep_mb * 1024 * 1024LL) {
    hipDeviceptr_t tmp;
    hip_assert(hipMalloc(&tmp, 10 * 1024 * 1024LL));
    hipMemGetInfo(&free_after, &total);
  }
#  endif
}

void HIPDevice::get_device_memory_info(size_t &total, size_t &free)
{
  HIPContextScope scope(this);

  hipMemGetInfo(&free, &total);
}

bool HIPDevice::alloc_device(void *&device_pointer, const size_t size)
{
  HIPContextScope scope(this);

  hipError_t mem_alloc_result = hipMalloc((hipDeviceptr_t *)&device_pointer, size);
  return mem_alloc_result == hipSuccess;
}

void HIPDevice::free_device(void *device_pointer)
{
  HIPContextScope scope(this);

  hip_assert(hipFree((hipDeviceptr_t)device_pointer));
}

bool HIPDevice::shared_alloc(void *&shared_pointer, const size_t size)
{
  HIPContextScope scope(this);

  hipError_t mem_alloc_result = hipHostMalloc(
      &shared_pointer, size, hipHostMallocMapped | hipHostMallocWriteCombined);

  return mem_alloc_result == hipSuccess;
}

void HIPDevice::shared_free(void *shared_pointer)
{
  HIPContextScope scope(this);

  hipHostFree(shared_pointer);
}

void *HIPDevice::shared_to_device_pointer(const void *shared_pointer)
{
  HIPContextScope scope(this);
  void *device_pointer = nullptr;
  hip_assert(
      hipHostGetDevicePointer((hipDeviceptr_t *)&device_pointer, (void *)shared_pointer, 0));
  return device_pointer;
}

void HIPDevice::copy_host_to_device(void *device_pointer, void *host_pointer, const size_t size)
{
  const HIPContextScope scope(this);

  hip_assert(hipMemcpyHtoD((hipDeviceptr_t)device_pointer, host_pointer, size));
}
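
/* Note added for clarity (not part of the original source): in shared_alloc() above,
 * hipHostMallocMapped makes the host allocation visible to the GPU (see
 * shared_to_device_pointer()), while hipHostMallocWriteCombined favors CPU writes and
 * GPU reads; reading such memory back on the CPU is typically slow, which fits its
 * use here as host-side storage the device maps when out of device memory. */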

void HIPDevice::mem_alloc(device_memory &mem)
{
  if (mem.type == MEM_TEXTURE) {
    assert(!"mem_alloc not supported for textures.");
  }
  else if (mem.type == MEM_GLOBAL) {
    assert(!"mem_alloc not supported for global memory.");
  }
  else {
    generic_alloc(mem);
  }
}

void HIPDevice::mem_copy_to(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_copy_to(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_copy_to((device_texture &)mem);
  }
  else {
    if (!mem.device_pointer) {
      generic_alloc(mem);
      generic_copy_to(mem);
    }
    else if (mem.is_resident(this)) {
      generic_copy_to(mem);
    }
  }
}

void HIPDevice::mem_move_to_host(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_free(mem);
    global_alloc(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_free((device_texture &)mem);
    tex_alloc((device_texture &)mem);
  }
  else {
    assert(!"mem_move_to_host only supported for texture and global memory");
  }
}

void HIPDevice::mem_copy_from(
    device_memory &mem, const size_t y, size_t w, const size_t h, size_t elem)
{
  if (mem.type == MEM_TEXTURE || mem.type == MEM_GLOBAL) {
    assert(!"mem_copy_from not supported for textures.");
  }
  else if (mem.host_pointer) {
    const size_t size = elem * w * h;
    const size_t offset = elem * y * w;

    if (mem.device_pointer) {
      const HIPContextScope scope(this);
      hip_assert(hipMemcpyDtoH(
          (char *)mem.host_pointer + offset, (hipDeviceptr_t)mem.device_pointer + offset, size));
    }
    else {
      memset((char *)mem.host_pointer + offset, 0, size);
    }
  }
}

void HIPDevice::mem_zero(device_memory &mem)
{
  if (!mem.device_pointer) {
    mem_alloc(mem);
  }
  if (!mem.device_pointer) {
    return;
  }

  if (!(mem.is_shared(this) && mem.host_pointer == mem.shared_pointer)) {
    const HIPContextScope scope(this);
    hip_assert(hipMemsetD8((hipDeviceptr_t)mem.device_pointer, 0, mem.memory_size()));
  }
  else if (mem.host_pointer) {
    memset(mem.host_pointer, 0, mem.memory_size());
  }
}

void HIPDevice::mem_free(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_free(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_free((device_texture &)mem);
  }
  else {
    generic_free(mem);
  }
}

device_ptr HIPDevice::mem_alloc_sub_ptr(device_memory &mem, const size_t offset, size_t /*size*/)
{
  return (device_ptr)(((char *)mem.device_pointer) + mem.memory_elements_size(offset));
}
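
/* Illustrative example (not part of the original source): mem_alloc_sub_ptr() does
 * not allocate anything; it returns an address inside an existing allocation. For a
 * buffer of float4 elements (16 bytes each), an offset of 8 elements yields
 * mem.device_pointer + 128 bytes. */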

void HIPDevice::const_copy_to(const char *name, void *host, const size_t size)
{
  HIPContextScope scope(this);
  hipDeviceptr_t mem;
  size_t bytes;

  hip_assert(hipModuleGetGlobal(&mem, &bytes, hipModule, "kernel_params"));
  assert(bytes == sizeof(KernelParamsHIP));

  /* Update data storage pointers in launch parameters. */
#  define KERNEL_DATA_ARRAY(data_type, data_name) \
    if (strcmp(name, #data_name) == 0) { \
      hip_assert(hipMemcpyHtoD(mem + offsetof(KernelParamsHIP, data_name), host, size)); \
      return; \
    }
  KERNEL_DATA_ARRAY(KernelData, data)
  KERNEL_DATA_ARRAY(IntegratorStateGPU, integrator_state)
#  include "kernel/data_arrays.h"
#  undef KERNEL_DATA_ARRAY
}
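
/* Illustrative expansion (not part of the original source): for the "data" entry, the
 * macro above expands to roughly
 *
 *   if (strcmp(name, "data") == 0) {
 *     hip_assert(hipMemcpyHtoD(mem + offsetof(KernelParamsHIP, data), host, size));
 *     return;
 *   }
 *
 * so each named array listed in kernel/data_arrays.h is copied into its field of the
 * kernel_params global on the device. */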

void HIPDevice::global_alloc(device_memory &mem)
{
  if (mem.is_resident(this)) {
    generic_alloc(mem);
    generic_copy_to(mem);
  }

  const_copy_to(mem.name, &mem.device_pointer, sizeof(mem.device_pointer));
}

void HIPDevice::global_copy_to(device_memory &mem)
{
  if (!mem.device_pointer) {
    generic_alloc(mem);
    generic_copy_to(mem);
  }
  else if (mem.is_resident(this)) {
    generic_copy_to(mem);
  }

  const_copy_to(mem.name, &mem.device_pointer, sizeof(mem.device_pointer));
}

void HIPDevice::global_free(device_memory &mem)
{
  if (mem.is_resident(this) && mem.device_pointer) {
    generic_free(mem);
  }
}

static size_t tex_src_pitch(const device_texture &mem)
{
  return mem.data_width * datatype_size(mem.data_type) * mem.data_elements;
}

static hip_Memcpy2D tex_2d_copy_param(const device_texture &mem, const int pitch_alignment)
{
  /* 2D texture using pitch aligned linear memory. */
  const size_t src_pitch = tex_src_pitch(mem);
  const size_t dst_pitch = align_up(src_pitch, pitch_alignment);

  hip_Memcpy2D param;
  memset(&param, 0, sizeof(param));
  param.dstMemoryType = hipMemoryTypeDevice;
  param.dstDevice = mem.device_pointer;
  param.dstPitch = dst_pitch;
  param.srcMemoryType = hipMemoryTypeHost;
  param.srcHost = mem.host_pointer;
  param.srcPitch = src_pitch;
  param.WidthInBytes = param.srcPitch;
  param.Height = mem.data_height;

  return param;
}

static HIP_MEMCPY3D tex_3d_copy_param(const device_texture &mem)
{
  const size_t src_pitch = tex_src_pitch(mem);

  HIP_MEMCPY3D param;
  memset(&param, 0, sizeof(HIP_MEMCPY3D));
  param.dstMemoryType = hipMemoryTypeArray;
  param.dstArray = (hArray)mem.device_pointer;
  param.srcMemoryType = hipMemoryTypeHost;
  param.srcHost = mem.host_pointer;
  param.srcPitch = src_pitch;
  param.WidthInBytes = param.srcPitch;
  param.Height = mem.data_height;
  param.Depth = mem.data_depth;
  return param;
}
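
/* Illustrative example (not part of the original source): for a 1000-pixel-wide RGBA
 * byte texture, tex_src_pitch() gives 1000 * 1 * 4 = 4000 bytes per row; with a
 * pitch_alignment of 256 the destination pitch becomes align_up(4000, 256) = 4096,
 * so each device row carries 96 bytes of padding. The copy itself stays src_pitch
 * bytes wide (WidthInBytes). */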

void HIPDevice::tex_alloc(device_texture &mem)
{
  HIPContextScope scope(this);

  hipTextureAddressMode address_mode = hipAddressModeWrap;
  switch (mem.info.extension) {
    case EXTENSION_REPEAT:
      address_mode = hipAddressModeWrap;
      break;
    case EXTENSION_EXTEND:
      address_mode = hipAddressModeClamp;
      break;
    case EXTENSION_CLIP:
      address_mode = hipAddressModeBorder;
      break;
    case EXTENSION_MIRROR:
      address_mode = hipAddressModeMirror;
      break;
    default:
      assert(0);
      break;
  }

  hipTextureFilterMode filter_mode;
  if (mem.info.interpolation == INTERPOLATION_CLOSEST) {
    filter_mode = hipFilterModePoint;
  }
  else {
    filter_mode = hipFilterModeLinear;
  }

  /* Image Texture Storage */
  hipArray_Format format;
  switch (mem.data_type) {
    case TYPE_UCHAR:
      format = HIP_AD_FORMAT_UNSIGNED_INT8;
      break;
    case TYPE_UINT16:
      format = HIP_AD_FORMAT_UNSIGNED_INT16;
      break;
    case TYPE_UINT:
      format = HIP_AD_FORMAT_UNSIGNED_INT32;
      break;
    case TYPE_INT:
      format = HIP_AD_FORMAT_SIGNED_INT32;
      break;
    case TYPE_FLOAT:
      format = HIP_AD_FORMAT_FLOAT;
      break;
    case TYPE_HALF:
      format = HIP_AD_FORMAT_HALF;
      break;
    default:
      assert(0);
      return;
  }

  Mem *cmem = nullptr;
  hArray array_3d = nullptr;

  if (!mem.is_resident(this)) {
    thread_scoped_lock lock(device_mem_map_mutex);
    cmem = &device_mem_map[&mem];
    cmem->texobject = 0;

    if (mem.data_depth > 1) {
      array_3d = (hArray)mem.device_pointer;
      cmem->array = reinterpret_cast<arrayMemObject>(array_3d);
    }
  }
  else if (mem.data_depth > 1) {
    /* 3D texture using array, there is no API for linear memory. */
    HIP_ARRAY3D_DESCRIPTOR desc;

    desc.Width = mem.data_width;
    desc.Height = mem.data_height;
    desc.Depth = mem.data_depth;
    desc.Format = format;
    desc.NumChannels = mem.data_elements;
    desc.Flags = 0;

    VLOG_WORK << "Array 3D allocate: " << mem.name << ", "
              << string_human_readable_number(mem.memory_size()) << " bytes. ("
              << string_human_readable_size(mem.memory_size()) << ")";

    hip_assert(hipArray3DCreate((hArray *)&array_3d, &desc));

    if (!array_3d) {
      return;
    }

    mem.device_pointer = (device_ptr)array_3d;
    mem.device_size = mem.memory_size();
    stats.mem_alloc(mem.memory_size());

    const HIP_MEMCPY3D param = tex_3d_copy_param(mem);
    hip_assert(hipDrvMemcpy3D(&param));

    thread_scoped_lock lock(device_mem_map_mutex);
    cmem = &device_mem_map[&mem];
    cmem->texobject = 0;
    cmem->array = reinterpret_cast<arrayMemObject>(array_3d);
  }
  else if (mem.data_height > 0) {
    /* 2D texture, using pitch aligned linear memory. */
    const size_t dst_pitch = align_up(tex_src_pitch(mem), pitch_alignment);
    const size_t dst_size = dst_pitch * mem.data_height;

    cmem = generic_alloc(mem, dst_size - mem.memory_size());
    if (!cmem) {
      return;
    }

    const hip_Memcpy2D param = tex_2d_copy_param(mem, pitch_alignment);
    hip_assert(hipDrvMemcpy2DUnaligned(&param));
  }
  else {
    /* 1D texture, using linear memory. */
    cmem = generic_alloc(mem);
    if (!cmem) {
      return;
    }

    hip_assert(hipMemcpyHtoD(mem.device_pointer, mem.host_pointer, mem.memory_size()));
  }

  /* Set mapping and tag that we need to (re-)upload to device. */
  TextureInfo tex_info = mem.info;

  if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
      mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3 &&
      mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FPN &&
      mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16)
  {
    /* Bindless textures. */
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));

    if (array_3d) {
      resDesc.resType = hipResourceTypeArray;
      resDesc.res.array.h_Array = array_3d;
      resDesc.flags = 0;
    }
    else if (mem.data_height > 0) {
      const size_t dst_pitch = align_up(tex_src_pitch(mem), pitch_alignment);

      resDesc.resType = hipResourceTypePitch2D;
      resDesc.res.pitch2D.devPtr = mem.device_pointer;
      resDesc.res.pitch2D.format = format;
      resDesc.res.pitch2D.numChannels = mem.data_elements;
      resDesc.res.pitch2D.height = mem.data_height;
      resDesc.res.pitch2D.width = mem.data_width;
      resDesc.res.pitch2D.pitchInBytes = dst_pitch;
    }
    else {
      resDesc.resType = hipResourceTypeLinear;
      resDesc.res.linear.devPtr = mem.device_pointer;
      resDesc.res.linear.format = format;
      resDesc.res.linear.numChannels = mem.data_elements;
      resDesc.res.linear.sizeInBytes = mem.device_size;
    }

    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = address_mode;
    texDesc.addressMode[1] = address_mode;
    texDesc.addressMode[2] = address_mode;
    texDesc.filterMode = filter_mode;
    texDesc.flags = HIP_TRSF_NORMALIZED_COORDINATES;

    thread_scoped_lock lock(device_mem_map_mutex);
    cmem = &device_mem_map[&mem];

    if (hipTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, nullptr) != hipSuccess) {
      set_error(
          "Failed to create texture. Maximum GPU texture size or available GPU memory was likely "
          "exceeded.");
    }

    tex_info.data = (uint64_t)cmem->texobject;
  }
  else {
    tex_info.data = (uint64_t)mem.device_pointer;
  }

  {
    /* Update texture info. */
    thread_scoped_lock lock(texture_info_mutex);
    const uint slot = mem.slot;
    if (slot >= texture_info.size()) {
      /* Allocate some slots in advance, to reduce the number of re-allocations. */
      texture_info.resize(slot + 128);
    }
    texture_info[slot] = tex_info;
    need_texture_info = true;
  }
}
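
/* Note added for clarity (not part of the original source): for NanoVDB image types
 * the branch above stores the raw device pointer in tex_info.data instead of creating
 * a texture object, since NanoVDB grids are traversed directly in the kernel rather
 * than sampled through the HIP texture hardware. */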

void HIPDevice::tex_copy_to(device_texture &mem)
{
  if (!mem.device_pointer) {
    /* Not yet allocated on device. */
    tex_alloc(mem);
  }
  else if (!mem.is_resident(this)) {
    /* Peering with another device, may still need to create texture info and object. */
    bool texture_allocated = false;
    {
      thread_scoped_lock lock(texture_info_mutex);
      texture_allocated = mem.slot < texture_info.size() && texture_info[mem.slot].data != 0;
    }
    if (!texture_allocated) {
      tex_alloc(mem);
    }
  }
  else {
    /* Resident and fully allocated, only copy. */
    if (mem.data_depth > 0) {
      HIPContextScope scope(this);
      const HIP_MEMCPY3D param = tex_3d_copy_param(mem);
      hip_assert(hipDrvMemcpy3D(&param));
    }
    else if (mem.data_height > 0) {
      HIPContextScope scope(this);
      const hip_Memcpy2D param = tex_2d_copy_param(mem, pitch_alignment);
      hip_assert(hipDrvMemcpy2DUnaligned(&param));
    }
    else {
      generic_copy_to(mem);
    }
  }
}

void HIPDevice::tex_free(device_texture &mem)
{
  HIPContextScope scope(this);
  thread_scoped_lock lock(device_mem_map_mutex);

  /* Check if the memory was allocated for this device. */
  auto it = device_mem_map.find(&mem);
  if (it == device_mem_map.end()) {
    return;
  }

  const Mem &cmem = it->second;

  /* Always clear texture info and texture object, regardless of residency. */
  {
    thread_scoped_lock lock(texture_info_mutex);
    texture_info[mem.slot] = TextureInfo();
  }

  if (cmem.texobject) {
    /* Free bindless texture. */
    hipTexObjectDestroy(cmem.texobject);
  }

  if (!mem.is_resident(this)) {
    /* Do not free memory here, since it was allocated on a different device. */
    device_mem_map.erase(device_mem_map.find(&mem));
  }
  else if (cmem.array) {
    /* Free array. */
    hipArrayDestroy(reinterpret_cast<hArray>(cmem.array));
    stats.mem_free(mem.device_size);
    mem.device_pointer = 0;
    mem.device_size = 0;

    device_mem_map.erase(device_mem_map.find(&mem));
  }
  else {
    lock.unlock();
    generic_free(mem);
  }
}

unique_ptr<DeviceQueue> HIPDevice::gpu_queue_create()
{
  return make_unique<HIPDeviceQueue>(this);
}

bool HIPDevice::should_use_graphics_interop(const GraphicsInteropDevice &interop_device,
                                            const bool log)
{
  if (headless) {
    /* Avoid any call which might involve interaction with a graphics backend when we know
     * that we don't have an active graphics context. This avoids a potential crash in the
     * driver. */
    return false;
  }

  HIPContextScope scope(this);

  switch (interop_device.type) {
    case GraphicsInteropDevice::OPENGL: {
      /* Disable graphics interop for now, because of driver bug in 21.40. See #92972.
       * Also missing Vulkan support which is needed now. */
      return false;

      /* Check whether this device is part of the OpenGL context.
       *
       * Using a HIP device for graphics interoperability which is not part of the OpenGL
       * context is possible, but from empirical measurements with CUDA it can be considerably
       * slower than a naive pixel copy. */
      int num_all_devices = 0;
      hip_assert(hipGetDeviceCount(&num_all_devices));

      if (num_all_devices == 0) {
        return false;
      }

      vector<hipDevice_t> gl_devices(num_all_devices);
      uint num_gl_devices = 0;
      hipGLGetDevices(&num_gl_devices, gl_devices.data(), num_all_devices, hipGLDeviceListAll);

      bool found = false;
      for (hipDevice_t gl_device : gl_devices) {
        if (gl_device == hipDevice) {
          found = true;
          break;
        }
      }

      if (log) {
        if (found) {
          VLOG_INFO << "Graphics interop: found matching OpenGL device for HIP";
        }
        else {
          VLOG_INFO << "Graphics interop: no matching OpenGL device for HIP";
        }
      }

      return found;
    }
    case GraphicsInteropDevice::VULKAN:
    case GraphicsInteropDevice::METAL:
    case GraphicsInteropDevice::NONE:
      /* TODO: Implement Vulkan support. */
      return false;
  }

  return false;
}

int HIPDevice::get_num_multiprocessors()
{
  return get_device_default_attribute(hipDeviceAttributeMultiprocessorCount, 0);
}

int HIPDevice::get_max_num_threads_per_multiprocessor()
{
  return get_device_default_attribute(hipDeviceAttributeMaxThreadsPerMultiProcessor, 0);
}

bool HIPDevice::get_device_attribute(hipDeviceAttribute_t attribute, int *value)
{
  HIPContextScope scope(this);

  return hipDeviceGetAttribute(value, attribute, hipDevice) == hipSuccess;
}

int HIPDevice::get_device_default_attribute(hipDeviceAttribute_t attribute,
                                            const int default_value)
{
  int value = 0;
  if (!get_device_attribute(attribute, &value)) {
    return default_value;
  }
  return value;
}

CCL_NAMESPACE_END

#endif