Blender V4.5
gpu_context.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2016 by Mike Erwin. All rights reserved.
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
15
16#include "BKE_global.hh"
17
18#include "BLI_assert.h"
19#include "BLI_threads.h"
20#include "BLI_vector_set.hh"
21
22#include "DNA_userdef_types.h"
23
24#include "GHOST_C-api.h"
25#include "GHOST_Types.h"
26
27#include "GPU_context.hh"
28
29#include "GPU_batch.hh"
30#include "GPU_pass.hh"
31#include "gpu_backend.hh"
33#include "gpu_matrix_private.hh"
34#include "gpu_private.hh"
35#include "gpu_shader_private.hh"
36
37#ifdef WITH_VULKAN_BACKEND
38# include "vk_backend.hh"
39#endif
40#ifdef WITH_OPENGL_BACKEND
41# include "gl_backend.hh"
42# include "gl_context.hh"
43#endif
44#ifdef WITH_METAL_BACKEND
45# include "mtl_backend.hh"
46#endif
47#include "dummy_backend.hh"
48
49#include "draw_debug.hh"
50
51#include <mutex>
52
53using namespace blender::gpu;
54
55static thread_local Context *active_ctx = nullptr;
56
58static int num_backend_users = 0;
59
60static void gpu_backend_create();
61static void gpu_backend_discard();
62
63/* -------------------------------------------------------------------- */
66
67namespace blender::gpu {
68
80
82{
83 /* Derived class should have called free_resources already. */
84 BLI_assert(front_left == nullptr);
85 BLI_assert(back_left == nullptr);
86 BLI_assert(front_right == nullptr);
87 BLI_assert(back_right == nullptr);
88 BLI_assert(texture_pool == nullptr);
89
92
94 delete state_manager;
95 delete imm;
96}
97
118
120{
121 return (this == active_ctx) && pthread_equal(pthread_self(), thread_);
122}
123
125{
126 return active_ctx;
127}
128
130{
131 if (this->dummy_vbo) {
132 return this->dummy_vbo;
133 }
134
135 /* TODO(fclem): get rid of this dummy VBO. */
136 GPUVertFormat format = {0};
140 return this->dummy_vbo;
141}
142
152
162
172
183
184} // namespace blender::gpu
185
187
188/* -------------------------------------------------------------------- */
189
GPUContext *GPU_context_create(void *ghost_window, void *ghost_context)
{
  /* Create a new GPU context tied to the given GHOST window/context pair and
   * return it as an opaque handle. Thread-safe with respect to backend
   * bookkeeping via `backend_users_mutex`. */
  {
    std::scoped_lock lock(backend_users_mutex);
    if (num_backend_users == 0) {
      /* Automatically create backend when first context is created. */
    }
  }

  /* Delegate the actual context allocation to the active backend
   * (GL / Vulkan / Metal / dummy). */
  Context *ctx = GPUBackend::get()->context_alloc(ghost_window, ghost_context);


  /* Wrap the internal Context pointer into the public opaque GPUContext handle. */
  return wrap(ctx);
}
209
void GPU_context_discard(GPUContext *ctx_)
{
  /* Destroy a context previously returned by GPU_context_create.
   * Precondition: `ctx_` must be the active context on the calling thread
   * (asserted below); on return the thread-local active context is cleared. */
  Context *ctx = unwrap(ctx_);
  BLI_assert(active_ctx == ctx);

  GPUBackend *backend = GPUBackend::get();
  /* Flush any remaining printf while making sure we are inside render boundaries. */
  backend->render_begin();
  printf_end(ctx);
  backend->render_end();

  delete ctx;
  active_ctx = nullptr;

  {
    /* Backend user bookkeeping mirrors GPU_context_create: once the count
     * drops to zero the backend itself is torn down. */
    std::scoped_lock lock(backend_users_mutex);
    if (num_backend_users == 0) {
      /* Discard backend when last context is discarded. */
    }
  }
}
236
void GPU_context_active_set(GPUContext *ctx_)
{
  /* Make `ctx_` the active GPU context for the current thread.
   * The previously active context (if any) is deactivated first.
   * Passing null simply clears the thread-local active context. */
  Context *ctx = unwrap(ctx_);

  if (active_ctx) {
    active_ctx->deactivate();
  }

  active_ctx = ctx;

  if (ctx) {
    ctx->activate();
    /* It can happen that the previous context drew with a different color-space.
     * In the case where the new context is drawing with the same shader that was previously bound
     * (shader binding optimization), the uniform would not be set again because the dirty flag
     * would not have been set (since the color space of this new context never changed). The
     * shader would reuse the same color-space as the previous context frame-buffer (see #137855).
     */
  }
}
259
261{
262 return wrap(Context::get());
263}
264
265void GPU_context_begin_frame(GPUContext *ctx)
266{
267 blender::gpu::Context *_ctx = unwrap(ctx);
268 if (_ctx) {
269 _ctx->begin_frame();
270 }
271}
272
273void GPU_context_end_frame(GPUContext *ctx)
274{
275 blender::gpu::Context *_ctx = unwrap(ctx);
276 if (_ctx) {
277 _ctx->end_frame();
278 }
279}
280
281/* -------------------------------------------------------------------- */
286
288
290{
291 main_context_mutex.lock();
292}
293
295{
296 main_context_mutex.unlock();
297}
298
300
301/* -------------------------------------------------------------------- */
307
309{
310 GPUBackend *backend = GPUBackend::get();
311 BLI_assert(backend);
312 /* WORKAROUND: Currently a band-aid for the heist production. Has no side effect for GL backend
313 * but should be fixed for Metal. */
314 if (backend) {
315 backend->render_begin();
317 }
318}
320{
321 GPUBackend *backend = GPUBackend::get();
322 BLI_assert(backend);
323 if (backend) {
325 backend->render_end();
326 }
327}
void GPU_render_step(bool force_resource_release)
{
  /* Advance the backend by one render step, letting it process deferred work.
   * \param force_resource_release: when true, ask the backend to release
   * unused GPU resources during this step. */
  GPUBackend *backend = GPUBackend::get();
  BLI_assert(backend);
  /* Null-check kept for release builds where the assert compiles out. */
  if (backend) {
    backend->render_step(force_resource_release);
  }

}
340
342
343/* -------------------------------------------------------------------- */
346
348static std::optional<eGPUBackendType> g_backend_type_override = std::nullopt;
349static std::optional<bool> g_backend_type_supported = std::nullopt;
350static GPUBackend *g_backend = nullptr;
351static GHOST_SystemHandle g_ghost_system = nullptr;
352
353void GPU_backend_ghost_system_set(void *ghost_system_handle)
354{
355 g_ghost_system = reinterpret_cast<GHOST_SystemHandle>(ghost_system_handle);
356}
357
359{
360 return g_ghost_system;
361}
362
364{
365 g_backend_type = backend;
366 g_backend_type_supported = std::nullopt;
367}
368
373
375{
376 g_backend_type_override = backend_type;
377}
378
383
385{
386 blender::VectorSet<eGPUBackendType> backends_to_check;
387 if (g_backend_type_override.has_value()) {
388 backends_to_check.add(*g_backend_type_override);
389 }
390#if defined(WITH_OPENGL_BACKEND)
391 backends_to_check.add(GPU_BACKEND_OPENGL);
392#elif defined(WITH_METAL_BACKEND)
393 backends_to_check.add(GPU_BACKEND_METAL);
394#endif
395
396#if defined(WITH_VULKAN_BACKEND)
397 backends_to_check.add(GPU_BACKEND_VULKAN);
398#endif
399
400 for (const eGPUBackendType backend_type : backends_to_check) {
401 GPU_backend_type_selection_set(backend_type);
402 if (GPU_backend_supported()) {
403 return true;
404 }
406 }
407
409 return false;
410}
411
413{
414 switch (g_backend_type) {
416#ifdef WITH_OPENGL_BACKEND
417 return true;
418#else
419 return false;
420#endif
422#ifdef WITH_VULKAN_BACKEND
424#else
425 return false;
426#endif
428#ifdef WITH_METAL_BACKEND
430#else
431 return false;
432#endif
433 case GPU_BACKEND_NONE:
434 return true;
435 default:
436 BLI_assert(false && "No backend specified");
437 return false;
438 }
439}
440
448
450{
451 BLI_assert(g_backend == nullptr);
453
454 switch (g_backend_type) {
455#ifdef WITH_OPENGL_BACKEND
457 g_backend = MEM_new<GLBackend>(__func__);
458 break;
459#endif
460#ifdef WITH_VULKAN_BACKEND
462 g_backend = MEM_new<VKBackend>(__func__);
463 break;
464#endif
465#ifdef WITH_METAL_BACKEND
467 g_backend = MEM_new<MTLBackend>(__func__);
468 break;
469#endif
470 case GPU_BACKEND_NONE:
471 g_backend = MEM_new<DummyBackend>(__func__);
472 break;
473 default:
474 BLI_assert(0);
475 break;
476 }
477}
478
480{
482 g_backend->init_resources();
483}
484
486{
488 g_backend->delete_resources();
489}
490
492{
493 /* TODO: assert no resource left. */
494 MEM_delete(g_backend);
495 g_backend = nullptr;
496}
497
499{
500
501#ifdef WITH_OPENGL_BACKEND
502 if (g_backend && dynamic_cast<GLBackend *>(g_backend) != nullptr) {
503 return GPU_BACKEND_OPENGL;
504 }
505#endif
506
507#ifdef WITH_METAL_BACKEND
508 if (g_backend && dynamic_cast<MTLBackend *>(g_backend) != nullptr) {
509 return GPU_BACKEND_METAL;
510 }
511#endif
512
513#ifdef WITH_VULKAN_BACKEND
514 if (g_backend && dynamic_cast<VKBackend *>(g_backend) != nullptr) {
515 return GPU_BACKEND_VULKAN;
516 }
517#endif
518
519 return GPU_BACKEND_NONE;
520}
521
523{
524 return g_backend;
525}
526
528
529/* -------------------------------------------------------------------- */
532
534{
536#ifdef WITH_OPENGL_BACKEND
538 return GHOST_kDrawingContextTypeOpenGL;
539#endif
540#ifdef WITH_VULKAN_BACKEND
542 return GHOST_kDrawingContextTypeVulkan;
543#endif
544#ifdef WITH_METAL_BACKEND
546 return GHOST_kDrawingContextTypeMetal;
547#endif
548 default:
551 }
552}
553
555{
556 /* Contexts can only be created on the main thread. */
558
559 GHOST_ContextHandle main_thread_ghost_context = GHOST_GetActiveGPUContext();
560 GPUContext *main_thread_gpu_context = GPU_context_active_get();
561
562 /* GPU settings for context creation. */
563 GHOST_GPUSettings gpu_settings = {0};
564 gpu_settings.context_type = ghost_context_type();
565 if (G.debug & G_DEBUG_GPU) {
566 gpu_settings.flags |= GHOST_gpuDebugContext;
567 }
568 gpu_settings.preferred_device.index = U.gpu_preferred_index;
569 gpu_settings.preferred_device.vendor_id = U.gpu_preferred_vendor_id;
570 gpu_settings.preferred_device.device_id = U.gpu_preferred_device_id;
571
572 /* Grab the system handle. */
573 GHOST_SystemHandle ghost_system = reinterpret_cast<GHOST_SystemHandle>(
576
577 /* Create a Ghost GPU Context using the system handle. */
578 ghost_context_ = GHOST_CreateGPUContext(ghost_system, gpu_settings);
579 BLI_assert(ghost_context_);
580
581 /* Create a GPU context for the secondary thread to use. */
582 gpu_context_ = GPU_context_create(nullptr, ghost_context_);
583 BLI_assert(gpu_context_);
584
585 /* Release the Ghost GPU Context from this thread. */
587 reinterpret_cast<GHOST_ContextHandle>(ghost_context_));
588 BLI_assert(success);
589 UNUSED_VARS_NDEBUG(success);
590
591 /* Restore the main thread contexts.
592 * (required as the above context creation also makes it active). */
593 GHOST_ActivateGPUContext(main_thread_ghost_context);
594 GPU_context_active_set(main_thread_gpu_context);
595}
596
598{
599 /* Contexts should be destructed on the thread they were activated. */
601
602 GPU_context_discard(gpu_context_);
603
604 GHOST_ReleaseGPUContext(reinterpret_cast<GHOST_ContextHandle>(ghost_context_));
605
606 GHOST_SystemHandle ghost_system = reinterpret_cast<GHOST_SystemHandle>(
609 GHOST_DisposeGPUContext(ghost_system, reinterpret_cast<GHOST_ContextHandle>(ghost_context_));
610}
611
613{
614 /* Contexts need to be activated in the thread they're going to be used. */
616
617 GHOST_ActivateGPUContext(reinterpret_cast<GHOST_ContextHandle>(ghost_context_));
618 GPU_context_active_set(gpu_context_);
619}
620
@ G_FLAG_GPU_BACKEND_FALLBACK
@ G_DEBUG_GPU
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
int BLI_thread_is_main(void)
Definition threads.cc:179
#define UNUSED_VARS_NDEBUG(...)
GHOST C-API function and type declarations.
GHOST_ContextHandle GHOST_CreateGPUContext(GHOST_SystemHandle systemhandle, GHOST_GPUSettings gpuSettings)
GHOST_TSuccess GHOST_ReleaseGPUContext(GHOST_ContextHandle contexthandle)
GHOST_ContextHandle GHOST_GetActiveGPUContext()
GHOST_TSuccess GHOST_ActivateGPUContext(GHOST_ContextHandle contexthandle)
GHOST_TSuccess GHOST_DisposeGPUContext(GHOST_SystemHandle systemhandle, GHOST_ContextHandle contexthandle)
static GHOST_SystemCocoa * ghost_system
GHOST_TDrawingContextType
@ GHOST_kDrawingContextTypeNone
GHOST_TSuccess
Definition GHOST_Types.h:80
@ GHOST_gpuDebugContext
Definition GHOST_Types.h:70
#define GPU_batch_create(primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:148
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:204
void GPU_render_end()
void GPU_render_step(bool force_resource_release=false)
GPUContext * GPU_context_create(void *ghost_window, void *ghost_context)
void GPU_context_main_lock()
void GPU_backend_type_selection_set(const eGPUBackendType backend)
bool GPU_backend_supported()
void * GPU_backend_ghost_system_get()
void GPU_context_begin_frame(GPUContext *ctx)
void GPU_render_begin()
bool GPU_backend_type_selection_is_overridden()
GPUContext * GPU_context_active_get()
eGPUBackendType GPU_backend_type_selection_get()
void GPU_context_main_unlock()
void GPU_context_discard(GPUContext *)
bool GPU_backend_type_selection_detect()
void GPU_context_end_frame(GPUContext *ctx)
void GPU_context_active_set(GPUContext *)
eGPUBackendType GPU_backend_get_type()
void GPU_backend_ghost_system_set(void *ghost_system_handle)
void GPU_backend_type_selection_set_override(eGPUBackendType backend_type)
void GPU_pass_cache_update()
Definition gpu_pass.cc:326
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_TRI_STRIP
@ GPU_PRIM_TRIS
void GPU_shader_unbind()
#define GPU_vertbuf_create_with_format(format)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
@ GPU_FETCH_FLOAT
uint GPU_vertformat_attr_add(GPUVertFormat *, blender::StringRef name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_F32
volatile int lock
#define U
bool add(const Key &key)
static DebugDraw & get()
static Context * get()
GPUMatrixState * matrix_state
Batch * procedural_triangles_batch_get()
VertBuf * dummy_vbo_get()
Batch * procedural_triangle_strips_batch_get()
virtual void activate()=0
virtual void end_frame()=0
Batch * procedural_lines_batch_get()
virtual void begin_frame()=0
Batch * procedural_points_batch_get()
virtual void render_end()=0
static GPUBackend * get()
virtual void render_step(bool force_resource_release=false)=0
virtual void render_begin()=0
virtual Context * context_alloc(void *ghost_window, void *ghost_context)=0
static bool metal_is_supported()
static bool is_supported()
Simple API to draw debug shapes and log in the viewport.
static std::optional< bool > g_backend_type_supported
static bool gpu_backend_supported()
static int num_backend_users
static eGPUBackendType g_backend_type
static void gpu_backend_discard()
static blender::Mutex main_context_mutex
void gpu_backend_delete_resources()
void gpu_backend_init_resources()
static blender::Mutex backend_users_mutex
static void gpu_backend_create()
static GHOST_SystemHandle g_ghost_system
static GPUBackend * g_backend
static GHOST_TDrawingContextType ghost_context_type()
static std::optional< eGPUBackendType > g_backend_type_override
static Context * active_ctx
GPUMatrixState * GPU_matrix_state_create()
Definition gpu_matrix.cc:55
void GPU_matrix_state_discard(GPUMatrixState *state)
Definition gpu_matrix.cc:76
format
#define G(x, y, z)
static Context * unwrap(GPUContext *ctx)
static GPUContext * wrap(Context *ctx)
void printf_begin(Context *ctx)
void printf_end(Context *ctx)
std::mutex Mutex
Definition BLI_mutex.hh:47
GHOST_TDrawingContextType context_type
GHOST_GPUDevice preferred_device