Blender V4.5
image_gpu.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include "MEM_guardedalloc.h"
10
11#include "BLI_boxpack_2d.h"
12#include "BLI_linklist.h"
13#include "BLI_listbase.h"
14#include "BLI_math_base.hh"
15#include "BLI_rect.h"
16#include "BLI_threads.h"
17#include "BLI_time.h"
18
19#include "DNA_image_types.h"
20#include "DNA_userdef_types.h"
21
23#include "IMB_imbuf.hh"
24#include "IMB_imbuf_types.hh"
25
26#include "BKE_global.hh"
27#include "BKE_image.hh"
29#include "BKE_main.hh"
30
31#include "GPU_capabilities.hh"
32#include "GPU_state.hh"
33#include "GPU_texture.hh"
34
36
37/* Prototypes. */
38static void gpu_free_unused_buffers();
39static void image_free_gpu(Image *ima, const bool immediate);
41 Image *ima, ImageTile *tile, ImBuf *ibuf, int x, int y, int w, int h);
42
/* BKE_image_has_gpu_texture_premultiplied_alpha: decide whether the GPU texture for this
 * image/buffer stores premultiplied alpha.
 * NOTE(extraction): Doxygen dump -- each line is prefixed with its original line number.
 * Original lines 43 (function signature) and 47 (the condition guarding the first
 * `return true`, presumably a render-result/composite type check per the
 * IMA_TYPE_R_RESULT / IMA_TYPE_COMPOSITE cross-references -- TODO confirm) are missing. */
44{
 45 if (image) {
 46 /* Render result and compositor output are always premultiplied */
/* [original line 47 missing: `if (...)` condition for the return below] */
 48 return true;
 49 }
 50 /* Generated images use pre multiplied float buffer, but straight alpha for byte buffers. */
 51 if (image->type == IMA_TYPE_UV_TEST && ibuf) {
 52 return ibuf->float_buffer.data != nullptr;
 53 }
 54 }
 55 if (ibuf) {
 56 if (ibuf->float_buffer.data) {
/* Float buffers: premultiplied unless the image explicitly requests straight alpha. */
 57 return image ? (image->alpha_mode != IMA_ALPHA_STRAIGHT) : false;
 58 }
 59
/* Byte buffers: premultiplied only when the image explicitly requests it. */
 60 return image ? (image->alpha_mode == IMA_ALPHA_PREMUL) : true;
 61 }
 62 return false;
 63}
64
65/* -------------------------------------------------------------------- */
68
/* True when a w*h image exceeds the GPU texture size limit.
 * NOTE(extraction): original line 71 (the return expression) is missing; the
 * cross-reference index suggests it compares against GPU_texture_size_with_limit()
 * -- TODO confirm against the real source. */
69static bool is_over_resolution_limit(int w, int h)
70{
/* [original line 71 missing: return expression] */
72}
73
78
/* Build the 1D lookup texture that maps UDIM tile numbers (1001-based) to their
 * layer/offset/size inside the 2D array texture. Row 0 holds the layer index
 * (-1 = unused tile), row 1 holds normalized offset/size.
 * NOTE(extraction): original lines 91 (presumably a tile-sort call, see the
 * BKE_image_sort_tiles cross-reference -- TODO confirm), 101 (the LISTBASE_FOREACH
 * header for the tile loop) and 117 (presumably freeing `data`) are missing. */
79static GPUTexture *gpu_texture_create_tile_mapping(Image *ima, const int multiview_eye)
80{
 81 GPUTexture *tilearray = ima->gputexture[TEXTARGET_2D_ARRAY][multiview_eye];
 82
 83 if (tilearray == nullptr) {
 84 return nullptr;
 85 }
 86
 87 float array_w = GPU_texture_width(tilearray);
 88 float array_h = GPU_texture_height(tilearray);
 89
 90 /* Determine maximum tile number. */
/* [original line 91 missing] */
 92 ImageTile *last_tile = (ImageTile *)ima->tiles.last;
 93 int max_tile = last_tile->tile_number - 1001;
 94
 95 /* create image */
 96 int width = max_tile + 1;
 97 float *data = MEM_calloc_arrayN<float>(size_t(width) * 8, __func__);
 98 for (int i = 0; i < width; i++) {
 99 data[4 * i] = -1.0f;
100 }
/* [original line 101 missing: loop header over ima->tiles introducing `tile`] */
102 int i = tile->tile_number - 1001;
103 ImageTile_Runtime *tile_runtime = &tile->runtime;
104 data[4 * i] = tile_runtime->tilearray_layer;
105
106 float *tile_info = &data[4 * width + 4 * i];
107 tile_info[0] = tile_runtime->tilearray_offset[0] / array_w;
108 tile_info[1] = tile_runtime->tilearray_offset[1] / array_h;
109 tile_info[2] = tile_runtime->tilearray_size[0] / array_w;
110 tile_info[3] = tile_runtime->tilearray_size[1] / array_h;
111 }
112
113 GPUTexture *tex = GPU_texture_create_1d_array(
114 ima->id.name + 2, width, 2, 1, GPU_RGBA32F, GPU_TEXTURE_USAGE_SHADER_READ, data);
115 GPU_texture_mipmap_mode(tex, false, false);
116
/* [original line 117 missing: presumably MEM_freeN(data) -- TODO confirm] */
118
119 return tex;
120}
121
127
128static int compare_packtile(const void *a, const void *b)
129{
130 const PackTile *tile_a = (const PackTile *)a;
131 const PackTile *tile_b = (const PackTile *)b;
132
133 return tile_a->pack_score < tile_b->pack_score;
134}
135
/* Pack all UDIM tiles into a 2D array texture: measure tiles, box-pack them into
 * layers, allocate the array texture, then upload each tile.
 * NOTE(extraction): original lines 143 (loop header over ima->tiles), 175, 196
 * (end-of-layer bookkeeping, presumably freeing `packed`), 212 (second tile loop
 * header), 229 (start of the IMB_update_gpu_texture_sub call), 243 and 246
 * (mipmap-chain update / completeness flag -- TODO confirm) are missing. */
136static GPUTexture *gpu_texture_create_tile_array(Image *ima, ImBuf *main_ibuf)
137{
 138 int arraywidth = 0, arrayheight = 0;
 139 ListBase boxes = {nullptr};
 140
 141 int planes = 0;
 142
/* [original line 143 missing: loop header over ima->tiles introducing `tile`] */
 144 ImageUser iuser;
 145 BKE_imageuser_default(&iuser);
 146 iuser.tile = tile->tile_number;
 147 ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, nullptr);
 148
 149 if (ibuf) {
 150 PackTile *packtile = MEM_callocN<PackTile>(__func__);
 151 packtile->tile = tile;
 152 packtile->boxpack.w = ibuf->x;
 153 packtile->boxpack.h = ibuf->y;
 154
 155 if (is_over_resolution_limit(packtile->boxpack.w, packtile->boxpack.h)) {
 156 packtile->boxpack.w = smaller_power_of_2_limit(packtile->boxpack.w);
 157 packtile->boxpack.h = smaller_power_of_2_limit(packtile->boxpack.h);
 158 }
 159 arraywidth = max_ii(arraywidth, packtile->boxpack.w);
 160 arrayheight = max_ii(arrayheight, packtile->boxpack.h);
 161
 162 /* We sort the tiles by decreasing size, with an additional penalty term
 163 * for high aspect ratios. This improves packing efficiency. */
 164 float w = packtile->boxpack.w, h = packtile->boxpack.h;
 165 packtile->pack_score = max_ff(w, h) / min_ff(w, h) * w * h;
 166
/* NOTE(review): `ibuf->planes` is read one line after the release below -- looks
 * safe only if release merely drops a reference; verify against BKE_image. */
 167 BKE_image_release_ibuf(ima, ibuf, nullptr);
 168 BLI_addtail(&boxes, packtile);
 169 planes = max_ii(planes, ibuf->planes);
 170 }
 171 }
 172
 173 BLI_assert(arraywidth > 0 && arrayheight > 0);
 174
/* [original line 175 missing: presumably the BLI_listbase_sort(compare_packtile) call] */
 176 int arraylayers = 0;
 177 /* Keep adding layers until all tiles are packed. */
 178 while (boxes.first != nullptr) {
 179 ListBase packed = {nullptr};
 180 BLI_box_pack_2d_fixedarea(&boxes, arraywidth, arrayheight, &packed);
 181 BLI_assert(packed.first != nullptr);
 182
 183 LISTBASE_FOREACH (PackTile *, packtile, &packed) {
 184 ImageTile *tile = packtile->tile;
 185 ImageTile_Runtime *tile_runtime = &tile->runtime;
 186 int *tileoffset = tile_runtime->tilearray_offset;
 187 int *tilesize = tile_runtime->tilearray_size;
 188
 189 tileoffset[0] = packtile->boxpack.x;
 190 tileoffset[1] = packtile->boxpack.y;
 191 tilesize[0] = packtile->boxpack.w;
 192 tilesize[1] = packtile->boxpack.h;
 193 tile_runtime->tilearray_layer = arraylayers;
 194 }
 195
/* [original line 196 missing: presumably BLI_freelistN(&packed) -- TODO confirm] */
 197 arraylayers++;
 198 }
 199
 200 const bool use_high_bitdepth = (ima->flag & IMA_HIGH_BITDEPTH);
 201 const bool use_grayscale = planes <= 8;
 202 /* Create Texture without content. */
 203 GPUTexture *tex = IMB_touch_gpu_texture(ima->id.name + 2,
 204 main_ibuf,
 205 arraywidth,
 206 arrayheight,
 207 arraylayers,
 208 use_high_bitdepth,
 209 use_grayscale);
 210
 211 /* Upload each tile one by one. */
/* [original line 212 missing: loop header over ima->tiles introducing `tile`] */
 213 const ImageTile_Runtime *tile_runtime = &tile->runtime;
 214 const int tilelayer = tile_runtime->tilearray_layer;
 215 const int *tileoffset = tile_runtime->tilearray_offset;
 216 const int *tilesize = tile_runtime->tilearray_size;
 217
 218 if (tilesize[0] == 0 || tilesize[1] == 0) {
 219 continue;
 220 }
 221
 222 ImageUser iuser;
 223 BKE_imageuser_default(&iuser);
 224 iuser.tile = tile->tile_number;
 225 ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, nullptr);
 226
 227 if (ibuf) {
 228 const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima, ibuf);
/* [original line 229 missing: start of the IMB_update_gpu_texture_sub(tex, ...) call] */
 230 ibuf,
 231 UNPACK2(tileoffset),
 232 tilelayer,
 233 UNPACK2(tilesize),
 234 use_high_bitdepth,
 235 use_grayscale,
 236 store_premultiplied);
 237 }
 238
 239 BKE_image_release_ibuf(ima, ibuf, nullptr);
 240 }
 241
 242 if (GPU_mipmap_enabled()) {
/* [original line 243 missing: presumably GPU_texture_update_mipmap_chain(tex)] */
 244 GPU_texture_mipmap_mode(tex, true, true);
 245 if (ima) {
/* [original line 246 missing: presumably sets IMA_GPU_MIPMAP_COMPLETE -- TODO confirm] */
 247 }
 248 }
 249 else {
 250 GPU_texture_mipmap_mode(tex, false, true);
 251 }
 252
 253 return tex;
 254}
255
257
258/* -------------------------------------------------------------------- */
261
262static GPUTexture **get_image_gpu_texture_ptr(Image *ima,
263 eGPUTextureTarget textarget,
264 const int multiview_eye)
265{
266 const bool in_range = (int(textarget) >= 0) && (textarget < TEXTARGET_COUNT);
267 BLI_assert(in_range);
268 BLI_assert(ELEM(multiview_eye, 0, 1));
269
270 if (in_range) {
271 return &(ima->gputexture[textarget][multiview_eye]);
272 }
273 return nullptr;
274}
275
/* Create a small placeholder "error" texture so failed loads are not retried.
 * NOTE(extraction): original line 276 (signature; per the index it is
 * `static GPUTexture *image_gpu_texture_error_create(eGPUTextureTarget textarget)`)
 * and lines 280/282 (the `case` labels for the first two returns, presumably
 * TEXTARGET_2D_ARRAY and TEXTARGET_TILE_MAPPING -- TODO confirm) are missing. */
277{
 278 fprintf(stderr, "GPUTexture: Blender Texture Not Loaded!\n");
 279 switch (textarget) {
/* [original line 280 missing: case label] */
 281 return GPU_texture_create_error(2, true);
/* [original line 282 missing: case label] */
 283 return GPU_texture_create_error(1, true);
 284 case TEXTARGET_2D:
 285 default:
 286 return GPU_texture_create_error(2, false);
 287 }
 288}
289
/* Apply each collected partial-update region to the GPU textures, clipped against
 * the tile buffer bounds.
 * NOTE(extraction): original lines 290-291 (signature; per the index it takes an
 * Image* and a PartialUpdateChecker<ImageTileData>::CollectResult&), 293 (the loop
 * header, presumably driven by changes.get_next_change()), 297 (start of the
 * BLI_rcti_init call) and 306 (start of the image_update_gputexture_ex call)
 * are missing. */
292{
/* [original line 293 missing: loop header] */
 294 /* Calculate the clipping region with the tile buffer.
 295 * TODO(jbakker): should become part of ImageTileData to deduplicate with image engine. */
 296 rcti buffer_rect;
/* [original line 297 missing: start of BLI_rcti_init(] */
 298 &buffer_rect, 0, changes.tile_data.tile_buffer->x, 0, changes.tile_data.tile_buffer->y);
 299 rcti clipped_update_region;
 300 const bool has_overlap = BLI_rcti_isect(
 301 &buffer_rect, &changes.changed_region.region, &clipped_update_region);
 302 if (!has_overlap) {
 303 continue;
 304 }
 305
/* [original line 306 missing: start of image_update_gputexture_ex(ima,] */
 307 changes.tile_data.tile,
 308 changes.tile_data.tile_buffer,
 309 clipped_update_region.xmin,
 310 clipped_update_region.ymin,
 311 BLI_rcti_size_x(&clipped_update_region),
 312 BLI_rcti_size_y(&clipped_update_region));
 313 }
 314}
315
/* Dispatch on the partial-update checker result: full GPU free on FullUpdateNeeded,
 * region uploads on PartialChangesDetected, nothing on NoChangesDetected.
 * NOTE(extraction): original lines 316 (signature), 319 (presumably
 * `checker.collect_changes()` into `changes`), 321, 326-327 and 331 (the three
 * `case` labels and the partial-update call) are missing. */
317{
 318 PartialUpdateChecker<ImageTileData> checker(image, iuser, image->runtime->partial_update_user);
/* [original line 319 missing: collect_changes() call producing `changes`] */
 320 switch (changes.get_result_code()) {
/* [original line 321 missing: case label, presumably FullUpdateNeeded] */
 322 image_free_gpu(image, true);
 323 break;
 324 }
 325
/* [original lines 326-327 missing: case label + changes-available call] */
 328 break;
 329 }
 330
/* [original line 331 missing: case label, presumably NoChangesDetected] */
 332 /* GPUTextures are up to date. */
 333 break;
 334 }
 335 }
336}
337
/* Invalidate cached GPU textures when the user requests a different pass/layer/view.
 * NOTE(extraction): original line 338 (signature; per the index it is
 * `void BKE_image_ensure_gpu_texture(Image *image, ImageUser *iuser)`) and line 350
 * (the body of the `if`, presumably freeing the GPU textures -- TODO confirm)
 * are missing. */
339{
 340 if (!image) {
 341 return;
 342 }
 343
 344 /* Note that the image can cache both stereo views, so we only invalidate the cache if the view
 345 * index is more than 2. */
 346 if (!ELEM(image->gpu_pass, IMAGE_GPU_PASS_NONE, iuser->pass) ||
 347 !ELEM(image->gpu_layer, IMAGE_GPU_LAYER_NONE, iuser->layer) ||
 348 (!ELEM(image->gpu_view, IMAGE_GPU_VIEW_NONE, iuser->multi_index) && iuser->multi_index >= 2))
 349 {
/* [original line 350 missing: invalidation action] */
 351 }
 352}
353
/* Central entry: return (and lazily create) the GPU texture(s) for an image.
 * Handles pass/layer/view cache keys, partial updates, UDIM tile arrays, and
 * error placeholders.
 * NOTE(extraction): original lines 354 (signature start; per the index
 * `static ImageGPUTextures image_get_gpu_texture(Image *ima,`), 360 (declaration
 * of `result`), 368, 397, 400, 404-405, 429, 433, 445, 460, 464, 467-468 are
 * missing; markers below flag each gap. */
 355 ImageUser *iuser,
 356 const bool use_viewers,
 357 const bool use_tile_mapping,
 358 bool try_only)
359{
/* [original line 360 missing: declaration/initialization of `result`] */
 361
 362 if (ima == nullptr) {
 363 return result;
 364 }
 365
 366 /* Free any unused GPU textures, since we know we are in a thread with OpenGL
 367 * context and might as well ensure we have as much space free as possible. */
/* [original line 368 missing: presumably gpu_free_unused_buffers()] */
 369
 370 /* Free GPU textures when requesting a different render pass/layer.
 371 * When `iuser` isn't set (texture painting single image mode) we assume that
 372 * the current `pass` and `layer` should be 0. */
 373 short requested_pass = iuser ? iuser->pass : 0;
 374 short requested_layer = iuser ? iuser->layer : 0;
 375 short requested_view = iuser ? iuser->multi_index : 0;
 376 /* There is room for 2 multiview textures. When a higher number is requested we should always
 377 * target the first view slot. This is fine as multi view images aren't used together. */
 378 if (requested_view < 2) {
 379 requested_view = 0;
 380 }
 381 if (ima->gpu_pass != requested_pass || ima->gpu_layer != requested_layer ||
 382 ima->gpu_view != requested_view)
 383 {
 384 ima->gpu_pass = requested_pass;
 385 ima->gpu_layer = requested_layer;
 386 ima->gpu_view = requested_view;
 387 /* The cache should be invalidated here, but it is intentionally isn't due to possible
 388 * performance implications, see the BKE_image_ensure_gpu_texture function for more
 389 * information. */
 390 }
391#undef GPU_FLAGS_TO_CHECK
 392
 393 if (ima->runtime->partial_update_user == nullptr) {
 394 ima->runtime->partial_update_user = BKE_image_partial_update_create(ima);
 395 }
 396
/* [original line 397 missing: presumably image_gpu_texture_try_partial_update(ima, iuser)] */
 398
 399 /* Tag as in active use for garbage collector. */
/* [original line 400 missing: presumably BKE_image_tag_time(ima)] */
 401
 402 /* Test if we need to get a tiled array texture. */
 403 eGPUTextureTarget textarget = (use_tile_mapping && ima->source == IMA_SRC_TILED) ?
/* [original lines 404-405 missing: the two branches of the ternary] */
 406
 407 /* Test if we already have a texture. */
 408 int current_view = iuser ? iuser->multi_index : 0;
 409 if (current_view >= 2) {
 410 current_view = 0;
 411 }
 412
 413 result.texture = get_image_gpu_texture_ptr(ima, textarget, current_view);
 414 if (textarget == TEXTARGET_2D_ARRAY) {
 415 result.tile_mapping = get_image_gpu_texture_ptr(ima, TEXTARGET_TILE_MAPPING, current_view);
 416 }
 417
 418 if (*result.texture) {
 419 return result;
 420 }
 421
 422 if (try_only) {
 423 /* If we got this far, it means the texture is not loaded. */
 424 return result;
 425 }
 426
 427 /* Check if we have a valid image. If not, we return a dummy
 428 * texture with zero bind-code so we don't keep trying. */
/* [original line 429 missing: presumably fetches `tile` via BKE_image_get_tile] */
 430 if (tile == nullptr) {
 431 *result.texture = image_gpu_texture_error_create(textarget);
 432 if (textarget == TEXTARGET_2D_ARRAY) {
/* [original line 433 missing: error texture for the tile mapping] */
 434 }
 435 return result;
 436 }
 437
 438 /* check if we have a valid image buffer */
 439 void *lock;
 440 ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, (use_viewers) ? &lock : nullptr);
 441 if (ibuf == nullptr) {
 442 BKE_image_release_ibuf(ima, ibuf, (use_viewers) ? lock : nullptr);
 443 *result.texture = image_gpu_texture_error_create(textarget);
 444 if (textarget == TEXTARGET_2D_ARRAY) {
/* [original line 445 missing: error texture for the tile mapping] */
 446 }
 447 return result;
 448 }
 449
 450 if (textarget == TEXTARGET_2D_ARRAY) {
 451 /* For materials, array and tile mapping in case there are UDIM tiles. */
 452 *result.texture = gpu_texture_create_tile_array(ima, ibuf);
 453 *result.tile_mapping = gpu_texture_create_tile_mapping(ima, iuser ? iuser->multiview_eye : 0);
 454 }
 455 else {
 456 /* Single image texture. */
 457 const bool use_high_bitdepth = (ima->flag & IMA_HIGH_BITDEPTH);
 458 const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima, ibuf);
 459
/* [original line 460 missing: start of the IMB_create_gpu_texture( call] */
 461 ima->id.name + 2, ibuf, use_high_bitdepth, store_premultiplied);
 462
 463 if (*result.texture) {
/* [original line 464 missing: presumably GPU_texture_extend_mode(..., REPEAT) -- TODO confirm] */
 465
 466 if (GPU_mipmap_enabled()) {
/* [original lines 467-468 missing: presumably mipmap chain update + flag set] */
 469 GPU_texture_mipmap_mode(*result.texture, true, true);
 470 }
 471 else {
 472 GPU_texture_mipmap_mode(*result.texture, false, true);
 473 }
 474 }
 475 }
 476
 477 if (*result.texture) {
 478 GPU_texture_original_size_set(*result.texture, ibuf->x, ibuf->y);
 479 }
 480
 481 BKE_image_release_ibuf(ima, ibuf, (use_viewers) ? lock : nullptr);
 482
 483 return result;
 484}
485
487{
488 return *image_get_gpu_texture(image, iuser, false, false, false).texture;
489}
490
492{
493 return *image_get_gpu_texture(image, iuser, true, false, false).texture;
494}
495
497 ImageUser *iuser,
498 const bool use_tile_mapping)
499{
500 return image_get_gpu_texture(image, iuser, false, use_tile_mapping, false);
501}
502
504 ImageUser *iuser,
505 const bool use_tile_mapping)
506{
507 return image_get_gpu_texture(image, iuser, false, use_tile_mapping, true);
508}
509
511
512/* -------------------------------------------------------------------- */
518
521
523{
524 if (gpu_texture_free_queue == nullptr) {
525 return;
526 }
527
528 std::scoped_lock lock(gpu_texture_queue_mutex);
529
530 while (gpu_texture_free_queue != nullptr) {
531 GPUTexture *tex = static_cast<GPUTexture *>(BLI_linklist_pop(&gpu_texture_free_queue));
532 GPU_texture_free(tex);
533 }
534}
535
542
544
545/* -------------------------------------------------------------------- */
548
/* Release all cached GPU textures of an image: immediately when a GPU context is
 * current, otherwise deferred via the free queue.
 * NOTE(extraction): original lines 559 (the deferred-queue push, presumably
 * BLI_linklist_prepend onto gpu_texture_free_queue -- TODO confirm) and 567
 * (presumably clearing IMA_GPU_MIPMAP_COMPLETE) are missing. */
549static void image_free_gpu(Image *ima, const bool immediate)
550{
 551 for (int eye = 0; eye < 2; eye++) {
 552 for (int i = 0; i < TEXTARGET_COUNT; i++) {
 553 if (ima->gputexture[i][eye] != nullptr) {
 554 if (immediate) {
 555 GPU_texture_free(ima->gputexture[i][eye]);
 556 }
 557 else {
 558 std::scoped_lock lock(gpu_texture_queue_mutex);
/* [original line 559 missing: push texture onto the deferred free queue] */
 560 }
 561
 562 ima->gputexture[i][eye] = nullptr;
 563 }
 564 }
 565 }
 566
/* [original line 567 missing: flag update] */
568}
569
574
/* Free the GPU textures of every image in the given Main database.
 * NOTE(extraction): original line 575 (signature; per the index it is
 * `void BKE_image_free_all_gputextures(Main *bmain)`) and line 579 (the per-image
 * free call inside the loop -- TODO confirm which) are missing. */
576{
 577 if (bmain) {
 578 LISTBASE_FOREACH (Image *, ima, &bmain->images) {
/* [original line 579 missing: per-image GPU free call] */
 580 }
 581 }
 582}
583
/* Free GPU textures of animated images only (e.g. on frame change).
 * NOTE(extraction): original line 584 (signature; per the index it is
 * `void BKE_image_free_anim_gputextures(Main *bmain)`) and line 589 (the
 * per-image free call -- TODO confirm which) are missing. */
585{
 586 if (bmain) {
 587 LISTBASE_FOREACH (Image *, ima, &bmain->images) {
 588 if (BKE_image_is_animated(ima)) {
/* [original line 589 missing: per-image GPU free call] */
 590 }
 591 }
 592 }
 593}
594
/* Periodic texture garbage collector: frees textures of images unused for longer
 * than U.textimeout, throttled by U.texcollectrate and disabled while rendering.
 * NOTE(extraction): original line 595 (signature; per the index it is
 * `void BKE_image_free_old_gputextures(Main *bmain)`), lines 619-620 (the
 * has-GPU-texture branch, presumably BKE_image_has_opengl_texture + free) and
 * line 625 (the buffer free call -- TODO confirm) are missing. */
596{
 597 static int lasttime = 0;
 598 int ctime = int(BLI_time_now_seconds());
 599
 600 /*
 601 * Run garbage collector once for every collecting period of time
 602 * if textimeout is 0, that's the option to NOT run the collector
 603 */
 604 if (U.textimeout == 0 || ctime % U.texcollectrate || ctime == lasttime) {
 605 return;
 606 }
 607
 608 /* of course not! */
 609 if (G.is_rendering) {
 610 return;
 611 }
 612
 613 lasttime = ctime;
 614
 615 LISTBASE_FOREACH (Image *, ima, &bmain->images) {
 616 if ((ima->flag & IMA_NOCOLLECT) == 0 && ctime - ima->lastused > U.textimeout) {
 617 /* If it's in GL memory, deallocate and set time tag to current time
 618 * This gives textures a "second chance" to be used before dying. */
/* [original lines 619-620 missing: has-GPU-texture check + free] */
 621 ima->lastused = ctime;
 622 }
 623 /* Otherwise, just kill the buffers */
 624 else {
/* [original line 625 missing: buffer free call] */
 626 }
 627 }
 628 }
629}
630
632
633/* -------------------------------------------------------------------- */
636
637static ImBuf *update_do_scale(const uchar *rect,
638 const float *rect_float,
639 int *x,
640 int *y,
641 int *w,
642 int *h,
643 int limit_w,
644 int limit_h,
645 int full_w,
646 int full_h)
647{
648 /* Partial update with scaling. */
649 float xratio = limit_w / float(full_w);
650 float yratio = limit_h / float(full_h);
651
652 int part_w = *w, part_h = *h;
653
654 /* Find sub coordinates in scaled image. Take ceiling because we will be
655 * losing 1 pixel due to rounding errors in x,y. */
656 *x *= xratio;
657 *y *= yratio;
658 *w = int(ceil(xratio * (*w)));
659 *h = int(ceil(yratio * (*h)));
660
661 /* ...but take back if we are over the limit! */
662 if (*x + *w > limit_w) {
663 (*w)--;
664 }
665 if (*y + *h > limit_h) {
666 (*h)--;
667 }
668
669 /* Scale pixels. */
670 ImBuf *ibuf = IMB_allocFromBuffer(rect, rect_float, part_w, part_h, 4);
671 IMB_scale(ibuf, *w, *h, IMBScaleFilter::Box, false);
672
673 return ibuf;
674}
675
/* Upload a dirty region into a scaled-down texture (or array layer when
 * layer > -1), downscaling the pixels first via update_do_scale().
 * NOTE(extraction): original line 708 (the `eGPUDataFormat data_format = ...`
 * declaration used below, presumably selecting GPU_DATA_FLOAT vs GPU_DATA_UBYTE
 * -- TODO confirm) is missing. */
676static void gpu_texture_update_scaled(GPUTexture *tex,
 677 const uchar *rect,
 678 const float *rect_float,
 679 int full_w,
 680 int full_h,
 681 int x,
 682 int y,
 683 int layer,
 684 const int *tile_offset,
 685 const int *tile_size,
 686 int w,
 687 int h)
688{
 689 ImBuf *ibuf;
 690 if (layer > -1) {
 691 ibuf = update_do_scale(
 692 rect, rect_float, &x, &y, &w, &h, tile_size[0], tile_size[1], full_w, full_h);
 693
 694 /* Shift to account for tile packing. */
 695 x += tile_offset[0];
 696 y += tile_offset[1];
 697 }
 698 else {
 699 /* Partial update with scaling. */
 700 int limit_w = GPU_texture_width(tex);
 701 int limit_h = GPU_texture_height(tex);
 702
 703 ibuf = update_do_scale(rect, rect_float, &x, &y, &w, &h, limit_w, limit_h, full_w, full_h);
 704 }
 705
 706 void *data = (ibuf->float_buffer.data) ? (void *)(ibuf->float_buffer.data) :
 707 (void *)(ibuf->byte_buffer.data);
/* [original line 708 missing: data_format declaration] */
 710 GPU_texture_update_sub(tex, data_format, data, x, y, blender::math::max(layer, 0), w, h, 1);
 712 IMB_freeImBuf(ibuf);
713}
714
/* Upload a dirty region at identical resolution. `tex_stride`/`tex_offset` let
 * us copy a sub-rectangle out of a larger source buffer via the GPU unpack row
 * length.
 * NOTE(extraction): original line 742 is missing; given the "Restore default."
 * comment it presumably resets the unpack row length (GPU_unpack_row_length_set(0))
 * -- TODO confirm. */
715static void gpu_texture_update_unscaled(GPUTexture *tex,
 716 uchar *rect,
 717 float *rect_float,
 718 int x,
 719 int y,
 720 int layer,
 721 const int tile_offset[2],
 722 int w,
 723 int h,
 724 int tex_stride,
 725 int tex_offset)
726{
 727 if (layer > -1) {
 728 /* Shift to account for tile packing. */
 729 x += tile_offset[0];
 730 y += tile_offset[1];
 731 }
 732
 733 void *data = (rect_float) ? (void *)(rect_float + tex_offset) : (void *)(rect + tex_offset);
 734 eGPUDataFormat data_format = (rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
 735
 736 /* Partial update without scaling. Stride and offset are used to copy only a
 737 * subset of a possible larger buffer than what we are updating. */
 738 GPU_unpack_row_length_set(tex_stride);
 739
 740 GPU_texture_update_sub(tex, data_format, data, x, y, blender::math::max(layer, 0), w, h, 1);
 741 /* Restore default. */
/* [original line 742 missing: restore call] */
743}
744
/* Core partial upload: convert the dirty ibuf region to the texture's pixel
 * format (handling colorspace, alpha premultiplication and downscaling) and
 * push it into `tex` (plain 2D when tile == nullptr, array layer otherwise).
 * NOTE(extraction): original lines 745 (signature start), 789, 795, 798-800,
 * 826, 838, 842, 852, 856 (call starts / colorspace conditions), 870, 873 and
 * 876 (mipmap + unbind tail) are missing; markers below flag each gap. */
 746 GPUTexture *tex, Image *ima, ImBuf *ibuf, ImageTile *tile, int x, int y, int w, int h)
747{
 748 bool scaled;
 749 if (tile != nullptr) {
 750 ImageTile_Runtime *tile_runtime = &tile->runtime;
 751 int *tilesize = tile_runtime->tilearray_size;
 752 scaled = (ibuf->x != tilesize[0]) || (ibuf->y != tilesize[1]);
 753 }
 754 else {
 755 scaled = (GPU_texture_width(tex) != ibuf->x) || (GPU_texture_height(tex) != ibuf->y);
 756 }
 757
 758 if (scaled) {
 759 /* Extra padding to account for bleed from neighboring pixels. */
 760 const int padding = 4;
 761 const int xmax = min_ii(x + w + padding, ibuf->x);
 762 const int ymax = min_ii(y + h + padding, ibuf->y);
 763 x = max_ii(x - padding, 0);
 764 y = max_ii(y - padding, 0);
 765 w = xmax - x;
 766 h = ymax - y;
 767 }
 768
 769 /* Get texture data pointers. */
 770 float *rect_float = ibuf->float_buffer.data;
 771 uchar *rect = ibuf->byte_buffer.data;
 772 int tex_stride = ibuf->x;
 773 int tex_offset = ibuf->channels * (y * ibuf->x + x);
 774
 775 const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima, ibuf);
 776 if (rect_float) {
 777 /* Float image is already in scene linear colorspace or non-color data by
 778 * convention, no colorspace conversion needed. But we do require 4 channels
 779 * currently. */
 780 if (ibuf->channels != 4 || scaled || !store_premultiplied) {
 781 rect_float = MEM_malloc_arrayN<float>(4 * size_t(w) * size_t(h), __func__);
 782 if (rect_float == nullptr) {
 783 return;
 784 }
 785
 786 tex_stride = w;
 787 tex_offset = 0;
 788
/* [original line 789 missing: start of IMB_colormanagement_imbuf_to_float_texture(] */
 790 rect_float, x, y, w, h, ibuf, store_premultiplied);
 791 }
 792 }
 793 else {
 794 /* Byte image is in original colorspace from the file, and may need conversion. */
/* [original line 795 missing: colorspace condition, presumably is-data check] */
 796 /* Not scaled Non-color data, just store buffer as is. */
 797 }
/* [original lines 798-800 missing: else-if condition lines, presumably sRGB /
 * scene-linear / scaled-data checks -- TODO confirm] */
 801 {
 802 /* sRGB or scene linear or scaled down non-color data, store as byte texture that the GPU
 803 * can decode directly. */
 804 rect = MEM_malloc_arrayN<uchar>(4 * size_t(w) * size_t(h), __func__);
 805 if (rect == nullptr) {
 806 return;
 807 }
 808
 809 tex_stride = w;
 810 tex_offset = 0;
 811
 812 /* Convert to scene linear with sRGB compression, and premultiplied for
 813 * correct texture interpolation. */
 814 IMB_colormanagement_imbuf_to_byte_texture(rect, x, y, w, h, ibuf, store_premultiplied);
 815 }
 816 else {
 817 /* Other colorspace, store as float texture to avoid precision loss. */
 818 rect_float = MEM_malloc_arrayN<float>(4 * size_t(w) * size_t(h), __func__);
 819 if (rect_float == nullptr) {
 820 return;
 821 }
 822
 823 tex_stride = w;
 824 tex_offset = 0;
 825
/* [original line 826 missing: start of IMB_colormanagement_imbuf_to_float_texture(] */
 827 rect_float, x, y, w, h, ibuf, store_premultiplied);
 828 }
 829 }
 830
 831 if (scaled) {
 832 /* Slower update where we first have to scale the input pixels. */
 833 if (tile != nullptr) {
 834 ImageTile_Runtime *tile_runtime = &tile->runtime;
 835 int *tileoffset = tile_runtime->tilearray_offset;
 836 int *tilesize = tile_runtime->tilearray_size;
 837 int tilelayer = tile_runtime->tilearray_layer;
/* [original line 838 missing: start of gpu_texture_update_scaled(] */
 839 tex, rect, rect_float, ibuf->x, ibuf->y, x, y, tilelayer, tileoffset, tilesize, w, h);
 840 }
 841 else {
/* [original line 842 missing: start of gpu_texture_update_scaled(] */
 843 tex, rect, rect_float, ibuf->x, ibuf->y, x, y, -1, nullptr, nullptr, w, h);
 844 }
 845 }
 846 else {
 847 /* Fast update at same resolution. */
 848 if (tile != nullptr) {
 849 ImageTile_Runtime *tile_runtime = &tile->runtime;
 850 int *tileoffset = tile_runtime->tilearray_offset;
 851 int tilelayer = tile_runtime->tilearray_layer;
/* [original line 852 missing: start of gpu_texture_update_unscaled(] */
 853 tex, rect, rect_float, x, y, tilelayer, tileoffset, w, h, tex_stride, tex_offset);
 854 }
 855 else {
/* [original line 856 missing: start of gpu_texture_update_unscaled(] */
 857 tex, rect, rect_float, x, y, -1, nullptr, w, h, tex_stride, tex_offset);
 858 }
 859 }
 860
 861 /* Free buffers if needed. */
 862 if (rect && rect != ibuf->byte_buffer.data) {
 863 MEM_freeN(rect);
 864 }
 865 if (rect_float && rect_float != ibuf->float_buffer.data) {
 866 MEM_freeN(rect_float);
 867 }
 868
 869 if (GPU_mipmap_enabled()) {
/* [original line 870 missing: presumably GPU_texture_update_mipmap_chain(tex)] */
 871 }
 872 else {
/* [original line 873 missing: presumably clears the mipmap-complete flag -- TODO confirm] */
 874 }
 875
/* [original line 876 missing: presumably GPU_texture_unbind(tex) -- TODO confirm] */
877}
878
880 Image *ima, ImageTile *tile, ImBuf *ibuf, int x, int y, int w, int h)
881{
882 const int eye = 0;
883 GPUTexture *tex = ima->gputexture[TEXTARGET_2D][eye];
884 /* Check if we need to update the main gputexture. */
885 if (tex != nullptr && tile == ima->tiles.first) {
886 gpu_texture_update_from_ibuf(tex, ima, ibuf, nullptr, x, y, w, h);
887 }
888
889 /* Check if we need to update the array gputexture. */
890 tex = ima->gputexture[TEXTARGET_2D_ARRAY][eye];
891 if (tex != nullptr) {
892 gpu_texture_update_from_ibuf(tex, ima, ibuf, tile, x, y, w, h);
893 }
894}
895
896void BKE_image_update_gputexture(Image *ima, ImageUser *iuser, int x, int y, int w, int h)
897{
898 ImageTile *image_tile = BKE_image_get_tile_from_iuser(ima, iuser);
899 ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, nullptr);
900 BKE_image_update_gputexture_delayed(ima, image_tile, ibuf, x, y, w, h);
901 BKE_image_release_ibuf(ima, ibuf, nullptr);
902}
903
/* Mark a region dirty for later GPU upload; a full-buffer update on a non-tiled
 * image takes the full-update path instead of a region mark.
 * NOTE(extraction): original line 904 (signature start,
 * `void BKE_image_update_gputexture_delayed(`) and line 911 (the full-update
 * branch body, presumably BKE_image_partial_update_mark_full_update(ima) per the
 * cross-reference index -- TODO confirm) are missing. */
 905 Image *ima, ImageTile *image_tile, ImBuf *ibuf, int x, int y, int w, int h)
906{
 907 /* Check for full refresh. */
 908 if (ibuf != nullptr && ima->source != IMA_SRC_TILED && x == 0 && y == 0 && w == ibuf->x &&
 909 h == ibuf->y)
 910 {
/* [original line 911 missing: full-update mark] */
 912 }
 913 else {
 914 rcti dirty_region;
 915 BLI_rcti_init(&dirty_region, x, x + w, y, y + h);
 916 BKE_image_partial_update_mark_region(ima, image_tile, ibuf, &dirty_region);
 917 }
918}
919
/* Toggle mipmap filtering on all image textures for texture painting; clears the
 * mipmap-complete flag when disabling.
 * NOTE(extraction): original lines 923 and 926 (two `if` conditions guarding the
 * loops -- TODO confirm) and line 937 (the else-branch free call) are missing. */
920void BKE_image_paint_set_mipmap(Main *bmain, bool mipmap)
921{
 922 LISTBASE_FOREACH (Image *, ima, &bmain->images) {
/* [original line 923 missing: `if` condition] */
 924 if (ima->gpuflag & IMA_GPU_MIPMAP_COMPLETE) {
 925 for (int a = 0; a < TEXTARGET_COUNT; a++) {
/* [original line 926 missing: `if` condition filtering texture targets] */
 927 for (int eye = 0; eye < 2; eye++) {
 928 GPUTexture *tex = ima->gputexture[a][eye];
 929 if (tex != nullptr) {
 930 GPU_texture_mipmap_mode(tex, mipmap, true);
 931 }
 932 }
 933 }
 934 }
 935 }
 936 else {
/* [original line 937 missing: presumably frees the GPU textures] */
 938 }
 939 }
 940 else {
 941 ima->gpuflag &= ~IMA_GPU_MIPMAP_COMPLETE;
 942 }
 943 }
944}
945
ImBuf * BKE_image_acquire_ibuf(Image *ima, ImageUser *iuser, void **r_lock)
void BKE_image_partial_update_mark_region(Image *image, const ImageTile *image_tile, const ImBuf *image_buffer, const rcti *updated_region)
Mark a region of the image to update.
void BKE_image_release_ibuf(Image *ima, ImBuf *ibuf, void *lock)
void BKE_image_tag_time(Image *ima)
void BKE_image_sort_tiles(Image *ima)
bool BKE_image_has_opengl_texture(Image *ima)
void BKE_imageuser_default(ImageUser *iuser)
PartialUpdateUser * BKE_image_partial_update_create(const Image *image)
Create a new PartialUpdateUser. An Object that contains data to use partial updates.
bool BKE_image_is_animated(Image *image)
void BKE_image_partial_update_mark_full_update(Image *image)
Mark the whole image to be updated.
#define BLI_assert(a)
Definition BLI_assert.h:46
void BLI_box_pack_2d_fixedarea(struct ListBase *boxes, int width, int height, struct ListBase *packed)
#define LISTBASE_FOREACH(type, var, list)
void void BLI_freelistN(ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:497
void BLI_addtail(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:111
void void BLI_listbase_sort(ListBase *listbase, int(*cmp)(const void *, const void *)) ATTR_NONNULL(1
MINLINE int power_of_2_min_i(int n)
MINLINE float max_ff(float a, float b)
MINLINE int min_ii(int a, int b)
MINLINE float min_ff(float a, float b)
MINLINE int max_ii(int a, int b)
ATTR_WARN_UNUSED_RESULT const size_t num
BLI_INLINE int BLI_rcti_size_y(const struct rcti *rct)
Definition BLI_rect.h:198
void BLI_rcti_init(struct rcti *rect, int xmin, int xmax, int ymin, int ymax)
Definition rct.cc:414
bool BLI_rcti_isect(const struct rcti *src1, const struct rcti *src2, struct rcti *dest)
BLI_INLINE int BLI_rcti_size_x(const struct rcti *rct)
Definition BLI_rect.h:194
unsigned char uchar
int BLI_thread_is_main(void)
Definition threads.cc:179
Platform independent time functions.
double BLI_time_now_seconds(void)
Definition time.cc:65
#define UNPACK2(a)
#define ELEM(...)
@ IMA_ALPHA_STRAIGHT
@ IMA_ALPHA_PREMUL
@ IMA_SRC_TILED
@ IMA_GPU_MIPMAP_COMPLETE
@ IMA_TYPE_UV_TEST
@ IMA_TYPE_R_RESULT
@ IMA_TYPE_COMPOSITE
@ IMA_NOCOLLECT
@ IMA_HIGH_BITDEPTH
eGPUTextureTarget
@ TEXTARGET_2D
@ TEXTARGET_2D_ARRAY
@ TEXTARGET_COUNT
@ TEXTARGET_TILE_MAPPING
int GPU_texture_size_with_limit(int res)
bool GPU_mipmap_enabled()
Definition gpu_state.cc:293
int GPU_texture_height(const GPUTexture *texture)
void GPU_texture_free(GPUTexture *texture)
int GPU_texture_width(const GPUTexture *texture)
void GPU_texture_unbind(GPUTexture *texture)
GPUTexture * GPU_texture_create_error(int dimension, bool array)
eGPUDataFormat
@ GPU_DATA_UBYTE
@ GPU_DATA_FLOAT
void GPU_texture_extend_mode(GPUTexture *texture, GPUSamplerExtendMode extend_mode)
@ GPU_TEXTURE_USAGE_SHADER_READ
@ GPU_SAMPLER_EXTEND_MODE_REPEAT
void GPU_texture_update_sub(GPUTexture *texture, eGPUDataFormat data_format, const void *pixels, int offset_x, int offset_y, int offset_z, int width, int height, int depth)
void GPU_texture_mipmap_mode(GPUTexture *texture, bool use_mipmap, bool use_filter)
@ GPU_RGBA32F
void GPU_texture_update_mipmap_chain(GPUTexture *texture)
GPUTexture * GPU_texture_create_1d_array(const char *name, int width, int layer_len, int mip_len, eGPUTextureFormat format, eGPUTextureUsage usage, const float *data)
void GPU_unpack_row_length_set(uint len)
void GPU_texture_original_size_set(GPUTexture *texture, int width, int height)
void IMB_colormanagement_imbuf_to_byte_texture(unsigned char *out_buffer, int offset_x, int offset_y, int width, int height, const ImBuf *ibuf, bool store_premultiplied)
bool IMB_colormanagement_space_is_scene_linear(const ColorSpace *colorspace)
bool IMB_colormanagement_space_is_data(const ColorSpace *colorspace)
void IMB_colormanagement_imbuf_to_float_texture(float *out_buffer, int offset_x, int offset_y, int width, int height, const ImBuf *ibuf, bool store_premultiplied)
bool IMB_colormanagement_space_is_srgb(const ColorSpace *colorspace)
ImBuf * IMB_allocFromBuffer(const uint8_t *byte_buffer, const float *float_buffer, unsigned int w, unsigned int h, unsigned int channels)
GPUTexture * IMB_touch_gpu_texture(const char *name, ImBuf *ibuf, int w, int h, int layers, bool use_high_bitdepth, bool use_grayscale)
Definition util_gpu.cc:255
void IMB_update_gpu_texture_sub(GPUTexture *tex, ImBuf *ibuf, int x, int y, int z, int w, int h, bool use_high_bitdepth, bool use_grayscale, bool use_premult)
Definition util_gpu.cc:281
void IMB_freeImBuf(ImBuf *ibuf)
GPUTexture * IMB_create_gpu_texture(const char *name, ImBuf *ibuf, bool use_high_bitdepth, bool use_premult)
Definition util_gpu.cc:312
bool IMB_scale(ImBuf *ibuf, unsigned int newx, unsigned int newy, IMBScaleFilter filter, bool threaded=true)
Definition scaling.cc:777
Read Guarded memory(de)allocation.
volatile int lock
#define U
BMesh const char void * data
btMatrix3x3 scaled(const btVector3 &s) const
Create a scaled copy of the matrix.
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
#define packed
#define ceil
#define IMAGE_GPU_PASS_NONE
#define IMAGE_GPU_LAYER_NONE
#define IMAGE_GPU_VIEW_NONE
uint padding(uint offset, uint alignment)
static ImageGPUTextures image_get_gpu_texture(Image *ima, ImageUser *iuser, const bool use_viewers, const bool use_tile_mapping, bool try_only)
Definition image_gpu.cc:354
static int compare_packtile(const void *a, const void *b)
Definition image_gpu.cc:128
static void gpu_free_unused_buffers()
Definition image_gpu.cc:522
static void image_gpu_texture_partial_update_changes_available(Image *image, PartialUpdateChecker< ImageTileData >::CollectResult &changes)
Definition image_gpu.cc:290
void BKE_image_update_gputexture(Image *ima, ImageUser *iuser, int x, int y, int w, int h)
Definition image_gpu.cc:896
static GPUTexture * gpu_texture_create_tile_array(Image *ima, ImBuf *main_ibuf)
Definition image_gpu.cc:136
ImageGPUTextures BKE_image_get_gpu_material_texture_try(Image *image, ImageUser *iuser, const bool use_tile_mapping)
Definition image_gpu.cc:503
void BKE_image_free_anim_gputextures(Main *bmain)
Definition image_gpu.cc:584
static void gpu_texture_update_from_ibuf(GPUTexture *tex, Image *ima, ImBuf *ibuf, ImageTile *tile, int x, int y, int w, int h)
Definition image_gpu.cc:745
GPUTexture * BKE_image_get_gpu_viewer_texture(Image *image, ImageUser *iuser)
Definition image_gpu.cc:491
bool BKE_image_has_gpu_texture_premultiplied_alpha(Image *image, ImBuf *ibuf)
Definition image_gpu.cc:43
ImageGPUTextures BKE_image_get_gpu_material_texture(Image *image, ImageUser *iuser, const bool use_tile_mapping)
Definition image_gpu.cc:496
static void image_gpu_texture_try_partial_update(Image *image, ImageUser *iuser)
Definition image_gpu.cc:316
void BKE_image_update_gputexture_delayed(Image *ima, ImageTile *image_tile, ImBuf *ibuf, int x, int y, int w, int h)
Definition image_gpu.cc:904
static GPUTexture * image_gpu_texture_error_create(eGPUTextureTarget textarget)
Definition image_gpu.cc:276
void BKE_image_ensure_gpu_texture(Image *image, ImageUser *iuser)
Definition image_gpu.cc:338
static void image_free_gpu(Image *ima, const bool immediate)
Definition image_gpu.cc:549
void BKE_image_free_gputextures(Image *ima)
Definition image_gpu.cc:570
static bool is_over_resolution_limit(int w, int h)
Definition image_gpu.cc:69
GPUTexture * BKE_image_get_gpu_texture(Image *image, ImageUser *iuser)
Definition image_gpu.cc:486
void BKE_image_paint_set_mipmap(Main *bmain, bool mipmap)
Definition image_gpu.cc:920
static GPUTexture ** get_image_gpu_texture_ptr(Image *ima, eGPUTextureTarget textarget, const int multiview_eye)
Definition image_gpu.cc:262
static void gpu_texture_update_unscaled(GPUTexture *tex, uchar *rect, float *rect_float, int x, int y, int layer, const int tile_offset[2], int w, int h, int tex_stride, int tex_offset)
Definition image_gpu.cc:715
static int smaller_power_of_2_limit(int num)
Definition image_gpu.cc:74
static GPUTexture * gpu_texture_create_tile_mapping(Image *ima, const int multiview_eye)
Definition image_gpu.cc:79
static void image_update_gputexture_ex(Image *ima, ImageTile *tile, ImBuf *ibuf, int x, int y, int w, int h)
Definition image_gpu.cc:879
static ImBuf * update_do_scale(const uchar *rect, const float *rect_float, int *x, int *y, int *w, int *h, int limit_w, int limit_h, int full_w, int full_h)
Definition image_gpu.cc:637
static LinkNode * gpu_texture_free_queue
Definition image_gpu.cc:519
static void gpu_texture_update_scaled(GPUTexture *tex, const uchar *rect, const float *rect_float, int full_w, int full_h, int x, int y, int layer, const int *tile_offset, const int *tile_size, int w, int h)
Definition image_gpu.cc:676
void BKE_image_free_all_gputextures(Main *bmain)
Definition image_gpu.cc:575
void BKE_image_free_unused_gpu_textures()
Definition image_gpu.cc:536
static blender::Mutex gpu_texture_queue_mutex
Definition image_gpu.cc:520
void BKE_image_free_old_gputextures(Main *bmain)
Definition image_gpu.cc:595
const ccl_global KernelWorkTile * tile
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
#define G(x, y, z)
@ PartialChangesDetected
Changes detected since the last time requested.
@ FullUpdateNeeded
Unable to construct partial updates. Caller should perform a full update.
@ NoChangesDetected
No changes detected since the last time requested.
T max(const T &a, const T &b)
std::mutex Mutex
Definition BLI_mutex.hh:47
char name[66]
Definition DNA_ID.h:415
const ColorSpace * colorspace
ImBufFloatBuffer float_buffer
ImBufByteBuffer byte_buffer
unsigned char planes
GPUTexture ** texture
Definition BKE_image.hh:607
short multi_index
short gpuflag
short gpu_pass
short gpu_layer
short gpu_view
ListBase tiles
struct GPUTexture * gputexture[3][2]
short source
ImageRuntimeHandle * runtime
void * last
void * first
ListBase images
Definition BKE_main.hh:253
ImageTile * tile
Definition image_gpu.cc:124
float pack_score
Definition image_gpu.cc:125
FixedSizeBoxPack boxpack
Definition image_gpu.cc:123
ePartialUpdateIterResult get_next_change()
Load the next changed region.
CollectResult collect_changes()
Check for new changes since the last time this method was invoked for this user.
rcti region
region of the image that has been updated. Region can be bigger than actual changes.
int ymin
int xmin
void * BKE_image_free_buffers
Definition stubs.c:35
void * BKE_image_get_tile_from_iuser
Definition stubs.c:37
void * BKE_image_get_tile
Definition stubs.c:36
i
Definition text_draw.cc:230