Blender  V2.93
image_gpu.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
17  * All rights reserved.
18  */
19 
24 #include "MEM_guardedalloc.h"
25 
26 #include "BLI_bitmap.h"
27 #include "BLI_boxpack_2d.h"
28 #include "BLI_linklist.h"
29 #include "BLI_listbase.h"
30 #include "BLI_threads.h"
31 
32 #include "DNA_image_types.h"
33 #include "DNA_userdef_types.h"
34 
35 #include "IMB_colormanagement.h"
36 #include "IMB_imbuf.h"
37 #include "IMB_imbuf_types.h"
38 
39 #include "BKE_global.h"
40 #include "BKE_image.h"
41 #include "BKE_main.h"
42 
43 #include "GPU_capabilities.h"
44 #include "GPU_state.h"
45 #include "GPU_texture.h"
46 
47 #include "PIL_time.h"
48 
49 /* Prototypes. */
50 static void gpu_free_unused_buffers(void);
51 static void image_free_gpu(Image *ima, const bool immediate);
52 static void image_update_gputexture_ex(
53  Image *ima, ImageTile *tile, ImBuf *ibuf, int x, int y, int w, int h);
54 
55 /* Internal structs. */
56 #define IMA_PARTIAL_REFRESH_TILE_SIZE 256
typedef struct ImagePartialRefresh {
  /* Coordinates of a dirty area, in units of IMA_PARTIAL_REFRESH_TILE_SIZE
   * pixel chunks (see BKE_image_update_gputexture_delayed below).
   * NOTE(review): the list-link field(s) and the closing
   * `} ImagePartialRefresh;` were lost in extraction — confirm upstream. */
  int tile_x;
  int tile_y;
62 
/* Is the alpha of the `GPUTexture` for a given image/ibuf premultiplied. */
/* NOTE(review): the signature line was lost in extraction — callers below use
 * `BKE_image_has_gpu_texture_premultiplied_alpha(Image *image, ImBuf *ibuf)`;
 * confirm against upstream. */
{
  if (image) {
    /* Render result and compositor output are always premultiplied */
    /* NOTE(review): the condition line was lost in extraction — presumably an
     * ELEM() test on image->type (IMA_TYPE_R_RESULT / IMA_TYPE_COMPOSITE). */
      return true;
    }
    /* Generated images use pre multiplied float buffer, but straight alpha for byte buffers. */
    if (image->type == IMA_TYPE_UV_TEST && ibuf) {
      return ibuf->rect_float != NULL;
    }
  }
  if (ibuf) {
    if (ibuf->rect_float) {
      /* Float buffers: premultiplied unless the image explicitly uses straight alpha. */
      return image ? (image->alpha_mode != IMA_ALPHA_STRAIGHT) : false;
    }

    /* Byte buffers: only premultiplied when the image says so. */
    return image ? (image->alpha_mode == IMA_ALPHA_PREMUL) : true;
  }
  /* No image and no buffer: nothing to premultiply. */
  return false;
}
85 
86 /* -------------------------------------------------------------------- */
89 static bool is_over_resolution_limit(int w, int h, bool limit_gl_texture_size)
90 {
91  return (w > GPU_texture_size_with_limit(w, limit_gl_texture_size) ||
92  h > GPU_texture_size_with_limit(h, limit_gl_texture_size));
93 }
94 
95 static int smaller_power_of_2_limit(int num, bool limit_gl_texture_size)
96 {
97  return power_of_2_min_i(GPU_texture_size_with_limit(num, limit_gl_texture_size));
98 }
99 
/* Build the per-tile mapping texture for a UDIM image: for each tile number,
 * store the layer it lives in inside the 2D array texture, plus its
 * normalized offset and size within that layer. */
static GPUTexture *gpu_texture_create_tile_mapping(Image *ima, const int multiview_eye)
{
  GPUTexture *tilearray = ima->gputexture[TEXTARGET_2D_ARRAY][multiview_eye];

  if (tilearray == NULL) {
    return 0;
  }

  float array_w = GPU_texture_width(tilearray);
  float array_h = GPU_texture_height(tilearray);

  ImageTile *last_tile = (ImageTile *)ima->tiles.last;
  /* Tiles are sorted by number. */
  int max_tile = last_tile->tile_number - 1001;

  /* create image */
  int width = max_tile + 1;
  /* Two rows of `width` RGBA texels: row 0 holds the array layer per tile
   * (-1 marks a missing tile), row 1 holds normalized offset x/y and size w/h. */
  float *data = (float *)MEM_callocN(width * 8 * sizeof(float), __func__);
  for (int i = 0; i < width; i++) {
    data[4 * i] = -1.0f;
  }
  LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
    int i = tile->tile_number - 1001;
    data[4 * i] = tile->runtime.tilearray_layer;

    float *tile_info = &data[4 * width + 4 * i];
    tile_info[0] = tile->runtime.tilearray_offset[0] / array_w;
    tile_info[1] = tile->runtime.tilearray_offset[1] / array_h;
    tile_info[2] = tile->runtime.tilearray_size[0] / array_w;
    tile_info[3] = tile->runtime.tilearray_size[1] / array_h;
  }

  /* NOTE(review): the line declaring `tex` was lost in extraction —
   * presumably `GPUTexture *tex = GPU_texture_create_1d_array(...)` uploading
   * `data` (GPU_RGBA32F per the reference index); confirm upstream. */
  GPU_texture_mipmap_mode(tex, false, false);

  MEM_freeN(data);

  return tex;
}
139 
typedef struct PackTile {
  /* NOTE(review): fields lost in extraction — from usage below this struct
   * also carries list links, a `BoxPack boxpack` and an `ImageTile *tile`,
   * plus the closing `} PackTile;`. Confirm against upstream. */
  /* Sort key: area with an aspect-ratio penalty (see gpu_texture_create_tile_array). */
  float pack_score;
146 static int compare_packtile(const void *a, const void *b)
147 {
148  const PackTile *tile_a = (const PackTile *)a;
149  const PackTile *tile_b = (const PackTile *)b;
150 
151  return tile_a->pack_score < tile_b->pack_score;
152 }
153 
/* Pack all tiles of a UDIM image into a 2D array texture and upload their
 * pixels. NOTE(review): the signature line was lost in extraction — callers
 * use `gpu_texture_create_tile_array(Image *ima, ImBuf *main_ibuf)`; confirm
 * against upstream. */
{
  const bool limit_gl_texture_size = (ima->gpuflag & IMA_GPU_MAX_RESOLUTION) == 0;
  int arraywidth = 0, arrayheight = 0;
  ListBase boxes = {NULL};

  /* Gather one PackTile box per tile that has pixels, clamped to a
   * power-of-two size when over the GPU resolution limit. */
  LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
    ImageUser iuser;
    BKE_imageuser_default(&iuser);
    iuser.tile = tile->tile_number;
    ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);

    if (ibuf) {
      PackTile *packtile = (PackTile *)MEM_callocN(sizeof(PackTile), __func__);
      packtile->tile = tile;
      packtile->boxpack.w = ibuf->x;
      packtile->boxpack.h = ibuf->y;

      /* NOTE(review): the `if (is_over_resolution_limit(` line was lost in
       * extraction; only its argument continuation remains below. */
          packtile->boxpack.w, packtile->boxpack.h, limit_gl_texture_size)) {
        packtile->boxpack.w = smaller_power_of_2_limit(packtile->boxpack.w, limit_gl_texture_size);
        packtile->boxpack.h = smaller_power_of_2_limit(packtile->boxpack.h, limit_gl_texture_size);
      }
      /* The array layer size must fit the largest tile. */
      arraywidth = max_ii(arraywidth, packtile->boxpack.w);
      arrayheight = max_ii(arrayheight, packtile->boxpack.h);

      /* We sort the tiles by decreasing size, with an additional penalty term
       * for high aspect ratios. This improves packing efficiency. */
      float w = packtile->boxpack.w, h = packtile->boxpack.h;
      packtile->pack_score = max_ff(w, h) / min_ff(w, h) * w * h;

      BKE_image_release_ibuf(ima, ibuf, NULL);
      BLI_addtail(&boxes, packtile);
    }
  }

  BLI_assert(arraywidth > 0 && arrayheight > 0);

  /* NOTE(review): the sort call was lost in extraction — presumably
   * `BLI_listbase_sort(&boxes, compare_packtile);` per the reference index. */
  int arraylayers = 0;
  /* Keep adding layers until all tiles are packed. */
  while (boxes.first != NULL) {
    ListBase packed = {NULL};
    BLI_box_pack_2d_fixedarea(&boxes, arraywidth, arrayheight, &packed);
    BLI_assert(packed.first != NULL);

    /* Record where each packed tile landed (offset/size/layer) for later
     * lookup by the tile-mapping texture and partial updates. */
    LISTBASE_FOREACH (PackTile *, packtile, &packed) {
      ImageTile *tile = packtile->tile;
      int *tileoffset = tile->runtime.tilearray_offset;
      int *tilesize = tile->runtime.tilearray_size;

      tileoffset[0] = packtile->boxpack.x;
      tileoffset[1] = packtile->boxpack.y;
      tilesize[0] = packtile->boxpack.w;
      tilesize[1] = packtile->boxpack.h;
      tile->runtime.tilearray_layer = arraylayers;
    }

    BLI_freelistN(&packed);
    arraylayers++;
  }

  const bool use_high_bitdepth = (ima->flag & IMA_HIGH_BITDEPTH);
  /* Create Texture without content. */
  /* NOTE(review): the `GPUTexture *tex = IMB_touch_gpu_texture(` line was
   * lost in extraction; only the argument continuation remains. */
      ima->id.name + 2, main_ibuf, arraywidth, arrayheight, arraylayers, use_high_bitdepth);

  /* Upload each tile one by one. */
  LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
    int tilelayer = tile->runtime.tilearray_layer;
    int *tileoffset = tile->runtime.tilearray_offset;
    int *tilesize = tile->runtime.tilearray_size;

    if (tilesize[0] == 0 || tilesize[1] == 0) {
      continue;
    }

    ImageUser iuser;
    BKE_imageuser_default(&iuser);
    iuser.tile = tile->tile_number;
    ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);

    if (ibuf) {
      const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima, ibuf);
      /* NOTE(review): the `IMB_update_gpu_texture_sub(tex,` call line was
       * lost in extraction; arguments continue below. */
          ibuf,
          UNPACK2(tileoffset),
          tilelayer,
          UNPACK2(tilesize),
          use_high_bitdepth,
          store_premultiplied);
    }

    BKE_image_release_ibuf(ima, ibuf, NULL);
  }

  if (GPU_mipmap_enabled()) {
    /* NOTE(review): a line was lost here (presumably mipmap generation). */
    GPU_texture_mipmap_mode(tex, true, true);
    if (ima) {
      /* NOTE(review): flag update lost in extraction (IMA_GPU_MIPMAP_COMPLETE?). */
    }
  }
  else {
    GPU_texture_mipmap_mode(tex, false, true);
  }

  return tex;
}
263 
266 /* -------------------------------------------------------------------- */
/* Return the address of the cached GPUTexture slot for the given target/eye,
 * or NULL when the target is out of range.
 * NOTE(review): the first signature line (function name and `Image *ima`
 * parameter) was lost in extraction — confirm against upstream. */
    eGPUTextureTarget textarget,
    const int multiview_eye)
{
  const bool in_range = (textarget >= 0) && (textarget < TEXTARGET_COUNT);
  BLI_assert(in_range);
  BLI_assert(multiview_eye == 0 || multiview_eye == 1);

  if (in_range) {
    return &(ima->gputexture[textarget][multiview_eye]);
  }
  return NULL;
}
283 
/* Create a dummy "error" texture matching the requested target's
 * dimensionality so shaders keep a valid binding.
 * NOTE(review): the signature line was lost in extraction — presumably
 * `static GPUTexture *image_gpu_texture_error_create(eGPUTextureTarget textarget)`. */
{
  fprintf(stderr, "GPUTexture: Blender Texture Not Loaded!\n");
  switch (textarget) {
    case TEXTARGET_2D_ARRAY:
      return GPU_texture_create_error(2, true);
    /* NOTE(review): a case label was lost in extraction before this return —
     * presumably `case TEXTARGET_TILE_MAPPING:` (a 1D array error texture). */
      return GPU_texture_create_error(1, true);
    case TEXTARGET_2D:
    default:
      return GPU_texture_create_error(2, false);
  }
}
297 
/* Get (creating on demand) the GPUTexture for an image/user/target
 * combination. Handles pending full and partial refreshes first, then returns
 * a cached texture when available, else uploads one from the ImBuf.
 * NOTE(review): the first signature line was lost in extraction — from the
 * wrappers below this is
 * `static GPUTexture *image_get_gpu_texture(Image *ima, ...)`. */
    ImageUser *iuser,
    ImBuf *ibuf,
    eGPUTextureTarget textarget)
{
  if (ima == NULL) {
    return NULL;
  }

  /* Free any unused GPU textures, since we know we are in a thread with OpenGL
   * context and might as well ensure we have as much space free as possible. */
  /* NOTE(review): the statement this comment refers to was lost in extraction
   * — presumably `gpu_free_unused_buffers();`. */

  /* Free GPU textures when requesting a different render pass/layer.
   * When `iuser` isn't set (texture painting single image mode) we assume that
   * the current `pass` and `layer` should be 0. */
  short requested_pass = iuser ? iuser->pass : 0;
  short requested_layer = iuser ? iuser->layer : 0;
  short requested_view = iuser ? iuser->multi_index : 0;
  /* Respect the user GL resolution limit unless the user explicitly asked to
   * show max resolution for this image user. */
  const bool limit_resolution = U.glreslimit != 0 &&
                                ((iuser && (iuser->flag & IMA_SHOW_MAX_RESOLUTION) == 0) ||
                                 (iuser == NULL));
  short requested_gpu_flags = limit_resolution ? 0 : IMA_GPU_MAX_RESOLUTION;
#define GPU_FLAGS_TO_CHECK (IMA_GPU_MAX_RESOLUTION)
  /* There is room for 2 multiview textures. When a higher number is requested we should always
   * target the first view slot. This is fine as multi view images aren't used together. */
  if (requested_view < 2) {
    /* Views 0 and 1 both fit in the two slots, so they share one cache key and
     * switching between them never forces a refresh. */
    requested_view = 0;
  }
  /* Any change of pass/layer/view/resolution-limit invalidates the cache. */
  if (ima->gpu_pass != requested_pass || ima->gpu_layer != requested_layer ||
      ima->gpu_view != requested_view ||
      ((ima->gpuflag & GPU_FLAGS_TO_CHECK) != requested_gpu_flags)) {
    ima->gpu_pass = requested_pass;
    ima->gpu_layer = requested_layer;
    ima->gpu_view = requested_view;
    ima->gpuflag &= ~GPU_FLAGS_TO_CHECK;
    ima->gpuflag |= requested_gpu_flags | IMA_GPU_REFRESH;
  }
#undef GPU_FLAGS_TO_CHECK

  /* Check if image has been updated and tagged to be updated (full or partial). */
  ImageTile *tile = BKE_image_get_tile(ima, 0);
  if (((ima->gpuflag & IMA_GPU_REFRESH) != 0) ||
      ((ibuf == NULL || tile == NULL || !tile->ok) &&
       ((ima->gpuflag & IMA_GPU_PARTIAL_REFRESH) != 0))) {
    /* Full refresh: drop every cached texture immediately. */
    image_free_gpu(ima, true);
    /* NOTE(review): statements lost in extraction here — presumably freeing
     * `gpu_refresh_areas` and clearing the refresh flags; confirm upstream. */
  }
  else if (ima->gpuflag & IMA_GPU_PARTIAL_REFRESH) {
    BLI_assert(ibuf);
    BLI_assert(tile && tile->ok);
    ImagePartialRefresh *refresh_area;
    /* Re-upload each queued IMA_PARTIAL_REFRESH_TILE_SIZE chunk, clamped to
     * the buffer bounds. */
    while ((refresh_area = BLI_pophead(&ima->gpu_refresh_areas))) {
      const int tile_offset_x = refresh_area->tile_x * IMA_PARTIAL_REFRESH_TILE_SIZE;
      const int tile_offset_y = refresh_area->tile_y * IMA_PARTIAL_REFRESH_TILE_SIZE;
      const int tile_width = MIN2(IMA_PARTIAL_REFRESH_TILE_SIZE, ibuf->x - tile_offset_x);
      const int tile_height = MIN2(IMA_PARTIAL_REFRESH_TILE_SIZE, ibuf->y - tile_offset_y);
      /* NOTE(review): the call line was lost in extraction — presumably
       * `image_update_gputexture_ex(`; arguments continue below. */
          ima, tile, ibuf, tile_offset_x, tile_offset_y, tile_width, tile_height);
      MEM_freeN(refresh_area);
    }
    /* NOTE(review): flag-clearing statement lost in extraction (presumably
     * clearing IMA_GPU_PARTIAL_REFRESH). */
  }

  /* Tag as in active use for garbage collector. */
  BKE_image_tag_time(ima);

  /* Test if we already have a texture. */
  int current_view = iuser ? iuser->multi_index : 0;
  if (current_view >= 2) {
    /* Only two multiview slots exist; higher views share slot 0. */
    current_view = 0;
  }
  GPUTexture **tex = get_image_gpu_texture_ptr(ima, textarget, current_view);
  if (*tex) {
    return *tex;
  }

  /* Check if we have a valid image. If not, we return a dummy
   * texture with zero bind-code so we don't keep trying. */
  if (tile == NULL || tile->ok == 0) {
    *tex = image_gpu_texture_error_create(textarget);
    return *tex;
  }

  /* check if we have a valid image buffer */
  ImBuf *ibuf_intern = ibuf;
  if (ibuf_intern == NULL) {
    ibuf_intern = BKE_image_acquire_ibuf(ima, iuser, NULL);
    if (ibuf_intern == NULL) {
      *tex = image_gpu_texture_error_create(textarget);
      return *tex;
    }
  }

  if (textarget == TEXTARGET_2D_ARRAY) {
    *tex = gpu_texture_create_tile_array(ima, ibuf_intern);
  }
  else if (textarget == TEXTARGET_TILE_MAPPING) {
    *tex = gpu_texture_create_tile_mapping(ima, iuser ? iuser->multiview_eye : 0);
  }
  else {
    const bool use_high_bitdepth = (ima->flag & IMA_HIGH_BITDEPTH);
    const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima,
                                                                                   ibuf_intern);
    const bool limit_gl_texture_size = (ima->gpuflag & IMA_GPU_MAX_RESOLUTION) == 0;

    *tex = IMB_create_gpu_texture(ima->id.name + 2,
                                  ibuf_intern,
                                  use_high_bitdepth,
                                  store_premultiplied,
                                  limit_gl_texture_size);

    if (*tex) {
      GPU_texture_wrap_mode(*tex, true, false);

      if (GPU_mipmap_enabled()) {
        /* NOTE(review): line(s) lost in extraction — presumably mipmap
         * generation and setting IMA_GPU_MIPMAP_COMPLETE; confirm upstream. */
        if (ima) {
        }
        GPU_texture_mipmap_mode(*tex, true, true);
      }
      else {
        GPU_texture_mipmap_mode(*tex, false, true);
      }
    }
  }

  /* if `ibuf` was given, we don't own the `ibuf_intern` */
  if (ibuf == NULL) {
    BKE_image_release_ibuf(ima, ibuf_intern, NULL);
  }

  if (*tex) {
    GPU_texture_orig_size_set(*tex, ibuf_intern->x, ibuf_intern->y);
  }

  return *tex;
}
438 
/* Public entry: 2D texture for an image.
 * NOTE(review): signature line lost in extraction — presumably
 * `GPUTexture *BKE_image_get_gpu_texture(Image *image, ImageUser *iuser, ImBuf *ibuf)`. */
{
  return image_get_gpu_texture(image, iuser, ibuf, TEXTARGET_2D);
}
443 
/* Public entry: 2D array texture holding all UDIM tiles.
 * NOTE(review): signature line lost in extraction — same parameters as the
 * wrapper above; confirm upstream. */
{
  return image_get_gpu_texture(image, iuser, ibuf, TEXTARGET_2D_ARRAY);
}
448 
/* Public entry: tile-mapping texture for UDIM lookup.
 * NOTE(review): signature line lost in extraction — same parameters as the
 * wrappers above; confirm upstream. */
{
  return image_get_gpu_texture(image, iuser, ibuf, TEXTARGET_TILE_MAPPING);
}
453 
456 /* -------------------------------------------------------------------- */
465 
/* Drain the queue of textures scheduled for deferred deletion (filled by
 * image_free_gpu() from non-main threads). Must run on a thread with a GL
 * context. */
static void gpu_free_unused_buffers(void)
{
  if (gpu_texture_free_queue == NULL) {
    return;
  }

  /* NOTE(review): the mutex-lock line was lost in extraction (presumably
   * BLI_mutex_lock on the queue mutex). */

  while (gpu_texture_free_queue != NULL) {
    /* NOTE(review): loop body lost in extraction — presumably popping an
     * entry and calling GPU_texture_free(); confirm upstream. */
  }

  /* NOTE(review): the mutex-unlock line was lost in extraction. */
}
481 
/* Free deferred GPU textures, only when called from the main thread.
 * NOTE(review): signature line lost in extraction — presumably
 * `void BKE_image_free_unused_gpu_textures(void)`; the call inside the `if`
 * (presumably gpu_free_unused_buffers()) was lost too. */
{
  if (BLI_thread_is_main()) {
  }
}
488 
491 /* -------------------------------------------------------------------- */
/* Release all cached GPU textures of an image. When `immediate` is false the
 * textures are (per upstream) queued for deferred deletion instead of freed
 * directly — required when no GL context is current. */
static void image_free_gpu(Image *ima, const bool immediate)
{
  for (int eye = 0; eye < 2; eye++) {
    for (int i = 0; i < TEXTARGET_COUNT; i++) {
      if (ima->gputexture[i][eye] != NULL) {
        if (immediate) {
          GPU_texture_free(ima->gputexture[i][eye]);
        }
        else {
          /* NOTE(review): deferred path lost in extraction — presumably
           * pushing the texture onto gpu_texture_free_queue under a mutex. */
        }

        /* Slot is cleared either way so it gets re-created on next use. */
        ima->gputexture[i][eye] = NULL;
      }
    }
  }

  /* NOTE(review): trailing statement lost in extraction — presumably clearing
   * IMA_GPU_MIPMAP_COMPLETE; confirm upstream. */
}
516 
/* NOTE(review): definition garbled in extraction — only the braces remain.
 * From callers elsewhere this is presumably `BKE_image_free_gputextures(Image *ima)`
 * delegating to image_free_gpu(); body lost, confirm upstream. */
{
}
521 
/* Free GPU textures of all images in the given Main database.
 * NOTE(review): signature line lost in extraction — presumably
 * `void BKE_image_free_all_gputextures(Main *bmain)`; the per-image free call
 * in the loop body was lost too. */
{
  if (bmain) {
    LISTBASE_FOREACH (Image *, ima, &bmain->images) {
    }
  }
}
530 
/* same as above but only free animated images */
/* NOTE(review): signature line lost in extraction — presumably
 * `void BKE_image_free_anim_gputextures(Main *bmain)`; the per-image free
 * call in the loop body was lost too. */
{
  if (bmain) {
    LISTBASE_FOREACH (Image *, ima, &bmain->images) {
      if (BKE_image_is_animated(ima)) {
      }
    }
  }
}
542 
/* Garbage-collect GPU textures of images unused for longer than the user
 * preference `U.textimeout`, at most once per `U.texcollectrate` seconds.
 * NOTE(review): signature line lost in extraction — presumably
 * `void BKE_image_free_old_gputextures(Main *bmain)`. */
{
  /* Persisting across calls so the collector runs at most once per second. */
  static int lasttime = 0;
  int ctime = (int)PIL_check_seconds_timer();

  /*
   * Run garbage collector once for every collecting period of time
   * if textimeout is 0, that's the option to NOT run the collector
   */
  if (U.textimeout == 0 || ctime % U.texcollectrate || ctime == lasttime) {
    return;
  }

  /* of course not! */
  if (G.is_rendering) {
    return;
  }

  lasttime = ctime;

  LISTBASE_FOREACH (Image *, ima, &bmain->images) {
    if ((ima->flag & IMA_NOCOLLECT) == 0 && ctime - ima->lastused > U.textimeout) {
      /* If it's in GL memory, deallocate and set time tag to current time
       * This gives textures a "second chance" to be used before dying. */
      if (BKE_image_has_opengl_texture(ima)) {
        /* NOTE(review): the free call was lost in extraction here. */
        ima->lastused = ctime;
      }
      /* Otherwise, just kill the buffers */
      else {
        /* NOTE(review): the buffer-free call was lost in extraction here. */
      }
    }
  }
}
580 /* -------------------------------------------------------------------- */
/* Map a partial-update rectangle from full-image coordinates into the scaled
 * (resolution-limited) texture, and return a scaled copy of the pixels.
 * Adjusts *x/*y/*w/*h in place; caller frees the returned ImBuf.
 * NOTE(review): the first signature line was lost in extraction — presumably
 * `static ImBuf *update_do_scale(uchar *rect,`; confirm upstream. */
    float *rect_float,
    int *x,
    int *y,
    int *w,
    int *h,
    int limit_w,
    int limit_h,
    int full_w,
    int full_h)
{
  /* Partial update with scaling. */
  float xratio = limit_w / (float)full_w;
  float yratio = limit_h / (float)full_h;

  int part_w = *w, part_h = *h;

  /* Find sub coordinates in scaled image. Take ceiling because we will be
   * losing 1 pixel due to rounding errors in x,y. */
  *x *= xratio;
  *y *= yratio;
  *w = (int)ceil(xratio * (*w));
  *h = (int)ceil(yratio * (*h));

  /* ...but take back if we are over the limit! */
  if (*x + *w > limit_w) {
    (*w)--;
  }
  if (*y + *h > limit_h) {
    (*h)--;
  }

  /* Scale pixels. */
  ImBuf *ibuf = IMB_allocFromBuffer((uint *)rect, rect_float, part_w, part_h, 4);
  IMB_scaleImBuf(ibuf, *w, *h);

  return ibuf;
}
622 
/* Slow partial-update path: scale the dirty region to match a
 * resolution-limited texture (or a packed UDIM tile), then upload it.
 * `layer > -1` selects a layer in a 2D array texture and applies the tile's
 * packing offset. NOTE(review): the first signature line was lost in
 * extraction — presumably `static void gpu_texture_update_scaled(GPUTexture *tex,`. */
    uchar *rect,
    float *rect_float,
    int full_w,
    int full_h,
    int x,
    int y,
    int layer,
    const int *tile_offset,
    const int *tile_size,
    int w,
    int h)
{
  ImBuf *ibuf;
  if (layer > -1) {
    /* UDIM tile inside an array texture: scale to the tile's packed size. */
    ibuf = update_do_scale(
        rect, rect_float, &x, &y, &w, &h, tile_size[0], tile_size[1], full_w, full_h);

    /* Shift to account for tile packing. */
    x += tile_offset[0];
    y += tile_offset[1];
  }
  else {
    /* Partial update with scaling. */
    int limit_w = GPU_texture_width(tex);
    int limit_h = GPU_texture_height(tex);

    ibuf = update_do_scale(rect, rect_float, &x, &y, &w, &h, limit_w, limit_h, full_w, full_h);
  }

  /* Pick float or byte upload depending on which buffer the scaled copy has. */
  void *data = (ibuf->rect_float) ? (void *)(ibuf->rect_float) : (void *)(ibuf->rect);
  eGPUDataFormat data_format = (ibuf->rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;

  GPU_texture_update_sub(tex, data_format, data, x, y, layer, w, h, 1);

  IMB_freeImBuf(ibuf);
}
660 
/* Fast partial-update path: upload the dirty region at identical resolution,
 * using an unpack row length so only a sub-rectangle of the source buffer is
 * read. NOTE(review): the first signature line was lost in extraction —
 * presumably `static void gpu_texture_update_unscaled(GPUTexture *tex,`. */
    uchar *rect,
    float *rect_float,
    int x,
    int y,
    int layer,
    const int tile_offset[2],
    int w,
    int h,
    int tex_stride,
    int tex_offset)
{
  if (layer > -1) {
    /* Shift to account for tile packing. */
    x += tile_offset[0];
    y += tile_offset[1];
  }

  void *data = (rect_float) ? (void *)(rect_float + tex_offset) : (void *)(rect + tex_offset);
  eGPUDataFormat data_format = (rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;

  /* Partial update without scaling. Stride and offset are used to copy only a
   * subset of a possible larger buffer than what we are updating. */
  GPU_unpack_row_length_set(tex_stride);

  GPU_texture_update_sub(tex, data_format, data, x, y, layer, w, h, 1);
  /* Restore default. */
  /* NOTE(review): the restoring call was lost in extraction — presumably
   * `GPU_unpack_row_length_set(0);`. */
}
690 
/* Update a sub-rectangle of a GPU texture from its ImBuf, converting color
 * space / alpha as needed and choosing the scaled or unscaled upload path.
 * NOTE(review): the first signature line was lost in extraction — presumably
 * `static void gpu_texture_update_from_ibuf(`. */
    GPUTexture *tex, Image *ima, ImBuf *ibuf, ImageTile *tile, int x, int y, int w, int h)
{
  /* Scaling is needed when the texture (or packed tile) size differs from the
   * buffer size (resolution limit in effect). */
  bool scaled;
  if (tile != NULL) {
    int *tilesize = tile->runtime.tilearray_size;
    scaled = (ibuf->x != tilesize[0]) || (ibuf->y != tilesize[1]);
  }
  else {
    scaled = (GPU_texture_width(tex) != ibuf->x) || (GPU_texture_height(tex) != ibuf->y);
  }

  if (scaled) {
    /* Extra padding to account for bleed from neighboring pixels. */
    const int padding = 4;
    const int xmax = min_ii(x + w + padding, ibuf->x);
    const int ymax = min_ii(y + h + padding, ibuf->y);
    x = max_ii(x - padding, 0);
    y = max_ii(y - padding, 0);
    w = xmax - x;
    h = ymax - y;
  }

  /* Get texture data pointers. */
  float *rect_float = ibuf->rect_float;
  uchar *rect = (uchar *)ibuf->rect;
  int tex_stride = ibuf->x;
  int tex_offset = ibuf->channels * (y * ibuf->x + x);

  const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima, ibuf);
  if (rect_float == NULL) {
    /* Byte pixels. */
    /* NOTE(review): the guarding `if (` line was lost in extraction —
     * presumably skipping conversion for data (non-color) spaces. */
      const bool compress_as_srgb = !IMB_colormanagement_space_is_scene_linear(
          ibuf->rect_colorspace);

      rect = (uchar *)MEM_mallocN(sizeof(uchar[4]) * w * h, __func__);
      if (rect == NULL) {
        return;
      }

      tex_stride = w;
      tex_offset = 0;

      /* Convert to scene linear with sRGB compression, and premultiplied for
       * correct texture interpolation. */
      /* NOTE(review): the call line was lost in extraction — presumably
       * `IMB_colormanagement_imbuf_to_byte_texture(`; arguments continue below. */
          rect, x, y, w, h, ibuf, compress_as_srgb, store_premultiplied);
    }
  }
  else {
    /* Float pixels. */
    if (ibuf->channels != 4 || scaled || !store_premultiplied) {
      rect_float = (float *)MEM_mallocN(sizeof(float[4]) * w * h, __func__);
      if (rect_float == NULL) {
        return;
      }

      tex_stride = w;
      tex_offset = 0;

      /* NOTE(review): the call line was lost in extraction — presumably
       * `IMB_colormanagement_imbuf_to_float_texture(`; arguments continue below. */
          rect_float, x, y, w, h, ibuf, store_premultiplied);
    }
  }

  if (scaled) {
    /* Slower update where we first have to scale the input pixels. */
    if (tile != NULL) {
      int *tileoffset = tile->runtime.tilearray_offset;
      int *tilesize = tile->runtime.tilearray_size;
      int tilelayer = tile->runtime.tilearray_layer;
      /* NOTE(review): the `gpu_texture_update_scaled(` call line was lost. */
          tex, rect, rect_float, ibuf->x, ibuf->y, x, y, tilelayer, tileoffset, tilesize, w, h);
    }
    else {
      /* NOTE(review): the `gpu_texture_update_scaled(` call line was lost. */
          tex, rect, rect_float, ibuf->x, ibuf->y, x, y, -1, NULL, NULL, w, h);
    }
  }
  else {
    /* Fast update at same resolution. */
    if (tile != NULL) {
      int *tileoffset = tile->runtime.tilearray_offset;
      int tilelayer = tile->runtime.tilearray_layer;
      /* NOTE(review): the `gpu_texture_update_unscaled(` call line was lost. */
          tex, rect, rect_float, x, y, tilelayer, tileoffset, w, h, tex_stride, tex_offset);
    }
    else {
      /* NOTE(review): the `gpu_texture_update_unscaled(` call line was lost. */
          tex, rect, rect_float, x, y, -1, NULL, w, h, tex_stride, tex_offset);
    }
  }

  /* Free buffers if needed. */
  if (rect && rect != (uchar *)ibuf->rect) {
    MEM_freeN(rect);
  }
  if (rect_float && rect_float != ibuf->rect_float) {
    MEM_freeN(rect_float);
  }

  if (GPU_mipmap_enabled()) {
    /* NOTE(review): line lost in extraction — presumably regenerating
     * mipmaps (GPU_texture_generate_mipmap per the reference index). */
  }
  else {
    /* NOTE(review): line lost in extraction — presumably clearing
     * IMA_GPU_MIPMAP_COMPLETE; confirm upstream. */
  }

  /* NOTE(review): trailing statement lost in extraction (presumably
   * GPU_texture_unbind); confirm upstream. */
}
802 
/* Apply a partial update to both the main 2D texture (first tile only) and
 * the 2D array texture of an image, when they exist.
 * NOTE(review): the first signature line was lost in extraction — presumably
 * `static void image_update_gputexture_ex(`; the declaration of `tex`
 * (presumably `ima->gputexture[TEXTARGET_2D][0]`) was lost too. */
    Image *ima, ImageTile *tile, ImBuf *ibuf, int x, int y, int w, int h)
{
  /* Check if we need to update the main gputexture. */
  if (tex != NULL && tile == ima->tiles.first) {
    gpu_texture_update_from_ibuf(tex, ima, ibuf, NULL, x, y, w, h);
  }

  /* Check if we need to update the array gputexture. */
  tex = ima->gputexture[TEXTARGET_2D_ARRAY][0];
  if (tex != NULL) {
    gpu_texture_update_from_ibuf(tex, ima, ibuf, tile, x, y, w, h);
  }
}
818 
/* Partial update of texture for texture painting. This is often much
 * quicker than fully updating the texture for high resolution images. */
void BKE_image_update_gputexture(Image *ima, ImageUser *iuser, int x, int y, int w, int h)
{
  ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, NULL);
  ImageTile *tile = BKE_image_get_tile_from_iuser(ima, iuser);

  if ((ibuf == NULL) || (w == 0) || (h == 0)) {
    /* Full reload of texture. */
    /* NOTE(review): the statement this comment refers to was lost in
     * extraction — presumably freeing the image's GPU textures. */
  }
  image_update_gputexture_ex(ima, tile, ibuf, x, y, w, h);
  BKE_image_release_ibuf(ima, ibuf, NULL);
}
833 
/* Mark areas on the GPUTexture that needs to be updated. The areas are marked in chunks.
 * The next time the GPUTexture is used these tiles will be refreshes. This saves time
 * when writing to the same place multiple times This happens for during foreground
 * rendering. */
/* NOTE(review): the first signature line was lost in extraction — presumably
 * `void BKE_image_update_gputexture_delayed(`; confirm upstream. */
    struct Image *ima, struct ImBuf *ibuf, int x, int y, int w, int h)
{
  /* Check for full refresh. */
  if (ibuf && x == 0 && y == 0 && w == ibuf->x && h == ibuf->y) {
    ima->gpuflag |= IMA_GPU_REFRESH;
  }
  /* Check if we can promote partial refresh to a full refresh. */
  /* NOTE(review): several lines were lost in extraction here — presumably the
   * promotion condition and flag updates; confirm upstream. */
  }
  /* Image is already marked for complete refresh. */
  if (ima->gpuflag & IMA_GPU_REFRESH) {
    return;
  }

  /* Schedule the tiles that covers the requested area. */
  const int start_tile_x = x / IMA_PARTIAL_REFRESH_TILE_SIZE;
  const int start_tile_y = y / IMA_PARTIAL_REFRESH_TILE_SIZE;
  const int end_tile_x = (x + w) / IMA_PARTIAL_REFRESH_TILE_SIZE;
  const int end_tile_y = (y + h) / IMA_PARTIAL_REFRESH_TILE_SIZE;
  const int num_tiles_x = (end_tile_x + 1) - (start_tile_x);
  const int num_tiles_y = (end_tile_y + 1) - (start_tile_y);
  const int num_tiles = num_tiles_x * num_tiles_y;
  /* Small areas use a stack bitmap, larger ones the heap. */
  const bool allocate_on_heap = BLI_BITMAP_SIZE(num_tiles) > 16;
  BLI_bitmap *requested_tiles = NULL;
  if (allocate_on_heap) {
    requested_tiles = BLI_BITMAP_NEW(num_tiles, __func__);
  }
  else {
    requested_tiles = BLI_BITMAP_NEW_ALLOCA(num_tiles);
  }

  /* Mark the tiles that have already been requested. They don't need to be requested again. */
  int num_tiles_not_scheduled = num_tiles;
  /* NOTE(review): the loop header was lost in extraction — presumably
   * `LISTBASE_FOREACH (ImagePartialRefresh *, area, &ima->gpu_refresh_areas) {`. */
    if (area->tile_x < start_tile_x || area->tile_x > end_tile_x || area->tile_y < start_tile_y ||
        area->tile_y > end_tile_y) {
      continue;
    }
    int requested_tile_index = (area->tile_x - start_tile_x) +
                               (area->tile_y - start_tile_y) * num_tiles_x;
    BLI_BITMAP_ENABLE(requested_tiles, requested_tile_index);
    num_tiles_not_scheduled--;
    if (num_tiles_not_scheduled == 0) {
      break;
    }
  }

  /* Schedule the tiles that aren't requested yet. */
  if (num_tiles_not_scheduled) {
    int tile_index = 0;
    for (int tile_y = start_tile_y; tile_y <= end_tile_y; tile_y++) {
      for (int tile_x = start_tile_x; tile_x <= end_tile_x; tile_x++) {
        if (!BLI_BITMAP_TEST_BOOL(requested_tiles, tile_index)) {
          /* NOTE(review): the allocation line was lost in extraction —
           * presumably MEM_mallocN of an ImagePartialRefresh named `area`. */
          area->tile_x = tile_x;
          area->tile_y = tile_y;
          /* NOTE(review): the BLI_addtail(&ima->gpu_refresh_areas, area)
           * line was lost in extraction; confirm upstream. */
        }
        tile_index++;
      }
    }
    /* NOTE(review): the flag-setting line was lost in extraction —
     * presumably `ima->gpuflag |= IMA_GPU_PARTIAL_REFRESH;`. */
  }
  if (allocate_on_heap) {
    MEM_freeN(requested_tiles);
  }
}
909 
/* these two functions are called on entering and exiting texture paint mode,
 * temporary disabling/enabling mipmapping on all images for quick texture
 * updates with glTexSubImage2D. images that didn't change don't have to be
 * re-uploaded to OpenGL */
void BKE_image_paint_set_mipmap(Main *bmain, bool mipmap)
{
  LISTBASE_FOREACH (Image *, ima, &bmain->images) {
    if (BKE_image_has_opengl_texture(ima)) {
      if (ima->gpuflag & IMA_GPU_MIPMAP_COMPLETE) {
        /* Mipmaps are already complete: just toggle sampling mode per texture. */
        for (int eye = 0; eye < 2; eye++) {
          for (int a = 0; a < TEXTARGET_COUNT; a++) {
            /* NOTE(review): a guarding line was lost in extraction here —
             * presumably restricting to ELEM(a, TEXTARGET_2D,
             * TEXTARGET_2D_ARRAY); confirm upstream. */
              GPUTexture *tex = ima->gputexture[a][eye];
              if (tex != NULL) {
                GPU_texture_mipmap_mode(tex, mipmap, true);
              }
            }
          }
        }
      }
      else {
        /* NOTE(review): a statement was lost in extraction here — presumably
         * freeing the image's GPU textures so they are rebuilt with mipmaps. */
      }
    }
    else {
      /* No GL texture: drop the stale mipmap-complete flag. */
      ima->gpuflag &= ~IMA_GPU_MIPMAP_COMPLETE;
    }
  }
}
939 
typedef float(TangentPoint)[2]
void BKE_image_release_ibuf(struct Image *ima, struct ImBuf *ibuf, void *lock)
Definition: image.c:5113
bool BKE_image_is_animated(struct Image *image)
Definition: image.c:5640
struct ImBuf * BKE_image_acquire_ibuf(struct Image *ima, struct ImageUser *iuser, void **r_lock)
Definition: image.c:5100
bool BKE_image_has_opengl_texture(struct Image *ima)
Definition: image.c:660
void BKE_imageuser_default(struct ImageUser *iuser)
Definition: image.c:3451
void BKE_image_tag_time(struct Image *ima)
Definition: image.c:1184
#define BLI_assert(a)
Definition: BLI_assert.h:58
#define BLI_BITMAP_ENABLE(_bitmap, _index)
Definition: BLI_bitmap.h:78
#define BLI_BITMAP_SIZE(_tot)
Definition: BLI_bitmap.h:47
#define BLI_BITMAP_NEW_ALLOCA(_tot)
Definition: BLI_bitmap.h:54
#define BLI_BITMAP_TEST_BOOL(_bitmap, _index)
Definition: BLI_bitmap.h:73
#define BLI_BITMAP_NEW(_tot, _alloc_string)
Definition: BLI_bitmap.h:50
unsigned int BLI_bitmap
Definition: BLI_bitmap.h:32
void BLI_box_pack_2d_fixedarea(struct ListBase *boxes, int width, int height, struct ListBase *packed)
Definition: boxpack_2d.c:693
void * BLI_pophead(ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:257
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:547
void void BLI_listbase_sort(struct ListBase *listbase, int(*cmp)(const void *, const void *)) ATTR_NONNULL(1
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:110
MINLINE int power_of_2_min_i(int n)
MINLINE float max_ff(float a, float b)
MINLINE int min_ii(int a, int b)
MINLINE float min_ff(float a, float b)
MINLINE int max_ii(int a, int b)
unsigned char uchar
Definition: BLI_sys_types.h:86
unsigned int uint
Definition: BLI_sys_types.h:83
#define BLI_MUTEX_INITIALIZER
Definition: BLI_threads.h:84
int BLI_thread_is_main(void)
Definition: threads.cc:234
void BLI_mutex_lock(ThreadMutex *mutex)
Definition: threads.cc:401
void BLI_mutex_unlock(ThreadMutex *mutex)
Definition: threads.cc:406
pthread_mutex_t ThreadMutex
Definition: BLI_threads.h:83
#define UNPACK2(a)
#define ELEM(...)
#define MIN2(a, b)
@ IMA_NOCOLLECT
@ IMA_HIGH_BITDEPTH
@ IMA_GPU_MAX_RESOLUTION
@ IMA_GPU_REFRESH
@ IMA_GPU_PARTIAL_REFRESH
@ IMA_GPU_MIPMAP_COMPLETE
#define IMA_SHOW_MAX_RESOLUTION
@ IMA_TYPE_UV_TEST
@ IMA_TYPE_R_RESULT
@ IMA_TYPE_COMPOSITE
@ IMA_ALPHA_STRAIGHT
@ IMA_ALPHA_PREMUL
eGPUTextureTarget
@ TEXTARGET_2D
@ TEXTARGET_2D_ARRAY
@ TEXTARGET_COUNT
@ TEXTARGET_TILE_MAPPING
int GPU_texture_size_with_limit(int res, bool limit_gl_texture_size)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei width
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
bool GPU_mipmap_enabled(void)
Definition: gpu_state.cc:299
GPUTexture * GPU_texture_create_1d_array(const char *name, int w, int h, int mip_len, eGPUTextureFormat format, const float *data)
Definition: gpu_texture.cc:243
void GPU_texture_update_sub(GPUTexture *tex, eGPUDataFormat data_format, const void *pixels, int offset_x, int offset_y, int offset_z, int width, int height, int depth)
Definition: gpu_texture.cc:356
void GPU_texture_wrap_mode(GPUTexture *tex, bool use_repeat, bool use_clamp)
Definition: gpu_texture.cc:496
int GPU_texture_height(const GPUTexture *tex)
Definition: gpu_texture.cc:532
struct GPUTexture GPUTexture
Definition: GPU_texture.h:33
void GPU_texture_mipmap_mode(GPUTexture *tex, bool use_mipmap, bool use_filter)
Definition: gpu_texture.cc:477
int GPU_texture_width(const GPUTexture *tex)
Definition: gpu_texture.cc:527
eGPUDataFormat
Definition: GPU_texture.h:171
@ GPU_DATA_UBYTE
Definition: GPU_texture.h:175
@ GPU_DATA_FLOAT
Definition: GPU_texture.h:172
void GPU_texture_free(GPUTexture *tex)
Definition: gpu_texture.cc:508
void GPU_texture_unbind(GPUTexture *tex)
Definition: gpu_texture.cc:421
@ GPU_RGBA32F
Definition: GPU_texture.h:91
void GPU_texture_orig_size_set(GPUTexture *tex, int w, int h)
Definition: gpu_texture.cc:547
void GPU_unpack_row_length_set(uint len)
Definition: gpu_texture.cc:398
GPUTexture * GPU_texture_create_error(int dimension, bool array)
Definition: gpu_texture.cc:329
void GPU_texture_generate_mipmap(GPUTexture *tex)
Definition: gpu_texture.cc:447
bool IMB_colormanagement_space_is_scene_linear(struct ColorSpace *colorspace)
void IMB_colormanagement_imbuf_to_byte_texture(unsigned char *out_buffer, const int x, const int y, const int width, const int height, const struct ImBuf *ibuf, const bool compress_as_srgb, const bool store_premultiplied)
bool IMB_colormanagement_space_is_data(struct ColorSpace *colorspace)
void IMB_colormanagement_imbuf_to_float_texture(float *out_buffer, const int offset_x, const int offset_y, const int width, const int height, const struct ImBuf *ibuf, const bool store_premultiplied)
bool IMB_scaleImBuf(struct ImBuf *ibuf, unsigned int newx, unsigned int newy)
Definition: scaling.c:1667
struct GPUTexture * IMB_touch_gpu_texture(const char *name, struct ImBuf *ibuf, int w, int h, int layers, bool use_high_bitdepth)
Definition: util_gpu.c:167
void IMB_freeImBuf(struct ImBuf *ibuf)
Definition: allocimbuf.c:211
struct GPUTexture * IMB_create_gpu_texture(const char *name, struct ImBuf *ibuf, bool use_high_bitdepth, bool use_premult, bool limit_gl_texture_size)
Definition: util_gpu.c:219
void IMB_update_gpu_texture_sub(struct GPUTexture *tex, struct ImBuf *ibuf, int x, int y, int z, int w, int h, bool use_high_bitdepth, bool use_premult)
Definition: util_gpu.c:189
struct ImBuf * IMB_allocFromBuffer(const unsigned int *rect, const float *rectf, unsigned int w, unsigned int h, unsigned int channels)
Definition: allocimbuf.c:433
Contains defines and structs used throughout the imbuf module.
Read Guarded memory(de)allocation.
Platform independent time functions.
unsigned int U
Definition: btGjkEpa3.h:78
btMatrix3x3 scaled(const btVector3 &s) const
Create a scaled copy of the matrix.
Definition: btMatrix3x3.h:622
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition: btQuadWord.h:119
short gpuflag
short gpu_pass
short gpu_layer
short gpu_view
ListBase tiles
short type
struct GPUTexture * gputexture[3][2]
ListBase gpu_refresh_areas
char alpha_mode
uint padding(uint offset, uint alignment)
static GPUTexture * image_get_gpu_texture(Image *ima, ImageUser *iuser, ImBuf *ibuf, eGPUTextureTarget textarget)
Definition: image_gpu.c:298
static int compare_packtile(const void *a, const void *b)
Definition: image_gpu.c:146
GPUTexture * BKE_image_get_gpu_texture(Image *image, ImageUser *iuser, ImBuf *ibuf)
Definition: image_gpu.c:439
void BKE_image_update_gputexture(Image *ima, ImageUser *iuser, int x, int y, int w, int h)
Definition: image_gpu.c:821
static int smaller_power_of_2_limit(int num, bool limit_gl_texture_size)
Definition: image_gpu.c:95
static GPUTexture * image_gpu_texture_error_create(eGPUTextureTarget textarget)
Definition: image_gpu.c:284
struct ImagePartialRefresh ImagePartialRefresh
void BKE_image_update_gputexture_delayed(struct Image *ima, struct ImBuf *ibuf, int x, int y, int w, int h)
Definition: image_gpu.c:838
GPUTexture * BKE_image_get_gpu_tilemap(Image *image, ImageUser *iuser, ImBuf *ibuf)
Definition: image_gpu.c:449
static void gpu_free_unused_buffers(void)
Definition: image_gpu.c:466
void BKE_image_free_anim_gputextures(Main *bmain)
Definition: image_gpu.c:532
#define GPU_FLAGS_TO_CHECK
static void gpu_texture_update_from_ibuf(GPUTexture *tex, Image *ima, ImBuf *ibuf, ImageTile *tile, int x, int y, int w, int h)
Definition: image_gpu.c:691
bool BKE_image_has_gpu_texture_premultiplied_alpha(Image *image, ImBuf *ibuf)
Definition: image_gpu.c:64
static ThreadMutex gpu_texture_queue_mutex
Definition: image_gpu.c:464
static void gpu_texture_update_scaled(GPUTexture *tex, uchar *rect, float *rect_float, int full_w, int full_h, int x, int y, int layer, const int *tile_offset, const int *tile_size, int w, int h)
Definition: image_gpu.c:623
static ImBuf * update_do_scale(uchar *rect, float *rect_float, int *x, int *y, int *w, int *h, int limit_w, int limit_h, int full_w, int full_h)
Definition: image_gpu.c:584
static void image_free_gpu(Image *ima, const bool immediate)
Definition: image_gpu.c:495
#define IMA_PARTIAL_REFRESH_TILE_SIZE
Definition: image_gpu.c:56
struct PackTile PackTile
void BKE_image_free_gputextures(Image *ima)
Definition: image_gpu.c:517
void BKE_image_paint_set_mipmap(Main *bmain, bool mipmap)
Definition: image_gpu.c:914
static void gpu_texture_update_unscaled(GPUTexture *tex, uchar *rect, float *rect_float, int x, int y, int layer, const int tile_offset[2], int w, int h, int tex_stride, int tex_offset)
Definition: image_gpu.c:661
static void image_update_gputexture_ex(Image *ima, ImageTile *tile, ImBuf *ibuf, int x, int y, int w, int h)
Definition: image_gpu.c:803
static LinkNode * gpu_texture_free_queue
Definition: image_gpu.c:463
GPUTexture * BKE_image_get_gpu_tiles(Image *image, ImageUser *iuser, ImBuf *ibuf)
Definition: image_gpu.c:444
static bool is_over_resolution_limit(int w, int h, bool limit_gl_texture_size)
Definition: image_gpu.c:89
static GPUTexture ** get_image_gpu_texture_ptr(Image *ima, eGPUTextureTarget textarget, const int multiview_eye)
Definition: image_gpu.c:270
void BKE_image_free_all_gputextures(Main *bmain)
Definition: image_gpu.c:522
void BKE_image_free_unused_gpu_textures()
Definition: image_gpu.c:482
void BKE_image_free_old_gputextures(Main *bmain)
Definition: image_gpu.c:543
static GPUTexture * gpu_texture_create_tile_array(Image *ima, ImBuf *main_ibuf)
Definition: image_gpu.c:154
static GPUTexture * gpu_texture_create_tile_mapping(Image *ima, const int multiview_eye)
Definition: image_gpu.c:100
void * BKE_image_free_buffers
void * BKE_image_get_tile_from_iuser
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:47
static unsigned a[3]
Definition: RandGen.cpp:92
static void area(int d1, int d2, int e1, int e2, float weights[2])
char name[66]
Definition: DNA_ID.h:283
int channels
struct ColorSpace * rect_colorspace
unsigned int * rect
float * rect_float
struct ImagePartialRefresh * prev
Definition: image_gpu.c:58
struct ImagePartialRefresh * next
Definition: image_gpu.c:58
struct ImageTile_Runtime runtime
char multiview_eye
short multi_index
void * last
Definition: DNA_listBase.h:47
void * first
Definition: DNA_listBase.h:47
Definition: BKE_main.h:116
ListBase images
Definition: BKE_main.h:154
ImageTile * tile
Definition: image_gpu.c:142
float pack_score
Definition: image_gpu.c:143
FixedSizeBoxPack boxpack
Definition: image_gpu.c:141
double PIL_check_seconds_timer(void)
Definition: time.c:80
ccl_device_inline float3 ceil(const float3 &a)
#define G(x, y, z)