Blender V2.93 — gpu_select_pick.c
Doxygen extract of the GPU depth-buffer based selection ("picking") implementation.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2017 Blender Foundation.
17  * All rights reserved.
18  */
19 
26 #include <float.h>
27 #include <stdlib.h>
28 #include <string.h>
29 
30 #include "GPU_debug.h"
31 #include "GPU_framebuffer.h"
32 #include "GPU_immediate.h"
33 #include "GPU_select.h"
34 #include "GPU_state.h"
35 
36 #include "MEM_guardedalloc.h"
37 
38 #include "BLI_listbase.h"
39 #include "BLI_rect.h"
40 #include "BLI_utildefines.h"
41 
42 #include "gpu_select_private.h"
43 
44 #include "BLI_strict_flags.h"
45 
46 /* #define DEBUG_PRINT */
47 
48 /* Alloc number for depths */
49 #define ALLOC_DEPTHS 200
50 
51 /* Z-depth of cleared depth buffer */
52 #define DEPTH_MAX 0xffffffff
53 
54 /* ----------------------------------------------------------------------------
55  * SubRectStride
56  */
57 
58 /* For looping over a sub-region of a rect, could be moved into 'rct.c'*/
59 typedef struct SubRectStride {
60  uint start; /* start here */
61  uint span; /* read these */
62  uint span_len; /* len times (read span 'len' times). */
63  uint skip; /* skip those */
65 
66 /* we may want to change back to float if uint isn't well supported */
67 typedef uint depth_t;
68 
74 static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub)
75 {
76  const int src_x = BLI_rcti_size_x(src);
77  // const int src_y = BLI_rcti_size_y(src);
78  const int dst_x = BLI_rcti_size_x(dst);
79  const int dst_y = BLI_rcti_size_y(dst);
80  const int x = dst->xmin - src->xmin;
81  const int y = dst->ymin - src->ymin;
82 
83  BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin && src->xmax >= dst->xmax &&
84  src->ymax >= dst->ymax);
85  BLI_assert(x >= 0 && y >= 0);
86 
87  r_sub->start = (uint)((src_x * y) + x);
88  r_sub->span = (uint)dst_x;
89  r_sub->span_len = (uint)dst_y;
90  r_sub->skip = (uint)(src_x - dst_x);
91 }
92 
97 BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
98 {
99  return (*prev != *curr) && (*curr != DEPTH_MAX);
100 }
101 
102 /* ----------------------------------------------------------------------------
103  * DepthBufCache
104  *
105  * Result of reading glReadPixels,
106  * use for both cache and non-cached storage.
107  */
108 
109 /* store result of glReadPixels */
110 typedef struct DepthBufCache {
115 
/* NOTE(review): the signature line is elided in this extract; per the
 * declaration index below this is
 * `static DepthBufCache *depth_buf_malloc(uint rect_len)` — allocate a
 * DepthBufCache with room for `rect_len` trailing depth values. */
{
  /* Single allocation: struct header plus the trailing depth array. */
  DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__);
  rect->id = SELECT_ID_NONE; /* No owning select-ID yet; set by the draw pass. */
  return rect;
}
122 
123 static bool depth_buf_rect_depth_any(const DepthBufCache *rect_depth, uint rect_len)
124 {
125  const depth_t *curr = rect_depth->buf;
126  for (uint i = 0; i < rect_len; i++, curr++) {
127  if (*curr != DEPTH_MAX) {
128  return true;
129  }
130  }
131  return false;
132 }
133 
134 static bool depth_buf_subrect_depth_any(const DepthBufCache *rect_depth,
135  const SubRectStride *sub_rect)
136 {
137  const depth_t *curr = rect_depth->buf + sub_rect->start;
138  for (uint i = 0; i < sub_rect->span_len; i++) {
139  const depth_t *curr_end = curr + sub_rect->span;
140  for (; curr < curr_end; curr++, curr++) {
141  if (*curr != DEPTH_MAX) {
142  return true;
143  }
144  }
145  curr += sub_rect->skip;
146  }
147  return false;
148 }
149 
150 static bool depth_buf_rect_depth_any_filled(const DepthBufCache *rect_prev,
151  const DepthBufCache *rect_curr,
152  uint rect_len)
153 {
154 #if 0
155  return memcmp(rect_depth_a->buf, rect_depth_b->buf, rect_len * sizeof(depth_t)) != 0;
156 #else
157  const depth_t *prev = rect_prev->buf;
158  const depth_t *curr = rect_curr->buf;
159  for (uint i = 0; i < rect_len; i++, curr++, prev++) {
160  if (depth_is_filled(prev, curr)) {
161  return true;
162  }
163  }
164  return false;
165 #endif
166 }
167 
/* NOTE(review): the opening line of this function's signature
 * (`static bool depth_buf_subrect_depth_any_filled(const DepthBufCache *rect_src,`)
 * is elided in this extract — see the declaration index below. */
                                                const DepthBufCache *rect_dst,
                                                const SubRectStride *sub_rect)
{
  /* same as above but different rect sizes */
  /* Walk only the `sub_rect` region of both buffers; they share the
   * same stride, so one SubRectStride drives both pointers. */
  const depth_t *prev = rect_src->buf + sub_rect->start;
  const depth_t *curr = rect_dst->buf + sub_rect->start;
  for (uint i = 0; i < sub_rect->span_len; i++) {
    /* One row: compare `span` pixels, then skip to the next row. */
    const depth_t *curr_end = curr + sub_rect->span;
    for (; curr < curr_end; prev++, curr++) {
      if (depth_is_filled(prev, curr)) {
        return true;
      }
    }
    prev += sub_rect->skip;
    curr += sub_rect->skip;
  }
  return false;
}
190 
191 /* ----------------------------------------------------------------------------
192  * DepthID
193  *
194  * Internal structure for storing hits.
195  */
196 
197 typedef struct DepthID {
201 
202 static int depth_id_cmp(const void *v1, const void *v2)
203 {
204  const DepthID *d1 = v1, *d2 = v2;
205  if (d1->id < d2->id) {
206  return -1;
207  }
208  if (d1->id > d2->id) {
209  return 1;
210  }
211 
212  return 0;
213 }
214 
215 static int depth_cmp(const void *v1, const void *v2)
216 {
217  const DepthID *d1 = v1, *d2 = v2;
218  if (d1->depth < d2->depth) {
219  return -1;
220  }
221  if (d1->depth > d2->depth) {
222  return 1;
223  }
224 
225  return 0;
226 }
227 
228 /* depth sorting */
229 typedef struct GPUPickState {
230  /* cache on initialization */
231  uint (*buffer)[4];
232 
233  /* buffer size (stores number of integers, for actual size multiply by sizeof integer)*/
235  /* mode of operation */
236  char mode;
237 
238  /* OpenGL drawing, never use when (is_cached == true). */
239  struct {
240  /* The current depth, accumulated as we draw */
242  /* Scratch buffer, avoid allocs every time (when not caching) */
244 
245  /* Pass to glReadPixels (x, y, w, h) */
247 
248  /* Set after first draw */
249  bool is_init;
251  } gl;
252 
253  /* src: data stored in 'cache' and 'gl',
254  * dst: use when cached region is smaller (where src -> dst isn't 1:1) */
255  struct {
258  } src, dst;
259 
260  /* Store cache between `GPU_select_cache_begin/end` */
261  bool use_cache;
262  bool is_cached;
263  struct {
264  /* Cleanup used for iterating over both source and destination buffers:
265  * src.clip_rect -> dst.clip_rect */
267 
268  /* List of DepthBufCache, sized of 'src.clip_rect' */
270  } cache;
271 
272  /* Picking methods. */
273  union {
274  /* GPU_SELECT_PICK_ALL */
275  struct {
279  } all;
280 
281  /* GPU_SELECT_PICK_NEAREST */
282  struct {
285  };
286 
287  /* Previous state to restore after drawing. */
288  int viewport[4];
289  int scissor[4];
293 
295 
/**
 * Begin a depth-picking session.
 *
 * \param buffer: Caller-owned hit buffer, filled by #gpu_select_pick_end
 *                (4 uints per hit).
 * \param bufsize: Capacity of \a buffer in hit records.
 * \param input: Screen-space rect to pick within.
 * \param mode: #GPU_SELECT_PICK_ALL or nearest-style picking.
 *
 * NOTE(review): several lines are elided in this Doxygen extract (GL state
 * saving, the GPU_depth_test() call, clip_readpixels[2..3] setup and the
 * GPU_viewport() call, and the cached-path sub-rect calculation) — verify
 * against the upstream file before relying on this listing.
 */
void gpu_select_pick_begin(uint (*buffer)[4], uint bufsize, const rcti *input, char mode)
{
  GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
  printf("%s: mode=%d, use_cache=%d, is_cache=%d\n", __func__, mode, ps->use_cache, ps->is_cached);
#endif

  GPU_debug_group_begin("Selection Pick");

  /* Cache the caller's output buffer and mode for use in later calls. */
  ps->bufsize = bufsize;
  ps->buffer = buffer;
  ps->mode = mode;

  const uint rect_len = (uint)(BLI_rcti_size_x(input) * BLI_rcti_size_y(input));
  ps->dst.clip_rect = *input;
  ps->dst.rect_len = rect_len;

  /* Restrict OpenGL operations for when we don't have cache */
  if (ps->is_cached == false) {
    /* NOTE(review): GL state save lines elided in this extract. */

    /* disable writing to the framebuffer */
    GPU_color_mask(false, false, false, false);

    GPU_depth_mask(true);
    /* Always use #GL_LEQUAL even though GPU_SELECT_PICK_ALL always clears the buffer. This is
     * because individual objects themselves might have sections that overlap and we need these
     * to have the correct distance information. */
    /* NOTE(review): the GPU_depth_test() call is elided here. */

    float viewport[4];
    GPU_viewport_size_get_f(viewport);

    /* Without a cache, source and destination regions are identical. */
    ps->src.clip_rect = *input;
    ps->src.rect_len = rect_len;

    ps->gl.clip_readpixels[0] = (int)viewport[0];
    ps->gl.clip_readpixels[1] = (int)viewport[1];
    /* NOTE(review): clip_readpixels[2..3] and the GPU_viewport() call are
     * elided in this extract. */

    /* It's possible we don't want to clear depth buffer,
     * so existing elements are masked by current z-buffer. */
    GPU_clear_depth(1.0f);

    /* scratch buffer (read new values here) */
    ps->gl.rect_depth_test = depth_buf_malloc(rect_len);
    ps->gl.rect_depth = depth_buf_malloc(rect_len);

    /* set initial 'far' value */
    for (uint i = 0; i < rect_len; i++) {
      ps->gl.rect_depth->buf[i] = DEPTH_MAX;
    }

    ps->gl.is_init = false;
    ps->gl.prev_id = 0;
  }
  else {
    /* Using cache (ps->is_cached == true) */
    /* src.clip_rect -> dst.clip_rect */
    /* NOTE(review): the rect_subregion_stride_calc() call is elided here. */
    BLI_assert(ps->gl.rect_depth == NULL);
  }

  if (mode == GPU_SELECT_PICK_ALL) {
    /* Pre-allocate the hit array; grown in ALLOC_DEPTHS steps later. */
    ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__);
    ps->all.hits_len = 0;
    ps->all.hits_len_alloc = ALLOC_DEPTHS;
  }
  else {
    /* Set to 0xff for SELECT_ID_NONE */
    ps->nearest.rect_id = MEM_mallocN(sizeof(uint) * ps->dst.rect_len, __func__);
    memset(ps->nearest.rect_id, 0xff, sizeof(uint) * ps->dst.rect_len);
  }
}
377 
382 static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
383 {
384  GPUPickState *ps = &g_pick_state;
385  const uint id = rect_curr->id;
386  /* find the best depth for this pass and store in 'all.hits' */
387  depth_t depth_best = DEPTH_MAX;
388 
389 #define EVAL_TEST() \
390  if (depth_best > *curr) { \
391  depth_best = *curr; \
392  } \
393  ((void)0)
394 
395  if (ps->is_cached == false) {
396  const depth_t *curr = rect_curr->buf;
397  BLI_assert(ps->src.rect_len == ps->dst.rect_len);
398  const uint rect_len = ps->src.rect_len;
399  for (uint i = 0; i < rect_len; i++, curr++) {
400  EVAL_TEST();
401  }
402  }
403  else {
404  /* same as above but different rect sizes */
405  const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
406  for (uint i = 0; i < ps->cache.sub_rect.span_len; i++) {
407  const depth_t *curr_end = curr + ps->cache.sub_rect.span;
408  for (; curr < curr_end; curr++) {
409  EVAL_TEST();
410  }
411  curr += ps->cache.sub_rect.skip;
412  }
413  }
414 
415 #undef EVAL_TEST
416 
417  /* ensure enough space */
418  if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
419  ps->all.hits_len_alloc += ALLOC_DEPTHS;
420  ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
421  }
422  DepthID *d = &ps->all.hits[ps->all.hits_len++];
423  d->id = id;
424  d->depth = depth_best;
425 }
426 
427 static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev,
428  const DepthBufCache *rect_curr)
429 {
430  GPUPickState *ps = &g_pick_state;
431  const uint id = rect_curr->id;
432  /* keep track each pixels ID in 'nearest.rect_id' */
433  if (id != SELECT_ID_NONE) {
434  uint *id_ptr = ps->nearest.rect_id;
435 
436  /* Check against DEPTH_MAX because XRAY will clear the buffer,
437  * so previously set values will become unset.
438  * In this case just leave those id's left as-is. */
439 #define EVAL_TEST() \
440  if (depth_is_filled(prev, curr)) { \
441  *id_ptr = id; \
442  } \
443  ((void)0)
444 
445  if (ps->is_cached == false) {
446  const depth_t *prev = rect_prev->buf;
447  const depth_t *curr = rect_curr->buf;
448  BLI_assert(ps->src.rect_len == ps->dst.rect_len);
449  const uint rect_len = ps->src.rect_len;
450  for (uint i = 0; i < rect_len; i++, curr++, prev++, id_ptr++) {
451  EVAL_TEST();
452  }
453  }
454  else {
455  /* same as above but different rect sizes */
456  const depth_t *prev = rect_prev->buf + ps->cache.sub_rect.start;
457  const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
458  for (uint i = 0; i < ps->cache.sub_rect.span_len; i++) {
459  const depth_t *curr_end = curr + ps->cache.sub_rect.span;
460  for (; curr < curr_end; prev++, curr++, id_ptr++) {
461  EVAL_TEST();
462  }
463  prev += ps->cache.sub_rect.skip;
464  curr += ps->cache.sub_rect.skip;
465  }
466  }
467 
468 #undef EVAL_TEST
469  }
470 }
471 
/**
 * Flush the depths accumulated for the previous ID and start drawing for `id`.
 * When `id` equals the previous ID and `end` is false, the read-back is
 * skipped (depths are merged/de-duplicated at the end). Always returns true.
 *
 * NOTE(review): several lines are elided in this Doxygen extract (the depth
 * read-back call, the PICK_ALL/nearest mode branch, and a buffer swap), so
 * the brace structure below does not balance as shown — verify upstream.
 */
bool gpu_select_pick_load_id(uint id, bool end)
{
  GPUPickState *ps = &g_pick_state;

  if (ps->gl.is_init) {
    if (id == ps->gl.prev_id && !end) {
      /* No need to read if we are still drawing for the same id since
       * all these depths will be merged / de-duplicated in the end. */
      return true;
    }

    const uint rect_len = ps->src.rect_len;
    /* perform initial check since most cases the array remains unchanged */

    bool do_pass = false;
    /* NOTE(review): mode branch partially elided here. */
    if (depth_buf_rect_depth_any(ps->gl.rect_depth_test, rect_len)) {
      ps->gl.rect_depth_test->id = ps->gl.prev_id;
      do_pass = true;
    }
  }
  else {
    ps->gl.rect_depth_test->id = ps->gl.prev_id;
    do_pass = true;
  }
  }

    if (do_pass) {
      /* Store depth in cache */
      if (ps->use_cache) {
        BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
      }

      /* (fclem) This is to be on the safe side. I don't know if this is required. */
      bool prev_depth_mask = GPU_depth_mask_get();
      /* we want new depths every time */
      GPU_depth_mask(true);
      GPU_clear_depth(1.0f);

      GPU_depth_mask(prev_depth_mask);
    }
  }
  }

  /* Remember the ID now being drawn so the next call can flush it. */
  ps->gl.is_init = true;
  ps->gl.prev_id = id;

  return true;
}
531 
/* NOTE(review): the signature line is elided in this extract; per the
 * declaration index below this is `uint gpu_select_pick_end(void)` —
 * finish picking: flush the last pass, de-duplicate hits per ID, sort by
 * depth (nearest first), and write them into the caller's buffer.
 * Returns the hit count, or (uint)-1 when more hits were found than fit.
 * Further lines are elided below (the PICK_ALL branch condition and the
 * scratch-buffer cleanup) — verify against upstream. */
{
  GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
  printf("%s\n", __func__);
#endif

  if (ps->is_cached == false) {
    if (ps->gl.is_init) {
      /* force finishing last pass */
    }
  }

  /* assign but never free directly since it may be in cache */
  DepthBufCache *rect_depth_final;

  /* Store depth in cache */
  if (ps->use_cache && !ps->is_cached) {
    BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
    ps->gl.rect_depth = NULL; /* Ownership moved into the cache list. */
    rect_depth_final = ps->cache.bufs.last;
  }
  else if (ps->is_cached) {
    rect_depth_final = ps->cache.bufs.last;
  }
  else {
    /* common case, no cache */
    rect_depth_final = ps->gl.rect_depth;
  }

  uint maxhits = g_pick_state.bufsize;
  DepthID *depth_data;
  uint depth_data_len = 0;

  /* NOTE(review): the `mode == GPU_SELECT_PICK_ALL` condition is elided
   * in this extract; the following block handles the PICK_ALL case. */
  depth_data = ps->all.hits;
  depth_data_len = ps->all.hits_len;
  /* move ownership */
  ps->all.hits = NULL;
  ps->all.hits_len = 0;
  ps->all.hits_len_alloc = 0;
  }
  else {
    /* GPU_SELECT_PICK_NEAREST */

    /* Over alloc (unlikely we have as many depths as pixels) */
    uint depth_data_len_first_pass = 0;
    depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__);

    /* Partially de-duplicating copy,
     * when contiguous ID's are found - update their closest depth.
     * This isn't essential but means there is less data to sort. */

#define EVAL_TEST(i_src, i_dst) \
  { \
    const uint id = ps->nearest.rect_id[i_dst]; \
    if (id != SELECT_ID_NONE) { \
      const depth_t depth = rect_depth_final->buf[i_src]; \
      if (depth_last == NULL || depth_last->id != id) { \
        DepthID *d = &depth_data[depth_data_len_first_pass++]; \
        d->id = id; \
        d->depth = depth; \
      } \
      else if (depth_last->depth > depth) { \
        depth_last->depth = depth; \
      } \
    } \
  } \
  ((void)0)

    {
      DepthID *depth_last = NULL;
      if (ps->is_cached == false) {
        for (uint i = 0; i < ps->src.rect_len; i++) {
          EVAL_TEST(i, i);
        }
      }
      else {
        /* same as above but different rect sizes */
        uint i_src = ps->cache.sub_rect.start, i_dst = 0;
        for (uint j = 0; j < ps->cache.sub_rect.span_len; j++) {
          const uint i_src_end = i_src + ps->cache.sub_rect.span;
          for (; i_src < i_src_end; i_src++, i_dst++) {
            EVAL_TEST(i_src, i_dst);
          }
          i_src += ps->cache.sub_rect.skip;
        }
      }
    }

#undef EVAL_TEST

    /* Group entries by ID so duplicates become adjacent. */
    qsort(depth_data, depth_data_len_first_pass, sizeof(DepthID), depth_id_cmp);

    /* Sort by ID's then keep the best depth for each ID */
    depth_data_len = 0;
    {
      DepthID *depth_last = NULL;
      for (uint i = 0; i < depth_data_len_first_pass; i++) {
        if (depth_last == NULL || depth_last->id != depth_data[i].id) {
          depth_last = &depth_data[depth_data_len++];
          *depth_last = depth_data[i];
        }
        else if (depth_last->depth > depth_data[i].depth) {
          depth_last->depth = depth_data[i].depth;
        }
      }
    }
  }

  /* Finally sort each unique (id, depth) pair by depth
   * so the final hit-list is sorted by depth (nearest first) */
  uint hits = 0;

  if (depth_data_len > maxhits) {
    /* Overflow: report with the sentinel value. */
    hits = (uint)-1;
  }
  else {
    /* leave sorting up to the caller */
    qsort(depth_data, depth_data_len, sizeof(DepthID), depth_cmp);

    for (uint i = 0; i < depth_data_len; i++) {
#ifdef DEBUG_PRINT
      printf("  hit: %u: depth %u\n", depth_data[i].id, depth_data[i].depth);
#endif
      /* first 3 are dummy values */
      g_pick_state.buffer[hits][0] = 1;
      g_pick_state.buffer[hits][1] = 0x0; /* depth_data[i].depth; */ /* unused */
      g_pick_state.buffer[hits][2] = 0x0; /* z-far is currently never used. */
      g_pick_state.buffer[hits][3] = depth_data[i].id;
      hits++;
    }
    /* NOTE(review): when depth_data_len == maxhits this assert triggers —
     * possibly should be '<=' ; verify against upstream. */
    BLI_assert(hits < maxhits);
  }

  MEM_freeN(depth_data);

  /* NOTE(review): scratch-buffer cleanup and the mode branch condition
   * are elided in this extract. */

  /* 'hits' already freed as 'depth_data' */
  }
  else {
    MEM_freeN(ps->nearest.rect_id);
    ps->nearest.rect_id = NULL;
  }

  if (ps->use_cache) {
    /* Subsequent begin/end pairs reuse the cached depth buffers. */
    ps->is_cached = true;
  }

  return hits;
}
693 
694 /* ----------------------------------------------------------------------------
695  * Caching
696  *
697  * Support multiple begin/end's reusing depth buffers.
698  */
699 
/* NOTE(review): the signature line is elided in this extract; per the
 * declaration index below this is `void gpu_select_pick_cache_begin(void)` —
 * enable caching of depth buffers across subsequent begin/end passes.
 * One further line is elided inside this body. */
{
#ifdef DEBUG_PRINT
  printf("%s\n", __func__);
#endif
  g_pick_state.use_cache = true;  /* Start caching from the next pass. */
  g_pick_state.is_cached = false; /* Nothing cached yet. */
}
709 
/* NOTE(review): the signature line is elided in this extract; per the
 * declaration index below this is `void gpu_select_pick_cache_end(void)` —
 * stop caching and reset the cache flags. */
{
#ifdef DEBUG_PRINT
  printf("%s: with %d buffers\n", __func__, BLI_listbase_count(&g_pick_state.cache.bufs));
#endif
  g_pick_state.use_cache = false;
  g_pick_state.is_cached = false;

  /* NOTE(review): the cache buffer-list cleanup line is elided here —
   * presumably a BLI_freelistN() on g_pick_state.cache.bufs; verify upstream. */
}
720 
/* is drawing needed? */
/* NOTE(review): the signature line is elided in this extract; per the
 * declaration index below this is `bool gpu_select_pick_is_cached(void)`. */
{
  return g_pick_state.is_cached;
}
726 
/* NOTE(review): the signature line is elided in this extract; per the
 * declaration index below this is `void gpu_select_pick_cache_load_id(void)` —
 * replay the cached depth buffers, running an id-pass for each consecutive
 * buffer pair restricted to the cached sub-rect. Several interior lines
 * (the PICK_ALL/nearest mode branch) are elided, so the brace structure
 * below does not balance as shown — verify upstream. */
{
  GPUPickState *ps = &g_pick_state;
#ifdef DEBUG_PRINT
  printf("%s (building depth from cache)\n", __func__);
#endif
  LISTBASE_FOREACH (DepthBufCache *, rect_depth, &ps->cache.bufs) {
    if (rect_depth->next != NULL) {
      /* we know the buffers differ, but this sub-region may not.
       * double check before adding an id-pass */
      if (depth_buf_subrect_depth_any(rect_depth->next, &ps->cache.sub_rect)) {
        gpu_select_load_id_pass_all(rect_depth->next);
      }
    }
    else {
      /* NOTE(review): the opening of the depth_buf_subrect_depth_any_filled()
       * call is elided here; these are its trailing arguments. */
      rect_depth, rect_depth->next, &ps->cache.sub_rect)) {
        gpu_select_load_id_pass_nearest(rect_depth, rect_depth->next);
      }
    }
  }
  }
}
#define BLI_assert(a)
Definition: BLI_assert.h:58
#define BLI_INLINE
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:547
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:110
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
BLI_INLINE int BLI_rcti_size_y(const struct rcti *rct)
Definition: BLI_rect.h:157
BLI_INLINE int BLI_rcti_size_x(const struct rcti *rct)
Definition: BLI_rect.h:153
Strict compiler flags for areas of code we want to ensure don't do conversions without us knowing abo...
unsigned int uint
Definition: BLI_sys_types.h:83
#define UNPACK4(a)
#define SWAP(type, a, b)
#define UNLIKELY(x)
void GPU_debug_group_end(void)
Definition: gpu_debug.cc:48
void GPU_debug_group_begin(const char *name)
Definition: gpu_debug.cc:37
struct GPUFrameBuffer GPUFrameBuffer
GPUFrameBuffer * GPU_framebuffer_active_get(void)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble v1
@ GPU_SELECT_PICK_ALL
Definition: GPU_select.h:41
void GPU_write_mask(eGPUWriteMask mask)
Definition: gpu_state.cc:100
eGPUWriteMask
Definition: GPU_state.h:25
void GPU_depth_mask(bool depth)
Definition: gpu_state.cc:117
void GPU_color_mask(bool r, bool g, bool b, bool a)
Definition: gpu_state.cc:105
eGPUWriteMask GPU_write_mask_get(void)
Definition: gpu_state.cc:243
void GPU_viewport(int x, int y, int width, int height)
Definition: gpu_state.cc:210
bool GPU_depth_mask_get(void)
Definition: gpu_state.cc:293
eGPUDepthTest
Definition: GPU_state.h:77
@ GPU_DEPTH_LESS_EQUAL
Definition: GPU_state.h:81
eGPUDepthTest GPU_depth_test_get(void)
Definition: gpu_state.cc:255
void GPU_depth_test(eGPUDepthTest test)
Definition: gpu_state.cc:75
void GPU_viewport_size_get_f(float coords[4])
Definition: gpu_state.cc:279
void GPU_scissor_get(int coords[4])
Definition: gpu_state.cc:274
@ GPU_DATA_UINT
Definition: GPU_texture.h:174
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
#define MEM_reallocN(vmemh, len)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
void GPU_clear_depth(float depth)
void GPU_framebuffer_read_depth(GPUFrameBuffer *gpu_fb, int x, int y, int w, int h, eGPUDataFormat format, void *data)
bool gpu_select_pick_load_id(uint id, bool end)
#define DEPTH_MAX
static bool depth_buf_subrect_depth_any(const DepthBufCache *rect_depth, const SubRectStride *sub_rect)
#define ALLOC_DEPTHS
struct GPUPickState GPUPickState
static int depth_cmp(const void *v1, const void *v2)
uint depth_t
struct SubRectStride SubRectStride
BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
bool gpu_select_pick_is_cached(void)
struct DepthID DepthID
static bool depth_buf_subrect_depth_any_filled(const DepthBufCache *rect_src, const DepthBufCache *rect_dst, const SubRectStride *sub_rect)
static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub)
#define EVAL_TEST()
void gpu_select_pick_begin(uint(*buffer)[4], uint bufsize, const rcti *input, char mode)
void gpu_select_pick_cache_end(void)
static int depth_id_cmp(const void *v1, const void *v2)
uint gpu_select_pick_end(void)
static DepthBufCache * depth_buf_malloc(uint rect_len)
static GPUPickState g_pick_state
struct DepthBufCache DepthBufCache
static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
static bool depth_buf_rect_depth_any_filled(const DepthBufCache *rect_prev, const DepthBufCache *rect_curr, uint rect_len)
static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev, const DepthBufCache *rect_curr)
void gpu_select_pick_cache_begin(void)
static bool depth_buf_rect_depth_any(const DepthBufCache *rect_depth, uint rect_len)
void gpu_select_pick_cache_load_id(void)
#define SELECT_ID_NONE
BLI_INLINE float fb(float length, float L)
__kernel void ccl_constant KernelData ccl_global void ccl_global char ccl_global int ccl_global char ccl_global unsigned int ccl_global float * buffer
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:47
depth_t buf[0]
struct DepthBufCache * next
struct DepthBufCache * prev
depth_t depth
struct GPUPickState::@630 src
DepthBufCache * rect_depth
eGPUWriteMask write_mask
eGPUDepthTest depth_test
SubRectStride sub_rect
int clip_readpixels[4]
DepthID * hits
struct GPUPickState::@630 dst
uint(* buffer)[4]
struct GPUPickState::@629 gl
DepthBufCache * rect_depth_test
struct GPUPickState::@632::@635 nearest
struct GPUPickState::@632::@634 all
struct GPUPickState::@631 cache
void * last
Definition: DNA_listBase.h:47
int ymin
Definition: DNA_vec_types.h:80
int ymax
Definition: DNA_vec_types.h:80
int xmin
Definition: DNA_vec_types.h:79
int xmax
Definition: DNA_vec_types.h:79