Blender  V2.93
gpu_buffers.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2005 Blender Foundation.
17  * All rights reserved.
18  */
19 
26 #include <limits.h>
27 #include <stddef.h>
28 #include <string.h>
29 
30 #include "MEM_guardedalloc.h"
31 
32 #include "BLI_bitmap.h"
33 #include "BLI_ghash.h"
34 #include "BLI_hash.h"
35 #include "BLI_math.h"
36 #include "BLI_math_color.h"
37 #include "BLI_math_color_blend.h"
38 #include "BLI_utildefines.h"
39 
40 #include "DNA_meshdata_types.h"
41 #include "DNA_userdef_types.h"
42 
43 #include "BKE_DerivedMesh.h"
44 #include "BKE_ccg.h"
45 #include "BKE_mesh.h"
46 #include "BKE_paint.h"
47 #include "BKE_pbvh.h"
48 #include "BKE_subdiv_ccg.h"
49 
50 #include "GPU_batch.h"
51 #include "GPU_buffers.h"
52 
53 #include "gpu_private.h"
54 
55 #include "bmesh.h"
56 
57 /* XXX: the rest of the code in this file is used for optimized PBVH
58  * drawing and doesn't interact at all with the buffer code above */
59 
64 
69 
70  /* mesh pointers in case buffer allocation fails */
71  const MPoly *mpoly;
72  const MLoop *mloop;
73  const MLoopTri *looptri;
74  const MVert *mvert;
75 
 /* Indices of this node's triangles into the mesh `looptri` array. */
76  const int *face_indices;
78 
79  /* grid pointers */
84  const int *grid_indices;
85  int totgrid;
86 
 /* True when this node draws dynamic-topology (BMesh) data. */
87  bool use_bmesh;
89 
91 
93 
94  /* The PBVH ensures that either all faces in the node are
95  * smooth-shaded or all faces are flat-shaded */
96  bool smooth;
97 
99 };
100 
/* Shared vertex format plus the attribute ids (`pos`, `nor`, `msk`, `col`,
 * `fset`) used by every PBVH draw buffer in this file. Zero-initialized;
 * filled once on first use (see the format init below). */
101 static struct {
104 } g_vbo_id = {{0}};
105 
108 /* -------------------------------------------------------------------- */
/* One-time lazy setup of the shared PBVH vertex format in `g_vbo_id`.
 * The `attr_len == 0` guard makes repeated calls a no-op. */
113 {
114  /* Initialize vertex buffer (match 'VertexBufferFormat'). */
115  if (g_vbo_id.format.attr_len == 0) {
117  &g_vbo_id.format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
120  /* TODO: Do not allocate these `.msk` and `.col` when they are not used. */
122  &g_vbo_id.format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
126  &g_vbo_id.format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
127  }
128 }
129 
/* Teardown counterpart of the format initialization above; the static
 * format requires no explicit freeing, so this is intentionally empty. */
131 {
132  /* Nothing to do. */
133 }
134 
135 /* Allocates a non-initialized buffer to be sent to GPU.
136  * A return of false indicates that the memory map failed. */
/* Ensure the node's vertex buffer exists and holds CPU-side storage for
 * `vert_len` vertices (contents are NOT initialized). Returns true when the
 * buffer data is available for filling; false means the memory map failed. */
137 static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
138 {
139  /* Keep so we can test #GPU_USAGE_DYNAMIC buffer use.
140  * Note that the format initialization must match in both blocks.
141  * Do this to keep braces balanced - otherwise indentation breaks. */
142 #if 0
143  if (buffers->vert_buf == NULL) {
144  /* Initialize vertex buffer (match 'VertexBufferFormat'). */
146  GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
147  }
148  else if (vert_len != buffers->vert_buf->vertex_len) {
149  GPU_vertbuf_data_resize(buffers->vert_buf, vert_len);
150  }
151 #else
152  if (buffers->vert_buf == NULL) {
153  /* Initialize vertex buffer (match 'VertexBufferFormat'). */
155  }
156  if (GPU_vertbuf_get_data(buffers->vert_buf) == NULL ||
157  GPU_vertbuf_get_vertex_len(buffers->vert_buf) != vert_len) {
158  /* Allocate buffer if not allocated yet or size changed. */
159  GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
160  }
161 #endif
162 
163  return GPU_vertbuf_get_data(buffers->vert_buf) != NULL;
164 }
165 
/* Create the node's draw batches (triangles/lines, plus the optional coarse
 * `_fast` variants) from its vertex and index buffers. Batches that already
 * exist are left untouched, so this is cheap to call repeatedly. */
167 {
168  if (buffers->triangles == NULL) {
169  buffers->triangles = GPU_batch_create(prim,
170  buffers->vert_buf,
171  /* can be NULL if buffer is empty */
172  buffers->index_buf);
173  }
174 
 /* Coarse triangle batch only when a fast index buffer was built. */
175  if ((buffers->triangles_fast == NULL) && buffers->index_buf_fast) {
176  buffers->triangles_fast = GPU_batch_create(prim, buffers->vert_buf, buffers->index_buf_fast);
177  }
178 
179  if (buffers->lines == NULL) {
181  buffers->vert_buf,
182  /* can be NULL if buffer is empty */
183  buffers->index_lines_buf);
184  }
185 
186  if ((buffers->lines_fast == NULL) && buffers->index_lines_buf_fast) {
187  buffers->lines_fast = GPU_batch_create(
188  GPU_PRIM_LINES, buffers->vert_buf, buffers->index_lines_buf_fast);
189  }
190 }
191 
194 /* -------------------------------------------------------------------- */
198 static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt,
199  const MVert *mvert,
200  const MLoop *mloop,
201  const int *sculpt_face_sets)
202 {
203  return (!paint_is_face_hidden(lt, mvert, mloop) && sculpt_face_sets &&
204  sculpt_face_sets[lt->poly] > SCULPT_FACE_SET_NONE);
205 }
206 
207 /* Threaded - do not call any functions that use OpenGL calls! */
/* Refill this node's vertex buffer from regular Mesh data: positions,
 * normals, mask, vertex colors and face-set overlay colors, one vertex per
 * visible looptri corner. Also records the node's material index and whether
 * any overlay (non-zero mask / non-default face set) is worth drawing. */
209  const MVert *mvert,
210  const float *vmask,
211  const MLoopCol *vcol,
212  const int *sculpt_face_sets,
213  const int face_sets_color_seed,
214  const int face_sets_color_default,
215  const MPropCol *vtcol,
216  const int update_flags)
217 {
218  const bool show_mask = vmask && (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
219  const bool show_face_sets = sculpt_face_sets &&
220  (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
221  const bool show_vcol = (vcol || (vtcol && U.experimental.use_sculpt_vertex_colors)) &&
222  (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
223  bool empty_mask = true;
224  bool default_face_set = true;
225 
226  {
 /* Triangles are drawn unshared: 3 unique vertices per tri. */
227  const int totelem = buffers->tot_tri * 3;
228 
229  /* Build VBO */
230  if (gpu_pbvh_vert_buf_data_set(buffers, totelem)) {
231  GPUVertBufRaw pos_step = {0};
232  GPUVertBufRaw nor_step = {0};
233  GPUVertBufRaw msk_step = {0};
234  GPUVertBufRaw fset_step = {0};
235  GPUVertBufRaw col_step = {0};
236 
237  GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.pos, &pos_step);
238  GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.nor, &nor_step);
239  GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.msk, &msk_step);
240  GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.fset, &fset_step);
241  if (show_vcol) {
242  GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.col, &col_step);
243  }
244 
245  /* calculate normal for each polygon only once */
246  uint mpoly_prev = UINT_MAX;
247  short no[3] = {0, 0, 0};
248 
249  for (uint i = 0; i < buffers->face_indices_len; i++) {
250  const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
251  const uint vtri[3] = {
252  buffers->mloop[lt->tri[0]].v,
253  buffers->mloop[lt->tri[1]].v,
254  buffers->mloop[lt->tri[2]].v,
255  };
256 
257  if (!gpu_pbvh_is_looptri_visible(lt, mvert, buffers->mloop, sculpt_face_sets)) {
258  continue;
259  }
260 
261  /* Face normal and mask */
262  if (lt->poly != mpoly_prev && !buffers->smooth) {
263  const MPoly *mp = &buffers->mpoly[lt->poly];
264  float fno[3];
265  BKE_mesh_calc_poly_normal(mp, &buffers->mloop[mp->loopstart], mvert, fno);
266  normal_float_to_short_v3(no, fno);
267  mpoly_prev = lt->poly;
268  }
269 
270  uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
271  if (show_face_sets) {
272  const int fset = abs(sculpt_face_sets[lt->poly]);
273  /* Skip for the default color Face Set to render it white. */
274  if (fset != face_sets_color_default) {
275  BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
276  default_face_set = false;
277  }
278  }
279 
 /* Flat shading: one averaged mask value for the whole triangle. */
280  float fmask = 0.0f;
281  uchar cmask = 0;
282  if (show_mask && !buffers->smooth) {
283  fmask = (vmask[vtri[0]] + vmask[vtri[1]] + vmask[vtri[2]]) / 3.0f;
284  cmask = (uchar)(fmask * 255);
285  }
286 
287  for (uint j = 0; j < 3; j++) {
288  const MVert *v = &mvert[vtri[j]];
289  copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), v->co);
290 
 /* Smooth shading: per-vertex normal and mask instead. */
291  if (buffers->smooth) {
292  copy_v3_v3_short(no, v->no);
293  }
294  copy_v3_v3_short(GPU_vertbuf_raw_step(&nor_step), no);
295 
296  if (show_mask && buffers->smooth) {
297  cmask = (uchar)(vmask[vtri[j]] * 255);
298  }
299 
300  *(uchar *)GPU_vertbuf_raw_step(&msk_step) = cmask;
301  empty_mask = empty_mask && (cmask == 0);
302  /* Vertex Colors. */
303  if (show_vcol) {
304  ushort scol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
305  if (vtcol && U.experimental.use_sculpt_vertex_colors) {
306  scol[0] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[0]);
307  scol[1] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[1]);
308  scol[2] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[2]);
309  scol[3] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[3]);
310  memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
311  }
312  else {
 /* Byte loop colors are converted to 16-bit per channel. */
313  const uint loop_index = lt->tri[j];
314  const MLoopCol *mcol = &vcol[loop_index];
318  scol[3] = unit_float_to_ushort_clamp(mcol->a * (1.0f / 255.0f));
319  memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
320  }
321  }
322  /* Face Sets. */
323  memcpy(GPU_vertbuf_raw_step(&fset_step), face_set_color, sizeof(uchar[3]));
324  }
325  }
326  }
327 
329  }
330 
331  /* Get material index from the first face of this buffer. */
332  const MLoopTri *lt = &buffers->looptri[buffers->face_indices[0]];
333  const MPoly *mp = &buffers->mpoly[lt->poly];
334  buffers->material_index = mp->mat_nr;
335 
336  buffers->show_overlay = !empty_mask || !default_face_set;
337  buffers->mvert = mvert;
338 }
339 
340 /* Threaded - do not call any functions that use OpenGL calls! */
/* Allocate a GPU_PBVH_Buffers for a regular-Mesh node: counts visible
 * triangles, builds only the wireframe line index buffer (the VBO is filled
 * later by the update function above), and stores the mesh pointers so a
 * later buffer-allocation failure can still fall back to them. */
342  const MLoop *mloop,
343  const MLoopTri *looptri,
344  const MVert *mvert,
345  const int *face_indices,
346  const int *sculpt_face_sets,
347  const int face_indices_len,
348  const struct Mesh *mesh)
349 {
350  GPU_PBVH_Buffers *buffers;
351  int i, tottri;
352  int tot_real_edges = 0;
353 
354  buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
355 
356  /* smooth or flat for all */
357  buffers->smooth = mpoly[looptri[face_indices[0]].poly].flag & ME_SMOOTH;
358 
359  buffers->show_overlay = false;
360 
 /* Count visible triangles and the real (non-triangulation) edges among
  * them, to size the line index buffer exactly. */
361  /* Count the number of visible triangles */
362  for (i = 0, tottri = 0; i < face_indices_len; i++) {
363  const MLoopTri *lt = &looptri[face_indices[i]];
364  if (gpu_pbvh_is_looptri_visible(lt, mvert, mloop, sculpt_face_sets)) {
365  int r_edges[3];
367  for (int j = 0; j < 3; j++) {
368  if (r_edges[j] != -1) {
369  tot_real_edges++;
370  }
371  }
372  tottri++;
373  }
374  }
375 
 /* Fully hidden node: keep the pointers but build nothing. */
376  if (tottri == 0) {
377  buffers->tot_tri = 0;
378 
379  buffers->mpoly = mpoly;
380  buffers->mloop = mloop;
381  buffers->looptri = looptri;
382  buffers->face_indices = face_indices;
383  buffers->face_indices_len = 0;
384 
385  return buffers;
386  }
387 
388  /* Fill only the line buffer. */
389  GPUIndexBufBuilder elb_lines;
390  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tot_real_edges, INT_MAX);
391  int vert_idx = 0;
392 
393  for (i = 0; i < face_indices_len; i++) {
394  const MLoopTri *lt = &looptri[face_indices[i]];
395 
396  /* Skip hidden faces */
397  if (!gpu_pbvh_is_looptri_visible(lt, mvert, mloop, sculpt_face_sets)) {
398  continue;
399  }
400 
 /* Emit a wire segment only for edges that exist in the original mesh
  * (-1 marks an edge introduced by triangulation). */
401  int r_edges[3];
403  if (r_edges[0] != -1) {
404  GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 0, vert_idx * 3 + 1);
405  }
406  if (r_edges[1] != -1) {
407  GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 1, vert_idx * 3 + 2);
408  }
409  if (r_edges[2] != -1) {
410  GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 2, vert_idx * 3 + 0);
411  }
412 
413  vert_idx++;
414  }
415  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
416 
417  buffers->tot_tri = tottri;
418 
419  buffers->mpoly = mpoly;
420  buffers->mloop = mloop;
421  buffers->looptri = looptri;
422 
423  buffers->face_indices = face_indices;
424  buffers->face_indices_len = face_indices_len;
425 
426  return buffers;
427 }
428 
431 /* -------------------------------------------------------------------- */
/* Build all four index buffers (full/coarse triangles and lines) for a node
 * of multires grids, skipping hidden grid faces. Smooth shading indexes a
 * shared gridsize*gridsize lattice per grid; flat shading indexes 4 unshared
 * vertices per quad. The coarse `_fast` buffers use only each grid's four
 * corner vertices. */
436  SubdivCCG *UNUSED(subdiv_ccg),
437  const int *UNUSED(face_sets),
438  const int *grid_indices,
439  uint visible_quad_len,
440  int totgrid,
441  int gridsize)
442 {
443  GPUIndexBufBuilder elb, elb_lines;
444  GPUIndexBufBuilder elb_fast, elb_lines_fast;
445 
446  GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
447  GPU_indexbuf_init(&elb_fast, GPU_PRIM_TRIS, 2 * totgrid, INT_MAX);
448  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
449  GPU_indexbuf_init(&elb_lines_fast, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);
450 
451  if (buffers->smooth) {
452  uint offset = 0;
453  const uint grid_vert_len = gridsize * gridsize;
454  for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
455  uint v0, v1, v2, v3;
456  bool grid_visible = false;
457 
 /* Per-grid hidden-face bitmap; may be NULL when nothing is hidden. */
458  BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];
459 
460  for (int j = 0; j < gridsize - 1; j++) {
461  for (int k = 0; k < gridsize - 1; k++) {
462  /* Skip hidden grid face */
463  if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
464  continue;
465  }
466  /* Indices in a Clockwise QUAD disposition. */
467  v0 = offset + j * gridsize + k;
468  v1 = v0 + 1;
469  v2 = v1 + gridsize;
470  v3 = v2 - 1;
471 
472  GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
473  GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
474 
475  GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
476  GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
477 
 /* Close the boundary on the last row. */
478  if (j + 2 == gridsize) {
479  GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
480  }
481  grid_visible = true;
482  }
483 
484  if (grid_visible) {
485  GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
486  }
487  }
488 
489  if (grid_visible) {
490  /* Grid corners */
491  v0 = offset;
492  v1 = offset + gridsize - 1;
493  v2 = offset + grid_vert_len - 1;
494  v3 = offset + grid_vert_len - gridsize;
495 
496  GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
497  GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);
498 
499  GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
500  GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
501  GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
502  GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
503  }
504  }
505  }
506  else {
 /* Flat shading: 4 unshared vertices per quad, laid out quad-by-quad. */
507  uint offset = 0;
508  const uint grid_vert_len = square_uint(gridsize - 1) * 4;
509  for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
510  bool grid_visible = false;
511  BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];
512 
513  uint v0, v1, v2, v3;
514  for (int j = 0; j < gridsize - 1; j++) {
515  for (int k = 0; k < gridsize - 1; k++) {
516  /* Skip hidden grid face */
517  if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
518  continue;
519  }
520  /* VBO data are in a Clockwise QUAD disposition. */
521  v0 = offset + (j * (gridsize - 1) + k) * 4;
522  v1 = v0 + 1;
523  v2 = v0 + 2;
524  v3 = v0 + 3;
525 
526  GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
527  GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
528 
529  GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
530  GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
531 
532  if (j + 2 == gridsize) {
533  GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
534  }
535  grid_visible = true;
536  }
537 
538  if (grid_visible) {
539  GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
540  }
541  }
542 
543  if (grid_visible) {
544  /* Grid corners */
545  v0 = offset;
546  v1 = offset + (gridsize - 1) * 4 - 3;
547  v2 = offset + grid_vert_len - 2;
548  v3 = offset + grid_vert_len - (gridsize - 1) * 4 + 3;
549 
550  GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
551  GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);
552 
553  GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
554  GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
555  GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
556  GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
557  }
558  }
559  }
560 
561  buffers->index_buf = GPU_indexbuf_build(&elb);
562  buffers->index_buf_fast = GPU_indexbuf_build(&elb_fast);
563  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
564  buffers->index_lines_buf_fast = GPU_indexbuf_build(&elb_lines_fast);
565 }
566 
/* If the node's shading (smooth vs flat) changed since the buffers were
 * built, update the cached flag and discard the now-stale batches so they
 * are recreated with the matching vertex layout. */
568  const struct DMFlagMat *grid_flag_mats,
569  const int *grid_indices)
570 {
571  const bool smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
572 
573  if (buffers->smooth != smooth) {
574  buffers->smooth = smooth;
577  GPU_BATCH_DISCARD_SAFE(buffers->lines);
579 
584  }
585 }
586 
587 /* Threaded - do not call any functions that use OpenGL calls! */
/* Refill this node's vertex buffer from multires/CCG grids, first building
 * the index buffers if missing. Smooth shading writes one vertex per grid
 * element; flat shading writes 4 unshared vertices per quad so each quad
 * carries its own face normal. Also updates `material_index`/`show_overlay`
 * and caches the grid pointers on the buffers. */
589  SubdivCCG *subdiv_ccg,
590  CCGElem **grids,
591  const struct DMFlagMat *grid_flag_mats,
592  int *grid_indices,
593  int totgrid,
594  const int *sculpt_face_sets,
595  const int face_sets_color_seed,
596  const int face_sets_color_default,
597  const struct CCGKey *key,
598  const int update_flags)
599 {
600  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
601  const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
602  const bool show_face_sets = sculpt_face_sets &&
603  (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
604  bool empty_mask = true;
605  bool default_face_set = true;
606 
607  int i, j, k, x, y;
608 
609  /* Build VBO */
610  const int has_mask = key->has_mask;
611 
612  buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
613 
614  uint vert_per_grid = (buffers->smooth) ? key->grid_area : (square_i(key->grid_size - 1) * 4);
615  uint vert_count = totgrid * vert_per_grid;
616 
617  if (buffers->index_buf == NULL) {
618  uint visible_quad_len = BKE_pbvh_count_grid_quads(
619  (BLI_bitmap **)buffers->grid_hidden, grid_indices, totgrid, key->grid_size);
620 
621  /* totally hidden node, return here to avoid BufferData with zero below. */
622  if (visible_quad_len == 0) {
623  return;
624  }
625 
627  subdiv_ccg,
628  sculpt_face_sets,
629  grid_indices,
630  visible_quad_len,
631  totgrid,
632  key->grid_size);
633  }
634 
635  uint vbo_index_offset = 0;
636  /* Build VBO */
637  if (gpu_pbvh_vert_buf_data_set(buffers, vert_count)) {
638  GPUIndexBufBuilder elb_lines;
639 
640  if (buffers->index_lines_buf == NULL) {
641  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, totgrid * key->grid_area * 2, vert_count);
642  }
643 
644  for (i = 0; i < totgrid; i++) {
645  const int grid_index = grid_indices[i];
646  CCGElem *grid = grids[grid_index];
647  int vbo_index = vbo_index_offset;
648 
 /* One overlay color per grid; its base face decides the face set. */
649  uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
650 
651  if (show_face_sets && subdiv_ccg && sculpt_face_sets) {
652  const int face_index = BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, grid_index);
653 
654  const int fset = abs(sculpt_face_sets[face_index]);
655  /* Skip for the default color Face Set to render it white. */
656  if (fset != face_sets_color_default) {
657  BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
658  default_face_set = false;
659  }
660  }
661 
662  if (buffers->smooth) {
663  for (y = 0; y < key->grid_size; y++) {
664  for (x = 0; x < key->grid_size; x++) {
665  CCGElem *elem = CCG_grid_elem(key, grid, x, y);
667  buffers->vert_buf, g_vbo_id.pos, vbo_index, CCG_elem_co(key, elem));
668 
669  short no_short[3];
670  normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
671  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);
672 
673  if (has_mask && show_mask) {
674  float fmask = *CCG_elem_mask(key, elem);
675  uchar cmask = (uchar)(fmask * 255);
676  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &cmask);
677  empty_mask = empty_mask && (cmask == 0);
678  }
679 
680  if (show_vcol) {
 /* Grids carry no vertex colors; write opaque white. */
681  const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
682  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, &vcol);
683  }
684 
685  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index, &face_set_color);
686 
687  vbo_index += 1;
688  }
689  }
690  vbo_index_offset += key->grid_area;
691  }
692  else {
693  for (j = 0; j < key->grid_size - 1; j++) {
694  for (k = 0; k < key->grid_size - 1; k++) {
695  CCGElem *elems[4] = {
696  CCG_grid_elem(key, grid, k, j),
697  CCG_grid_elem(key, grid, k + 1, j),
698  CCG_grid_elem(key, grid, k + 1, j + 1),
699  CCG_grid_elem(key, grid, k, j + 1),
700  };
701  float *co[4] = {
702  CCG_elem_co(key, elems[0]),
703  CCG_elem_co(key, elems[1]),
704  CCG_elem_co(key, elems[2]),
705  CCG_elem_co(key, elems[3]),
706  };
707 
708  float fno[3];
709  short no_short[3];
710  /* Note: Clockwise indices ordering, that's why we invert order here. */
711  normal_quad_v3(fno, co[3], co[2], co[1], co[0]);
712  normal_float_to_short_v3(no_short, fno);
713 
714  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 0, co[0]);
715  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 0, no_short);
716  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 1, co[1]);
717  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 1, no_short);
718  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 2, co[2]);
719  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 2, no_short);
720  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 3, co[3]);
721  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 3, no_short);
722 
723  if (has_mask && show_mask) {
 /* Flat quad: average the four corner mask values. */
724  float fmask = (*CCG_elem_mask(key, elems[0]) + *CCG_elem_mask(key, elems[1]) +
725  *CCG_elem_mask(key, elems[2]) + *CCG_elem_mask(key, elems[3])) *
726  0.25f;
727  uchar cmask = (uchar)(fmask * 255);
728  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 0, &cmask);
729  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 1, &cmask);
730  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 2, &cmask);
731  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 3, &cmask);
732  empty_mask = empty_mask && (cmask == 0);
733  }
734 
735  const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
736  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 0, &vcol);
737  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 1, &vcol);
738  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 2, &vcol);
739  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 3, &vcol);
740 
741  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 0, &face_set_color);
742  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 1, &face_set_color);
743  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 2, &face_set_color);
744  GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 3, &face_set_color);
745 
746  vbo_index += 4;
747  }
748  }
749  vbo_index_offset += square_i(key->grid_size - 1) * 4;
750  }
751  }
752 
754  }
755 
756  /* Get material index from the first face of this buffer. */
757  buffers->material_index = grid_flag_mats[grid_indices[0]].mat_nr;
758 
759  buffers->grids = grids;
760  buffers->grid_indices = grid_indices;
761  buffers->totgrid = totgrid;
762  buffers->grid_flag_mats = grid_flag_mats;
763  buffers->gridkey = *key;
764  buffers->show_overlay = !empty_mask || !default_face_set;
765 }
766 
767 /* Threaded - do not call any functions that use OpenGL calls! */
/* Allocate an empty GPU_PBVH_Buffers for a multires-grid node; GPU resources
 * are created later on the main thread. Caller retains ownership of
 * `grid_hidden`; the result is freed with GPU_pbvh_buffers_free(). */
769 {
770  GPU_PBVH_Buffers *buffers;
771 
772  buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
773  buffers->grid_hidden = grid_hidden;
774  buffers->totgrid = totgrid;
775 
776  buffers->show_overlay = false;
777 
778  return buffers;
779 }
780 
781 #undef FILL_QUAD_BUFFER
782 
785 /* -------------------------------------------------------------------- */
789 /* Output a BMVert into a VertexBufferFormat array at v_index. */
/* Writes position, normal, mask, default vertex color and default face-set
 * color for one BMesh vertex. `fno`/`fmask` override the vertex's own normal
 * and mask when non-NULL (used for flat shading); `empty_mask` is cleared as
 * soon as any non-zero mask value is written. */
791  GPUVertBuf *vert_buf,
792  int v_index,
793  const float fno[3],
794  const float *fmask,
795  const int cd_vert_mask_offset,
796  const bool show_mask,
797  const bool show_vcol,
798  bool *empty_mask)
799 {
800  /* Vertex should always be visible if it's used by a visible face. */
802 
803  /* Set coord, normal, and mask */
804  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, v_index, v->co);
805 
806  short no_short[3];
807  normal_float_to_short_v3(no_short, fno ? fno : v->no);
808  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, v_index, no_short);
809 
810  if (show_mask) {
811  float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
812  uchar cmask = (uchar)(effective_mask * 255);
813  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, v_index, &cmask);
814  *empty_mask = *empty_mask && (cmask == 0);
815  }
816 
817  if (show_vcol) {
 /* BMesh sculpt drawing has no vertex colors; write opaque white. */
818  const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
819  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, &vcol);
820  }
821 
822  /* Add default face sets color to avoid artifacts. */
823  const uchar face_set[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
824  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.fset, v_index, &face_set);
825 }
826 
827 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
828 static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts, GSet *bm_other_verts)
829 {
830  GSetIterator gs_iter;
831  int totvert = 0;
832 
 /* Both the node's own verts and verts borrowed from neighbors count. */
833  GSET_ITER (gs_iter, bm_unique_verts) {
834  BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
836  totvert++;
837  }
838  }
839  GSET_ITER (gs_iter, bm_other_verts) {
840  BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
842  totvert++;
843  }
844  }
845 
846  return totvert;
847 }
848 
849 /* Return the total number of visible faces */
850 static int gpu_bmesh_face_visible_count(GSet *bm_faces)
851 {
852  GSetIterator gh_iter;
853  int totface = 0;
854 
855  GSET_ITER (gh_iter, bm_faces) {
856  BMFace *f = BLI_gsetIterator_getKey(&gh_iter);
857 
859  totface++;
860  }
861  }
862 
863  return totface;
864 }
865 
/* Discard the batches (and for smooth shading the index buffers they were
 * built from) so the next update rebuilds them from scratch. */
867 {
868  if (buffers->smooth) {
869  /* Smooth needs to recreate index buffer, so we have to invalidate the batch. */
871  GPU_BATCH_DISCARD_SAFE(buffers->lines);
874  }
875  else {
876  GPU_BATCH_DISCARD_SAFE(buffers->lines);
878  }
879 }
880 
881 /* Creates a vertex buffer (coordinate, normal, color) and, if smooth
882  * shading, an element index buffer.
883  * Threaded - do not call any functions that use OpenGL calls! */
/* Refills this node's buffers from dynamic-topology BMesh data. Smooth
 * shading shares vertices via a BMVert -> VBO-index hash and emits a
 * triangle index buffer; flat shading writes 3 unshared vertices per face.
 * A node that became empty is flagged for cleanup on the next flush. */
885  BMesh *bm,
886  GSet *bm_faces,
887  GSet *bm_unique_verts,
888  GSet *bm_other_verts,
889  const int update_flags)
890 {
891  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
892  const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
893  int tottri, totvert;
894  bool empty_mask = true;
895  BMFace *f = NULL;
896 
897  /* Count visible triangles */
898  tottri = gpu_bmesh_face_visible_count(bm_faces);
899 
900  if (buffers->smooth) {
901  /* Count visible vertices */
902  totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
903  }
904  else {
905  totvert = tottri * 3;
906  }
907 
908  if (!tottri) {
909  if (BLI_gset_len(bm_faces) != 0) {
910  /* Node is just hidden. */
911  }
912  else {
 /* Truly empty node: free its GPU data on the next flush. */
913  buffers->clear_bmesh_on_flush = true;
914  }
915  buffers->tot_tri = 0;
916  return;
917  }
918 
919  /* TODO, make mask layer optional for bmesh buffer */
920  const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
921 
922  /* Fill vertex buffer */
923  if (!gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
924  /* Memory map failed */
925  return;
926  }
927 
928  int v_index = 0;
929 
930  if (buffers->smooth) {
931  /* Fill the vertex and triangle buffer in one pass over faces. */
932  GPUIndexBufBuilder elb, elb_lines;
933  GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, totvert);
934  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);
935 
 /* Maps each BMVert to its slot in the vertex buffer for index sharing. */
936  GHash *bm_vert_to_index = BLI_ghash_int_new_ex("bm_vert_to_index", totvert);
937 
938  GSetIterator gs_iter;
939  GSET_ITER (gs_iter, bm_faces) {
940  f = BLI_gsetIterator_getKey(&gs_iter);
941 
943  BMVert *v[3];
945 
946  uint idx[3];
947  for (int i = 0; i < 3; i++) {
948  void **idx_p;
949  if (!BLI_ghash_ensure_p(bm_vert_to_index, v[i], &idx_p)) {
950  /* Add vertex to the vertex buffer each time a new one is encountered */
951  *idx_p = POINTER_FROM_UINT(v_index);
952 
954  buffers->vert_buf,
955  v_index,
956  NULL,
957  NULL,
958  cd_vert_mask_offset,
959  show_mask,
960  show_vcol,
961  &empty_mask);
962 
963  idx[i] = v_index;
964  v_index++;
965  }
966  else {
967  /* Vertex already in the vertex buffer, just get the index. */
968  idx[i] = POINTER_AS_UINT(*idx_p);
969  }
970  }
971 
972  GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);
973 
974  GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
975  GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
976  GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
977  }
978  }
979 
980  BLI_ghash_free(bm_vert_to_index, NULL, NULL);
981 
982  buffers->tot_tri = tottri;
983  if (buffers->index_buf == NULL) {
984  buffers->index_buf = GPU_indexbuf_build(&elb);
985  }
986  else {
987  GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
988  }
989  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
990  }
991  else {
 /* Flat shading: no shared vertices, so no triangle index buffer. */
992  GSetIterator gs_iter;
993 
994  GPUIndexBufBuilder elb_lines;
995  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, tottri * 3);
996 
997  GSET_ITER (gs_iter, bm_faces) {
998  f = BLI_gsetIterator_getKey(&gs_iter);
999 
1000  BLI_assert(f->len == 3);
1001 
1002  if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
1003  BMVert *v[3];
1004  float fmask = 0.0f;
1005  int i;
1006 
1008 
1009  /* Average mask value */
1010  for (i = 0; i < 3; i++) {
1011  fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
1012  }
1013  fmask /= 3.0f;
1014 
1015  GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
1016  GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
1017  GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
1018 
1019  for (i = 0; i < 3; i++) {
1021  buffers->vert_buf,
1022  v_index++,
1023  f->no,
1024  &fmask,
1025  cd_vert_mask_offset,
1026  show_mask,
1027  show_vcol,
1028  &empty_mask);
1029  }
1030  }
1031  }
1032 
1033  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
1034  buffers->tot_tri = tottri;
1035  }
1036 
1037  /* Get material index from the last face we iterated on. */
1038  buffers->material_index = (f) ? f->mat_nr : 0;
1039 
1040  buffers->show_overlay = !empty_mask;
1041 
1043 }
1044 
1047 /* -------------------------------------------------------------------- */
1051 /* Threaded - do not call any functions that use OpenGL calls! */
/* Allocate an empty GPU_PBVH_Buffers for a dynamic-topology (BMesh) node;
 * GPU resources are created later. Freed with GPU_pbvh_buffers_free(). */
1053 {
1054  GPU_PBVH_Buffers *buffers;
1055 
1056  buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
1057  buffers->use_bmesh = true;
1058  buffers->smooth = smooth_shading;
1059  buffers->show_overlay = true;
1060 
1061  return buffers;
1062 }
1063 
1064 GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
1065 {
1066  if (wires) {
1067  return (fast && buffers->lines_fast) ? buffers->lines_fast : buffers->lines;
1068  }
1069 
1070  return (fast && buffers->triangles_fast) ? buffers->triangles_fast : buffers->triangles;
1071 }
1072 
/* True when the node has overlay data worth drawing (set by the update
 * functions when a non-zero mask or non-default face set was written). */
1074 {
1075  return buffers->show_overlay;
1076 }
1077 
/* Material index recorded from the node's geometry during the last update. */
1079 {
1080  return buffers->material_index;
1081 }
1082 
/* Discard all GPU resources owned by the buffers (batches and the vertex /
 * index buffers they reference). */
1084 {
1085  GPU_BATCH_DISCARD_SAFE(buffers->lines);
1094 }
1095 
/* Main-thread flush step: free buffers of nodes that became empty during a
 * threaded update, then push any pending CPU-side vertex data to the GPU. */
1097 {
1098  /* Free empty bmesh node buffers. */
1099  if (buffers->clear_bmesh_on_flush) {
1100  gpu_pbvh_buffers_clear(buffers);
1101  buffers->clear_bmesh_on_flush = false;
1102  }
1103 
1104  /* Force flushing to the GPU. */
1105  if (buffers->vert_buf && GPU_vertbuf_get_data(buffers->vert_buf)) {
1106  GPU_vertbuf_use(buffers->vert_buf);
1107  }
1108 }
1109 
/* Free the buffers' GPU resources and the struct itself. NULL-safe. */
1111 {
1112  if (buffers) {
1113  gpu_pbvh_buffers_clear(buffers);
1114  MEM_freeN(buffers);
1115  }
1116 }
1117 
BLI_INLINE CCGElem * CCG_grid_elem(const CCGKey *key, CCGElem *elem, int x, int y)
Definition: BKE_ccg.h:124
BLI_INLINE float * CCG_elem_mask(const CCGKey *key, CCGElem *elem)
Definition: BKE_ccg.h:113
BLI_INLINE float * CCG_elem_no(const CCGKey *key, CCGElem *elem)
Definition: BKE_ccg.h:107
struct CCGElem CCGElem
Definition: BKE_ccg.h:46
BLI_INLINE float * CCG_elem_co(const CCGKey *key, CCGElem *elem)
int CustomData_get_offset(const struct CustomData *data, int type)
void BKE_mesh_looptri_get_real_edges(const struct Mesh *mesh, const struct MLoopTri *looptri, int r_edges[3])
void BKE_mesh_calc_poly_normal(const struct MPoly *mpoly, const struct MLoop *loopstart, const struct MVert *mvarray, float r_no[3])
bool paint_is_face_hidden(const struct MLoopTri *lt, const struct MVert *mvert, const struct MLoop *mloop)
#define SCULPT_FACE_SET_NONE
Definition: BKE_paint.h:230
bool paint_is_grid_face_hidden(const unsigned int *grid_hidden, int gridsize, int x, int y)
Definition: paint.c:1247
void BKE_paint_face_set_overlay_color_get(const int face_set, const int seed, uchar r_color[4])
Definition: paint.c:2249
A BVH for high poly meshes.
int BKE_pbvh_count_grid_quads(BLI_bitmap **grid_hidden, const int *grid_indices, int totgrid, int gridsize)
Definition: pbvh.c:355
int BKE_subdiv_ccg_grid_to_face_index(const SubdivCCG *subdiv_ccg, const int grid_index)
Definition: subdiv_ccg.c:1832
#define BLI_assert(a)
Definition: BLI_assert.h:58
unsigned int BLI_bitmap
Definition: BLI_bitmap.h:32
struct GSet GSet
Definition: BLI_ghash.h:189
unsigned int BLI_gset_len(GSet *gs) ATTR_WARN_UNUSED_RESULT
Definition: BLI_ghash.c:1138
GHash * BLI_ghash_int_new_ex(const char *info, const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
#define GSET_ITER(gs_iter_, gset_)
Definition: BLI_ghash.h:268
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition: BLI_ghash.c:1008
BLI_INLINE void * BLI_gsetIterator_getKey(GSetIterator *gsi)
Definition: BLI_ghash.h:255
bool BLI_ghash_ensure_p(GHash *gh, void *key, void ***r_val) ATTR_WARN_UNUSED_RESULT
Definition: BLI_ghash.c:851
MINLINE int square_i(int a)
MINLINE unsigned int square_uint(unsigned int a)
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition: math_geom.c:68
MINLINE void normal_float_to_short_v3(short r[3], const float n[3])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE void copy_v3_v3_short(short r[3], const short a[3])
unsigned char uchar
Definition: BLI_sys_types.h:86
unsigned int uint
Definition: BLI_sys_types.h:83
unsigned short ushort
Definition: BLI_sys_types.h:84
#define POINTER_AS_UINT(i)
#define UNUSED(x)
#define POINTER_FROM_UINT(i)
@ CD_PAINT_MASK
@ ME_SMOOTH
GPUBatch
Definition: GPU_batch.h:93
#define GPU_batch_create(prim, verts, elem)
Definition: GPU_batch.h:107
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition: GPU_batch.h:199
@ GPU_PBVH_BUFFERS_SHOW_MASK
Definition: GPU_buffers.h:73
@ GPU_PBVH_BUFFERS_SHOW_VCOL
Definition: GPU_buffers.h:74
@ GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS
Definition: GPU_buffers.h:75
struct GPUIndexBuf GPUIndexBuf
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
GPUIndexBuf * GPU_indexbuf_build(GPUIndexBufBuilder *)
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2)
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *, GPUIndexBuf *)
void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *, uint v1, uint v2, uint v3)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble v1
GPUPrimType
Definition: GPU_primitive.h:34
@ GPU_PRIM_LINES
Definition: GPU_primitive.h:36
@ GPU_PRIM_TRIS
Definition: GPU_primitive.h:37
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
struct GPUVertBuf GPUVertBuf
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len)
void * GPU_vertbuf_get_data(const GPUVertBuf *verts)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_use(GPUVertBuf *)
GPUVertBuf * GPU_vertbuf_create_with_format_ex(const GPUVertFormat *, GPUUsageType)
GPU_INLINE void * GPU_vertbuf_raw_step(GPUVertBufRaw *a)
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *, uint a_idx, GPUVertBufRaw *access)
void GPU_vertbuf_attr_set(GPUVertBuf *, uint a_idx, uint v_idx, const void *data)
@ GPU_USAGE_STATIC
@ GPU_USAGE_DYNAMIC
void GPU_vertbuf_data_resize(GPUVertBuf *, uint v_len)
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_U16
@ GPU_COMP_F32
@ GPU_COMP_I16
@ GPU_COMP_U8
Read Guarded memory(de)allocation.
#define BM_ELEM_CD_GET_FLOAT(ele, offset)
Definition: bmesh_class.h:542
@ BM_ELEM_HIDDEN
Definition: bmesh_class.h:472
#define BM_elem_flag_test(ele, hflag)
Definition: bmesh_inline.h:26
ATTR_WARN_UNUSED_RESULT BMesh * bm
void BM_face_as_array_vert_tri(BMFace *f, BMVert *r_verts[3])
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMVert * v
unsigned int U
Definition: btGjkEpa3.h:78
short GPU_pbvh_buffers_material_index_get(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1078
void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers, SubdivCCG *subdiv_ccg, CCGElem **grids, const struct DMFlagMat *grid_flag_mats, int *grid_indices, int totgrid, const int *sculpt_face_sets, const int face_sets_color_seed, const int face_sets_color_default, const struct CCGKey *key, const int update_flags)
Definition: gpu_buffers.c:588
GPUVertFormat format
Definition: gpu_buffers.c:102
uint pos
Definition: gpu_buffers.c:103
static struct @618 g_vbo_id
static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
Definition: gpu_buffers.c:137
static void gpu_bmesh_vert_to_buffer_copy(BMVert *v, GPUVertBuf *vert_buf, int v_index, const float fno[3], const float *fmask, const int cd_vert_mask_offset, const bool show_mask, const bool show_vcol, bool *empty_mask)
Definition: gpu_buffers.c:790
void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers, const MVert *mvert, const float *vmask, const MLoopCol *vcol, const int *sculpt_face_sets, const int face_sets_color_seed, const int face_sets_color_default, const MPropCol *vtcol, const int update_flags)
Definition: gpu_buffers.c:208
uint msk
Definition: gpu_buffers.c:103
static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
Definition: gpu_buffers.c:166
static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts, GSet *bm_other_verts)
Definition: gpu_buffers.c:828
void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1110
static void gpu_pbvh_grid_fill_index_buffers(GPU_PBVH_Buffers *buffers, SubdivCCG *UNUSED(subdiv_ccg), const int *UNUSED(face_sets), const int *grid_indices, uint visible_quad_len, int totgrid, int gridsize)
Definition: gpu_buffers.c:435
void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:866
uint fset
Definition: gpu_buffers.c:103
static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt, const MVert *mvert, const MLoop *mloop, const int *sculpt_face_sets)
Definition: gpu_buffers.c:198
bool GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1073
void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1096
static int gpu_bmesh_face_visible_count(GSet *bm_faces)
Definition: gpu_buffers.c:850
uint nor
Definition: gpu_buffers.c:103
static void gpu_pbvh_buffers_clear(GPU_PBVH_Buffers *buffers)
Definition: gpu_buffers.c:1083
GPUBatch * GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
Definition: gpu_buffers.c:1064
uint col
Definition: gpu_buffers.c:103
GPU_PBVH_Buffers * GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
Definition: gpu_buffers.c:1052
void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers, BMesh *bm, GSet *bm_faces, GSet *bm_unique_verts, GSet *bm_other_verts, const int update_flags)
Definition: gpu_buffers.c:884
GPU_PBVH_Buffers * GPU_pbvh_grid_buffers_build(int totgrid, BLI_bitmap **grid_hidden)
Definition: gpu_buffers.c:768
void GPU_pbvh_grid_buffers_update_free(GPU_PBVH_Buffers *buffers, const struct DMFlagMat *grid_flag_mats, const int *grid_indices)
Definition: gpu_buffers.c:567
GPU_PBVH_Buffers * GPU_pbvh_mesh_buffers_build(const MPoly *mpoly, const MLoop *mloop, const MLoopTri *looptri, const MVert *mvert, const int *face_indices, const int *sculpt_face_sets, const int face_indices_len, const struct Mesh *mesh)
Definition: gpu_buffers.c:341
void gpu_pbvh_exit()
Definition: gpu_buffers.c:130
void gpu_pbvh_init()
Definition: gpu_buffers.c:112
#define UINT_MAX
Definition: hash_md5.c:58
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
MINLINE unsigned short unit_float_to_ushort_clamp(float val)
float BLI_color_from_srgb_table[256]
Definition: math_color.c:556
short mat_nr
Definition: bmesh_class.h:281
int len
Definition: bmesh_class.h:279
float no[3]
Definition: bmesh_class.h:280
float co[3]
Definition: bmesh_class.h:99
float no[3]
Definition: bmesh_class.h:100
CustomData vdata
Definition: bmesh_class.h:337
Definition: BKE_ccg.h:48
int has_mask
Definition: BKE_ccg.h:71
int grid_size
Definition: BKE_ccg.h:56
int grid_area
Definition: BKE_ccg.h:58
const int * grid_indices
Definition: gpu_buffers.c:84
const int * face_indices
Definition: gpu_buffers.c:76
const MVert * mvert
Definition: gpu_buffers.c:74
const MPoly * mpoly
Definition: gpu_buffers.c:71
GPUIndexBuf * index_lines_buf_fast
Definition: gpu_buffers.c:62
GPUBatch * lines_fast
Definition: gpu_buffers.c:66
CCGElem ** grids
Definition: gpu_buffers.c:81
GPUBatch * triangles_fast
Definition: gpu_buffers.c:68
GPUIndexBuf * index_lines_buf
Definition: gpu_buffers.c:62
GPUVertBuf * vert_buf
Definition: gpu_buffers.c:63
const MLoopTri * looptri
Definition: gpu_buffers.c:73
const MLoop * mloop
Definition: gpu_buffers.c:72
bool clear_bmesh_on_flush
Definition: gpu_buffers.c:88
GPUBatch * triangles
Definition: gpu_buffers.c:67
GPUBatch * lines
Definition: gpu_buffers.c:65
GPUIndexBuf * index_buf_fast
Definition: gpu_buffers.c:61
const DMFlagMat * grid_flag_mats
Definition: gpu_buffers.c:82
short material_index
Definition: gpu_buffers.c:92
GPUIndexBuf * index_buf
Definition: gpu_buffers.c:61
BLI_bitmap *const * grid_hidden
Definition: gpu_buffers.c:83
unsigned char a
unsigned char b
unsigned char r
unsigned char g
unsigned int poly
unsigned int tri[3]
unsigned int v
short mat_nr
__forceinline const avxi abs(const avxi &a)
Definition: util_avxi.h:186