Blender  V2.93
draw_cache_impl_curve.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2017 by Blender Foundation.
17  * All rights reserved.
18  */
19 
26 #include "MEM_guardedalloc.h"
27 
28 #include "BLI_listbase.h"
29 #include "BLI_math_vector.h"
30 #include "BLI_utildefines.h"
31 
32 #include "DNA_curve_types.h"
33 
34 #include "BKE_curve.h"
35 #include "BKE_displist.h"
36 #include "BKE_font.h"
37 
38 #include "GPU_batch.h"
39 #include "GPU_capabilities.h"
40 #include "GPU_material.h"
41 #include "GPU_texture.h"
42 
43 #include "UI_resources.h"
44 
45 #include "DRW_render.h"
46 
47 #include "draw_cache_inline.h"
48 
49 #include "draw_cache_impl.h" /* own include */
50 
51 /* See: edit_curve_point_vert.glsl for duplicate includes. */
52 #define SELECT 1
53 #define ACTIVE_NURB (1 << 2)
54 #define BEZIER_HANDLE (1 << 3)
55 #define EVEN_U_BIT (1 << 4) /* Alternate this bit for every U vert. */
56 #define COLOR_SHIFT 5
57 
58 /* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl` */
59 enum {
61 
63 };
64 
72 static void curve_batch_cache_clear(Curve *cu);
73 
74 /* ---------------------------------------------------------------------- */
75 /* Curve Interface, direct access to basic data. */
76 
    int *r_vert_len,
    int *r_edge_len)
{
  /* Count edit-mode overlay geometry for every Nurb in `lb`:
   * - Bezier points contribute 3 verts (2 handles + control point) and
   *   2 handle edges per point.
   * - Poly/NURBS points contribute the full U*V grid of verts plus the
   *   interior segments along both the U and V directions.
   * Either output pointer may be NULL (but not both). */
  BLI_assert(r_vert_len || r_edge_len);
  int vert_len = 0;
  int edge_len = 0;
  LISTBASE_FOREACH (Nurb *, nu, lb) {
    if (nu->bezt) {
      vert_len += nu->pntsu * 3;
      /* 2x handles per point. */
      edge_len += 2 * nu->pntsu;
    }
    else if (nu->bp) {
      vert_len += nu->pntsu * nu->pntsv;
      /* Segments between points, in both grid directions. */
      edge_len += (nu->pntsu - 1) * nu->pntsv;
      edge_len += (nu->pntsv - 1) * nu->pntsu;
    }
  }
  if (r_vert_len) {
    *r_vert_len = vert_len;
  }
  if (r_edge_len) {
    *r_edge_len = edge_len;
  }
}
104 
105 static void curve_render_wire_verts_edges_len_get(const CurveCache *ob_curve_cache,
106  int *r_curve_len,
107  int *r_vert_len,
108  int *r_edge_len)
109 {
110  BLI_assert(r_vert_len || r_edge_len);
111  int vert_len = 0;
112  int edge_len = 0;
113  int curve_len = 0;
114  LISTBASE_FOREACH (const BevList *, bl, &ob_curve_cache->bev) {
115  if (bl->nr > 0) {
116  const bool is_cyclic = bl->poly != -1;
117  edge_len += (is_cyclic) ? bl->nr : bl->nr - 1;
118  vert_len += bl->nr;
119  curve_len += 1;
120  }
121  }
122  LISTBASE_FOREACH (const DispList *, dl, &ob_curve_cache->disp) {
123  if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
124  BLI_assert(dl->parts == 1);
125  const bool is_cyclic = dl->type == DL_POLY;
126  edge_len += (is_cyclic) ? dl->nr : dl->nr - 1;
127  vert_len += dl->nr;
128  curve_len += 1;
129  }
130  }
131  if (r_vert_len) {
132  *r_vert_len = vert_len;
133  }
134  if (r_edge_len) {
135  *r_edge_len = edge_len;
136  }
137  if (r_curve_len) {
138  *r_curve_len = curve_len;
139  }
140 }
141 
142 static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
143 {
144  int normal_len = 0;
145  const BevList *bl;
146  const Nurb *nu;
147  for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
148  int nr = bl->nr;
149  int skip = nu->resolu / 16;
150 #if 0
151  while (nr-- > 0) { /* accounts for empty bevel lists */
152  normal_len += 1;
153  nr -= skip;
154  }
155 #else
156  /* Same as loop above */
157  normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
158 #endif
159  }
160  return normal_len;
161 }
162 
163 /* ---------------------------------------------------------------------- */
164 /* Curve Interface, indirect, partially cached access to complex data. */
165 
166 typedef struct CurveRenderData {
167  int types;
168 
169  struct {
170  int vert_len;
171  int edge_len;
173 
174  struct {
176  int vert_len;
177  int edge_len;
178  } wire;
179 
180  /* edit mode normal's */
181  struct {
182  /* 'edge_len == len * 2'
183  * 'vert_len == len * 3' */
184  int len;
186 
187  struct {
189  } text;
190 
191  /* borrow from 'Object' */
193 
194  /* borrow from 'Curve' */
196 
197  /* edit, index in nurb list */
198  int actnu;
199  /* edit, index in active nurb (BPoint or BezTriple) */
200  int actvert;
202 
203 enum {
204  /* Wire center-line */
206  /* Edit-mode verts and optionally handles */
208  /* Edit-mode normals */
210  /* Geometry */
212  /* Text */
214 };
215 
216 /*
217  * ob_curve_cache can be NULL, only needed for CU_DATATYPE_WIRE
218  */
220  CurveCache *ob_curve_cache,
221  const int types)
222 {
223  CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
224  rdata->types = types;
225  ListBase *nurbs;
226 
227  rdata->actnu = cu->actnu;
228  rdata->actvert = cu->actvert;
229 
230  rdata->ob_curve_cache = ob_curve_cache;
231 
232  if (types & CU_DATATYPE_WIRE) {
234  &rdata->wire.curve_len,
235  &rdata->wire.vert_len,
236  &rdata->wire.edge_len);
237  }
238 
239  if (cu->editnurb) {
240  EditNurb *editnurb = cu->editnurb;
241  nurbs = &editnurb->nurbs;
242 
243  if (types & CU_DATATYPE_OVERLAY) {
245  nurbs, &rdata->overlay.vert_len, &rdata->overlay.edge_len);
246 
247  rdata->actnu = cu->actnu;
248  rdata->actvert = cu->actvert;
249  }
250  if (types & CU_DATATYPE_NORMAL) {
251  rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
252  }
253  }
254  else {
255  nurbs = &cu->nurb;
256  }
257 
258  rdata->nurbs = nurbs;
259 
260  rdata->text.edit_font = cu->editfont;
261 
262  return rdata;
263 }
264 
266 {
267 #if 0
268  if (rdata->loose_verts) {
269  MEM_freeN(rdata->loose_verts);
270  }
271 #endif
272  MEM_freeN(rdata);
273 }
274 
276 {
278  return rdata->overlay.vert_len;
279 }
280 
282 {
284  return rdata->overlay.edge_len;
285 }
286 
288 {
290  return rdata->wire.vert_len;
291 }
292 
294 {
296  return rdata->wire.edge_len;
297 }
298 
300 {
302  return rdata->wire.curve_len;
303 }
304 
306 {
308  return rdata->normal.len;
309 }
310 
    struct GPUMaterial **gpumat_array,
    int gpumat_array_len)
{
  /* Accumulate into `*cd_layers` the custom-data masks (UVs, tangents, orcos)
   * required by the attributes of every non-NULL material in the array. */
  for (int i = 0; i < gpumat_array_len; i++) {
    struct GPUMaterial *gpumat = gpumat_array[i];
    if (gpumat == NULL) {
      continue;
    }

    ListBase gpu_attrs = GPU_material_attributes(gpumat);
    LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
      const char *name = gpu_attr->name;
      int type = gpu_attr->type;

      /* Curves cannot have named layers.
       * Note: We could relax this assumption later. */
      if (name[0] != '\0') {
        continue;
      }

      /* Unnamed auto-detected layers resolve to the UV layer on curves. */
      if (type == CD_AUTO_FROM_NAME) {
        type = CD_MTFACE;
      }

      switch (type) {
        case CD_MTFACE:
          *cd_layers |= CD_MASK_MLOOPUV;
          break;
        case CD_TANGENT:
          *cd_layers |= CD_MASK_TANGENT;
          break;
        case CD_MCOL:
          /* Curve object don't have Color data. */
          break;
        case CD_ORCO:
          *cd_layers |= CD_MASK_ORCO;
          break;
      }
    }
  }
}
353 
354 /* ---------------------------------------------------------------------- */
355 /* Curve GPUBatch Cache */
356 
357 typedef struct CurveBatchCache {
358  struct {
362 
367 
368  struct {
369  /* Curve points. Aligned with ordered.pos_nor */
371  GPUVertBuf *curves_weight; /* TODO. */
372  /* Edit points (beztriples and bpoints) */
375  } edit;
376 
377  struct {
382  /* Edit mode */
385  } ibo;
386 
387  struct {
391  /* control handles and vertices */
396  } batch;
397 
400  int mat_len;
402 
403  /* settings to determine if cache is invalid */
404  bool is_dirty;
406 
407  /* Valid only if edge_detection is up to date. */
410 
411 /* GPUBatch cache management. */
412 
414 {
415  CurveBatchCache *cache = cu->batch_cache;
416 
417  if (cache == NULL) {
418  return false;
419  }
420 
421  if (cache->mat_len != DRW_curve_material_count_get(cu)) {
422  return false;
423  }
424 
425  if (cache->is_dirty) {
426  return false;
427  }
428 
429  if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
430  return false;
431  }
432 
433  if (cache->is_editmode) {
434  if (cu->editfont) {
435  /* TODO */
436  }
437  }
438 
439  return true;
440 }
441 
443 {
444  CurveBatchCache *cache = cu->batch_cache;
445 
446  if (!cache) {
447  cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
448  }
449  else {
450  memset(cache, 0, sizeof(*cache));
451  }
452 
453 #if 0
454  ListBase *nurbs;
455  if (cu->editnurb) {
456  EditNurb *editnurb = cu->editnurb;
457  nurbs = &editnurb->nurbs;
458  }
459  else {
460  nurbs = &cu->nurb;
461  }
462 #endif
463 
464  cache->cd_used = 0;
466  cache->surf_per_mat_tris = MEM_callocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len,
467  __func__);
468  cache->surf_per_mat = MEM_callocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
469 
470  cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
471 
472  cache->is_dirty = false;
473 }
474 
476 {
477  if (!curve_batch_cache_valid(cu)) {
480  }
481 }
482 
484 {
485  return cu->batch_cache;
486 }
487 
489 {
490  CurveBatchCache *cache = cu->batch_cache;
491  if (cache == NULL) {
492  return;
493  }
494  switch (mode) {
496  cache->is_dirty = true;
497  break;
500 
503  break;
504  default:
505  BLI_assert(0);
506  }
507 }
508 
510 {
511  CurveBatchCache *cache = cu->batch_cache;
512  if (!cache) {
513  return;
514  }
515 
516  for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); i++) {
517  GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
518  GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
519  }
520  for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); i++) {
521  GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
522  GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
523  }
524  for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); i++) {
525  GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
527  }
528  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
529  GPUBatch **batch = (GPUBatch **)&cache->batch;
531  }
532 
533  for (int i = 0; i < cache->mat_len; i++) {
536  }
538  MEM_SAFE_FREE(cache->surf_per_mat);
539  cache->mat_len = 0;
540  cache->cd_used = 0;
541 }
542 
544 {
547 }
548 
549 /* -------------------------------------------------------------------- */
553 /* GPUBatch cache usage. */
/* Fill `vbo_curves_pos` with one 3D position per wire vertex of the evaluated
 * curve: first all bevel-list points, then DL_SEGM/DL_POLY display-list
 * points.  Vertex order matches the indices built by
 * `curve_create_curves_lines`. */
static void curve_create_curves_pos(CurveRenderData *rdata, GPUVertBuf *vbo_curves_pos)
{
  BLI_assert(rdata->ob_curve_cache != NULL);

  /* Static: the vertex format is created once and reused for the whole
   * process lifetime. */
  static GPUVertFormat format = {0};
  static struct {
    uint pos;
  } attr_id;
  if (format.attr_len == 0) {
    /* NOTE(review): the `attr_id.pos = GPU_vertformat_attr_add(&format, ...)`
     * line appears to be missing from this extract — confirm against the
     * original source. */
  }

  const int vert_len = curve_render_data_wire_verts_len_get(rdata);
  GPU_vertbuf_init_with_format(vbo_curves_pos, &format);
  GPU_vertbuf_data_alloc(vbo_curves_pos, vert_len);

  int v_idx = 0;
  /* Bevel lists: evaluated curve center-lines. */
  LISTBASE_FOREACH (const BevList *, bl, &rdata->ob_curve_cache->bev) {
    if (bl->nr <= 0) {
      continue; /* Empty bevel lists have no points. */
    }
    const int i_end = v_idx + bl->nr;
    for (const BevPoint *bevp = bl->bevpoints; v_idx < i_end; v_idx++, bevp++) {
      GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, bevp->vec);
    }
  }
  /* Display lists: only wire types (segments / polygons) contribute. */
  LISTBASE_FOREACH (const DispList *, dl, &rdata->ob_curve_cache->disp) {
    if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
      for (int i = 0; i < dl->nr; v_idx++, i++) {
        GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, &((float(*)[3])dl->verts)[i]);
      }
    }
  }
  /* Every allocated vertex must have been written exactly once. */
  BLI_assert(v_idx == vert_len);
}
589 
/* Build the line-strip index buffer for the curve wire: one strip per bevel
 * list / wire display list.  Cyclic strips get one extra leading index (the
 * last vertex) so the strip closes on itself.  Vertex index order matches
 * `curve_create_curves_pos`. */
static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_curve_lines)
{
  BLI_assert(rdata->ob_curve_cache != NULL);

  const int vert_len = curve_render_data_wire_verts_len_get(rdata);
  const int edge_len = curve_render_data_wire_edges_len_get(rdata);
  const int curve_len = curve_render_data_wire_curve_len_get(rdata);
  /* Count the last vertex of each strip and the primitive restart. */
  const int index_len = edge_len + curve_len * 2;

  GPUIndexBufBuilder elb;
  GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len);

  int v_idx = 0;
  LISTBASE_FOREACH (const BevList *, bl, &rdata->ob_curve_cache->bev) {
    if (bl->nr <= 0) {
      continue; /* Empty bevel lists contribute no vertices. */
    }
    const bool is_cyclic = bl->poly != -1;
    if (is_cyclic) {
      /* Emit the last vertex first so the strip closes the loop. */
      GPU_indexbuf_add_generic_vert(&elb, v_idx + (bl->nr - 1));
    }
    for (int i = 0; i < bl->nr; i++) {
      GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
    }
    /* NOTE(review): a `GPU_indexbuf_add_primitive_restart(&elb);` call appears
     * to be missing here in this extract (the `index_len` budget above
     * accounts for one restart per strip) — confirm against the original. */
    v_idx += bl->nr;
  }
  LISTBASE_FOREACH (const DispList *, dl, &rdata->ob_curve_cache->disp) {
    if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
      const bool is_cyclic = dl->type == DL_POLY;
      if (is_cyclic) {
        GPU_indexbuf_add_generic_vert(&elb, v_idx + (dl->nr - 1));
      }
      for (int i = 0; i < dl->nr; i++) {
        GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
      }
      /* NOTE(review): the primitive-restart call appears missing here as
       * well — confirm against the original. */
      v_idx += dl->nr;
    }
  }
  GPU_indexbuf_build_in_place(&elb, ibo_curve_lines);
}
633 
    GPUVertBuf *vbo_curves_nor,
    const Scene *scene)
{
  /* HQ normals use 16-bit components; the low-quality format presumably uses
   * packed 10-bit ones (the non-HQ attr-add lines are missing from this
   * extract — TODO confirm).  The right-hand side of this `||` also appears
   * truncated here (likely `GPU_use_hq_normals_workaround()`). */
  const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||

  /* Both vertex formats are created once and cached for the process
   * lifetime. */
  static GPUVertFormat format = {0};
  static GPUVertFormat format_hq = {0};
  static struct {
    uint pos, nor, tan, rad;
    uint pos_hq, nor_hq, tan_hq, rad_hq;
  } attr_id;
  if (format.attr_len == 0) {
    /* initialize vertex formats */
    /* NOTE(review): the attribute-add lines for the non-HQ format (and the
     * call heads for `nor_hq`/`tan_hq` below) appear to be missing from this
     * extract — confirm against the original source. */

    attr_id.pos_hq = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    attr_id.rad_hq = GPU_vertformat_attr_add(&format_hq, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
        &format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
        &format_hq, "tan", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
  }

  const GPUVertFormat *format_ptr = do_hq_normals ? &format_hq : &format;

  /* Two vertices per normal: the anchor point and its offset tip. */
  int verts_len_capacity = curve_render_data_normal_len_get(rdata) * 2;
  int vbo_len_used = 0;

  GPU_vertbuf_init_with_format(vbo_curves_nor, format_ptr);
  GPU_vertbuf_data_alloc(vbo_curves_nor, verts_len_capacity);

  const BevList *bl;
  const Nurb *nu;

  /* Pick the attribute ids matching the chosen format. */
  const uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
  const uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
  const uint tan_id = do_hq_normals ? attr_id.tan_hq : attr_id.tan;
  const uint rad_id = do_hq_normals ? attr_id.rad_hq : attr_id.rad;

  /* Walk nurbs and bevel lists in lock-step (one bevel list per nurb). */
  for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first; nu && bl;
       bl = bl->next, nu = nu->next) {
    const BevPoint *bevp = bl->bevpoints;
    int nr = bl->nr;
    int skip = nu->resolu / 16;

    while (nr-- > 0) { /* accounts for empty bevel lists */
      /* The normal is +X rotated by the bevel point's orientation. */
      float nor[3] = {1.0f, 0.0f, 0.0f};
      mul_qt_v3(bevp->quat, nor);

      GPUNormal pnor;
      GPUNormal ptan;
      GPU_normal_convert_v3(&pnor, nor, do_hq_normals);
      GPU_normal_convert_v3(&ptan, bevp->dir, do_hq_normals);
      /* Only set attributes for one vertex. */
      GPU_vertbuf_attr_set(vbo_curves_nor, pos_id, vbo_len_used, bevp->vec);
      GPU_vertbuf_attr_set(vbo_curves_nor, rad_id, vbo_len_used, &bevp->radius);
      GPU_vertbuf_attr_set(vbo_curves_nor, nor_id, vbo_len_used, &pnor);
      GPU_vertbuf_attr_set(vbo_curves_nor, tan_id, vbo_len_used, &ptan);
      vbo_len_used++;

      /* Skip the other vertex (it does not need to be offsetted). */
      /* NOTE(review): this uses `attr_id.pos` instead of `pos_id`, so when
       * `do_hq_normals` is true it writes through the non-HQ attribute id —
       * looks like a bug; confirm against upstream fixes. */
      GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
      vbo_len_used++;

      bevp += skip + 1;
      nr -= skip;
    }
  }
  BLI_assert(vbo_len_used == verts_len_capacity);
}
711 
    uint8_t flag,
    uint8_t col_id,
    int v_idx,
    int nu_id,
    bool handle_point,
    const bool handle_selected)
{
  /* Pack per-vertex display flags for one vertex of a Bezier triple into a
   * single byte for the "data" vertex attribute (low bits are flags, high
   * bits carry `col_id` — see the edit-curve overlay shaders). */
  uint8_t vflag = 0;
  /* NOTE(review): a `SET_FLAG_FROM_TEST(vflag, (flag & SELECT),
   * VFLAG_VERT_SELECTED)` line appears to be missing from this extract
   * (the `flag` parameter is otherwise unused) — confirm against the
   * original source. */
  SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert && nu_id == rdata->actnu), VFLAG_VERT_ACTIVE);
  SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
  SET_FLAG_FROM_TEST(vflag, handle_point, BEZIER_HANDLE);
  SET_FLAG_FROM_TEST(vflag, handle_selected, VFLAG_VERT_SELECTED_BEZT_HANDLE);
  /* Setting flags that overlap the color-id bits would corrupt the color id. */
  BLI_assert((vflag >> COLOR_SHIFT) == 0);
  /* handle color id */
  vflag |= col_id << COLOR_SHIFT;
  return vflag;
}
732 
/* Pack per-vertex display flags for a poly/NURBS control point into a single
 * byte for the "data" vertex attribute (low bits are flags, high bits carry
 * the fixed nurb-U-line color id). */
static uint8_t bpoint_vflag_get(CurveRenderData *rdata, uint8_t flag, int v_idx, int nu_id, int u)
{
  uint8_t vflag = 0;
  /* NOTE(review): a `SET_FLAG_FROM_TEST(vflag, (flag & SELECT),
   * VFLAG_VERT_SELECTED)` line appears to be missing from this extract
   * (the `flag` parameter is otherwise unused) — confirm against the
   * original source. */
  SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert && nu_id == rdata->actnu), VFLAG_VERT_ACTIVE);
  SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
  /* Alternate bit per U column, used for the even/odd U-line pattern. */
  SET_FLAG_FROM_TEST(vflag, ((u % 2) == 0), EVEN_U_BIT);
  /* Setting flags that overlap the color-id bits would corrupt the color id. */
  BLI_assert((vflag >> COLOR_SHIFT) == 0);
  vflag |= COLOR_NURB_ULINE_ID << COLOR_SHIFT;
  return vflag;
}
745 
    GPUVertBuf *vbo_pos,
    GPUVertBuf *vbo_data,
    GPUIndexBuf *ibo_edit_verts_points,
    GPUIndexBuf *ibo_edit_lines)
{
  /* Build the edit-mode overlay buffers: point positions, packed per-vertex
   * flag bytes, a point index buffer and a line (handles / grid segments)
   * index buffer.  Any of the four outputs may be skipped; only those the
   * DRW_TEST_ASSIGN_* macros claim are filled. */

  /* Vertex formats are created once and cached for the process lifetime. */
  static GPUVertFormat format_pos = {0};
  static GPUVertFormat format_data = {0};
  static struct {
    uint pos, data;
  } attr_id;
  if (format_pos.attr_len == 0) {
    /* initialize vertex formats */
    attr_id.pos = GPU_vertformat_attr_add(&format_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    attr_id.data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
  }

  int verts_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
  /* 2 indices per edge. */
  int edges_len_capacity = curve_render_data_overlay_edges_len_get(rdata) * 2;
  int vbo_len_used = 0;

  if (DRW_TEST_ASSIGN_VBO(vbo_pos)) {
    GPU_vertbuf_init_with_format(vbo_pos, &format_pos);
    GPU_vertbuf_data_alloc(vbo_pos, verts_len_capacity);
  }
  if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
    GPU_vertbuf_init_with_format(vbo_data, &format_data);
    GPU_vertbuf_data_alloc(vbo_data, verts_len_capacity);
  }

  /* The builder pointers stay NULL when the corresponding IBO is not
   * requested; NULL-ness gates all index emission below. */
  GPUIndexBufBuilder elb_verts, *elbp_verts = NULL;
  GPUIndexBufBuilder elb_lines, *elbp_lines = NULL;
  if (DRW_TEST_ASSIGN_IBO(ibo_edit_verts_points)) {
    elbp_verts = &elb_verts;
    GPU_indexbuf_init(elbp_verts, GPU_PRIM_POINTS, verts_len_capacity, verts_len_capacity);
  }
  if (DRW_TEST_ASSIGN_IBO(ibo_edit_lines)) {
    elbp_lines = &elb_lines;
    GPU_indexbuf_init(elbp_lines, GPU_PRIM_LINES, edges_len_capacity, verts_len_capacity);
  }

  int nu_id = 0;
  for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
    const BezTriple *bezt = nu->bezt;
    const BPoint *bp = nu->bp;

    if (bezt) {
      /* Bezier: 3 consecutive verts per triple (handle 1, control point,
       * handle 2).  Hidden points are skipped entirely (no vert, no index). */
      for (int a = 0; a < nu->pntsu; a++, bezt++) {
        if (bezt->hide == true) {
          continue;
        }
        const bool handle_selected = BEZT_ISSEL_ANY(bezt);

        if (elbp_verts) {
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 0);
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 1);
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 2);
        }
        if (elbp_lines) {
          /* Handle lines radiate from the center vert (index +1). */
          GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 0);
          GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 2);
        }
        if (vbo_data) {
          const uint8_t vflag[3] = {
              beztriple_vflag_get(rdata, bezt->f1, bezt->h1, a, nu_id, true, handle_selected),
              beztriple_vflag_get(rdata, bezt->f2, bezt->h1, a, nu_id, false, handle_selected),
              beztriple_vflag_get(rdata, bezt->f3, bezt->h2, a, nu_id, true, handle_selected),
          };
          for (int j = 0; j < 3; j++) {
            GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used + j, &vflag[j]);
          }
        }
        if (vbo_pos) {
          for (int j = 0; j < 3; j++) {
            GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used + j, bezt->vec[j]);
          }
        }
        vbo_len_used += 3;
      }
    }
    else if (bp) {
      /* Poly/NURBS: one vert per grid point.  Note the loop increment also
       * runs on `continue`, so hidden points still consume a vert slot —
       * they are hidden only by omitting their indices. */
      int pt_len = nu->pntsu * nu->pntsv;
      for (int a = 0; a < pt_len; a++, bp++, vbo_len_used += 1) {
        if (bp->hide == true) {
          continue;
        }
        int u = (a % nu->pntsu);
        int v = (a / nu->pntsu);
        /* Use indexed rendering for bezier.
         * Specify all points and use indices to hide/show. */
        if (elbp_verts) {
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used);
        }
        if (elbp_lines) {
          /* Link to the next point along U and V, skipping hidden neighbors
           * and grid borders. */
          const BPoint *bp_next_u = (u < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
          const BPoint *bp_next_v = (v < (nu->pntsv - 1)) ? &nu->bp[a + nu->pntsu] : NULL;
          if (bp_next_u && (bp_next_u->hide == false)) {
            GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + 1);
          }
          if (bp_next_v && (bp_next_v->hide == false)) {
            GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + nu->pntsu);
          }
        }
        if (vbo_data) {
          uint8_t vflag = bpoint_vflag_get(rdata, bp->f1, a, nu_id, u);
          GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used, &vflag);
        }
        if (vbo_pos) {
          GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used, bp->vec);
        }
      }
    }
  }

  /* Resize & Finish */
  if (elbp_verts != NULL) {
    GPU_indexbuf_build_in_place(elbp_verts, ibo_edit_verts_points);
  }
  if (elbp_lines != NULL) {
    GPU_indexbuf_build_in_place(elbp_lines, ibo_edit_lines);
  }
  /* Hidden bezier points were skipped above, so the buffers may be
   * over-allocated; shrink them to what was actually written. */
  if (vbo_len_used != verts_len_capacity) {
    if (vbo_pos != NULL) {
      GPU_vertbuf_data_resize(vbo_pos, vbo_len_used);
    }
    if (vbo_data != NULL) {
      GPU_vertbuf_data_resize(vbo_data, vbo_len_used);
    }
  }
}
876 
879 /* -------------------------------------------------------------------- */
884 {
886  return DRW_batch_request(&cache->batch.curves);
887 }
888 
890 {
892  return DRW_batch_request(&cache->batch.edit_normals);
893 }
894 
896 {
898  return DRW_batch_request(&cache->batch.edit_edges);
899 }
900 
902 {
904  return DRW_batch_request(&cache->batch.edit_verts);
905 }
906 
908 {
910  return DRW_batch_request(&cache->batch.surfaces);
911 }
912 
914  struct GPUMaterial **gpumat_array,
915  uint gpumat_array_len)
916 {
918 
919  BLI_assert(gpumat_array_len == cache->mat_len);
920 
921  curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);
922 
923  for (int i = 0; i < cache->mat_len; i++) {
924  DRW_batch_request(&cache->surf_per_mat[i]);
925  }
926  return cache->surf_per_mat;
927 }
928 
930 {
932  /* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
934 
936  return cache->ordered.loop_pos_nor;
937 }
938 
940 {
942  return DRW_batch_request(&cache->batch.surfaces_edges);
943 }
944 
946 {
948  /* Even if is_manifold is not correct (not updated),
949  * the default (not manifold) is just the worst case. */
950  if (r_is_manifold) {
951  *r_is_manifold = cache->is_manifold;
952  }
953  return DRW_batch_request(&cache->batch.edge_detection);
954 }
955 
957 {
958  return max_ii(1, cu->totcol);
959 }
960 
963 /* -------------------------------------------------------------------- */
968 {
970 
971  Curve *cu = ob->data;
973 
974  /* Verify that all surface batches have needed attribute layers. */
975  /* TODO(fclem): We could be a bit smarter here and only do it per material. */
976  if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
977  for (int i = 0; i < cache->mat_len; i++) {
978  /* We can't discard batches at this point as they have been
979  * referenced for drawing. Just clear them in place. */
981  }
982 
983  cache->cd_used |= cache->cd_needed;
984  cache->cd_needed = 0;
985  }
986 
987  /* Init batches and request VBOs & IBOs */
990  }
995  }
997  DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
998  DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
999  }
1003  }
1004 
1005  /* Edit mode */
1007  DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_lines);
1008  DRW_vbo_request(cache->batch.edit_edges, &cache->edit.pos);
1009  DRW_vbo_request(cache->batch.edit_edges, &cache->edit.data);
1010  }
1012  DRW_ibo_request(cache->batch.edit_verts, &cache->ibo.edit_verts);
1013  DRW_vbo_request(cache->batch.edit_verts, &cache->edit.pos);
1014  DRW_vbo_request(cache->batch.edit_verts, &cache->edit.data);
1015  }
1018  }
1019  for (int i = 0; i < cache->mat_len; i++) {
1021  if (cache->mat_len > 1) {
1022  DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
1023  }
1024  if (cache->cd_used & CD_MASK_MLOOPUV) {
1025  DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv);
1026  }
1027  if (cache->cd_used & CD_MASK_TANGENT) {
1028  DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_tan);
1029  }
1030  DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
1031  }
1032  }
1033 
1034 #ifdef DRW_DEBUG_MESH_CACHE_REQUEST
1035  printf("-- %s %s --\n", __func__, ob->id.name + 2);
1036 #endif
1037 
1038  /* Generate MeshRenderData flags */
1039  int mr_flag = 0;
1050 
1057 
1058  for (int i = 0; i < cache->mat_len; i++) {
1060  }
1061 
1062 #ifdef DRW_DEBUG_MESH_CACHE_REQUEST
1063  printf(" mr_flag %d\n\n", mr_flag);
1064 #endif
1065 
1066  CurveRenderData *rdata = curve_render_data_create(cu, ob->runtime.curve_cache, mr_flag);
1067 
1068  /* DispLists */
1069  ListBase *lb = &rdata->ob_curve_cache->disp;
1070 
1071  /* Generate VBOs */
1072  if (DRW_vbo_requested(cache->ordered.pos_nor)) {
1074  }
1075  if (DRW_vbo_requested(cache->ordered.edge_fac)) {
1077  }
1078  if (DRW_vbo_requested(cache->ordered.curves_pos)) {
1080  }
1081 
1082  if (DRW_vbo_requested(cache->ordered.loop_pos_nor) ||
1085  lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv, cache->ordered.loop_tan, scene);
1086  }
1087 
1088  if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
1090  lb, cache->surf_per_mat_tris, cache->mat_len);
1091  }
1092 
1093  if (DRW_ibo_requested(cache->ibo.curves_lines)) {
1095  }
1096  if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
1098  }
1099  if (DRW_ibo_requested(cache->ibo.surfaces_lines)) {
1101  }
1102  if (DRW_ibo_requested(cache->ibo.edges_adj_lines)) {
1104  lb, cache->ibo.edges_adj_lines, &cache->is_manifold);
1105  }
1106 
1107  if (DRW_vbo_requested(cache->edit.pos) || DRW_vbo_requested(cache->edit.data) ||
1110  rdata, cache->edit.pos, cache->edit.data, cache->ibo.edit_verts, cache->ibo.edit_lines);
1111  }
1112  if (DRW_vbo_requested(cache->edit.curves_nor)) {
1114  }
1115 
1116  curve_render_data_free(rdata);
1117 
1118 #ifdef DEBUG
1119  /* Make sure all requested batches have been setup. */
1120  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
1121  BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
1122  }
1123 #endif
1124 }
1125 
@ BKE_CURVE_BATCH_DIRTY_SELECT
Definition: BKE_curve.h:287
@ BKE_CURVE_BATCH_DIRTY_ALL
Definition: BKE_curve.h:286
uint64_t CustomDataMask
display list (or rather multi purpose list) stuff.
@ DL_POLY
Definition: BKE_displist.h:36
@ DL_SEGM
Definition: BKE_displist.h:38
#define BLI_assert(a)
Definition: BLI_assert.h:58
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
MINLINE int max_ii(int a, int b)
void mul_qt_v3(const float q[4], float r[3])
Definition: math_rotation.c:97
unsigned int uint
Definition: BLI_sys_types.h:83
#define SET_FLAG_FROM_TEST(value, test, flag)
#define ELEM(...)
#define BEZT_ISSEL_ANY(bezt)
#define CD_MASK_ORCO
#define CD_MASK_TANGENT
@ CD_AUTO_FROM_NAME
@ CD_TANGENT
#define CD_MASK_MLOOPUV
@ OB_SURF
@ OB_FONT
@ OB_CURVE
@ SCE_PERF_HQ_NORMALS
GPUBatch
Definition: GPU_batch.h:93
#define GPU_BATCH_CLEAR_SAFE(batch)
Definition: GPU_batch.h:207
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition: GPU_batch.h:199
bool GPU_use_hq_normals_workaround(void)
struct GPUIndexBuf GPUIndexBuf
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder *)
void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder *, uint v)
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2)
void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *, uint v)
void GPU_indexbuf_init_ex(GPUIndexBufBuilder *, GPUPrimType, uint index_len, uint vertex_len)
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *, GPUIndexBuf *)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
ListBase GPU_material_attributes(GPUMaterial *material)
Definition: gpu_material.c:572
@ GPU_PRIM_LINES
Definition: GPU_primitive.h:36
@ GPU_PRIM_POINTS
Definition: GPU_primitive.h:35
@ GPU_PRIM_LINES_ADJ
Definition: GPU_primitive.h:43
@ GPU_PRIM_LINE_STRIP
Definition: GPU_primitive.h:38
@ GPU_PRIM_TRIS
Definition: GPU_primitive.h:37
struct GPUVertBuf GPUVertBuf
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len)
#define GPU_vertbuf_init_with_format(verts, format)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_attr_set(GPUVertBuf *, uint a_idx, uint v_idx, const void *data)
void GPU_vertbuf_data_resize(GPUVertBuf *, uint v_len)
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
@ GPU_FETCH_INT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
BLI_INLINE void GPU_normal_convert_v3(GPUNormal *gpu_normal, const float data[3], const bool do_hq_normals)
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I16
@ GPU_COMP_U8
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
@ TH_HANDLE_FREE
Definition: UI_resources.h:125
@ TH_HANDLE_AUTOCLAMP
Definition: UI_resources.h:129
ATTR_WARN_UNUSED_RESULT const BMVert * v
Scene scene
void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb, struct GPUIndexBuf *ibo, bool *r_is_manifold)
void DRW_displist_indexbuf_create_triangles_loop_split_by_material(struct ListBase *lb, struct GPUIndexBuf **ibo_mat, uint mat_len)
void DRW_displist_vertbuf_create_wiredata(struct ListBase *lb, struct GPUVertBuf *vbo)
void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(struct ListBase *lb, struct GPUVertBuf *vbo_pos_nor, struct GPUVertBuf *vbo_uv, struct GPUVertBuf *vbo_tan, const struct Scene *scene)
@ VFLAG_VERT_SELECTED_BEZT_HANDLE
@ VFLAG_VERT_SELECTED
@ VFLAG_VERT_ACTIVE
void DRW_displist_indexbuf_create_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo)
void DRW_displist_indexbuf_create_lines_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo)
void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb, struct GPUVertBuf *vbo, const struct Scene *scene)
static bool curve_batch_cache_valid(Curve *cu)
static void curve_create_edit_data_and_handles(CurveRenderData *rdata, GPUVertBuf *vbo_pos, GPUVertBuf *vbo_data, GPUIndexBuf *ibo_edit_verts_points, GPUIndexBuf *ibo_edit_lines)
static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
struct CurveRenderData CurveRenderData
static uint8_t bpoint_vflag_get(CurveRenderData *rdata, uint8_t flag, int v_idx, int nu_id, int u)
static void curve_batch_cache_init(Curve *cu)
GPUBatch * DRW_curve_batch_cache_get_wireframes_face(Curve *cu)
@ CU_DATATYPE_TEXT_SELECT
@ CU_DATATYPE_NORMAL
@ CU_DATATYPE_OVERLAY
@ CU_DATATYPE_WIRE
@ CU_DATATYPE_SURFACE
GPUBatch * DRW_curve_batch_cache_get_edit_edges(Curve *cu)
static CurveBatchCache * curve_batch_cache_get(Curve *cu)
static void curve_cd_calc_used_gpu_layers(CustomDataMask *cd_layers, struct GPUMaterial **gpumat_array, int gpumat_array_len)
static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
#define EVEN_U_BIT
static void curve_batch_cache_clear(Curve *cu)
static CurveRenderData * curve_render_data_create(Curve *cu, CurveCache *ob_curve_cache, const int types)
#define SELECT
GPUVertBuf * DRW_curve_batch_cache_pos_vertbuf_get(struct Curve *cu)
static void curve_render_data_free(CurveRenderData *rdata)
@ COLOR_NURB_ULINE_ID
@ TOT_HANDLE_COL
struct CurveBatchCache CurveBatchCache
static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo_curves_nor, const Scene *scene)
#define COLOR_SHIFT
static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
static void curve_create_curves_pos(CurveRenderData *rdata, GPUVertBuf *vbo_curves_pos)
static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
int DRW_curve_material_count_get(Curve *cu)
static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_curve_lines)
GPUBatch * DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu)
GPUBatch * DRW_curve_batch_cache_get_normal_edge(Curve *cu)
void DRW_curve_batch_cache_dirty_tag(Curve *cu, int mode)
GPUBatch ** DRW_curve_batch_cache_get_surface_shaded(struct Curve *cu, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
static int curve_render_data_wire_curve_len_get(const CurveRenderData *rdata)
GPUBatch * DRW_curve_batch_cache_get_edge_detection(Curve *cu, bool *r_is_manifold)
static void curve_render_wire_verts_edges_len_get(const CurveCache *ob_curve_cache, int *r_curve_len, int *r_vert_len, int *r_edge_len)
static void curve_render_overlay_verts_edges_len_get(ListBase *lb, int *r_vert_len, int *r_edge_len)
#define BEZIER_HANDLE
static uint8_t beztriple_vflag_get(CurveRenderData *rdata, uint8_t flag, uint8_t col_id, int v_idx, int nu_id, bool handle_point, const bool handle_selected)
GPUBatch * DRW_curve_batch_cache_get_edit_verts(Curve *cu)
void DRW_curve_batch_cache_validate(Curve *cu)
void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scene)
#define ACTIVE_NURB
void DRW_curve_batch_cache_free(Curve *cu)
static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
GPUBatch * DRW_curve_batch_cache_get_wire_edge(Curve *cu)
BLI_INLINE void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
BLI_INLINE bool DRW_vbo_requested(GPUVertBuf *vbo)
#define DRW_TEST_ASSIGN_VBO(v)
#define DRW_TEST_ASSIGN_IBO(v)
#define DRW_ADD_FLAG_FROM_IBO_REQUEST(flag, ibo, value)
#define DRW_ADD_FLAG_FROM_VBO_REQUEST(flag, vbo, value)
BLI_INLINE bool DRW_batch_requested(GPUBatch *batch, int prim_type)
BLI_INLINE void DRW_ibo_request(GPUBatch *batch, GPUIndexBuf **ibo)
BLI_INLINE bool DRW_ibo_requested(GPUIndexBuf *ibo)
BLI_INLINE GPUBatch * DRW_batch_request(GPUBatch **batch)
GPUBatch * batch
Definition: drawnode.c:3779
uint pos
struct @612::@615 attr_id
uint nor
format
Definition: logImageCore.h:47
static char ** types
Definition: makesdna.c:164
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
static unsigned a[3]
Definition: RandGen.cpp:92
INLINE Rall1d< T, V, S > tan(const Rall1d< T, V, S > &arg)
Definition: rall1d.h:327
unsigned char uint8_t
Definition: stdint.h:81
short hide
struct BevList * next
BevPoint * bevpoints
float radius
float dir[3]
float quat[4]
float vec[3]
float vec[3][3]
struct CurveBatchCache::@274 batch
GPUIndexBuf ** surf_per_mat_tris
struct CurveBatchCache::@273 ibo
GPUIndexBuf * surfaces_tris
GPUIndexBuf * edit_verts
GPUIndexBuf * surfaces_lines
struct CurveBatchCache::@272 edit
GPUIndexBuf * curves_lines
CustomDataMask cd_needed
GPUIndexBuf * edit_lines
GPUVertBuf * curves_weight
struct CurveBatchCache::@271 ordered
GPUIndexBuf * edges_adj_lines
CustomDataMask cd_used
ListBase bev
Definition: BKE_curve.h:50
ListBase disp
Definition: BKE_curve.h:49
struct CurveRenderData::@270 text
struct CurveRenderData::@269 normal
struct CurveRenderData::@268 wire
struct CurveRenderData::@267 overlay
CurveCache * ob_curve_cache
short totcol
void * batch_cache
struct EditFont * editfont
EditNurb * editnurb
ListBase nurb
ListBase nurbs
char name[64]
Definition: gpu_material.c:109
char name[66]
Definition: DNA_ID.h:283
void * first
Definition: DNA_listBase.h:47
struct Nurb * next
short resolu
struct CurveCache * curve_cache
Object_Runtime runtime
void * data
struct RenderData r