Blender  V2.93
draw_cache_impl_displist.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2017 by Blender Foundation.
17  * All rights reserved.
18  */
19 
28 #include "BLI_alloca.h"
29 #include "BLI_edgehash.h"
30 #include "BLI_listbase.h"
31 #include "BLI_math_vector.h"
32 #include "BLI_utildefines.h"
33 
34 #include "DNA_curve_types.h"
35 #include "DNA_scene_types.h"
36 
37 #include "BKE_displist.h"
38 #include "BKE_displist_tangent.h"
39 
40 #include "GPU_batch.h"
41 #include "GPU_capabilities.h"
42 
43 #include "draw_cache_inline.h"
44 
45 #include "draw_cache_impl.h" /* own include */
46 
47 static int dl_vert_len(const DispList *dl)
48 {
49  switch (dl->type) {
50  case DL_INDEX3:
51  case DL_INDEX4:
52  return dl->nr;
53  case DL_SURF:
54  return dl->parts * dl->nr;
55  }
56  return 0;
57 }
58 
59 static int dl_tri_len(const DispList *dl)
60 {
61  switch (dl->type) {
62  case DL_INDEX3:
63  return dl->parts;
64  case DL_INDEX4:
65  return dl->parts * 2;
66  case DL_SURF:
67  return dl->totindex * 2;
68  }
69  return 0;
70 }
71 
72 /* see: displist_vert_coords_alloc */
74 {
75  int vert_len = 0;
76  LISTBASE_FOREACH (const DispList *, dl, lb) {
77  vert_len += dl_vert_len(dl);
78  }
79  return vert_len;
80 }
81 
83 {
84  int tri_len = 0;
85  LISTBASE_FOREACH (const DispList *, dl, lb) {
86  tri_len += dl_tri_len(dl);
87  }
88  return tri_len;
89 }
90 
91 typedef void(SetTriIndicesFn)(void *thunk, uint v1, uint v2, uint v3);
92 
94  SetTriIndicesFn *set_tri_indices,
95  SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
96  void *thunk,
97  const DispList *dl,
98  const int ofs)
99 {
100  if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
101  const int *idx = dl->index;
102  if (dl->type == DL_INDEX3) {
103  const int i_end = dl->parts;
104  for (int i = 0; i < i_end; i++, idx += 3) {
105  set_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
106  }
107  }
108  else if (dl->type == DL_SURF) {
109  const int i_end = dl->totindex;
110  for (int i = 0; i < i_end; i++, idx += 4) {
111  set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
112  set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[3] + ofs);
113  }
114  }
115  else {
116  BLI_assert(dl->type == DL_INDEX4);
117  const int i_end = dl->parts;
118  for (int i = 0; i < i_end; i++, idx += 4) {
119  if (idx[2] != idx[3]) {
120  set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
121  set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[3] + ofs);
122  }
123  else {
124  set_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
125  }
126  }
127  }
128  }
129 }
130 
132  SetTriIndicesFn *set_tri_indices,
133  SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
134  void *thunk,
135  const DispList *dl,
136  const int ofs)
137 {
138  int v_idx = ofs;
139  if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
140  if (dl->type == DL_INDEX3) {
141  for (int i = 0; i < dl->parts; i++) {
142  set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
143  v_idx += 3;
144  }
145  }
146  else if (dl->type == DL_SURF) {
147  for (int a = 0; a < dl->parts; a++) {
148  if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
149  break;
150  }
151  int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
152  for (; b < dl->nr; b++) {
153  set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
154  set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
155  v_idx += 6;
156  }
157  }
158  }
159  else {
160  BLI_assert(dl->type == DL_INDEX4);
161  const int *idx = dl->index;
162  for (int i = 0; i < dl->parts; i++, idx += 4) {
163  if (idx[2] != idx[3]) {
164  set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
165  set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
166  v_idx += 6;
167  }
168  else {
169  set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
170  v_idx += 3;
171  }
172  }
173  }
174  }
175  return v_idx;
176 }
177 
179 {
180  const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
182 
183  static GPUVertFormat format = {0};
184  static GPUVertFormat format_hq = {0};
185  static struct {
186  uint pos, nor;
187  uint pos_hq, nor_hq;
188  } attr_id;
189  if (format.attr_len == 0) {
190  /* initialize vertex format */
194  /* initialize vertex format */
195  attr_id.pos_hq = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
197  &format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
198  }
199 
200  uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
201  uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
202 
203  GPU_vertbuf_init_with_format(vbo, do_hq_normals ? &format_hq : &format);
205 
207 
208  int vbo_len_used = 0;
209  LISTBASE_FOREACH (const DispList *, dl, lb) {
210  const bool ndata_is_single = dl->type == DL_INDEX3;
211  if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
212  const float *fp_co = dl->verts;
213  const float *fp_no = dl->nors;
214  const int vbo_end = vbo_len_used + dl_vert_len(dl);
215  while (vbo_len_used < vbo_end) {
216  GPU_vertbuf_attr_set(vbo, pos_id, vbo_len_used, fp_co);
217  if (fp_no) {
218  GPUNormal vnor_pack;
219  GPU_normal_convert_v3(&vnor_pack, fp_no, do_hq_normals);
220  GPU_vertbuf_attr_set(vbo, nor_id, vbo_len_used, &vnor_pack);
221  if (ndata_is_single == false) {
222  fp_no += 3;
223  }
224  }
225  fp_co += 3;
226  vbo_len_used += 1;
227  }
228  }
229  }
230 }
231 
232 void DRW_vertbuf_create_wiredata(GPUVertBuf *vbo, const int vert_len)
233 {
234  static GPUVertFormat format = {0};
235  static struct {
236  uint wd;
237  } attr_id;
238  if (format.attr_len == 0) {
239  /* initialize vertex format */
240  if (!GPU_crappy_amd_driver()) {
241  /* Some AMD drivers strangely crash with a vbo with this format. */
244  }
245  else {
247  }
248  }
249 
251  GPU_vertbuf_data_alloc(vbo, vert_len);
252 
253  if (GPU_vertbuf_get_format(vbo)->stride == 1) {
254  memset(GPU_vertbuf_get_data(vbo), 0xFF, (size_t)vert_len);
255  }
256  else {
257  GPUVertBufRaw wd_step;
258  GPU_vertbuf_attr_get_raw_data(vbo, attr_id.wd, &wd_step);
259  for (int i = 0; i < vert_len; i++) {
260  *((float *)GPU_vertbuf_raw_step(&wd_step)) = 1.0f;
261  }
262  }
263 }
264 
266 {
267  const int vert_len = curve_render_surface_vert_len_get(lb);
268  DRW_vertbuf_create_wiredata(vbo, vert_len);
269 }
270 
272 {
273  const int tri_len = curve_render_surface_tri_len_get(lb);
274  const int vert_len = curve_render_surface_vert_len_get(lb);
275 
276  GPUIndexBufBuilder elb;
277  GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len);
278 
279  int ofs = 0;
280  LISTBASE_FOREACH (const DispList *, dl, lb) {
283  &elb,
284  dl,
285  ofs);
286  ofs += dl_vert_len(dl);
287  }
288 
289  GPU_indexbuf_build_in_place(&elb, ibo);
290 }
291 
293  GPUIndexBuf **ibo_mats,
294  uint mat_len)
295 {
296  GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);
297 
298  const int tri_len = curve_render_surface_tri_len_get(lb);
299 
300  /* Init each index buffer builder */
301  for (int i = 0; i < mat_len; i++) {
302  GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len * 3, tri_len * 3);
303  }
304 
305  /* calc each index buffer builder */
306  uint v_idx = 0;
307  LISTBASE_FOREACH (const DispList *, dl, lb) {
310  &elb[dl->col],
311  dl,
312  v_idx);
313  }
314 
315  /* build each indexbuf */
316  for (int i = 0; i < mat_len; i++) {
317  GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
318  }
319 }
320 
321 static void set_overlay_wires_tri_indices(void *thunk, uint v1, uint v2, uint v3)
322 {
323  GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
327 }
328 
329 static void set_overlay_wires_quad_tri_indices(void *thunk, uint v1, uint v2, uint v3)
330 {
331  GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
334 }
335 
337 {
338  const int tri_len = curve_render_surface_tri_len_get(lb);
339  const int vert_len = curve_render_surface_vert_len_get(lb);
340 
341  GPUIndexBufBuilder elb;
342  GPU_indexbuf_init(&elb, GPU_PRIM_LINES, tri_len * 3, vert_len);
343 
344  int ofs = 0;
345  LISTBASE_FOREACH (const DispList *, dl, lb) {
348  ofs += dl_vert_len(dl);
349  }
350 
351  GPU_indexbuf_build_in_place(&elb, ibo);
352 }
353 
354 static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
355 {
356  int orco_sizeu = dl->nr - 1;
357  int orco_sizev = dl->parts - 1;
358 
359  /* exception as handled in convertblender.c too */
360  if (dl->flag & DL_CYCL_U) {
361  orco_sizeu++;
362  }
363  if (dl->flag & DL_CYCL_V) {
364  orco_sizev++;
365  }
366 
367  for (int i = 0; i < 4; i++) {
368  /* Note: For some reason the shading U and V are swapped compared to the
369  * one described in the surface format. */
370  /* find uv based on vertex index into grid array */
371  r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
372  r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
373 
374  /* cyclic correction */
375  if ((i == 1 || i == 2) && r_uv[i][0] == 0.0f) {
376  r_uv[i][0] = 1.0f;
377  }
378  if ((i == 0 || i == 1) && r_uv[i][1] == 0.0f) {
379  r_uv[i][1] = 1.0f;
380  }
381  }
382 }
383 
385  const GPUNormal *n1,
386  const GPUNormal *n2,
387  const GPUNormal *n3,
388  const bool do_hq_normals)
389 {
390  if (do_hq_normals) {
394  }
395  else {
396  *(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n1->low;
397  *(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n2->low;
398  *(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n3->low;
399  }
400 }
401 
403  GPUVertBufRaw *nor_step,
404  GPUVertBufRaw *uv_step,
405  GPUVertBufRaw *tan_step,
406  const float v1[3],
407  const float v2[3],
408  const float v3[3],
409  const GPUNormal *n1,
410  const GPUNormal *n2,
411  const GPUNormal *n3,
412  const GPUNormal *t1,
413  const GPUNormal *t2,
414  const GPUNormal *t3,
415  const float uv1[2],
416  const float uv2[2],
417  const float uv3[2],
418  const bool do_hq_normals)
419 {
420  if (pos_step->size != 0) {
421  copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
422  copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
423  copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
424  displist_vertbuf_attr_set_nor(nor_step, n1, n2, n3, do_hq_normals);
425  }
426  if (uv_step->size != 0) {
430  }
431  if (tan_step->size != 0) {
432  displist_vertbuf_attr_set_nor(tan_step, t1, t2, t3, do_hq_normals);
433  }
434 }
435 
/* Iterate over every face of a DL_SURF grid, exposing `quad[4]` (vertex
 * indices of the current face, wrapped for cyclic U/V) and `quad_index`
 * (running face counter) to the loop body. Must be closed with
 * SURFACE_QUAD_ITER_END, which advances `quad` to the next column. */
#define SURFACE_QUAD_ITER_BEGIN(dl) \
  { \
    uint quad[4]; \
    int quad_index = 0; \
    int max_v = (dl->flag & DL_CYCL_V) ? dl->parts : (dl->parts - 1); \
    int max_u = (dl->flag & DL_CYCL_U) ? dl->nr : (dl->nr - 1); \
    for (int v = 0; v < max_v; v++) { \
      quad[3] = dl->nr * v; \
      quad[0] = quad[3] + 1; \
      quad[2] = quad[3] + dl->nr; \
      quad[1] = quad[0] + dl->nr; \
      /* Cyclic wrap */ \
      if (v == dl->parts - 1) { \
        quad[1] -= dl->parts * dl->nr; \
        quad[2] -= dl->parts * dl->nr; \
      } \
      for (int u = 0; u < max_u; u++, quad_index++) { \
        /* Cyclic wrap */ \
        if (u == dl->nr - 1) { \
          quad[0] -= dl->nr; \
          quad[1] -= dl->nr; \
        }

/* Close SURFACE_QUAD_ITER_BEGIN: shift `quad` one column over in U and
 * close the braces the BEGIN macro opened. */
#define SURFACE_QUAD_ITER_END \
  quad[2] = quad[1]; \
  quad[1]++; \
  quad[3] = quad[0]; \
  quad[0]++; \
  } \
  } \
  }
467 
468 static void displist_surf_fnors_ensure(const DispList *dl, float (**fnors)[3])
469 {
470  int u_len = dl->nr - ((dl->flag & DL_CYCL_U) ? 0 : 1);
471  int v_len = dl->parts - ((dl->flag & DL_CYCL_V) ? 0 : 1);
472  const float(*verts)[3] = (float(*)[3])dl->verts;
473  float(*nor_flat)[3] = MEM_mallocN(sizeof(float[3]) * u_len * v_len, __func__);
474  *fnors = nor_flat;
475 
477  normal_quad_v3(*nor_flat, verts[quad[0]], verts[quad[1]], verts[quad[2]], verts[quad[3]]);
478  nor_flat++;
479  }
481 }
482 
484  GPUVertBuf *vbo_pos_nor,
485  GPUVertBuf *vbo_uv,
486  GPUVertBuf *vbo_tan,
487  const Scene *scene)
488 {
489  const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
491 
492  static GPUVertFormat format_pos_nor = {0};
493  static GPUVertFormat format_pos_nor_hq = {0};
494  static GPUVertFormat format_uv = {0};
495  static GPUVertFormat format_tan = {0};
496  static GPUVertFormat format_tan_hq = {0};
497  static struct {
498  uint pos, nor, uv, tan;
499  uint pos_hq, nor_hq, tan_hq;
500  } attr_id;
501  if (format_pos_nor.attr_len == 0) {
502  /* initialize vertex format */
504  &format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
506  &format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
508  &format_pos_nor_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
510  &format_pos_nor_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
511 
512  /* UVs are in [0..1] range. We can compress them. */
514  &format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
515  GPU_vertformat_alias_add(&format_uv, "au");
516 
518  &format_tan, "t", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
519  GPU_vertformat_alias_add(&format_tan, "at");
521  &format_tan_hq, "t", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
522  GPU_vertformat_alias_add(&format_tan_hq, "at");
523  }
524  uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
525  uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
526  uint tan_id = do_hq_normals ? attr_id.tan_hq : attr_id.tan;
527 
528  int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
529 
530  GPUVertBufRaw pos_step = {0};
531  GPUVertBufRaw nor_step = {0};
532  GPUVertBufRaw uv_step = {0};
533  GPUVertBufRaw tan_step = {0};
534 
535  if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
536  GPU_vertbuf_init_with_format(vbo_pos_nor,
537  do_hq_normals ? &format_pos_nor_hq : &format_pos_nor);
538  GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
539  GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, pos_id, &pos_step);
540  GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, nor_id, &nor_step);
541  }
542  if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
543  GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
544  GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
545  GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
546  }
547  if (DRW_TEST_ASSIGN_VBO(vbo_tan)) {
548  GPU_vertbuf_init_with_format(vbo_tan, do_hq_normals ? &format_tan_hq : &format_tan);
549  GPU_vertbuf_data_alloc(vbo_tan, vbo_len_capacity);
550  GPU_vertbuf_attr_get_raw_data(vbo_tan, tan_id, &tan_step);
551  }
552 
554 
555  LISTBASE_FOREACH (const DispList *, dl, lb) {
556  const bool is_smooth = (dl->rt & CU_SMOOTH) != 0;
557  if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
558  const float(*verts)[3] = (float(*)[3])dl->verts;
559  const float(*nors)[3] = (float(*)[3])dl->nors;
560  const int *idx = dl->index;
561  float uv[4][2];
562 
563  if (dl->type == DL_INDEX3) {
564  /* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
565  GPUNormal tangent_packed;
566  GPUNormal normal_packed;
567  GPU_normal_convert_v3(&normal_packed, dl->nors, do_hq_normals);
568  if (vbo_tan) {
569  float tan[4];
570  float(*tan_ptr)[4] = &tan;
571  BKE_displist_tangent_calc(dl, NULL, &tan_ptr);
572  GPU_normal_convert_v3(&tangent_packed, tan, do_hq_normals);
573  normal_float_to_short_v3(tangent_packed.high, tan);
574  }
575  else {
576  if (do_hq_normals) {
577  tangent_packed.high[0] = 0;
578  tangent_packed.high[1] = 0;
579  tangent_packed.high[2] = 0;
580  }
581  else {
582  tangent_packed.low = (GPUPackedNormal){0, 0, 0, 1};
583  }
584  }
585 
586  const float x_max = (float)(dl->nr - 1);
587  uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
588  const int i_end = dl->parts;
589  for (int i = 0; i < i_end; i++, idx += 3) {
590  if (vbo_uv) {
591  uv[0][0] = idx[0] / x_max;
592  uv[1][0] = idx[1] / x_max;
593  uv[2][0] = idx[2] / x_max;
594  }
595 
597  &nor_step,
598  &uv_step,
599  &tan_step,
600  verts[idx[0]],
601  verts[idx[2]],
602  verts[idx[1]],
603  &normal_packed,
604  &normal_packed,
605  &normal_packed,
606  &tangent_packed,
607  &tangent_packed,
608  &tangent_packed,
609  uv[0],
610  uv[2],
611  uv[1],
612  do_hq_normals);
613  }
614  }
615  else if (dl->type == DL_SURF) {
616  float(*tangents)[4] = NULL;
617  float(*fnors)[3] = NULL;
618 
619  if (!is_smooth) {
620  displist_surf_fnors_ensure(dl, &fnors);
621  }
622 
623  if (vbo_tan) {
624  BKE_displist_tangent_calc(dl, fnors, &tangents);
625  }
626 
628  if (vbo_uv) {
629  surf_uv_quad(dl, quad, uv);
630  }
631  GPUNormal pnors_quad[4];
632  GPUNormal ptans_quad[4];
633 
634  if (is_smooth) {
635  for (int j = 0; j < 4; j++) {
636  GPU_normal_convert_v3(&pnors_quad[j], nors[quad[j]], do_hq_normals);
637  }
638  }
639  else {
640  GPU_normal_convert_v3(&pnors_quad[0], fnors[quad_index], do_hq_normals);
641  pnors_quad[1] = pnors_quad[2] = pnors_quad[3] = pnors_quad[0];
642  }
643 
644  if (vbo_tan) {
645  for (int j = 0; j < 4; j++) {
646  float *tan = tangents[quad_index * 4 + j];
647  GPU_normal_convert_v3(&ptans_quad[j], tan, do_hq_normals);
648  }
649  }
650 
652  &nor_step,
653  &uv_step,
654  &tan_step,
655  verts[quad[2]],
656  verts[quad[0]],
657  verts[quad[1]],
658  &pnors_quad[2],
659  &pnors_quad[0],
660  &pnors_quad[1],
661  &ptans_quad[2],
662  &ptans_quad[0],
663  &ptans_quad[1],
664  uv[2],
665  uv[0],
666  uv[1],
667  do_hq_normals);
668 
670  &nor_step,
671  &uv_step,
672  &tan_step,
673  verts[quad[0]],
674  verts[quad[2]],
675  verts[quad[3]],
676  &pnors_quad[0],
677  &pnors_quad[2],
678  &pnors_quad[3],
679  &ptans_quad[0],
680  &ptans_quad[2],
681  &ptans_quad[3],
682  uv[0],
683  uv[2],
684  uv[3],
685  do_hq_normals);
686  }
688 
689  MEM_SAFE_FREE(tangents);
690  MEM_SAFE_FREE(fnors);
691  }
692  else {
693  BLI_assert(dl->type == DL_INDEX4);
694  uv[0][0] = uv[0][1] = uv[1][0] = uv[3][1] = 0.0f;
695  uv[1][1] = uv[2][0] = uv[2][1] = uv[3][0] = 1.0f;
696 
697  const int i_end = dl->parts;
698  for (int i = 0; i < i_end; i++, idx += 4) {
699  const bool is_tri = idx[2] != idx[3];
700 
701  GPUNormal ptan = {0};
702  GPUNormal pnors_idx[4];
703  if (is_smooth) {
704  int idx_len = is_tri ? 3 : 4;
705  for (int j = 0; j < idx_len; j++) {
706  GPU_normal_convert_v3(&pnors_idx[j], nors[idx[j]], do_hq_normals);
707  }
708  }
709  else {
710  float nor_flat[3];
711  if (is_tri) {
712  normal_tri_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]]);
713  }
714  else {
715  normal_quad_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]], verts[idx[3]]);
716  }
717  GPU_normal_convert_v3(&pnors_idx[0], nor_flat, do_hq_normals);
718  pnors_idx[1] = pnors_idx[2] = pnors_idx[3] = pnors_idx[0];
719  }
720 
722  &nor_step,
723  &uv_step,
724  &tan_step,
725  verts[idx[0]],
726  verts[idx[2]],
727  verts[idx[1]],
728  &pnors_idx[0],
729  &pnors_idx[2],
730  &pnors_idx[1],
731  &ptan,
732  &ptan,
733  &ptan,
734  uv[0],
735  uv[2],
736  uv[1],
737  do_hq_normals);
738 
739  if (is_tri) {
741  &nor_step,
742  &uv_step,
743  &tan_step,
744  verts[idx[2]],
745  verts[idx[0]],
746  verts[idx[3]],
747  &pnors_idx[2],
748  &pnors_idx[0],
749  &pnors_idx[3],
750  &ptan,
751  &ptan,
752  &ptan,
753  uv[2],
754  uv[0],
755  uv[3],
756  do_hq_normals);
757  }
758  }
759  }
760  }
761  }
762  /* Resize and finish. */
763  if (pos_step.size != 0) {
764  int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
765  if (vbo_len_used < vbo_len_capacity) {
766  GPU_vertbuf_data_resize(vbo_pos_nor, vbo_len_used);
767  }
768  }
769  if (uv_step.size != 0) {
770  int vbo_len_used = GPU_vertbuf_raw_used(&uv_step);
771  if (vbo_len_used < vbo_len_capacity) {
772  GPU_vertbuf_data_resize(vbo_uv, vbo_len_used);
773  }
774  }
775 }
776 
777 /* Edge detection/adjacency. */
778 #define NO_EDGE INT_MAX
780  EdgeHash *eh, GPUIndexBufBuilder *elb, bool *r_is_manifold, uint v1, uint v2, uint v3)
781 {
782  bool inv_indices = (v2 > v3);
783  void **pval;
784  bool value_is_init = BLI_edgehash_ensure_p(eh, v2, v3, &pval);
785  int v_data = POINTER_AS_INT(*pval);
786  if (!value_is_init || v_data == NO_EDGE) {
787  /* Save the winding order inside the sign bit. Because the
788  * edgehash sort the keys and we need to compare winding later. */
789  int value = (int)v1 + 1; /* Int 0 bm_looptricannot be signed */
790  *pval = POINTER_FROM_INT((inv_indices) ? -value : value);
791  }
792  else {
793  /* HACK Tag as not used. Prevent overhead of BLI_edgehash_remove. */
794  *pval = POINTER_FROM_INT(NO_EDGE);
795  bool inv_opposite = (v_data < 0);
796  uint v_opposite = (uint)abs(v_data) - 1;
797 
798  if (inv_opposite == inv_indices) {
799  /* Don't share edge if triangles have non matching winding. */
801  GPU_indexbuf_add_line_adj_verts(elb, v_opposite, v2, v3, v_opposite);
802  *r_is_manifold = false;
803  }
804  else {
805  GPU_indexbuf_add_line_adj_verts(elb, v1, v2, v3, v_opposite);
806  }
807  }
808 }
809 
810 static void set_edges_adjacency_lines_indices(void *thunk, uint v1, uint v2, uint v3)
811 {
812  void **packed = (void **)thunk;
813  GPUIndexBufBuilder *elb = (GPUIndexBufBuilder *)packed[0];
814  EdgeHash *eh = (EdgeHash *)packed[1];
815  bool *r_is_manifold = (bool *)packed[2];
816 
817  set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v1, v2, v3);
818  set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v2, v3, v1);
819  set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v3, v1, v2);
820 }
821 
823  struct GPUIndexBuf *ibo,
824  bool *r_is_manifold)
825 {
826  const int tri_len = curve_render_surface_tri_len_get(lb);
827  const int vert_len = curve_render_surface_vert_len_get(lb);
828 
829  *r_is_manifold = true;
830 
831  /* Allocate max but only used indices are sent to GPU. */
832  GPUIndexBufBuilder elb;
833  GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, tri_len * 3, vert_len);
834 
835  EdgeHash *eh = BLI_edgehash_new_ex(__func__, tri_len * 3);
836 
837  /* pack values to pass to `set_edges_adjacency_lines_indices` function. */
838  void *thunk[3] = {&elb, eh, r_is_manifold};
839  int v_idx = 0;
840  LISTBASE_FOREACH (const DispList *, dl, lb) {
843  thunk,
844  dl,
845  v_idx);
846  v_idx += dl_vert_len(dl);
847  }
848 
849  /* Create edges for remaining non manifold edges. */
850  EdgeHashIterator *ehi;
851  for (ehi = BLI_edgehashIterator_new(eh); BLI_edgehashIterator_isDone(ehi) == false;
853  uint v1, v2;
855  if (v_data == NO_EDGE) {
856  continue;
857  }
859  uint v0 = (uint)abs(v_data) - 1;
860  if (v_data < 0) { /* inv_opposite */
861  SWAP(uint, v1, v2);
862  }
863  GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
864  *r_is_manifold = false;
865  }
867  BLI_edgehash_free(eh, NULL);
868 
869  GPU_indexbuf_build_in_place(&elb, ibo);
870 }
871 #undef NO_EDGE
typedef float(TangentPoint)[2]
display list (or rather multi purpose list) stuff.
@ DL_CYCL_V
Definition: BKE_displist.h:54
@ DL_CYCL_U
Definition: BKE_displist.h:53
@ DL_SURF
Definition: BKE_displist.h:40
@ DL_INDEX4
Definition: BKE_displist.h:44
@ DL_INDEX3
Definition: BKE_displist.h:42
void BKE_displist_normals_add(struct ListBase *lb)
Definition: displist.c:140
void BKE_displist_tangent_calc(const DispList *dl, float(*fnormals)[3], float(**r_tangent)[4])
#define BLI_array_alloca(arr, realsize)
Definition: BLI_alloca.h:36
#define BLI_assert(a)
Definition: BLI_assert.h:58
void BLI_edgehash_free(EdgeHash *eh, EdgeHashFreeFP free_value)
Definition: edgehash.c:244
BLI_INLINE void BLI_edgehashIterator_getKey(EdgeHashIterator *ehi, unsigned int *r_v0, unsigned int *r_v1)
Definition: BLI_edgehash.h:93
BLI_INLINE bool BLI_edgehashIterator_isDone(EdgeHashIterator *ehi)
Definition: BLI_edgehash.h:89
EdgeHash * BLI_edgehash_new_ex(const char *info, const unsigned int nentries_reserve)
Definition: edgehash.c:226
EdgeHashIterator * BLI_edgehashIterator_new(EdgeHash *eh) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
Definition: edgehash.c:471
bool BLI_edgehash_ensure_p(EdgeHash *eh, unsigned int v0, unsigned int v1, void ***r_val) ATTR_WARN_UNUSED_RESULT
Definition: edgehash.c:355
BLI_INLINE void BLI_edgehashIterator_step(EdgeHashIterator *ehi)
Definition: BLI_edgehash.h:85
void BLI_edgehashIterator_free(EdgeHashIterator *ehi)
Definition: edgehash.c:496
BLI_INLINE void * BLI_edgehashIterator_getValue(EdgeHashIterator *ehi)
Definition: BLI_edgehash.h:101
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition: math_geom.c:68
float normal_tri_v3(float n[3], const float v1[3], const float v2[3], const float v3[3])
Definition: math_geom.c:51
MINLINE void normal_float_to_short_v3(short r[3], const float n[3])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE void copy_v3_v3_short(short r[3], const short a[3])
MINLINE void normal_float_to_short_v2(short r[2], const float n[2])
unsigned int uint
Definition: BLI_sys_types.h:83
#define SWAP(type, a, b)
#define POINTER_FROM_INT(i)
#define POINTER_AS_INT(i)
#define ELEM(...)
@ CU_SMOOTH
@ SCE_PERF_HQ_NORMALS
bool GPU_use_hq_normals_workaround(void)
bool GPU_crappy_amd_driver(void)
struct GPUIndexBuf GPUIndexBuf
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2)
void GPU_indexbuf_add_line_adj_verts(GPUIndexBufBuilder *, uint v1, uint v2, uint v3, uint v4)
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *, GPUIndexBuf *)
void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *, uint v1, uint v2, uint v3)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei stride
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble v1
@ GPU_PRIM_LINES
Definition: GPU_primitive.h:36
@ GPU_PRIM_LINES_ADJ
Definition: GPU_primitive.h:43
@ GPU_PRIM_TRIS
Definition: GPU_primitive.h:37
const GPUVertFormat * GPU_vertbuf_get_format(const GPUVertBuf *verts)
struct GPUVertBuf GPUVertBuf
GPU_INLINE uint GPU_vertbuf_raw_used(GPUVertBufRaw *a)
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len)
#define GPU_vertbuf_init_with_format(verts, format)
void * GPU_vertbuf_get_data(const GPUVertBuf *verts)
GPU_INLINE void * GPU_vertbuf_raw_step(GPUVertBufRaw *a)
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *, uint a_idx, GPUVertBufRaw *access)
void GPU_vertbuf_attr_set(GPUVertBuf *, uint a_idx, uint v_idx, const void *data)
void GPU_vertbuf_data_resize(GPUVertBuf *, uint v_len)
struct GPUPackedNormal GPUPackedNormal
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
BLI_INLINE void GPU_normal_convert_v3(GPUNormal *gpu_normal, const float data[3], const bool do_hq_normals)
void GPU_vertformat_alias_add(GPUVertFormat *, const char *alias)
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I16
@ GPU_COMP_U8
#define MEM_SAFE_FREE(v)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
Scene scene
void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb, struct GPUIndexBuf *ibo, bool *r_is_manifold)
static int displist_indexbufbuilder_tess_set(SetTriIndicesFn *set_tri_indices, SetTriIndicesFn *set_quad_tri_indices, void *thunk, const DispList *dl, const int ofs)
void() SetTriIndicesFn(void *thunk, uint v1, uint v2, uint v3)
static int curve_render_surface_vert_len_get(const ListBase *lb)
static void set_edges_adjacency_lines_indices(void *thunk, uint v1, uint v2, uint v3)
#define NO_EDGE
static void displist_indexbufbuilder_set(SetTriIndicesFn *set_tri_indices, SetTriIndicesFn *set_quad_tri_indices, void *thunk, const DispList *dl, const int ofs)
static void displist_vertbuf_attr_set_nor(GPUVertBufRaw *step, const GPUNormal *n1, const GPUNormal *n2, const GPUNormal *n3, const bool do_hq_normals)
void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb, GPUVertBuf *vbo_pos_nor, GPUVertBuf *vbo_uv, GPUVertBuf *vbo_tan, const Scene *scene)
void DRW_displist_vertbuf_create_wiredata(ListBase *lb, GPUVertBuf *vbo)
void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo, const Scene *scene)
static int dl_vert_len(const DispList *dl)
void DRW_displist_indexbuf_create_triangles_loop_split_by_material(ListBase *lb, GPUIndexBuf **ibo_mats, uint mat_len)
static void set_overlay_wires_tri_indices(void *thunk, uint v1, uint v2, uint v3)
#define SURFACE_QUAD_ITER_BEGIN(dl)
static int curve_render_surface_tri_len_get(const ListBase *lb)
void DRW_displist_indexbuf_create_triangles_in_order(ListBase *lb, GPUIndexBuf *ibo)
static void displist_surf_fnors_ensure(const DispList *dl, float(**fnors)[3])
void DRW_vertbuf_create_wiredata(GPUVertBuf *vbo, const int vert_len)
static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step, GPUVertBufRaw *nor_step, GPUVertBufRaw *uv_step, GPUVertBufRaw *tan_step, const float v1[3], const float v2[3], const float v3[3], const GPUNormal *n1, const GPUNormal *n2, const GPUNormal *n3, const GPUNormal *t1, const GPUNormal *t2, const GPUNormal *t3, const float uv1[2], const float uv2[2], const float uv3[2], const bool do_hq_normals)
static void set_overlay_wires_quad_tri_indices(void *thunk, uint v1, uint v2, uint v3)
static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
void DRW_displist_indexbuf_create_lines_in_order(ListBase *lb, GPUIndexBuf *ibo)
#define SURFACE_QUAD_ITER_END
static int dl_tri_len(const DispList *dl)
static void set_edge_adjacency_lines_indices(EdgeHash *eh, GPUIndexBufBuilder *elb, bool *r_is_manifold, uint v1, uint v2, uint v3)
#define DRW_TEST_ASSIGN_VBO(v)
static float verts[][3]
uint pos
struct @612::@615 attr_id
uint nor
GPUBatch * quad
format
Definition: logImageCore.h:47
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:47
static unsigned a[3]
Definition: RandGen.cpp:92
INLINE Rall1d< T, V, S > tan(const Rall1d< T, V, S > &arg)
Definition: rall1d.h:327
short type
Definition: BKE_displist.h:71
int totindex
Definition: BKE_displist.h:77
float * verts
Definition: BKE_displist.h:74
int * index
Definition: BKE_displist.h:75
short rt
Definition: BKE_displist.h:73
float * nors
Definition: BKE_displist.h:74
short flag
Definition: BKE_displist.h:71
GPUPackedNormal low
struct RenderData r
__forceinline const avxi abs(const avxi &a)
Definition: util_avxi.h:186