Blender  V2.93
draw_cache_impl_volume.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2017 by Blender Foundation.
17  * All rights reserved.
18  */
19 
26 #include <string.h>
27 
28 #include "MEM_guardedalloc.h"
29 
30 #include "BLI_listbase.h"
31 #include "BLI_math_base.h"
32 #include "BLI_math_vector.h"
33 #include "BLI_utildefines.h"
34 
35 #include "DNA_object_types.h"
36 #include "DNA_volume_types.h"
37 
38 #include "BKE_global.h"
39 #include "BKE_volume.h"
40 #include "BKE_volume_render.h"
41 
42 #include "GPU_batch.h"
43 #include "GPU_capabilities.h"
44 #include "GPU_texture.h"
45 
46 #include "DEG_depsgraph_query.h"
47 
48 #include "DRW_render.h"
49 
50 #include "draw_cache.h" /* own include */
51 #include "draw_cache_impl.h" /* own include */
52 
53 static void volume_batch_cache_clear(Volume *volume);
54 
55 /* ---------------------------------------------------------------------- */
56 /* Volume GPUBatch Cache */
57 
/* Per-volume GPU draw cache: 3D grid textures, a wireframe batch and a
 * selection surface, built lazily and invalidated via `is_dirty`.
 * NOTE(review): several member lines were dropped by this extraction
 * (the grid list, the face_wire vbo/batch pair, `selection_surface`,
 * and the closing `} VolumeBatchCache;`) -- confirm against the
 * original file before editing. */
58 typedef struct VolumeBatchCache {
59  /* 3D textures */
61 
62  /* Wireframe */
63  struct {
67 
68  /* Surface for selection */
70 
71  /* settings to determine if cache is invalid */
72  bool is_dirty;
74 
75 /* GPUBatch cache management. */
76 
77 static bool volume_batch_cache_valid(Volume *volume)
78 {
79  VolumeBatchCache *cache = volume->batch_cache;
80  return (cache && cache->is_dirty == false);
81 }
82 
83 static void volume_batch_cache_init(Volume *volume)
84 {
85  VolumeBatchCache *cache = volume->batch_cache;
86 
87  if (!cache) {
88  cache = volume->batch_cache = MEM_callocN(sizeof(*cache), __func__);
89  }
90  else {
91  memset(cache, 0, sizeof(*cache));
92  }
93 
94  cache->is_dirty = false;
95 }
96 
/* DRW_volume_batch_cache_validate(Volume *volume): rebuild the cache when it
 * has been tagged dirty (clear then re-init).
 * NOTE(review): the signature line is missing from this extraction --
 * confirm against the original file. */
98 {
99  if (!volume_batch_cache_valid(volume)) {
100  volume_batch_cache_clear(volume);
101  volume_batch_cache_init(volume);
102  }
103 }
104 
/* volume_batch_cache_get(Volume *volume): return the (validated) batch cache.
 * NOTE(review): the signature line and one statement (presumably the
 * cache-validate call) are missing from this extraction -- confirm. */
106 {
108  return volume->batch_cache;
109 }
110 
/* DRW_volume_batch_cache_dirty_tag(Volume *volume, int mode): mark the cache
 * dirty so it gets rebuilt on next validate. No-op when no cache exists.
 * NOTE(review): the signature line and the `case` label (presumably
 * BKE_VOLUME_BATCH_DIRTY_ALL) are missing from this extraction -- confirm. */
112 {
113  VolumeBatchCache *cache = volume->batch_cache;
114  if (cache == NULL) {
115  return;
116  }
117  switch (mode) {
119  cache->is_dirty = true;
120  break;
121  default:
122  /* Unknown dirty mode is a programming error. */
123  BLI_assert(0);
123  }
124 }
125 
/* Free all GPU resources held by the cache (grid names/textures, and --
 * on lines missing from this extraction -- the wireframe vbo/batch and
 * selection surface discards). The cache struct itself is kept. */
126 static void volume_batch_cache_clear(Volume *volume)
127 {
128  VolumeBatchCache *cache = volume->batch_cache;
129  if (!cache) {
130  return;
131  }
132 
133  /* Each cached grid owns its name string and its 3D texture. */
133  LISTBASE_FOREACH (DRWVolumeGrid *, grid, &cache->grids) {
134  MEM_SAFE_FREE(grid->name);
135  DRW_TEXTURE_FREE_SAFE(grid->texture);
136  }
137  BLI_freelistN(&cache->grids);
138 
142 }
143 
/* DRW_volume_batch_cache_free(Volume *volume): release all GPU resources and
 * the cache allocation itself (volume->batch_cache becomes NULL).
 * NOTE(review): signature line missing from this extraction -- confirm. */
145 {
146  volume_batch_cache_clear(volume);
147  MEM_SAFE_FREE(volume->batch_cache);
148 }
/* Userdata passed through BKE_volume_grid_wireframe() to the wireframe
 * callback. NOTE(review): member lines are missing from this extraction;
 * the callback below reads `data->scene` and `data->volume`, so the struct
 * presumably holds those two pointers -- confirm against the original. */
149 typedef struct VolumeWireframeUserData {
153 
/* drw_volume_wireframe_cb: callback invoked with the grid's wireframe
 * vertices/edges; builds the cached wireframe vertex buffer and batch.
 * NOTE(review): the signature line and several interior lines are missing
 * from this extraction (the GPU_use_hq_normals_workaround() OR-term, the
 * low-quality format attr setup, the vertex-buffer alloc/fill, the
 * points-vs-lines branch condition and the two batch-create calls) --
 * confirm against the original file before editing. */
155  void *userdata, float (*verts)[3], int (*edges)[2], int totvert, int totedge)
156 {
157  VolumeWireframeUserData *data = userdata;
158  Scene *scene = data->scene;
159  Volume *volume = data->volume;
160  VolumeBatchCache *cache = volume->batch_cache;
161  /* High-quality normals either from the scene performance flag or (on the
162  * missing continuation line) a driver workaround. */
161  const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
163 
164  /* Create vertex buffer. */
165  static GPUVertFormat format = {0};
166  static GPUVertFormat format_hq = {0};
167  static struct {
168  uint pos_id, nor_id;
169  uint pos_hq_id, nor_hq_id;
170  } attr_id;
171 
172  /* Formats are static: initialize attribute ids only once. */
172  if (format.attr_len == 0) {
176  attr_id.pos_id = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
178  &format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
179  }
180 
181  /* All wireframe vertices share one constant normal. */
181  static float normal[3] = {1.0f, 0.0f, 0.0f};
182  GPUNormal packed_normal;
183  GPU_normal_convert_v3(&packed_normal, normal, do_hq_normals);
184  uint pos_id = do_hq_normals ? attr_id.pos_hq_id : attr_id.pos_id;
185  uint nor_id = do_hq_normals ? attr_id.nor_hq_id : attr_id.nor_id;
186 
187  cache->face_wire.pos_nor_in_order = GPU_vertbuf_create_with_format(do_hq_normals ? &format_hq :
188  &format);
191  GPU_vertbuf_attr_fill_stride(cache->face_wire.pos_nor_in_order, nor_id, 0, &packed_normal);
192 
193  /* Create wiredata. */
194  GPUVertBuf *vbo_wiredata = GPU_vertbuf_calloc();
195  DRW_vertbuf_create_wiredata(vbo_wiredata, totvert);
196 
198  /* Create batch. */
201  }
202  else {
203  /* Create edge index buffer. */
204  GPUIndexBufBuilder elb;
205  GPU_indexbuf_init(&elb, GPU_PRIM_LINES, totedge, totvert);
206  for (int i = 0; i < totedge; i++) {
207  GPU_indexbuf_add_line_verts(&elb, edges[i][0], edges[i][1]);
208  }
209  GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);
210 
211  /* Create batch. */
214  }
215 
216  /* Batch takes ownership of the wiredata vbo (own_vbo = true). */
216  GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wiredata, true);
217 }
218 
/* DRW_volume_batch_cache_get_wireframes_face(Volume *volume): return the
 * cached wireframe batch, building it on first use from the active grid.
 * Returns NULL when there is no active grid (or, on the missing condition
 * line, presumably when the display wireframe type is VOLUME_WIREFRAME_NONE
 * -- confirm).
 * NOTE(review): signature and the first `if` condition line are missing
 * from this extraction. */
220 {
222  return NULL;
223  }
224 
225  VolumeBatchCache *cache = volume_batch_cache_get(volume);
226 
227  if (cache->face_wire.batch == NULL) {
228  const VolumeGrid *volume_grid = BKE_volume_grid_active_get_for_read(volume);
229  if (volume_grid == NULL) {
230  return NULL;
231  }
232 
233  /* Create wireframe from OpenVDB tree. */
234  const DRWContextState *draw_ctx = DRW_context_state_get();
235  VolumeWireframeUserData userdata;
236  userdata.volume = volume;
237  userdata.scene = draw_ctx->scene;
238  BKE_volume_grid_wireframe(volume, volume_grid, drw_volume_wireframe_cb, &userdata);
239  }
240 
241  return cache->face_wire.batch;
242 }
243 
/* drw_volume_selection_surface_cb: callback receiving the grid's triangle
 * mesh; builds the cached selection-surface batch (owns both vbo and ibo).
 * NOTE(review): the signature line, the format attr_add line, the vbo
 * creation line and the batch-create/assignment line are missing from this
 * extraction -- confirm against the original file. */
245  void *userdata, float (*verts)[3], int (*tris)[3], int totvert, int tottris)
246 {
247  Volume *volume = userdata;
248  VolumeBatchCache *cache = volume->batch_cache;
249 
250  /* Static format: set up the "pos" attribute only once. */
250  static GPUVertFormat format = {0};
251  static uint pos_id;
252  if (format.attr_len == 0) {
254  }
255 
256  /* Create vertex buffer. */
258  GPU_vertbuf_data_alloc(vbo_surface, totvert);
259  GPU_vertbuf_attr_fill(vbo_surface, pos_id, verts);
260 
261  /* Create index buffer. */
262  GPUIndexBufBuilder elb;
263  GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottris, totvert);
264  for (int i = 0; i < tottris; i++) {
265  GPU_indexbuf_add_tri_verts(&elb, UNPACK3(tris[i]));
266  }
267  GPUIndexBuf *ibo_surface = GPU_indexbuf_build(&elb);
268 
270  GPU_PRIM_TRIS, vbo_surface, ibo_surface, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
271 }
272 
/* DRW_volume_batch_cache_get_selection_surface(Volume *volume): return the
 * cached selection-surface batch, building it on first use from the active
 * grid via BKE_volume_grid_selection_surface(). Returns NULL when there is
 * no active grid.
 * NOTE(review): the signature line and the first line of the
 * BKE_volume_grid_selection_surface() call are missing from this
 * extraction -- confirm. */
274 {
275  VolumeBatchCache *cache = volume_batch_cache_get(volume);
276  if (cache->selection_surface == NULL) {
277  const VolumeGrid *volume_grid = BKE_volume_grid_active_get_for_read(volume);
278  if (volume_grid == NULL) {
279  return NULL;
280  }
282  volume, volume_grid, drw_volume_selection_surface_cb, volume);
283  }
284  return cache->selection_surface;
285 }
286 
/* static DRWVolumeGrid *volume_grid_cache_get(...): look up (or create) the
 * per-grid cache entry and lazily upload the grid's dense voxels to a 3D
 * GPU texture. Returns the entry even when no texture could be created.
 * NOTE(review): the signature's first line, the data-format argument of
 * GPU_texture_create_3d (presumably GPU_DATA_FLOAT), and one cleanup line
 * (presumably BKE_volume_dense_float_grid_clear) are missing from this
 * extraction -- confirm against the original file. */
288  const VolumeGrid *grid,
289  VolumeBatchCache *cache)
290 {
291  const char *name = BKE_volume_grid_name(grid);
292 
293  /* Return cached grid. */
294  DRWVolumeGrid *cache_grid;
295  for (cache_grid = cache->grids.first; cache_grid; cache_grid = cache_grid->next) {
296  if (STREQ(cache_grid->name, name)) {
297  return cache_grid;
298  }
299  }
300 
301  /* Allocate new grid. */
302  cache_grid = MEM_callocN(sizeof(DRWVolumeGrid), __func__);
303  cache_grid->name = BLI_strdup(name);
304  BLI_addtail(&cache->grids, cache_grid);
305 
306  /* TODO: can we load this earlier, avoid accessing the global and take
307  * advantage of dependency graph multithreading? */
308  BKE_volume_load(volume, G.main);
309 
310  /* Test if we support textures with the number of channels. */
311  size_t channels = BKE_volume_grid_channels(grid);
312  if (!ELEM(channels, 1, 3)) {
313  return cache_grid;
314  }
315 
316  /* Remember if grid was loaded. If it was not, we want to unload it after the GPUTexture has been
317  * created. */
318  const bool was_loaded = BKE_volume_grid_is_loaded(grid);
319 
320  DenseFloatVolumeGrid dense_grid;
321  if (BKE_volume_grid_dense_floats(volume, grid, &dense_grid)) {
322  copy_m4_m4(cache_grid->texture_to_object, dense_grid.texture_to_object);
323  invert_m4_m4(cache_grid->object_to_texture, dense_grid.texture_to_object);
324 
325  /* Create GPU texture. */
326  eGPUTextureFormat format = (channels == 3) ? GPU_RGB16F : GPU_R16F;
327  cache_grid->texture = GPU_texture_create_3d("volume_grid",
328  UNPACK3(dense_grid.resolution),
329  1,
330  format,
332  dense_grid.voxels);
333  /* The texture can be null if the resolution along one axis is larger than
334  * GL_MAX_3D_TEXTURE_SIZE. */
335  if (cache_grid->texture != NULL) {
336  GPU_texture_swizzle_set(cache_grid->texture, (channels == 3) ? "rgb1" : "rrr1");
337  GPU_texture_wrap_mode(cache_grid->texture, false, false);
339  }
340  else {
341  /* Texture creation failed: voxels were not taken over, free them here. */
341  MEM_freeN(dense_grid.voxels);
342  printf("Error: Could not allocate 3D texture for volume.\n");
343  }
344  }
345 
346  /* Free grid from memory if it wasn't previously loaded. */
347  if (!was_loaded) {
348  BKE_volume_grid_unload(volume, grid);
349  }
350 
351  return cache_grid;
352 }
353 
/* DRW_volume_batch_cache_get_grid(Volume *volume, const VolumeGrid
 * *volume_grid): return the cached draw data for this grid, or NULL when no
 * GPU texture could be created for it.
 * NOTE(review): signature line missing from this extraction -- confirm. */
355 {
356  VolumeBatchCache *cache = volume_batch_cache_get(volume);
357  DRWVolumeGrid *grid = volume_grid_cache_get(volume, volume_grid, cache);
358  return (grid->texture != NULL) ? grid : NULL;
359 }
360 
/* DRW_volume_material_count_get(Volume *volume): number of material slots,
 * clamped to at least 1 so drawing always has a material.
 * NOTE(review): signature line missing from this extraction -- confirm. */
362 {
363  return max_ii(1, volume->totcol);
364 }
Volume datablock.
@ BKE_VOLUME_BATCH_DIRTY_ALL
Definition: BKE_volume.h:64
int BKE_volume_grid_channels(const struct VolumeGrid *grid)
void BKE_volume_grid_unload(const struct Volume *volume, const struct VolumeGrid *grid)
bool BKE_volume_grid_is_loaded(const struct VolumeGrid *grid)
const char * BKE_volume_grid_name(const struct VolumeGrid *grid)
const VolumeGrid * BKE_volume_grid_active_get_for_read(const struct Volume *volume)
bool BKE_volume_load(const struct Volume *volume, const struct Main *bmain)
Volume data-block rendering and viewport drawing utilities.
void BKE_volume_dense_float_grid_clear(DenseFloatVolumeGrid *dense_grid)
bool BKE_volume_grid_dense_floats(const struct Volume *volume, const struct VolumeGrid *volume_grid, DenseFloatVolumeGrid *r_dense_grid)
void BKE_volume_grid_wireframe(const struct Volume *volume, const struct VolumeGrid *volume_grid, BKE_volume_wireframe_cb cb, void *cb_userdata)
void BKE_volume_grid_selection_surface(const struct Volume *volume, const struct VolumeGrid *volume_grid, BKE_volume_selection_surface_cb cb, void *cb_userdata)
#define BLI_assert(a)
Definition: BLI_assert.h:58
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:547
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:110
MINLINE int max_ii(int a, int b)
bool invert_m4_m4(float R[4][4], const float A[4][4])
Definition: math_matrix.c:1278
void copy_m4_m4(float m1[4][4], const float m2[4][4])
Definition: math_matrix.c:95
char * BLI_strdup(const char *str) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL() ATTR_MALLOC
Definition: string.c:70
unsigned int uint
Definition: BLI_sys_types.h:83
#define UNPACK3(a)
#define ELEM(...)
#define STREQ(a, b)
Object is a sort of wrapper for general info.
@ SCE_PERF_HQ_NORMALS
@ VOLUME_WIREFRAME_NONE
@ VOLUME_WIREFRAME_POINTS
#define DRW_TEXTURE_FREE_SAFE(tex)
Definition: DRW_render.h:180
GPUBatch
Definition: GPU_batch.h:93
#define GPU_batch_create(prim, verts, elem)
Definition: GPU_batch.h:107
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo)
Definition: gpu_batch.cc:192
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition: GPU_batch.h:199
GPUBatch * GPU_batch_create_ex(GPUPrimType prim, GPUVertBuf *vert, GPUIndexBuf *elem, eGPUBatchFlag owns_flag)
Definition: gpu_batch.cc:60
@ GPU_BATCH_OWNS_INDEX
Definition: GPU_batch.h:54
@ GPU_BATCH_OWNS_VBO
Definition: GPU_batch.h:45
bool GPU_use_hq_normals_workaround(void)
struct GPUIndexBuf GPUIndexBuf
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
GPUIndexBuf * GPU_indexbuf_build(GPUIndexBufBuilder *)
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2)
void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *, uint v1, uint v2, uint v3)
@ GPU_PRIM_LINES
Definition: GPU_primitive.h:36
@ GPU_PRIM_POINTS
Definition: GPU_primitive.h:35
@ GPU_PRIM_TRIS
Definition: GPU_primitive.h:37
void GPU_texture_swizzle_set(GPUTexture *tex, const char swizzle[4])
Definition: gpu_texture.cc:503
void GPU_texture_wrap_mode(GPUTexture *tex, bool use_repeat, bool use_clamp)
Definition: gpu_texture.cc:496
@ GPU_DATA_FLOAT
Definition: GPU_texture.h:172
eGPUTextureFormat
Definition: GPU_texture.h:84
@ GPU_R16F
Definition: GPU_texture.h:114
@ GPU_RGB16F
Definition: GPU_texture.h:128
GPUTexture * GPU_texture_create_3d(const char *name, int w, int h, int d, int mip_len, eGPUTextureFormat texture_format, eGPUDataFormat data_format, const void *data)
Definition: gpu_texture.cc:263
#define GPU_vertbuf_create_with_format(format)
struct GPUVertBuf GPUVertBuf
GPUVertBuf * GPU_vertbuf_calloc(void)
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len)
void GPU_vertbuf_attr_fill_stride(GPUVertBuf *, uint a_idx, uint stride, const void *data)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_attr_fill(GPUVertBuf *, uint a_idx, const void *data)
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
BLI_INLINE void GPU_normal_convert_v3(GPUNormal *gpu_normal, const float data[3], const bool do_hq_normals)
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I16
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
Scene scene
void DRW_vertbuf_create_wiredata(struct GPUVertBuf *vbo, const int vert_len)
void DRW_volume_batch_cache_free(Volume *volume)
static DRWVolumeGrid * volume_grid_cache_get(const Volume *volume, const VolumeGrid *grid, VolumeBatchCache *cache)
void DRW_volume_batch_cache_dirty_tag(Volume *volume, int mode)
static void drw_volume_selection_surface_cb(void *userdata, float(*verts)[3], int(*tris)[3], int totvert, int tottris)
DRWVolumeGrid * DRW_volume_batch_cache_get_grid(Volume *volume, const VolumeGrid *volume_grid)
static void volume_batch_cache_init(Volume *volume)
struct VolumeBatchCache VolumeBatchCache
GPUBatch * DRW_volume_batch_cache_get_selection_surface(Volume *volume)
static VolumeBatchCache * volume_batch_cache_get(Volume *volume)
static void volume_batch_cache_clear(Volume *volume)
static bool volume_batch_cache_valid(Volume *volume)
static void drw_volume_wireframe_cb(void *userdata, float(*verts)[3], int(*edges)[2], int totvert, int totedge)
struct VolumeWireframeUserData VolumeWireframeUserData
GPUBatch * DRW_volume_batch_cache_get_wireframes_face(Volume *volume)
int DRW_volume_material_count_get(Volume *volume)
void DRW_volume_batch_cache_validate(Volume *volume)
const DRWContextState * DRW_context_state_get(void)
static float verts[][3]
struct @612::@615 attr_id
IconTextureDrawCall normal
format
Definition: logImageCore.h:47
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
struct Scene * scene
Definition: DRW_render.h:745
float texture_to_object[4][4]
Definition: draw_cache.h:239
struct DRWVolumeGrid * next
Definition: draw_cache.h:230
struct GPUTexture * texture
Definition: draw_cache.h:236
float object_to_texture[4][4]
Definition: draw_cache.h:240
float texture_to_object[4][4]
void * first
Definition: DNA_listBase.h:47
struct RenderData r
struct VolumeBatchCache::@279 face_wire
GPUVertBuf * pos_nor_in_order
void * batch_cache
short totcol
VolumeDisplay display
#define G(x, y, z)