/* Blender 4.5 — volume_grid_function_eval.cc (source listing). */
1/* SPDX-FileCopyrightText: 2025 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "BKE_customdata.hh"
6#include "BLT_translation.hh"
8
10#include "BKE_node.hh"
12#include "BKE_volume_grid.hh"
14#include "BKE_volume_openvdb.hh"
15
16#include <fmt/format.h>
17
18#ifdef WITH_OPENVDB
19
20# include <openvdb/Grid.h>
21# include <openvdb/math/Transform.h>
22# include <openvdb/tools/Merge.h>
23
24#endif
25
27
28namespace blender::nodes {
29
30#ifdef WITH_OPENVDB
31
/* Grid value types that this evaluation code can pass to/from multi-functions. Other OpenVDB grid
 * instantiations (e.g. point or string grids) are rejected at compile time. */
template<typename GridT>
static constexpr bool is_supported_grid_type = is_same_any_v<GridT,
                                                             openvdb::FloatGrid,
                                                             openvdb::Vec3fGrid,
                                                             openvdb::BoolGrid,
                                                             openvdb::Int32Grid,
                                                             openvdb::Vec4fGrid>;
39
40template<typename Fn> static void to_typed_grid(const openvdb::GridBase &grid_base, Fn &&fn)
41{
42 const VolumeGridType grid_type = bke::volume_grid::get_type(grid_base);
43 BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) {
44 using GridT = typename decltype(type_tag)::type;
45 if constexpr (is_supported_grid_type<GridT>) {
46 fn(static_cast<const GridT &>(grid_base));
47 }
48 else {
50 }
51 });
52}
53
54template<typename Fn> static void to_typed_grid(openvdb::GridBase &grid_base, Fn &&fn)
55{
56 const VolumeGridType grid_type = bke::volume_grid::get_type(grid_base);
57 BKE_volume_grid_type_to_static_type(grid_type, [&](auto type_tag) {
58 using GridT = typename decltype(type_tag)::type;
59 if constexpr (is_supported_grid_type<GridT>) {
60 fn(static_cast<GridT &>(grid_base));
61 }
62 else {
64 }
65 });
66}
67
68static std::optional<VolumeGridType> cpp_type_to_grid_type(const CPPType &cpp_type)
69{
70 const std::optional<eCustomDataType> cd_type = bke::cpp_type_to_custom_data_type(cpp_type);
71 if (!cd_type) {
72 return std::nullopt;
73 }
75}
76
/* Bit mask with one bit per voxel of a standard 8x8x8 OpenVDB leaf node (log2dim = 3). */
using LeafNodeMask = openvdb::util::NodeMask<3u>;
/* Fills the given span with the coordinates of the active voxels of the current leaf. */
using GetVoxelsFn = FunctionRef<void(MutableSpan<openvdb::Coord> r_voxels)>;
/* Processes one whole leaf node at a time. */
using ProcessLeafFn = FunctionRef<void(const LeafNodeMask &leaf_node_mask,
                                       const openvdb::CoordBBox &leaf_bbox,
                                       GetVoxelsFn get_voxels_fn)>;
/* Processes a batch of active tiles. */
using ProcessTilesFn = FunctionRef<void(Span<openvdb::CoordBBox> tiles)>;
/* Processes a batch of active voxels. */
using ProcessVoxelsFn = FunctionRef<void(Span<openvdb::Coord> voxels)>;
84
89template<typename LeafNodeT>
90static void parallel_grid_topology_tasks_leaf_node(const LeafNodeT &node,
91 const ProcessLeafFn process_leaf_fn,
93{
94 using NodeMaskT = typename LeafNodeT::NodeMaskType;
95
96 const int on_count = node.onVoxelCount();
97 /* This number is somewhat arbitrary. 64 is a 1/8th of the number of voxels in a standard leaf
98 * which is 8x8x8. It's a trade-off between benefiting from the better performance of
99 * leaf-processing vs. processing more voxels in a batch. */
100 const int on_count_threshold = 64;
101 if (on_count <= on_count_threshold) {
102 /* The leaf contains only a few active voxels. It's beneficial to process them in a batch with
103 * active voxels from other leafs. So only gather them here for later processing. */
104 for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) {
105 const openvdb::Coord coord = value_iter.getCoord();
106 r_coords.append(coord);
107 }
108 return;
109 }
110 /* Process entire leaf at once. This is especially beneficial when very many of the voxels in
111 * the leaf are active. In that case, one can work on the openvdb arrays stored in the leafs
112 * directly. */
113 const NodeMaskT &value_mask = node.getValueMask();
114 const openvdb::CoordBBox bbox = node.getNodeBoundingBox();
115 process_leaf_fn(value_mask, bbox, [&](MutableSpan<openvdb::Coord> r_voxels) {
116 for (auto value_iter = node.cbeginValueOn(); value_iter.test(); ++value_iter) {
117 r_voxels[value_iter.pos()] = value_iter.getCoord();
118 }
119 });
120}
121
/**
 * Recursively distribute the active children of an internal node over parallel tasks, batching
 * voxels gathered from sparse leaves, and process the node's own active tiles in one batch.
 */
template<typename InternalNodeT>
static void parallel_grid_topology_tasks_internal_node(const InternalNodeT &node,
                                                       const ProcessLeafFn process_leaf_fn,
                                                       const ProcessVoxelsFn process_voxels_fn,
                                                       const ProcessTilesFn process_tiles_fn)
{
  using ChildNodeT = typename InternalNodeT::ChildNodeType;
  using LeafNodeT = typename InternalNodeT::LeafNodeType;
  using NodeMaskT = typename InternalNodeT::NodeMaskType;
  using UnionT = typename InternalNodeT::UnionType;

  /* Gather the active sub-nodes first, to be able to parallelize over them more easily. */
  const NodeMaskT &child_mask = node.getChildMask();
  const UnionT *table = node.getTable();
  Vector<int, 512> child_indices;
  for (auto child_mask_iter = child_mask.beginOn(); child_mask_iter.test(); ++child_mask_iter) {
    child_indices.append(child_mask_iter.pos());
  }

  threading::parallel_for(child_indices.index_range(), 8, [&](const IndexRange range) {
    /* Voxels collected from potentially multiple leaf nodes to be processed in one batch. This
     * inline buffer size is sufficient to avoid an allocation in all cases (a single standard leaf
     * has 512 voxels). */
    Vector<openvdb::Coord, 1024> gathered_voxels;
    for (const int child_index : child_indices.as_span().slice(range)) {
      const ChildNodeT &child = *table[child_index].getChild();
      if constexpr (std::is_same_v<ChildNodeT, LeafNodeT>) {
        parallel_grid_topology_tasks_leaf_node(child, process_leaf_fn, gathered_voxels);
        /* If enough voxels have been gathered, process them in one batch. */
        if (gathered_voxels.size() >= 512) {
          process_voxels_fn(gathered_voxels);
          gathered_voxels.clear();
        }
      }
      else {
        /* Recurse into lower-level internal nodes. */
        parallel_grid_topology_tasks_internal_node(
            child, process_leaf_fn, process_voxels_fn, process_tiles_fn);
      }
    }
    /* Process any remaining voxels. */
    if (!gathered_voxels.is_empty()) {
      process_voxels_fn(gathered_voxels);
      gathered_voxels.clear();
    }
  });

  /* Process the active tiles within the internal node. Note that these are not processed above
   * already because there only sub-nodes are handled, but tiles are "inlined" into internal nodes.
   * All tiles are first gathered and then processed in one batch. */
  const NodeMaskT &value_mask = node.getValueMask();
  Vector<openvdb::CoordBBox> tile_bboxes;
  for (auto value_mask_iter = value_mask.beginOn(); value_mask_iter.test(); ++value_mask_iter) {
    const openvdb::Index32 index = value_mask_iter.pos();
    const openvdb::Coord tile_origin = node.offsetToGlobalCoord(index);
    const openvdb::CoordBBox tile_bbox = openvdb::CoordBBox::createCube(tile_origin,
                                                                        ChildNodeT::DIM);
    tile_bboxes.append(tile_bbox);
  }
  if (!tile_bboxes.is_empty()) {
    process_tiles_fn(tile_bboxes);
  }
}
188
189/* Call the process functions on all active tiles and voxels in the given tree. */
190static void parallel_grid_topology_tasks(const openvdb::MaskTree &mask_tree,
191 const ProcessLeafFn process_leaf_fn,
192 const ProcessVoxelsFn process_voxels_fn,
193 const ProcessTilesFn process_tiles_fn)
194{
195 /* Iterate over the root internal nodes. */
196 for (auto root_child_iter = mask_tree.cbeginRootChildren(); root_child_iter.test();
197 ++root_child_iter)
198 {
199 const auto &internal_node = *root_child_iter;
200 parallel_grid_topology_tasks_internal_node(
201 internal_node, process_leaf_fn, process_voxels_fn, process_tiles_fn);
202 }
203}
204
219BLI_NOINLINE static void process_leaf_node(const mf::MultiFunction &fn,
220 const Span<bke::SocketValueVariant *> input_values,
221 const Span<const openvdb::GridBase *> input_grids,
223 const openvdb::math::Transform &transform,
224 const LeafNodeMask &leaf_node_mask,
225 const openvdb::CoordBBox &leaf_bbox,
226 const GetVoxelsFn get_voxels_fn)
227{
228 /* Create an index mask for all the active voxels in the leaf. */
229 IndexMaskMemory memory;
230 const IndexMask index_mask = IndexMask::from_predicate(
231 IndexRange(LeafNodeMask::SIZE), GrainSize(LeafNodeMask::SIZE), memory, [&](const int64_t i) {
232 return leaf_node_mask.isOn(i);
233 });
234
235 AlignedBuffer<8192, 8> allocation_buffer;
236 ResourceScope scope;
237 scope.allocator().provide_buffer(allocation_buffer);
238 mf::ParamsBuilder params{fn, &index_mask};
239 mf::ContextBuilder context;
240
241 /* We need to find the corresponding leaf nodes in all the input and output grids. That's done by
242 * finding the leaf that contains this voxel. */
243 const openvdb::Coord any_voxel_in_leaf = leaf_bbox.min();
244
245 std::optional<MutableSpan<openvdb::Coord>> voxel_coords_opt;
246 auto ensure_voxel_coords = [&]() {
247 if (!voxel_coords_opt.has_value()) {
248 voxel_coords_opt = scope.allocator().allocate_array<openvdb::Coord>(
249 index_mask.min_array_size());
250 get_voxels_fn(voxel_coords_opt.value());
251 }
252 return *voxel_coords_opt;
253 };
254
255 for (const int input_i : input_values.index_range()) {
256 const bke::SocketValueVariant &value_variant = *input_values[input_i];
257 const mf::ParamType param_type = fn.param_type(params.next_param_index());
258 const CPPType &param_cpp_type = param_type.data_type().single_type();
259
260 if (const openvdb::GridBase *grid_base = input_grids[input_i]) {
261 /* The input is a grid, so we can attempt to reference the grid values directly. */
262 to_typed_grid(*grid_base, [&](const auto &grid) {
263 using GridT = typename std::decay_t<decltype(grid)>;
264 using ValueT = typename GridT::ValueType;
265 BLI_assert(param_cpp_type.size == sizeof(ValueT));
266 const auto &tree = grid.tree();
267
268 if (const auto *leaf_node = tree.probeLeaf(any_voxel_in_leaf)) {
269 /* Boolean grids are special because they encode the values as bitmask. So create a
270 * temporary buffer for the inputs. */
271 if constexpr (std::is_same_v<ValueT, bool>) {
272 const Span<openvdb::Coord> voxels = ensure_voxel_coords();
273 MutableSpan<bool> values = scope.allocator().allocate_array<bool>(
274 index_mask.min_array_size());
275 index_mask.foreach_index([&](const int64_t i) {
276 const openvdb::Coord &coord = voxels[i];
277 values[i] = tree.getValue(coord);
278 });
279 params.add_readonly_single_input(values);
280 }
281 else {
282 const Span<ValueT> values(leaf_node->buffer().data(), LeafNodeMask::SIZE);
283 const LeafNodeMask &input_leaf_mask = leaf_node->valueMask();
284 const LeafNodeMask missing_mask = leaf_node_mask & !input_leaf_mask;
285 if (missing_mask.isOff()) {
286 /* All values available, so reference the data directly. */
287 params.add_readonly_single_input(
288 GSpan(param_cpp_type, values.data(), values.size()));
289 }
290 else {
291 /* Fill in the missing values with the background value. */
292 MutableSpan copied_values = scope.allocator().construct_array_copy(values);
293 const auto &background = tree.background();
294 for (auto missing_it = missing_mask.beginOn(); missing_it.test(); ++missing_it) {
295 const int index = missing_it.pos();
296 copied_values[index] = background;
297 }
298 params.add_readonly_single_input(
299 GSpan(param_cpp_type, copied_values.data(), copied_values.size()));
300 }
301 }
302 }
303 else {
304 /* The input does not have this leaf node, so just get the value that's used for the
305 * entire leaf. The leaf may be in a tile or is inactive in which case the background
306 * value is used. */
307 const auto single_value = tree.getValue(any_voxel_in_leaf);
308 params.add_readonly_single_input(GPointer(param_cpp_type, &single_value));
309 }
310 });
311 }
312 else if (value_variant.is_context_dependent_field()) {
313 /* Compute the field on all active voxels in the leaf and pass the result to the
314 * multi-function. */
315 const fn::GField field = value_variant.get<fn::GField>();
316 const CPPType &type = field.cpp_type();
317 const Span<openvdb::Coord> voxels = ensure_voxel_coords();
318 bke::VoxelFieldContext field_context{transform, voxels};
319 fn::FieldEvaluator evaluator{field_context, &index_mask};
320 GMutableSpan values{
321 type, scope.allocator().allocate_array(type, voxels.size()), voxels.size()};
322 evaluator.add_with_destination(field, values);
323 evaluator.evaluate();
324 params.add_readonly_single_input(values);
325 }
326 else {
327 /* Pass the single value directly to the multi-function. */
328 params.add_readonly_single_input(value_variant.get_single_ptr());
329 }
330 }
331
332 for (const int output_i : output_grids.index_range()) {
333 const mf::ParamType param_type = fn.param_type(params.next_param_index());
334 const CPPType &param_cpp_type = param_type.data_type().single_type();
335
336 openvdb::GridBase &grid_base = *output_grids[output_i];
337 to_typed_grid(grid_base, [&](auto &grid) {
338 using GridT = typename std::decay_t<decltype(grid)>;
339 using ValueT = typename GridT::ValueType;
340
341 auto &tree = grid.tree();
342 auto *leaf_node = tree.probeLeaf(any_voxel_in_leaf);
343 /* Should have been added before. */
344 BLI_assert(leaf_node);
345
346 /* Boolean grids are special because they encode the values as bitmask. */
347 if constexpr (std::is_same_v<ValueT, bool>) {
348 MutableSpan<bool> values = scope.allocator().allocate_array<bool>(
349 index_mask.min_array_size());
350 params.add_uninitialized_single_output(values);
351 }
352 else {
353 /* Write directly into the buffer of the output leaf node. */
354 ValueT *values = leaf_node->buffer().data();
355 params.add_uninitialized_single_output(
356 GMutableSpan(param_cpp_type, values, LeafNodeMask::SIZE));
357 }
358 });
359 }
360
361 /* Actually call the multi-function which will write the results into the output grids (except
362 * for boolean grids). */
363 fn.call_auto(index_mask, params, context);
364
365 for (const int output_i : output_grids.index_range()) {
366 const int param_index = input_values.size() + output_i;
367 const mf::ParamType param_type = fn.param_type(param_index);
368 const CPPType &param_cpp_type = param_type.data_type().single_type();
369 if (!param_cpp_type.is<bool>()) {
370 continue;
371 }
372 openvdb::BoolGrid &grid = static_cast<openvdb::BoolGrid &>(*output_grids[output_i]);
373 const Span<bool> values = params.computed_array(param_index).typed<bool>();
374 auto accessor = grid.getUnsafeAccessor();
375 const Span<openvdb::Coord> voxels = ensure_voxel_coords();
376 index_mask.foreach_index([&](const int64_t i) {
377 const openvdb::Coord &coord = voxels[i];
378 accessor.setValue(coord, values[i]);
379 });
380 }
381}
382
394BLI_NOINLINE static void process_voxels(const mf::MultiFunction &fn,
395 const Span<bke::SocketValueVariant *> input_values,
396 const Span<const openvdb::GridBase *> input_grids,
398 const openvdb::math::Transform &transform,
399 const Span<openvdb::Coord> voxels)
400{
401 const int64_t voxels_num = voxels.size();
402 const IndexMask index_mask{voxels_num};
403 AlignedBuffer<8192, 8> allocation_buffer;
404 ResourceScope scope;
405 scope.allocator().provide_buffer(allocation_buffer);
406 mf::ParamsBuilder params{fn, &index_mask};
407 mf::ContextBuilder context;
408
409 for (const int input_i : input_values.index_range()) {
410 const bke::SocketValueVariant &value_variant = *input_values[input_i];
411 const mf::ParamType param_type = fn.param_type(params.next_param_index());
412 const CPPType &param_cpp_type = param_type.data_type().single_type();
413
414 if (const openvdb::GridBase *grid_base = input_grids[input_i]) {
415 /* Retrieve all voxel values from the input grid. */
416 to_typed_grid(*grid_base, [&](const auto &grid) {
417 using ValueType = typename std::decay_t<decltype(grid)>::ValueType;
418 const auto &tree = grid.tree();
419 /* Could try to cache the accessor across batches, but it's not straight forward since its
420 * type depends on the grid type and thread-safety has to be maintained. It's likely not
421 * worth it because the cost is already negligible since we are processing a full batch. */
422 auto accessor = grid.getConstUnsafeAccessor();
423
424 MutableSpan<ValueType> values = scope.allocator().allocate_array<ValueType>(voxels_num);
425 for (const int64_t i : IndexRange(voxels_num)) {
426 const openvdb::Coord &coord = voxels[i];
427 values[i] = tree.getValue(coord, accessor);
428 }
429 BLI_assert(param_cpp_type.size == sizeof(ValueType));
430 params.add_readonly_single_input(GSpan(param_cpp_type, values.data(), voxels_num));
431 });
432 }
433 else if (value_variant.is_context_dependent_field()) {
434 /* Evaluate the field on all voxels. */
435 const fn::GField field = value_variant.get<fn::GField>();
436 const CPPType &type = field.cpp_type();
437 bke::VoxelFieldContext field_context{transform, voxels};
438 fn::FieldEvaluator evaluator{field_context, voxels_num};
439 GMutableSpan values{type, scope.allocator().allocate_array(type, voxels_num), voxels_num};
440 evaluator.add_with_destination(field, values);
441 evaluator.evaluate();
442 params.add_readonly_single_input(values);
443 }
444 else {
445 /* Pass the single value directly to the multi-function. */
446 params.add_readonly_single_input(value_variant.get_single_ptr());
447 }
448 }
449
450 /* Prepare temporary output buffers for the field evaluation. Those will later be copied into the
451 * output grids. */
452 for ([[maybe_unused]] const int output_i : output_grids.index_range()) {
453 const int param_index = input_values.size() + output_i;
454 const mf::ParamType param_type = fn.param_type(param_index);
455 const CPPType &type = param_type.data_type().single_type();
456 void *buffer = scope.allocator().allocate_array(type, voxels_num);
457 params.add_uninitialized_single_output(GMutableSpan{type, buffer, voxels_num});
458 }
459
460 /* Actually call the multi-function which will fill the temporary output buffers. */
461 fn.call_auto(index_mask, params, context);
462
463 /* Copy the values from the temporary buffers into the output grids. */
464 for (const int output_i : output_grids.index_range()) {
465 openvdb::GridBase &grid_base = *output_grids[output_i];
466 to_typed_grid(grid_base, [&](auto &grid) {
467 using GridT = std::decay_t<decltype(grid)>;
468 using ValueType = typename GridT::ValueType;
469 const int param_index = input_values.size() + output_i;
470 const ValueType *computed_values = static_cast<const ValueType *>(
471 params.computed_array(param_index).data());
472
473 auto accessor = grid.getUnsafeAccessor();
474 for (const int64_t i : IndexRange(voxels_num)) {
475 const openvdb::Coord &coord = voxels[i];
476 const ValueType &value = computed_values[i];
477 accessor.setValue(coord, value);
478 }
479 });
480 }
481}
482
495BLI_NOINLINE static void process_tiles(const mf::MultiFunction &fn,
496 const Span<bke::SocketValueVariant *> input_values,
497 const Span<const openvdb::GridBase *> input_grids,
499 const openvdb::math::Transform &transform,
501{
502 const int64_t tiles_num = tiles.size();
503 const IndexMask index_mask{tiles_num};
504
505 AlignedBuffer<8192, 8> allocation_buffer;
506 ResourceScope scope;
507 scope.allocator().provide_buffer(allocation_buffer);
508 mf::ParamsBuilder params{fn, &index_mask};
509 mf::ContextBuilder context;
510
511 for (const int input_i : input_values.index_range()) {
512 const bke::SocketValueVariant &value_variant = *input_values[input_i];
513 const mf::ParamType param_type = fn.param_type(params.next_param_index());
514 const CPPType &param_cpp_type = param_type.data_type().single_type();
515
516 if (const openvdb::GridBase *grid_base = input_grids[input_i]) {
517 /* Sample the tile values from the input grid. */
518 to_typed_grid(*grid_base, [&](const auto &grid) {
519 using GridT = std::decay_t<decltype(grid)>;
520 using ValueType = typename GridT::ValueType;
521 const auto &tree = grid.tree();
522 auto accessor = grid.getConstUnsafeAccessor();
523
524 MutableSpan<ValueType> values = scope.allocator().allocate_array<ValueType>(tiles_num);
525 for (const int64_t i : IndexRange(tiles_num)) {
526 const openvdb::CoordBBox &tile = tiles[i];
527 /* The tile is assumed to have a single constant value. Therefore, we can get the value
528 * from any voxel in that tile as representative. */
529 const openvdb::Coord any_coord_in_tile = tile.min();
530 values[i] = tree.getValue(any_coord_in_tile, accessor);
531 }
532 BLI_assert(param_cpp_type.size == sizeof(ValueType));
533 params.add_readonly_single_input(GSpan(param_cpp_type, values.data(), tiles_num));
534 });
535 }
536 else if (value_variant.is_context_dependent_field()) {
537 /* Evaluate the field on all tiles. */
538 const fn::GField field = value_variant.get<fn::GField>();
539 const CPPType &type = field.cpp_type();
540 bke::TilesFieldContext field_context{transform, tiles};
541 fn::FieldEvaluator evaluator{field_context, tiles_num};
542 GMutableSpan values{type, scope.allocator().allocate_array(type, tiles_num), tiles_num};
543 evaluator.add_with_destination(field, values);
544 evaluator.evaluate();
545 params.add_readonly_single_input(values);
546 }
547 else {
548 /* Pass the single value directly to the multi-function. */
549 params.add_readonly_single_input(value_variant.get_single_ptr());
550 }
551 }
552
553 /* Prepare temporary output buffers for the field evaluation. Those will later be copied into the
554 * output grids. */
555 for ([[maybe_unused]] const int output_i : output_grids.index_range()) {
556 const int param_index = input_values.size() + output_i;
557 const mf::ParamType param_type = fn.param_type(param_index);
558 const CPPType &type = param_type.data_type().single_type();
559 void *buffer = scope.allocator().allocate_array(type, tiles_num);
560 params.add_uninitialized_single_output(GMutableSpan{type, buffer, tiles_num});
561 }
562
563 /* Actually call the multi-function which will fill the temporary output buffers. */
564 fn.call_auto(index_mask, params, context);
565
566 /* Copy the values from the temporary buffers into the output grids. */
567 for (const int output_i : output_grids.index_range()) {
568 const int param_index = input_values.size() + output_i;
569 openvdb::GridBase &grid_base = *output_grids[output_i];
570 to_typed_grid(grid_base, [&](auto &grid) {
571 using GridT = typename std::decay_t<decltype(grid)>;
572 using TreeT = typename GridT::TreeType;
573 using ValueType = typename GridT::ValueType;
574 auto &tree = grid.tree();
575
576 const ValueType *computed_values = static_cast<const ValueType *>(
577 params.computed_array(param_index).data());
578
579 const auto set_tile_value =
580 [&](auto &node, const openvdb::Coord &coord_in_tile, auto value) {
581 const openvdb::Index n = node.coordToOffset(coord_in_tile);
582 BLI_assert(node.isChildMaskOff(n));
583 /* TODO: Figure out how to do this without const_cast, although the same is done in
584 * `openvdb_ax/openvdb_ax/compiler/VolumeExecutable.cc` which has a similar purpose.
585 * It seems like OpenVDB generally allows that, but it does not have a proper public
586 * API for this yet. */
587 using UnionType = typename std::decay_t<decltype(node)>::UnionType;
588 auto *table = const_cast<UnionType *>(node.getTable());
589 table[n].setValue(value);
590 };
591
592 for (const int i : IndexRange(tiles_num)) {
593 const openvdb::CoordBBox tile = tiles[i];
594 const openvdb::Coord coord_in_tile = tile.min();
595 const auto &computed_value = computed_values[i];
596 using InternalNode1 = typename TreeT::RootNodeType::ChildNodeType;
597 using InternalNode2 = typename InternalNode1::ChildNodeType;
598 /* Find the internal node that contains the tile and update the value in there. */
599 if (auto *node = tree.template probeNode<InternalNode2>(coord_in_tile)) {
600 set_tile_value(*node, coord_in_tile, computed_value);
601 }
602 else if (auto *node = tree.template probeNode<InternalNode1>(coord_in_tile)) {
603 set_tile_value(*node, coord_in_tile, computed_value);
604 }
605 else {
607 }
608 }
609 });
610 }
611}
612
614 const mf::MultiFunction &fn,
615 const Span<bke::SocketValueVariant *> input_values,
616 const Span<bke::SocketValueVariant *> output_values,
617 std::string &r_error_message)
618{
619 const int inputs_num = input_values.size();
620 Array<bke::VolumeTreeAccessToken> input_volume_tokens(inputs_num);
621 Array<const openvdb::GridBase *> input_grids(inputs_num, nullptr);
622
623 for (const int input_i : IndexRange(inputs_num)) {
624 bke::SocketValueVariant &value_variant = *input_values[input_i];
625 if (value_variant.is_volume_grid()) {
626 const bke::GVolumeGrid g_volume_grid = value_variant.get<bke::GVolumeGrid>();
627 input_grids[input_i] = &g_volume_grid->grid(input_volume_tokens[input_i]);
628 }
629 else if (value_variant.is_context_dependent_field()) {
630 /* Nothing to do here. The field is evaluated later. */
631 }
632 else {
633 value_variant.convert_to_single();
634 }
635 }
636
637 const openvdb::math::Transform *transform = nullptr;
638 for (const openvdb::GridBase *grid : input_grids) {
639 if (!grid) {
640 continue;
641 }
642 const openvdb::math::Transform &other_transform = grid->transform();
643 if (!transform) {
644 transform = &other_transform;
645 continue;
646 }
647 if (*transform != other_transform) {
648 r_error_message = TIP_("Input grids have incompatible transforms");
649 return false;
650 }
651 }
652 if (transform == nullptr) {
653 r_error_message = TIP_("No input grid found that can determine the topology");
654 return false;
655 }
656
657 openvdb::MaskTree mask_tree;
658 for (const openvdb::GridBase *grid : input_grids) {
659 if (!grid) {
660 continue;
661 }
662 to_typed_grid(*grid, [&](const auto &grid) { mask_tree.topologyUnion(grid.tree()); });
663 }
664
665 Array<openvdb::GridBase::Ptr> output_grids(output_values.size());
666 for (const int i : output_values.index_range()) {
667 const int param_index = input_values.size() + i;
668 const mf::ParamType param_type = fn.param_type(param_index);
669 const CPPType &cpp_type = param_type.data_type().single_type();
670 const std::optional<VolumeGridType> grid_type = cpp_type_to_grid_type(cpp_type);
671 if (!grid_type) {
672 r_error_message = TIP_("Grid type not supported");
673 return false;
674 }
675
676 openvdb::GridBase::Ptr grid;
677 BKE_volume_grid_type_to_static_type(*grid_type, [&](auto type_tag) {
678 using GridT = typename decltype(type_tag)::type;
679 using TreeT = typename GridT::TreeType;
680 using ValueType = typename TreeT::ValueType;
681 const ValueType background{};
682 auto tree = std::make_shared<TreeT>(mask_tree, background, openvdb::TopologyCopy());
683 grid = openvdb::createGrid(std::move(tree));
684 });
685
686 grid->setTransform(transform->copy());
687 output_grids[i] = std::move(grid);
688 }
689
690 parallel_grid_topology_tasks(
691 mask_tree,
692 [&](const LeafNodeMask &leaf_node_mask,
693 const openvdb::CoordBBox &leaf_bbox,
694 const GetVoxelsFn get_voxels_fn) {
695 process_leaf_node(fn,
696 input_values,
697 input_grids,
698 output_grids,
699 *transform,
700 leaf_node_mask,
701 leaf_bbox,
702 get_voxels_fn);
703 },
704 [&](const Span<openvdb::Coord> voxels) {
705 process_voxels(fn, input_values, input_grids, output_grids, *transform, voxels);
706 },
708 process_tiles(fn, input_values, input_grids, output_grids, *transform, tiles);
709 });
710
711 for (const int i : output_values.index_range()) {
712 if (bke::SocketValueVariant *output_value = output_values[i]) {
713 output_value->set(bke::GVolumeGrid(std::move(output_grids[i])));
714 }
715 }
716
717 return true;
718}
719
720#else
721
723 const mf::MultiFunction & /*fn*/,
724 const Span<bke::SocketValueVariant *> /*input_values*/,
725 const Span<bke::SocketValueVariant *> /*output_values*/,
726 std::string &r_error_message)
727{
728 r_error_message = TIP_("Compiled without OpenVDB");
729 return false;
730}
731
732#endif
733
734} // namespace blender::nodes
CustomData interface, see also DNA_customdata_types.h.
VolumeGridType
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_NOINLINE
#define TIP_(msgid)
float[3] Vector
BMesh const char void * data
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
long long int int64_t
int64_t size
bool is() const
static IndexMask from_predicate(const IndexMask &universe, GrainSize grain_size, IndexMaskMemory &memory, Fn &&predicate)
int64_t min_array_size() const
void foreach_index(Fn &&fn) const
constexpr int64_t size() const
Definition BLI_span.hh:493
constexpr T * data() const
Definition BLI_span.hh:539
constexpr IndexRange index_range() const
Definition BLI_span.hh:670
LinearAllocator & allocator()
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
void append(const T &value)
bool is_empty() const
IndexRange index_range() const
KDTree_3d * tree
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
ccl_gpu_kernel_postfix ccl_global KernelWorkTile * tiles
const ccl_global KernelWorkTile * tile
if(state< num_states)
VolumeGridType get_type(const VolumeGridData &grid)
eCustomDataType cpp_type_to_custom_data_type(const CPPType &type)
std::optional< VolumeGridType > custom_data_type_to_volume_grid_type(eCustomDataType type)
int context(const bContext *C, const char *member, bContextDataResult *result)
bool execute_multi_function_on_value_variant__volume_grid(const mf::MultiFunction &, const Span< bke::SocketValueVariant * >, const Span< bke::SocketValueVariant * >, std::string &r_error_message)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
constexpr bool is_same_any_v
i
Definition text_draw.cc:230