Blender V4.5
node_composite_dilate.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2006 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include <limits>
10
11#include "BLI_assert.h"
12#include "BLI_index_range.hh"
13#include "BLI_math_base.hh"
15#include "BLI_task.hh"
16
17#include "RNA_access.hh"
18
19#include "UI_interface.hh"
20#include "UI_resources.hh"
21
22#include "GPU_shader.hh"
23
26#include "COM_algorithm_smaa.hh"
27#include "COM_node_operation.hh"
28#include "COM_utilities.hh"
29
31
32/* **************** Dilate/Erode ******************** */
33
35
37
39{
40 b.add_input<decl::Float>("Mask").default_value(0.0f).min(0.0f).max(1.0f);
41 b.add_input<decl::Int>("Size")
43 .description(
44 "The size of dilation/erosion in pixels. Positive values dilates and negative values "
45 "erodes")
46 .compositor_expects_single_value();
47 b.add_input<decl::Float>("Falloff Size")
48 .default_value(0.0f)
49 .min(0.0f)
50 .description(
51 "The size of the falloff from the edges in pixels. If less than two pixels, the edges "
52 "will be anti-aliased")
53 .compositor_expects_single_value();
54
55 b.add_output<decl::Float>("Mask");
56}
57
58static void node_composit_init_dilateerode(bNodeTree * /*ntree*/, bNode *node)
59{
61 data->falloff = PROP_SMOOTH;
62 node->storage = data;
63}
64
66{
67 layout->prop(ptr, "mode", UI_ITEM_R_SPLIT_EMPTY_NAME, std::nullopt, ICON_NONE);
69 layout->prop(ptr, "falloff", UI_ITEM_R_SPLIT_EMPTY_NAME, std::nullopt, ICON_NONE);
70 }
71}
72
73static void node_update(bNodeTree *ntree, bNode *node)
74{
75 bNodeSocket *falloff_size_input = bke::node_find_socket(*node, SOCK_IN, "Falloff Size");
76 const bool is_falloff_size_needed = node->custom1 == CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD;
77 blender::bke::node_set_socket_availability(*ntree, *falloff_size_input, is_falloff_size_needed);
78}
79
80using namespace blender::compositor;
81
83 public:
85
86 void execute() override
87 {
88 if (this->is_identity()) {
89 const Result &input = this->get_input("Mask");
90 Result &output = this->get_result("Mask");
91 output.share_data(input);
92 return;
93 }
94
95 switch (get_method()) {
98 return;
101 return;
104 return;
107 return;
108 default:
110 return;
111 }
112 }
113
114 /* ----------------------------
115 * Step Morphological Operator.
116 * ---------------------------- */
117
119 {
120 Result horizontal_pass_result = execute_step_horizontal_pass();
121 execute_step_vertical_pass(horizontal_pass_result);
122 horizontal_pass_result.release();
123 }
124
126 {
127 if (this->context().use_gpu()) {
129 }
131 }
132
134 {
136 GPU_shader_bind(shader);
137
138 GPU_shader_uniform_1i(shader, "radius", this->get_structuring_element_size() / 2);
139
140 const Result &input_mask = get_input("Mask");
141 input_mask.bind_as_texture(shader, "input_tx");
142
143 /* We allocate an output image of a transposed size, that is, with a height equivalent to the
144 * width of the input and vice versa. This is done as a performance optimization. The shader
145 * will process the image horizontally and write it to the intermediate output transposed. Then
146 * the vertical pass will execute the same horizontal pass shader, but since its input is
147 * transposed, it will effectively do a vertical pass and write to the output transposed,
148 * effectively undoing the transposition in the horizontal pass. This is done to improve
149 * spatial cache locality in the shader and to avoid having two separate shaders for each of
150 * the passes. */
151 const Domain domain = compute_domain();
152 const int2 transposed_domain = int2(domain.size.y, domain.size.x);
153
154 Result horizontal_pass_result = context().create_result(ResultType::Float);
155 horizontal_pass_result.allocate_texture(transposed_domain);
156 horizontal_pass_result.bind_as_image(shader, "output_img");
157
159
161 input_mask.unbind_as_texture();
162 horizontal_pass_result.unbind_as_image();
163
164 return horizontal_pass_result;
165 }
166
168 {
169 const Result &input = get_input("Mask");
170
171 /* We allocate an output image of a transposed size, that is, with a height equivalent to the
172 * width of the input and vice versa. This is done as a performance optimization. The shader
173 * will process the image horizontally and write it to the intermediate output transposed. Then
174 * the vertical pass will execute the same horizontal pass shader, but since its input is
175 * transposed, it will effectively do a vertical pass and write to the output transposed,
176 * effectively undoing the transposition in the horizontal pass. This is done to improve
177 * spatial cache locality in the shader and to avoid having two separate shaders for each of
178 * the passes. */
179 const Domain domain = compute_domain();
180 const int2 transposed_domain = int2(domain.size.y, domain.size.x);
181
182 Result horizontal_pass_result = context().create_result(ResultType::Float);
183 horizontal_pass_result.allocate_texture(transposed_domain);
184
185 if (this->is_dilation()) {
186 this->execute_step_pass_cpu<true>(input, horizontal_pass_result);
187 }
188 else {
189 this->execute_step_pass_cpu<false>(input, horizontal_pass_result);
190 }
191
192 return horizontal_pass_result;
193 }
194
195 void execute_step_vertical_pass(Result &horizontal_pass_result)
196 {
197 if (this->context().use_gpu()) {
198 this->execute_step_vertical_pass_gpu(horizontal_pass_result);
199 }
200 else {
201 this->execute_step_vertical_pass_cpu(horizontal_pass_result);
202 }
203 }
204
205 void execute_step_vertical_pass_gpu(Result &horizontal_pass_result)
206 {
208 GPU_shader_bind(shader);
209
210 GPU_shader_uniform_1i(shader, "radius", this->get_structuring_element_size() / 2);
211
212 horizontal_pass_result.bind_as_texture(shader, "input_tx");
213
214 const Domain domain = compute_domain();
215 Result &output_mask = get_result("Mask");
216 output_mask.allocate_texture(domain);
217 output_mask.bind_as_image(shader, "output_img");
218
219 /* Notice that the domain is transposed, see the note on the horizontal pass method for more
220 * information on the reasoning behind this. */
221 compute_dispatch_threads_at_least(shader, int2(domain.size.y, domain.size.x));
222
224 horizontal_pass_result.unbind_as_texture();
225 output_mask.unbind_as_image();
226 }
227
229 {
230 if (this->is_dilation()) {
231 return "compositor_morphological_step_dilate";
232 }
233 return "compositor_morphological_step_erode";
234 }
235
236 void execute_step_vertical_pass_cpu(Result &horizontal_pass_result)
237 {
238 const Domain domain = compute_domain();
239 Result &output_mask = get_result("Mask");
240 output_mask.allocate_texture(domain);
241
242 if (this->is_dilation()) {
243 this->execute_step_pass_cpu<true>(horizontal_pass_result, output_mask);
244 }
245 else {
246 this->execute_step_pass_cpu<false>(horizontal_pass_result, output_mask);
247 }
248 }
249
 250 /* Apply a van Herk/Gil-Werman algorithm on the input based on:
 251 *
 252 * Domanski, Luke, Pascal Vallotton, and Dadong Wang. "Parallel van Herk/Gil-Werman image
 253 * morphology on GPUs using CUDA." GTC 2009 Conference posters. 2009.
 254 *
 255 * The output is written transposed for more efficient execution, see the horizontal pass method
 256 * for more information. The template argument IsDilate decides if dilation or erosion will be
 257 * performed. Each output pixel is produced from a constant number of applications of the
 258 * morphology operator regardless of the structuring element size, by combining per-tile
 259 * prefix and suffix tables. */
 258 template<bool IsDilate> void execute_step_pass_cpu(const Result &input, Result &output)
 259 {
 /* The identity element of the morphology operator: dilation folds with max, so start from
 * the lowest float; erosion folds with min, so start from the largest float. It is also used
 * as the fallback for out-of-bounds samples so they never affect the result. */
 260 const float limit = IsDilate ? std::numeric_limits<float>::lowest() :
 261 std::numeric_limits<float>::max();
 262 const auto morphology_operator = [](const float a, const float b) {
 263 if constexpr (IsDilate) {
 264 return math::max(a, b);
 265 }
 266 else {
 267 return math::min(a, b);
 268 }
 269 };
 270
 271 /* Notice that the domain is transposed, see the note on the horizontal pass method for more
 272 * information on the reasoning behind this. */
 273 const int2 image_size = int2(output.domain().size.y, output.domain().size.x);
 274
 275 /* We process rows in tiles whose size is the same as the structuring element size. So we
 276 * compute the number of tiles using ceiling division, noting that the last tile might not be
 277 * complete. */
 278 const int size = this->get_structuring_element_size();
 279 const int tiles_count = int(math::ceil(float(image_size.x) / size));
 280
 281 /* Process along rows in parallel. */
 282 threading::parallel_for(IndexRange(image_size.y), 1, [&](const IndexRange sub_y_range) {
 /* The tables are allocated once per worker and reused across rows and tiles. */
 283 Array<float> prefix_table(size);
 284 Array<float> suffix_table(size);
 285 for (const int64_t y : sub_y_range) {
 286 for (const int64_t tile_index : IndexRange(tiles_count)) {
 287 const int64_t tile_start = tile_index * size;
 288 /* Compute the x texel location of the pixel at the center of the tile. Noting that the
 289 * size of the structuring element is guaranteed to be odd. */
 290 const int64_t tile_center = tile_start + size / 2;
 291
 292 float prefix_value = limit;
 293 float suffix_value = limit;
 294 /* Starting from the pixel at the center of the tile, recursively compute the prefix
 295 * table to the right and the suffix table to the left by applying the morphology
 296 * operator. Out-of-bounds samples fall back to the identity value `limit`. */
 297 for (const int64_t i : IndexRange(size)) {
 298 const float right_value = input.load_pixel_fallback(int2(tile_center + i, y), limit);
 299 prefix_value = morphology_operator(prefix_value, right_value);
 300 prefix_table[i] = prefix_value;
 301
 302 /* Note that we access pixels increasingly to the left, so invert the suffix table when
 303 * writing to it. */
 304 const float left_value = input.load_pixel_fallback(int2(tile_center - i, y), limit);
 305 suffix_value = morphology_operator(suffix_value, left_value);
 306 suffix_table[size - 1 - i] = suffix_value;
 307 }
 308
 /* Clamp the last, possibly partial, tile to the image width. */
 309 const IndexRange tile_range = IndexRange(tile_start, size);
 310 const IndexRange safe_tile_range = tile_range.intersect(IndexRange(image_size.x));
 311 /* For each pixel in the tile, write the result of applying the morphology operator on
 312 * the prefix and suffix values. */
 313 for (const int64_t x : safe_tile_range) {
 314 /* Compute the local table index, since the prefix and suffix tables are local to each
 315 * tile. */
 316 const int64_t table_index = x - tile_start;
 317 const float prefix_value = prefix_table[table_index];
 318 const float suffix_value = suffix_table[table_index];
 319
 320 const float value = morphology_operator(prefix_value, suffix_value);
 321
 322 /* Write the value using the transposed texel. See the horizontal pass method for more
 323 * information on the rationale behind this. */
 324 output.store_pixel(int2(y, x), value);
 325 }
 326 }
 327 }
 328 });
 329 }
330
331 /* --------------------------------
332 * Distance Morphological Operator.
333 * -------------------------------- */
334
336 {
337 morphological_distance(context(), get_input("Mask"), get_result("Mask"), this->get_size());
338 }
339
340 /* ------------------------------------------
341 * Distance Threshold Morphological Operator.
342 * ------------------------------------------ */
343
345 {
346 Result output_mask = context().create_result(ResultType::Float);
347
348 if (this->context().use_gpu()) {
349 this->execute_distance_threshold_gpu(output_mask);
350 }
351 else {
352 this->execute_distance_threshold_cpu(output_mask);
353 }
354
355 /* For configurations where there is little user-specified falloff size, anti-alias the result
356 * for smoother edges. */
357 Result &output = this->get_result("Mask");
358 if (this->get_falloff_size() < 2.0f) {
359 smaa(this->context(), output_mask, output);
360 output_mask.release();
361 }
362 else {
363 output.steal_data(output_mask);
364 }
365 }
366
368 {
369 GPUShader *shader = context().get_shader("compositor_morphological_distance_threshold");
370 GPU_shader_bind(shader);
371
372 GPU_shader_uniform_1f(shader, "inset", math::max(this->get_falloff_size(), 10e-6f));
374 GPU_shader_uniform_1i(shader, "distance", this->get_size());
375
376 const Result &input_mask = get_input("Mask");
377 input_mask.bind_as_texture(shader, "input_tx");
378
379 const Domain domain = compute_domain();
380 output.allocate_texture(domain);
381 output.bind_as_image(shader, "output_img");
382
384
386 output.unbind_as_image();
387 input_mask.unbind_as_texture();
388 }
389
391 {
392 const Result &input = get_input("Mask");
393
394 const Domain domain = compute_domain();
395 output.allocate_texture(domain);
396
397 const int2 image_size = input.domain().size;
398
399 const float inset = math::max(this->get_falloff_size(), 10e-6f);
400 const int radius = this->get_morphological_distance_threshold_radius();
401 const int distance = this->get_size();
402
403 /* The Morphological Distance Threshold operation is effectively three consecutive operations
404 * implemented as a single operation. The three operations are as follows:
405 *
406 * .-----------. .--------------. .----------------.
407 * | Threshold |-->| Dilate/Erode |-->| Distance Inset |
408 * '-----------' '--------------' '----------------'
409 *
410 * The threshold operation just converts the input into a binary image, where the pixel is 1 if
411 * it is larger than 0.5 and 0 otherwise. Pixels that are 1 in the output of the threshold
412 * operation are said to be masked. The dilate/erode operation is a dilate or erode
413 * morphological operation with a circular structuring element depending on the sign of the
414 * distance, where it is a dilate operation if the distance is positive and an erode operation
415 * otherwise. This is equivalent to the Morphological Distance operation, see its
416 * implementation for more information. Finally, the distance inset is an operation that
417 * converts the binary image into a narrow band distance field. That is, pixels that are
418 * unmasked will remain 0, while pixels that are masked will start from zero at the boundary of
 419 * the masked region and linearly increase until reaching 1 in the span of a number of pixels
420 * given by the inset value.
421 *
422 * As a performance optimization, the dilate/erode operation is omitted and its effective
423 * result is achieved by slightly adjusting the distance inset operation. The base distance
424 * inset operation works by computing the signed distance from the current center pixel to the
425 * nearest pixel with a different value. Since our image is a binary image, that means that if
426 * the pixel is masked, we compute the signed distance to the nearest unmasked pixel, and if
427 * the pixel unmasked, we compute the signed distance to the nearest masked pixel. The distance
428 * is positive if the pixel is masked and negative otherwise. The distance is then normalized
429 * by dividing by the given inset value and clamped to the [0, 1] range. Since distances larger
430 * than the inset value are eventually clamped, the distance search window is limited to a
431 * radius equivalent to the inset value.
432 *
 433 * To achieve the effective result of the omitted dilate/erode operation, we adjust the
434 * distance inset operation as follows. First, we increase the radius of the distance search
435 * window by the radius of the dilate/erode operation. Then we adjust the resulting narrow band
436 * signed distance field as follows.
437 *
438 * For the erode case, we merely subtract the erode distance, which makes the outermost erode
439 * distance number of pixels zero due to clamping, consequently achieving the result of the
440 * erode, while retaining the needed inset because we increased the distance search window by
441 * the same amount we subtracted.
442 *
443 * Similarly, for the dilate case, we add the dilate distance, which makes the dilate distance
444 * number of pixels just outside of the masked region positive and part of the narrow band
445 * distance field, consequently achieving the result of the dilate, while at the same time, the
446 * innermost dilate distance number of pixels become 1 due to clamping, retaining the needed
447 * inset because we increased the distance search window by the same amount we added.
448 *
449 * Since the erode/dilate distance is already signed appropriately as described before, we just
450 * add it in both cases. */
451 parallel_for(domain.size, [&](const int2 texel) {
452 /* Apply a threshold operation on the center pixel, where the threshold is currently
453 * hard-coded at 0.5. The pixels with values larger than the threshold are said to be
454 * masked. */
455 bool is_center_masked = input.load_pixel<float>(texel) > 0.5f;
456
457 /* Since the distance search window is limited to the given radius, the maximum possible
458 * squared distance to the center is double the squared radius. */
459 int minimum_squared_distance = radius * radius * 2;
460
461 /* Compute the start and end bounds of the window such that no out-of-bounds processing
462 * happen in the loops. */
463 const int2 start = math::max(texel - radius, int2(0)) - texel;
464 const int2 end = math::min(texel + radius + 1, image_size) - texel;
465
466 /* Find the squared distance to the nearest different pixel in the search window of the given
467 * radius. */
468 for (int y = start.y; y < end.y; y++) {
469 const int yy = y * y;
470 for (int x = start.x; x < end.x; x++) {
471 bool is_sample_masked = input.load_pixel<float>(texel + int2(x, y)) > 0.5f;
472 if (is_center_masked != is_sample_masked) {
473 minimum_squared_distance = math::min(minimum_squared_distance, x * x + yy);
474 }
475 }
476 }
477
478 /* Compute the actual distance from the squared distance and assign it an appropriate sign
479 * depending on whether it lies in a masked region or not. */
480 float signed_minimum_distance = math::sqrt(float(minimum_squared_distance)) *
481 (is_center_masked ? 1.0f : -1.0f);
482
483 /* Add the erode/dilate distance and divide by the inset amount as described in the
484 * discussion, then clamp to the [0, 1] range. */
485 float value = math::clamp((signed_minimum_distance + distance) / inset, 0.0f, 1.0f);
486
487 output.store_pixel(texel, value);
488 });
489 }
490
491 /* See the discussion in the implementation for more information. */
493 {
494 return int(math::ceil(this->get_falloff_size())) + math::abs(this->get_size());
495 }
496
497 /* ----------------------------------------
498 * Distance Feather Morphological Operator.
499 * ---------------------------------------- */
500
502 {
504 get_input("Mask"),
505 get_result("Mask"),
506 this->get_size(),
507 node_storage(bnode()).falloff);
508 }
509
510 /* ---------------
511 * Common Methods.
512 * --------------- */
513
515 {
516 const Result &input = get_input("Mask");
517 if (input.is_single_value()) {
518 return true;
519 }
520
522 this->get_falloff_size() != 0.0f)
523 {
524 return false;
525 }
526
527 if (this->get_size() == 0) {
528 return true;
529 }
530
531 return false;
532 }
533
534 /* Gets the size of the structuring element. See the get_size method for more information. */
536 {
537 return math::abs(this->get_size()) * 2 + 1;
538 }
539
540 /* Returns true if dilation should be performed, as opposed to erosion. See the get_size()
541 * method for more information. */
543 {
544 return this->get_size() > 0;
545 }
546
547 /* The signed radius of the structuring element, that is, half the structuring element size. The
548 * sign indicates either dilation or erosion, where negative values means erosion. */
550 {
551 return this->get_input("Size").get_single_value_default(0);
552 }
553
555 {
556 return math::max(0.0f, this->get_input("Falloff Size").get_single_value_default(0.0f));
557 }
558
560 {
561 return static_cast<CMPNodeDilateErodeMethod>(bnode().custom1);
562 }
563};
564
566{
567 return new DilateErodeOperation(context, node);
568}
569
570} // namespace blender::nodes::node_composite_dilate_cc
571
573{
575
576 static blender::bke::bNodeType ntype;
577
578 cmp_node_type_base(&ntype, "CompositorNodeDilateErode", CMP_NODE_DILATEERODE);
579 ntype.ui_name = "Dilate/Erode";
580 ntype.ui_description = "Expand and shrink masks";
581 ntype.enum_name_legacy = "DILATEERODE";
583 ntype.draw_buttons = file_ns::node_composit_buts_dilateerode;
584 ntype.declare = file_ns::cmp_node_dilate_declare;
585 ntype.updatefunc = file_ns::node_update;
586 ntype.initfunc = file_ns::node_composit_init_dilateerode;
588 ntype, "NodeDilateErode", node_free_standard_storage, node_copy_standard_storage);
589 ntype.get_compositor_operation = file_ns::get_compositor_operation;
590
592}
#define NODE_STORAGE_FUNCS(StorageT)
Definition BKE_node.hh:1215
#define NODE_CLASS_OP_FILTER
Definition BKE_node.hh:437
#define CMP_NODE_DILATEERODE
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
CMPNodeDilateErodeMethod
@ CMP_NODE_DILATE_ERODE_STEP
@ CMP_NODE_DILATE_ERODE_DISTANCE_FEATHER
@ CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD
@ CMP_NODE_DILATE_ERODE_DISTANCE
@ SOCK_IN
@ PROP_SMOOTH
void GPU_shader_uniform_1i(GPUShader *sh, const char *name, int value)
void GPU_shader_uniform_1f(GPUShader *sh, const char *name, float value)
void GPU_shader_bind(GPUShader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_shader_unbind()
#define NOD_REGISTER_NODE(REGISTER_FUNC)
@ UI_ITEM_R_SPLIT_EMPTY_NAME
BMesh const char void * data
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
Result create_result(ResultType type, ResultPrecision precision)
GPUShader * get_shader(const char *info_name, ResultPrecision precision)
NodeOperation(Context &context, DNode node)
Result & get_result(StringRef identifier)
Definition operation.cc:39
Result & get_input(StringRef identifier) const
Definition operation.cc:138
virtual Domain compute_domain()
Definition operation.cc:56
void share_data(const Result &source)
Definition result.cc:401
void allocate_texture(Domain domain, bool from_pool=true)
Definition result.cc:309
void unbind_as_texture() const
Definition result.cc:389
void bind_as_texture(GPUShader *shader, const char *texture_name) const
Definition result.cc:365
void bind_as_image(GPUShader *shader, const char *image_name, bool read=false) const
Definition result.cc:376
void unbind_as_image() const
Definition result.cc:395
#define input
#define output
float distance(VecOp< float, D >, VecOp< float, D >) RET
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
bNodeSocket * node_find_socket(bNode &node, eNodeSocketInOut in_out, StringRef identifier)
Definition node.cc:2864
void node_register_type(bNodeType &ntype)
Definition node.cc:2748
void node_set_socket_availability(bNodeTree &ntree, bNodeSocket &sock, bool is_available)
Definition node.cc:5011
void node_type_storage(bNodeType &ntype, std::optional< StringRefNull > storagename, void(*freefunc)(bNode *node), void(*copyfunc)(bNodeTree *dest_ntree, bNode *dest_node, const bNode *src_node))
Definition node.cc:5603
void morphological_distance(Context &context, const Result &input, Result &output, const int distance)
void morphological_distance_feather(Context &context, const Result &input, Result &output, const int distance, const int falloff_type=PROP_SMOOTH)
void compute_dispatch_threads_at_least(GPUShader *shader, int2 threads_range, int2 local_size=int2(16))
Definition utilities.cc:170
void smaa(Context &context, const Result &input, Result &output, const float threshold=0.1f, const float local_contrast_adaptation_factor=2.0f, const int corner_rounding=25)
Definition smaa.cc:1646
void parallel_for(const int2 range, const Function &function)
T clamp(const T &a, const T &min, const T &max)
T sqrt(const T &a)
T min(const T &a, const T &b)
T ceil(const T &a)
T max(const T &a, const T &b)
T abs(const T &a)
static void cmp_node_dilate_declare(NodeDeclarationBuilder &b)
static void node_composit_init_dilateerode(bNodeTree *, bNode *node)
static void node_composit_buts_dilateerode(uiLayout *layout, bContext *, PointerRNA *ptr)
static void node_update(bNodeTree *ntree, bNode *node)
static NodeOperation * get_compositor_operation(Context &context, DNode node)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
VecBase< int32_t, 2 > int2
static void register_node_type_cmp_dilateerode()
void cmp_node_type_base(blender::bke::bNodeType *ntype, std::string idname, const std::optional< int16_t > legacy_type)
void node_free_standard_storage(bNode *node)
Definition node_util.cc:42
void node_copy_standard_storage(bNodeTree *, bNode *dest_node, const bNode *src_node)
Definition node_util.cc:54
int RNA_enum_get(PointerRNA *ptr, const char *name)
#define min(a, b)
Definition sort.cc:36
int16_t custom1
void * storage
Defines a node type.
Definition BKE_node.hh:226
std::string ui_description
Definition BKE_node.hh:232
NodeGetCompositorOperationFunction get_compositor_operation
Definition BKE_node.hh:336
void(* initfunc)(bNodeTree *ntree, bNode *node)
Definition BKE_node.hh:277
const char * enum_name_legacy
Definition BKE_node.hh:235
void(* draw_buttons)(uiLayout *, bContext *C, PointerRNA *ptr)
Definition BKE_node.hh:247
NodeDeclareFunction declare
Definition BKE_node.hh:355
void(* updatefunc)(bNodeTree *ntree, bNode *node)
Definition BKE_node.hh:269
void prop(PointerRNA *ptr, PropertyRNA *prop, int index, int value, eUI_Item_Flag flag, std::optional< blender::StringRef > name_opt, int icon, std::optional< blender::StringRef > placeholder=std::nullopt)
max
Definition text_draw.cc:251
static pxr::UsdShadeInput get_input(const pxr::UsdShadeShader &usd_shader, const pxr::TfToken &input_name)
PointerRNA * ptr
Definition wm_files.cc:4226