Blender V4.5
node_runtime.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "BKE_node.hh"
6#include "BKE_node_runtime.hh"
7
8#include "DNA_node_types.h"
9
10#include "BLI_function_ref.hh"
11#include "BLI_listbase.h"
12#include "BLI_stack.hh"
13#include "BLI_task.hh"
14
18
20
22{
23 BLI_assert(tree_cow.type == NTREE_GEOMETRY);
24 /* Rebuild geometry nodes lazy function graph. */
25 tree_cow.runtime->geometry_nodes_lazy_function_graph_info.reset();
27}
28
29static void update_node_vector(const bNodeTree &ntree)
30{
31 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
32 const Span<bNode *> nodes = tree_runtime.nodes_by_id;
33 tree_runtime.group_nodes.clear();
34 tree_runtime.has_undefined_nodes_or_sockets = false;
35 for (const int i : nodes.index_range()) {
36 bNode &node = *nodes[i];
37 node.runtime->index_in_tree = i;
38 node.runtime->owner_tree = const_cast<bNodeTree *>(&ntree);
39 tree_runtime.has_undefined_nodes_or_sockets |= node.is_undefined();
40 if (node.is_group()) {
41 tree_runtime.group_nodes.append(&node);
42 }
43 }
44}
45
46static void update_link_vector(const bNodeTree &ntree)
47{
48 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
49 tree_runtime.links.clear();
50 LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
51 /* Check that the link connects nodes within this tree. */
52 BLI_assert(tree_runtime.nodes_by_id.contains(link->fromnode));
53 BLI_assert(tree_runtime.nodes_by_id.contains(link->tonode));
54
55 tree_runtime.links.append(link);
56 }
57}
58
60{
61 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
62 tree_runtime.sockets.clear();
63 tree_runtime.input_sockets.clear();
64 tree_runtime.output_sockets.clear();
65 for (bNode *node : tree_runtime.nodes_by_id) {
66 bNodeRuntime &node_runtime = *node->runtime;
67 node_runtime.inputs.clear();
68 node_runtime.outputs.clear();
69 LISTBASE_FOREACH (bNodeSocket *, socket, &node->inputs) {
70 socket->runtime->index_in_node = node_runtime.inputs.append_and_get_index(socket);
71 socket->runtime->index_in_all_sockets = tree_runtime.sockets.append_and_get_index(socket);
72 socket->runtime->index_in_inout_sockets = tree_runtime.input_sockets.append_and_get_index(
73 socket);
74 socket->runtime->owner_node = node;
75 tree_runtime.has_undefined_nodes_or_sockets |= socket->typeinfo ==
77 }
78 LISTBASE_FOREACH (bNodeSocket *, socket, &node->outputs) {
79 socket->runtime->index_in_node = node_runtime.outputs.append_and_get_index(socket);
80 socket->runtime->index_in_all_sockets = tree_runtime.sockets.append_and_get_index(socket);
81 socket->runtime->index_in_inout_sockets = tree_runtime.output_sockets.append_and_get_index(
82 socket);
83 socket->runtime->owner_node = node;
84 tree_runtime.has_undefined_nodes_or_sockets |= socket->typeinfo ==
86 }
87 }
88}
89
90static void update_panels(const bNodeTree &ntree)
91{
92 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
93 for (bNode *node : tree_runtime.nodes_by_id) {
94 bNodeRuntime &node_runtime = *node->runtime;
95 node_runtime.panels.reinitialize(node->num_panel_states);
96 }
97}
98
99static void update_internal_link_inputs(const bNodeTree &ntree)
100{
101 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
102 for (bNode *node : tree_runtime.nodes_by_id) {
103 for (bNodeSocket *socket : node->runtime->outputs) {
104 socket->runtime->internal_link_input = nullptr;
105 }
106 for (bNodeLink &link : node->runtime->internal_links) {
107 link.tosock->runtime->internal_link_input = link.fromsock;
108 }
109 }
110}
111
113{
114 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
115 for (bNode *node : tree_runtime.nodes_by_id) {
116 for (bNodeSocket *socket : node->runtime->inputs) {
117 socket->runtime->directly_linked_links.clear();
118 socket->runtime->directly_linked_sockets.clear();
119 }
120 for (bNodeSocket *socket : node->runtime->outputs) {
121 socket->runtime->directly_linked_links.clear();
122 socket->runtime->directly_linked_sockets.clear();
123 }
124 node->runtime->has_available_linked_inputs = false;
125 node->runtime->has_available_linked_outputs = false;
126 }
127 for (bNodeLink *link : tree_runtime.links) {
128 link->fromsock->runtime->directly_linked_links.append(link);
129 link->fromsock->runtime->directly_linked_sockets.append(link->tosock);
130 link->tosock->runtime->directly_linked_links.append(link);
131 if (link->is_available()) {
132 link->fromnode->runtime->has_available_linked_outputs = true;
133 link->tonode->runtime->has_available_linked_inputs = true;
134 }
135 }
136 for (bNodeSocket *socket : tree_runtime.input_sockets) {
137 if (socket->flag & SOCK_MULTI_INPUT) {
138 std::sort(socket->runtime->directly_linked_links.begin(),
139 socket->runtime->directly_linked_links.end(),
140 [&](const bNodeLink *a, const bNodeLink *b) {
141 return a->multi_input_sort_id > b->multi_input_sort_id;
142 });
143 }
144 }
145 for (bNodeSocket *socket : tree_runtime.input_sockets) {
146 for (bNodeLink *link : socket->runtime->directly_linked_links) {
147 /* Do this after sorting the input links. */
148 socket->runtime->directly_linked_sockets.append(link->fromsock);
149 }
150 }
151}
152
154 bNodeSocket &input_socket,
155 bool only_follow_first_input_link,
156 Vector<bNodeSocket *, 16> &sockets_in_current_chain,
157 Vector<bNodeSocket *> &r_logical_origins,
158 Vector<bNodeSocket *> &r_skipped_origins)
159{
160 if (sockets_in_current_chain.contains(&input_socket)) {
161 /* Protect against reroute recursions. */
162 return;
163 }
164 sockets_in_current_chain.append(&input_socket);
165
166 Span<bNodeLink *> links_to_check = input_socket.runtime->directly_linked_links;
167 if (only_follow_first_input_link) {
168 links_to_check = links_to_check.take_front(1);
169 }
170 for (bNodeLink *link : links_to_check) {
171 if (link->is_muted()) {
172 continue;
173 }
174 if (!link->is_available()) {
175 continue;
176 }
177 bNodeSocket &origin_socket = *link->fromsock;
178 bNode &origin_node = *link->fromnode;
179 if (!origin_socket.is_available()) {
180 /* Non available sockets are ignored. */
181 continue;
182 }
183 if (origin_node.is_reroute()) {
184 bNodeSocket &reroute_input = *origin_node.runtime->inputs[0];
185 bNodeSocket &reroute_output = *origin_node.runtime->outputs[0];
186 r_skipped_origins.append(&reroute_input);
187 r_skipped_origins.append(&reroute_output);
189 reroute_input, false, sockets_in_current_chain, r_logical_origins, r_skipped_origins);
190 continue;
191 }
192 if (origin_node.is_muted()) {
193 if (bNodeSocket *mute_input = origin_socket.runtime->internal_link_input) {
194 r_skipped_origins.append(&origin_socket);
195 r_skipped_origins.append(mute_input);
197 *mute_input, true, sockets_in_current_chain, r_logical_origins, r_skipped_origins);
198 }
199 continue;
200 }
201 r_logical_origins.append(&origin_socket);
202 }
203
204 sockets_in_current_chain.pop_last();
205}
206
208{
209 /* Compute logically linked sockets to inputs. */
210 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
211 Span<bNode *> nodes = tree_runtime.nodes_by_id;
212 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
213 for (const int i : range) {
214 bNode &node = *nodes[i];
215 for (bNodeSocket *socket : node.runtime->inputs) {
216 Vector<bNodeSocket *, 16> sockets_in_current_chain;
217 socket->runtime->logically_linked_sockets.clear();
218 socket->runtime->logically_linked_skipped_sockets.clear();
219 find_logical_origins_for_socket_recursive(
220 *socket,
221 false,
222 sockets_in_current_chain,
223 socket->runtime->logically_linked_sockets,
224 socket->runtime->logically_linked_skipped_sockets);
225 }
226 }
227 });
228
229 /* Clear logically linked sockets to outputs. */
230 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
231 for (const int i : range) {
232 bNode &node = *nodes[i];
233 for (bNodeSocket *socket : node.runtime->outputs) {
234 socket->runtime->logically_linked_sockets.clear();
235 }
236 }
237 });
238
239 /* Compute logically linked sockets to outputs using the previously computed logically linked
240 * sockets to inputs. */
241 for (const bNode *node : nodes) {
242 for (bNodeSocket *input_socket : node->runtime->inputs) {
243 for (bNodeSocket *output_socket : input_socket->runtime->logically_linked_sockets) {
244 output_socket->runtime->logically_linked_sockets.append(input_socket);
245 }
246 }
247 }
248}
249
250static void update_nodes_by_type(const bNodeTree &ntree)
251{
252 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
253 tree_runtime.nodes_by_type.clear();
254 for (bNode *node : tree_runtime.nodes_by_id) {
255 tree_runtime.nodes_by_type.add(node->typeinfo, node);
256 }
257}
258
260{
261 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
262 Span<bNode *> nodes = tree_runtime.nodes_by_id;
263 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
264 for (bNode *node : nodes.slice(range)) {
265 node->runtime->inputs_by_identifier.clear();
266 node->runtime->outputs_by_identifier.clear();
267 for (bNodeSocket *socket : node->runtime->inputs) {
268 node->runtime->inputs_by_identifier.add_new(socket->identifier, socket);
269 }
270 for (bNodeSocket *socket : node->runtime->outputs) {
271 node->runtime->outputs_by_identifier.add_new(socket->identifier, socket);
272 }
273 }
274 });
275}
276
/* NOTE(review): the enum definition and the struct header were lost in the
 * extracted listing; both enumerators are used by the toposort functions
 * below, and the struct fields were visible — verify against upstream. */
enum class ToposortDirection {
  LeftToRight,
  RightToLeft,
};

/** Per-node bookkeeping for the iterative depth-first toposort. */
struct ToposortNodeState {
  bool is_done = false;
  bool is_in_stack = false;
};
286
288{
289 Vector<const bNode *> origin_nodes;
290 if (all_zone_output_node_types().contains(node.type_legacy)) {
291 const bNodeZoneType &zone_type = *zone_type_by_node_type(node.type_legacy);
292 /* Can't use #zone_type.get_corresponding_input because that expects the topology cache to be
293 * build already, but we are still building it here. */
294 for (const bNode *input_node :
295 ntree.runtime->nodes_by_type.lookup(bke::node_type_find(zone_type.input_idname.c_str())))
296 {
297 if (zone_type.get_corresponding_output_id(*input_node) == node.identifier) {
298 origin_nodes.append(input_node);
299 }
300 }
301 }
302 return origin_nodes;
303}
304
306{
307 Vector<const bNode *> target_nodes;
308 if (all_zone_input_node_types().contains(node.type_legacy)) {
309 const bNodeZoneType &zone_type = *zone_type_by_node_type(node.type_legacy);
310 if (const bNode *output_node = zone_type.get_corresponding_output(ntree, node)) {
311 target_nodes.append(output_node);
312 }
313 }
314 return target_nodes;
315}
316
317static void toposort_from_start_node(const bNodeTree &ntree,
318 const ToposortDirection direction,
319 bNode &start_node,
321 Vector<bNode *> &r_sorted_nodes,
322 bool &r_cycle_detected)
323{
324 struct Item {
325 bNode *node;
326 int socket_index = 0;
327 int link_index = 0;
328 int implicit_link_index = 0;
329 };
330
331 Stack<Item, 64> nodes_to_check;
332 nodes_to_check.push({&start_node});
333 node_states[start_node.index()].is_in_stack = true;
334 while (!nodes_to_check.is_empty()) {
335 Item &item = nodes_to_check.peek();
336 bNode &node = *item.node;
337 bool pushed_node = false;
338
339 auto handle_linked_node = [&](bNode &linked_node) {
340 ToposortNodeState &linked_node_state = node_states[linked_node.index()];
341 if (linked_node_state.is_done) {
342 /* The linked node has already been visited. */
343 return true;
344 }
345 if (linked_node_state.is_in_stack) {
346 r_cycle_detected = true;
347 }
348 else {
349 nodes_to_check.push({&linked_node});
350 linked_node_state.is_in_stack = true;
351 pushed_node = true;
352 }
353 return false;
354 };
355
356 const Span<bNodeSocket *> sockets = (direction == ToposortDirection::LeftToRight) ?
357 node.runtime->inputs :
358 node.runtime->outputs;
359 while (true) {
360 if (item.socket_index == sockets.size()) {
361 /* All sockets have already been visited. */
362 break;
363 }
364 bNodeSocket &socket = *sockets[item.socket_index];
365 const Span<bNodeLink *> linked_links = socket.runtime->directly_linked_links;
366 if (item.link_index == linked_links.size()) {
367 /* All links connected to this socket have already been visited. */
368 item.socket_index++;
369 item.link_index = 0;
370 continue;
371 }
372 bNodeLink &link = *linked_links[item.link_index];
373 if (!link.is_available()) {
374 /* Ignore unavailable links. */
375 item.link_index++;
376 continue;
377 }
378 bNodeSocket &linked_socket = *socket.runtime->directly_linked_sockets[item.link_index];
379 bNode &linked_node = *linked_socket.runtime->owner_node;
380 if (handle_linked_node(linked_node)) {
381 /* The linked node has already been visited. */
382 item.link_index++;
383 continue;
384 }
385 break;
386 }
387
388 if (!pushed_node) {
389 /* Some nodes are internally linked without an explicit `bNodeLink`. The toposort should
390 * still order them correctly and find cycles. */
391 const Vector<const bNode *> implicitly_linked_nodes =
392 (direction == ToposortDirection::LeftToRight) ? get_implicit_origin_nodes(ntree, node) :
393 get_implicit_target_nodes(ntree, node);
394 while (true) {
395 if (item.implicit_link_index == implicitly_linked_nodes.size()) {
396 /* All implicitly linked nodes have already been visited. */
397 break;
398 }
399 const bNode &linked_node = *implicitly_linked_nodes[item.implicit_link_index];
400 if (handle_linked_node(const_cast<bNode &>(linked_node))) {
401 /* The implicitly linked node has already been visited. */
402 item.implicit_link_index++;
403 continue;
404 }
405 break;
406 }
407 }
408
409 /* If no other element has been pushed, the current node can be pushed to the sorted list.
410 */
411 if (!pushed_node) {
412 ToposortNodeState &node_state = node_states[node.index()];
413 node_state.is_done = true;
414 node_state.is_in_stack = false;
415 r_sorted_nodes.append(&node);
416 nodes_to_check.pop();
417 }
418 }
419}
420
421static void update_toposort(const bNodeTree &ntree,
422 const ToposortDirection direction,
423 Vector<bNode *> &r_sorted_nodes,
424 bool &r_cycle_detected)
425{
426 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
427 r_sorted_nodes.clear();
428 r_sorted_nodes.reserve(tree_runtime.nodes_by_id.size());
429 r_cycle_detected = false;
430
431 Array<ToposortNodeState> node_states(tree_runtime.nodes_by_id.size());
432 for (bNode *node : tree_runtime.nodes_by_id) {
433 if (node_states[node->index()].is_done) {
434 /* Ignore nodes that are done already. */
435 continue;
436 }
437 if ((direction == ToposortDirection::LeftToRight) ?
438 node->runtime->has_available_linked_outputs :
439 node->runtime->has_available_linked_inputs)
440 {
441 /* Ignore non-start nodes. */
442 continue;
443 }
445 ntree, direction, *node, node_states, r_sorted_nodes, r_cycle_detected);
446 }
447
448 if (r_sorted_nodes.size() < tree_runtime.nodes_by_id.size()) {
449 r_cycle_detected = true;
450 for (bNode *node : tree_runtime.nodes_by_id) {
451 if (node_states[node->index()].is_done) {
452 /* Ignore nodes that are done already. */
453 continue;
454 }
455 /* Start toposort at this node which is somewhere in the middle of a loop. */
457 ntree, direction, *node, node_states, r_sorted_nodes, r_cycle_detected);
458 }
459 }
460
461 BLI_assert(tree_runtime.nodes_by_id.size() == r_sorted_nodes.size());
462}
463
464static void update_root_frames(const bNodeTree &ntree)
465{
466 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
467 Span<bNode *> nodes = tree_runtime.nodes_by_id;
468
469 tree_runtime.root_frames.clear();
470
471 for (bNode *node : nodes) {
472 if (!node->parent && node->is_frame()) {
473 tree_runtime.root_frames.append(node);
474 }
475 }
476}
477
479{
480 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
481 Span<bNode *> nodes = tree_runtime.nodes_by_id;
482
483 for (bNode *node : nodes) {
484 node->runtime->direct_children_in_frame.clear();
485 }
486
487 for (bNode *node : nodes) {
488 if (const bNode *frame = node->parent) {
489 frame->runtime->direct_children_in_frame.append(node);
490 }
491 }
492}
493
494static void update_group_output_node(const bNodeTree &ntree)
495{
496 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
497 const bke::bNodeType *node_type = bke::node_type_find("NodeGroupOutput");
498 const Span<bNode *> group_output_nodes = tree_runtime.nodes_by_type.lookup(node_type);
499 if (group_output_nodes.is_empty()) {
500 tree_runtime.group_output_node = nullptr;
501 }
502 else if (group_output_nodes.size() == 1) {
503 tree_runtime.group_output_node = group_output_nodes[0];
504 }
505 else {
506 tree_runtime.group_output_node = nullptr;
507 for (bNode *group_output : group_output_nodes) {
508 if (group_output->flag & NODE_DO_OUTPUT) {
509 tree_runtime.group_output_node = group_output;
510 break;
511 }
512 }
513 }
514}
515
517{
518 for (const bNode *node : ntree.runtime->toposort_left_to_right) {
519 bNodeRuntime &node_runtime = *node->runtime;
520 if (!node->is_reroute()) {
521 node_runtime.is_dangling_reroute = false;
522 continue;
523 }
524 const Span<const bNodeLink *> links = node_runtime.inputs[0]->runtime->directly_linked_links;
525 if (links.is_empty()) {
526 node_runtime.is_dangling_reroute = true;
527 continue;
528 }
529 BLI_assert(links.size() == 1);
530 const bNode &source_node = *links.first()->fromnode;
531 node_runtime.is_dangling_reroute = source_node.runtime->is_dangling_reroute;
532 }
533}
534
535static void ensure_topology_cache(const bNodeTree &ntree)
536{
537 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
538 tree_runtime.topology_cache_mutex.ensure([&]() {
539 update_node_vector(ntree);
540 update_link_vector(ntree);
542 update_panels(ntree);
547 tree_runtime.nodes_by_id.size() > 32,
548 [&]() { update_logically_linked_sockets(ntree); },
549 [&]() { update_sockets_by_identifier(ntree); },
550 [&]() {
551 update_toposort(ntree,
552 ToposortDirection::LeftToRight,
553 tree_runtime.toposort_left_to_right,
554 tree_runtime.has_available_link_cycle);
555 for (const int i : tree_runtime.toposort_left_to_right.index_range()) {
556 const bNode &node = *tree_runtime.toposort_left_to_right[i];
557 node.runtime->toposort_left_to_right_index = i;
558 }
559 },
560 [&]() {
561 bool dummy;
563 ntree, ToposortDirection::RightToLeft, tree_runtime.toposort_right_to_left, dummy);
564 for (const int i : tree_runtime.toposort_right_to_left.index_range()) {
565 const bNode &node = *tree_runtime.toposort_right_to_left[i];
566 node.runtime->toposort_right_to_left_index = i;
567 }
568 },
569 [&]() { update_root_frames(ntree); },
570 [&]() { update_direct_frames_childrens(ntree); });
573 tree_runtime.topology_cache_exists = true;
574 });
575}
576
577} // namespace blender::bke::node_tree_runtime
578
579namespace blender::bke {
580
582{
583 to_node_id_ = link.tonode->identifier;
584 input_socket_index_ = link.tosock->index();
585 input_link_index_ =
586 const_cast<const bNodeSocket *>(link.tosock)->directly_linked_links().first_index(&link);
587}
588
590{
591 return const_cast<bNodeLink *>(this->try_find(const_cast<const bNodeTree &>(ntree)));
592}
593
594const bNodeLink *NodeLinkKey::try_find(const bNodeTree &ntree) const
595{
596 const bNode *to_node = ntree.node_by_id(to_node_id_);
597 if (!to_node) {
598 return nullptr;
599 }
600 if (input_socket_index_ >= to_node->input_sockets().size()) {
601 return nullptr;
602 }
603 const bNodeSocket &input_socket = to_node->input_socket(input_socket_index_);
604 if (input_link_index_ >= input_socket.directly_linked_links().size()) {
605 return nullptr;
606 }
607 return input_socket.directly_linked_links()[input_link_index_];
608}
609
610} // namespace blender::bke
611
612void bNodeTree::ensure_topology_cache() const
613{
615}
616
617const bNestedNodeRef *bNodeTree::find_nested_node_ref(const int32_t nested_node_id) const
618{
619 for (const bNestedNodeRef &ref : this->nested_node_refs_span()) {
620 if (ref.id == nested_node_id) {
621 return &ref;
622 }
623 }
624 return nullptr;
625}
626
627const bNestedNodeRef *bNodeTree::nested_node_ref_from_node_id_path(
628 const blender::Span<int32_t> node_ids) const
629{
630 if (node_ids.is_empty()) {
631 return nullptr;
632 }
633 for (const bNestedNodeRef &ref : this->nested_node_refs_span()) {
634 blender::Vector<int> current_node_ids;
635 if (this->node_id_path_from_nested_node_ref(ref.id, current_node_ids)) {
636 if (current_node_ids.as_span() == node_ids) {
637 return &ref;
638 }
639 }
640 }
641 return nullptr;
642}
643
644bool bNodeTree::node_id_path_from_nested_node_ref(const int32_t nested_node_id,
645 blender::Vector<int> &r_node_ids) const
646{
647 const bNestedNodeRef *ref = this->find_nested_node_ref(nested_node_id);
648 if (ref == nullptr) {
649 return false;
650 }
651 const int32_t node_id = ref->path.node_id;
652 const bNode *node = this->node_by_id(node_id);
653 if (node == nullptr) {
654 return false;
655 }
656 r_node_ids.append(node_id);
657 if (!node->is_group()) {
658 return true;
659 }
660 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(node->id);
661 if (group == nullptr) {
662 return false;
663 }
664 return group->node_id_path_from_nested_node_ref(ref->path.id_in_node, r_node_ids);
665}
666
667const bNode *bNodeTree::find_nested_node(const int32_t nested_node_id,
668 const bNodeTree **r_tree) const
669{
670 const bNestedNodeRef *ref = this->find_nested_node_ref(nested_node_id);
671 if (ref == nullptr) {
672 return nullptr;
673 }
674 const int32_t node_id = ref->path.node_id;
675 const bNode *node = this->node_by_id(node_id);
676 if (node == nullptr) {
677 return nullptr;
678 }
679 if (!node->is_group()) {
680 if (r_tree) {
681 *r_tree = this;
682 }
683 return node;
684 }
685 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(node->id);
686 if (group == nullptr) {
687 return nullptr;
688 }
689 return group->find_nested_node(ref->path.id_in_node, r_tree);
690}
691
692const bNodeSocket &bNode::socket_by_decl(const blender::nodes::SocketDeclaration &decl) const
693{
694 return decl.in_out == SOCK_IN ? this->input_socket(decl.index) : this->output_socket(decl.index);
695}
696
697bNodeSocket &bNode::socket_by_decl(const blender::nodes::SocketDeclaration &decl)
698{
699 return decl.in_out == SOCK_IN ? this->input_socket(decl.index) : this->output_socket(decl.index);
700}
701
703{
704 tree.runtime->inferenced_input_socket_usage_mutex.ensure([&]() {
705 tree.runtime->inferenced_input_socket_usage =
707 });
708}
709
710bool bNodeSocket::affects_node_output() const
711{
712 BLI_assert(this->is_input());
714 const bNodeTree &tree = this->owner_tree();
716 return tree.runtime->inferenced_input_socket_usage[this->index_in_all_inputs()].is_used;
717}
718
719bool bNodeSocket::inferred_input_socket_visibility() const
720{
721 BLI_assert(this->is_input());
723 const bNode &node = this->owner_node();
724 if (node.typeinfo->ignore_inferred_input_socket_visibility) {
725 return true;
726 }
727 const bNodeTree &tree = this->owner_tree();
728
730 return tree.runtime->inferenced_input_socket_usage[this->index_in_all_inputs()].is_visible;
731}
#define BLI_assert(a)
Definition BLI_assert.h:46
#define LISTBASE_FOREACH(type, var, list)
@ NODE_DO_OUTPUT
@ NTREE_GEOMETRY
struct bNestedNodeRef bNestedNodeRef
@ SOCK_IN
@ SOCK_MULTI_INPUT
struct bNode bNode
struct bNodeTree bNodeTree
struct bNodeSocket bNodeSocket
int64_t append_and_get_index(const T &value)
void append(const T &value)
IndexRange index_range() const
void clear()
void ensure(FunctionRef< void()> compute_cache)
constexpr const T & first() const
Definition BLI_span.hh:315
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr Span take_front(int64_t n) const
Definition BLI_span.hh:193
constexpr bool is_empty() const
Definition BLI_span.hh:260
bool is_empty() const
Definition BLI_stack.hh:308
void push(const T &value)
Definition BLI_stack.hh:213
int64_t size() const
bool contains(const Key &key) const
int64_t size() const
bool contains(const T &value) const
void append(const T &value)
void reserve(const int64_t min_capacity)
Span< T > as_span() const
Array< bNodePanelRuntime > panels
Vector< bNodeSocket * > outputs
Vector< bNodeSocket * > inputs
Vector< bNodeSocket * > output_sockets
Vector< bNodeSocket * > sockets
std::atomic< bool > topology_cache_exists
MultiValueMap< const bNodeType *, bNode * > nodes_by_type
Vector< bNodeSocket * > input_sockets
virtual const int & get_corresponding_output_id(const bNode &input_bnode) const =0
const bNode * get_corresponding_output(const bNodeTree &tree, const bNode &input_bnode) const
KDTree_3d * tree
#define this
static void toposort_from_start_node(const bNodeTree &ntree, const ToposortDirection direction, bNode &start_node, MutableSpan< ToposortNodeState > node_states, Vector< bNode * > &r_sorted_nodes, bool &r_cycle_detected)
static void update_dangling_reroute_nodes(const bNodeTree &ntree)
static void update_nodes_by_type(const bNodeTree &ntree)
static void update_direct_frames_childrens(const bNodeTree &ntree)
static Vector< const bNode * > get_implicit_origin_nodes(const bNodeTree &ntree, bNode &node)
static void update_group_output_node(const bNodeTree &ntree)
static void update_link_vector(const bNodeTree &ntree)
static void update_sockets_by_identifier(const bNodeTree &ntree)
static void update_logically_linked_sockets(const bNodeTree &ntree)
static void ensure_topology_cache(const bNodeTree &ntree)
static void find_logical_origins_for_socket_recursive(bNodeSocket &input_socket, bool only_follow_first_input_link, Vector< bNodeSocket *, 16 > &sockets_in_current_chain, Vector< bNodeSocket * > &r_logical_origins, Vector< bNodeSocket * > &r_skipped_origins)
static void update_internal_link_inputs(const bNodeTree &ntree)
bool topology_cache_is_available(const bNodeTree &tree)
static void update_panels(const bNodeTree &ntree)
static void update_node_vector(const bNodeTree &ntree)
static void update_directly_linked_links_and_sockets(const bNodeTree &ntree)
static void update_root_frames(const bNodeTree &ntree)
static void update_toposort(const bNodeTree &ntree, const ToposortDirection direction, Vector< bNode * > &r_sorted_nodes, bool &r_cycle_detected)
void preprocess_geometry_node_tree_for_evaluation(bNodeTree &tree_cow)
static Vector< const bNode * > get_implicit_target_nodes(const bNodeTree &ntree, bNode &node)
static void update_socket_vectors_and_owner_node(const bNodeTree &ntree)
const bNodeZoneType * zone_type_by_node_type(const int node_type)
bNodeSocketType NodeSocketTypeUndefined
Definition node.cc:123
Span< int > all_zone_output_node_types()
Span< int > all_zone_input_node_types()
bNodeType * node_type_find(StringRef idname)
Definition node.cc:2711
Array< SocketUsage > infer_all_input_sockets_usage(const bNodeTree &tree)
const GeometryNodesLazyFunctionGraphInfo * ensure_geometry_nodes_lazy_function_graph(const bNodeTree &btree)
void parallel_invoke(Functions &&...functions)
Definition BLI_task.hh:221
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
static void ensure_inference_usage_cache(const bNodeTree &tree)
bNestedNodePath path
bNodeSocketRuntimeHandle * runtime
bNodeTreeRuntimeHandle * runtime
ListBase links
bNodeTypeHandle * typeinfo
ListBase inputs
int num_panel_states
struct ID * id
struct bNode * parent
int16_t type_legacy
bNodeRuntimeHandle * runtime
ListBase outputs
int32_t identifier
bNodeLink * try_find(bNodeTree &ntree) const
NodeLinkKey(const bNodeLink &link)
Defines a node type.
Definition BKE_node.hh:226
i
Definition text_draw.cc:230