Blender  V2.93
deg_eval.cc
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2013 Blender Foundation.
17  * All rights reserved.
18  */
19 
26 #include "intern/eval/deg_eval.h"
27 
28 #include "PIL_time.h"
29 
30 #include "BLI_compiler_attrs.h"
31 #include "BLI_gsqueue.h"
32 #include "BLI_task.h"
33 #include "BLI_utildefines.h"
34 
35 #include "BKE_global.h"
36 
37 #include "DNA_node_types.h"
38 #include "DNA_object_types.h"
39 #include "DNA_scene_types.h"
40 
41 #include "DEG_depsgraph.h"
42 #include "DEG_depsgraph_query.h"
43 
44 #include "atomic_ops.h"
45 
46 #include "intern/depsgraph.h"
51 #include "intern/node/deg_node.h"
56 
57 namespace blender::deg {
58 
59 namespace {
60 
61 struct DepsgraphEvalState;
62 
63 void deg_task_run_func(TaskPool *pool, void *taskdata);
64 
65 template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
66 void schedule_children(DepsgraphEvalState *state,
67  OperationNode *node,
68  ScheduleFunction *schedule_function,
69  ScheduleFunctionArgs... schedule_function_args);
70 
/* Scheduling callback for the threaded stages: push the operation node as a
 * task into the given task pool (task data is the node pointer itself, not
 * owned by the pool). */
void schedule_node_to_pool(OperationNode *node, const int UNUSED(thread_id), TaskPool *pool)
{
  BLI_task_pool_push(pool, deg_task_run_func, node, false, nullptr);
}
75 
76 /* Denotes which part of dependency graph is being evaluated. */
/* Denotes which part of dependency graph is being evaluated. The graph is
 * processed in up to three consecutive passes, one per stage. */
enum class EvaluationStage {
  /* Stage 1: Only Copy-on-Write operations are to be evaluated, prior to anything else.
   * This allows other operations to access its dependencies when there is a dependency cycle
   * involved. */
  COPY_ON_WRITE,

  /* Stage 2: Threaded evaluation of all possible operations. */
  THREADED_EVALUATION,

  /* Stage 3 (optional): Workaround for areas which can not be evaluated in threads.
   *
   * For example, meta-balls, which are iterating over all bases and are requesting dupli-lists
   * to see whether there are meta-balls inside. */
  SINGLE_THREADED_WORKAROUND,
};
92 
93 struct DepsgraphEvalState {
95  bool do_stats;
96  EvaluationStage stage;
98 };
99 
100 void evaluate_node(const DepsgraphEvalState *state, OperationNode *operation_node)
101 {
102  ::Depsgraph *depsgraph = reinterpret_cast<::Depsgraph *>(state->graph);
103 
104  /* Sanity checks. */
105  BLI_assert(!operation_node->is_noop() && "NOOP nodes should not actually be scheduled");
106  /* Perform operation. */
107  if (state->do_stats) {
108  const double start_time = PIL_check_seconds_timer();
109  operation_node->evaluate(depsgraph);
110  operation_node->stats.current_time += PIL_check_seconds_timer() - start_time;
111  }
112  else {
113  operation_node->evaluate(depsgraph);
114  }
115 }
116 
117 void deg_task_run_func(TaskPool *pool, void *taskdata)
118 {
119  void *userdata_v = BLI_task_pool_user_data(pool);
120  DepsgraphEvalState *state = (DepsgraphEvalState *)userdata_v;
121 
122  /* Evaluate node. */
123  OperationNode *operation_node = reinterpret_cast<OperationNode *>(taskdata);
124  evaluate_node(state, operation_node);
125 
126  /* Schedule children. */
127  schedule_children(state, operation_node, schedule_node_to_pool, pool);
128 }
129 
130 bool check_operation_node_visible(OperationNode *op_node)
131 {
132  const ComponentNode *comp_node = op_node->owner;
133  /* Special exception, copy on write component is to be always evaluated,
134  * to keep copied "database" in a consistent state. */
135  if (comp_node->type == NodeType::COPY_ON_WRITE) {
136  return true;
137  }
138  return comp_node->affects_directly_visible;
139 }
140 
141 void calculate_pending_parents_for_node(OperationNode *node)
142 {
143  /* Update counters, applies for both visible and invisible IDs. */
144  node->num_links_pending = 0;
145  node->scheduled = false;
146  /* Invisible IDs requires no pending operations. */
147  if (!check_operation_node_visible(node)) {
148  return;
149  }
150  /* No need to bother with anything if node is not tagged for update. */
151  if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
152  return;
153  }
154  for (Relation *rel : node->inlinks) {
155  if (rel->from->type == NodeType::OPERATION && (rel->flag & RELATION_FLAG_CYCLIC) == 0) {
156  OperationNode *from = (OperationNode *)rel->from;
157  /* TODO(sergey): This is how old layer system was checking for the
158  * calculation, but how is it possible that visible object depends
159  * on an invisible? This is something what is prohibited after
160  * deg_graph_build_flush_layers(). */
161  if (!check_operation_node_visible(from)) {
162  continue;
163  }
164  /* No need to wait for operation which is up to date. */
165  if ((from->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
166  continue;
167  }
169  }
170  }
171 }
172 
/* Recompute the pending-parents counter and clear the scheduled flag on every
 * operation node of the graph. Runs once before evaluation starts. */
void calculate_pending_parents(Depsgraph *graph)
{
  for (OperationNode *node : graph->operations) {
    calculate_pending_parents_for_node(node);
  }
}
179 
180 void initialize_execution(DepsgraphEvalState *state, Depsgraph *graph)
181 {
182  const bool do_stats = state->do_stats;
183  calculate_pending_parents(graph);
184  /* Clear tags and other things which needs to be clear. */
185  for (OperationNode *node : graph->operations) {
186  if (do_stats) {
188  }
189  }
190 }
191 
192 bool is_metaball_object_operation(const OperationNode *operation_node)
193 {
194  const ComponentNode *component_node = operation_node->owner;
195  const IDNode *id_node = component_node->owner;
196  if (GS(id_node->id_cow->name) != ID_OB) {
197  return false;
198  }
199  const Object *object = reinterpret_cast<const Object *>(id_node->id_cow);
200  return object->type == OB_MBALL;
201 }
202 
203 bool need_evaluate_operation_at_stage(DepsgraphEvalState *state,
204  const OperationNode *operation_node)
205 {
206  const ComponentNode *component_node = operation_node->owner;
207  switch (state->stage) {
208  case EvaluationStage::COPY_ON_WRITE:
209  return (component_node->type == NodeType::COPY_ON_WRITE);
210 
211  case EvaluationStage::THREADED_EVALUATION:
212  /* Sanity check: copy-on-write node should be evaluated already. This will be indicated by
213  * scheduled flag (we assume that scheduled operations have been actually handled by previous
214  * stage). */
215  BLI_assert(operation_node->scheduled || component_node->type != NodeType::COPY_ON_WRITE);
216  if (is_metaball_object_operation(operation_node)) {
217  state->need_single_thread_pass = true;
218  return false;
219  }
220  return true;
221 
222  case EvaluationStage::SINGLE_THREADED_WORKAROUND:
223  return true;
224  }
225  BLI_assert(!"Unhandled evaluation stage, should never happen.");
226  return false;
227 }
228 
229 /* Schedule a node if it needs evaluation.
230  * dec_parents: Decrement pending parents count, true when child nodes are
231  * scheduled after a task has been completed.
232  */
233 template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
234 void schedule_node(DepsgraphEvalState *state,
235  OperationNode *node,
236  bool dec_parents,
237  ScheduleFunction *schedule_function,
238  ScheduleFunctionArgs... schedule_function_args)
239 {
240  /* No need to schedule nodes of invisible ID. */
241  if (!check_operation_node_visible(node)) {
242  return;
243  }
244  /* No need to schedule operations which are not tagged for update, they are
245  * considered to be up to date. */
246  if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
247  return;
248  }
249  /* TODO(sergey): This is not strictly speaking safe to read
250  * num_links_pending. */
251  if (dec_parents) {
254  }
255  /* Cal not schedule operation while its dependencies are not yet
256  * evaluated. */
257  if (node->num_links_pending != 0) {
258  return;
259  }
260  /* During the COW stage only schedule COW nodes. */
261  if (!need_evaluate_operation_at_stage(state, node)) {
262  return;
263  }
264  /* Actually schedule the node. */
265  bool is_scheduled = atomic_fetch_and_or_uint8((uint8_t *)&node->scheduled, (uint8_t) true);
266  if (!is_scheduled) {
267  if (node->is_noop()) {
268  /* skip NOOP node, schedule children right away */
269  schedule_children(state, node, schedule_function, schedule_function_args...);
270  }
271  else {
272  /* children are scheduled once this task is completed */
273  schedule_function(node, 0, schedule_function_args...);
274  }
275  }
276 }
277 
/* Seed a stage of the evaluation: try to schedule every operation node of the
 * graph whose dependencies are already satisfied (dec_parents is false, this
 * is not a completion-driven scheduling). */
template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
void schedule_graph(DepsgraphEvalState *state,
                    ScheduleFunction *schedule_function,
                    ScheduleFunctionArgs... schedule_function_args)
{
  for (OperationNode *node : state->graph->operations) {
    schedule_node(state, node, false, schedule_function, schedule_function_args...);
  }
}
287 
288 template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
289 void schedule_children(DepsgraphEvalState *state,
290  OperationNode *node,
291  ScheduleFunction *schedule_function,
292  ScheduleFunctionArgs... schedule_function_args)
293 {
294  for (Relation *rel : node->outlinks) {
295  OperationNode *child = (OperationNode *)rel->to;
296  BLI_assert(child->type == NodeType::OPERATION);
297  if (child->scheduled) {
298  /* Happens when having cyclic dependencies. */
299  continue;
300  }
301  schedule_node(state,
302  child,
303  (rel->flag & RELATION_FLAG_CYCLIC) == 0,
304  schedule_function,
305  schedule_function_args...);
306  }
307 }
308 
/* Scheduling callback for the single-threaded pass: instead of a task pool,
 * push the node pointer onto a plain FIFO queue. */
void schedule_node_to_queue(OperationNode *node,
                            const int /*thread_id*/,
                            GSQueue *evaluation_queue)
{
  BLI_gsqueue_push(evaluation_queue, &node);
}
315 
316 void evaluate_graph_single_threaded(DepsgraphEvalState *state)
317 {
318  GSQueue *evaluation_queue = BLI_gsqueue_new(sizeof(OperationNode *));
319  schedule_graph(state, schedule_node_to_queue, evaluation_queue);
320 
321  while (!BLI_gsqueue_is_empty(evaluation_queue)) {
322  OperationNode *operation_node;
323  BLI_gsqueue_pop(evaluation_queue, &operation_node);
324 
325  evaluate_node(state, operation_node);
326  schedule_children(state, operation_node, schedule_node_to_queue, evaluation_queue);
327  }
328 
329  BLI_gsqueue_free(evaluation_queue);
330 }
331 
332 void depsgraph_ensure_view_layer(Depsgraph *graph)
333 {
334  /* We update copy-on-write scene in the following cases:
335  * - It was not expanded yet.
336  * - It was tagged for update of CoW component.
337  * This allows us to have proper view layer pointer. */
338  Scene *scene_cow = graph->scene_cow;
339  if (deg_copy_on_write_is_expanded(&scene_cow->id) &&
340  (scene_cow->id.recalc & ID_RECALC_COPY_ON_WRITE) == 0) {
341  return;
342  }
343 
344  const IDNode *scene_id_node = graph->find_id_node(&graph->scene->id);
346 }
347 
348 } // namespace
349 
350 static TaskPool *deg_evaluate_task_pool_create(DepsgraphEvalState *state)
351 {
352  if (G.debug & G_DEBUG_DEPSGRAPH_NO_THREADS) {
354  }
355 
357 }
358 
367 {
368  /* Nothing to update, early out. */
369  if (graph->entry_tags.is_empty()) {
370  return;
371  }
372 
374 
375  graph->is_evaluating = true;
376  depsgraph_ensure_view_layer(graph);
377  /* Set up evaluation state. */
378  DepsgraphEvalState state;
379  state.graph = graph;
380  state.do_stats = graph->debug.do_time_debug();
381  state.need_single_thread_pass = false;
382  /* Prepare all nodes for evaluation. */
383  initialize_execution(&state, graph);
384 
385  /* Do actual evaluation now. */
386  /* First, process all Copy-On-Write nodes. */
387  state.stage = EvaluationStage::COPY_ON_WRITE;
389  schedule_graph(&state, schedule_node_to_pool, task_pool);
392 
393  /* After that, process all other nodes. */
394  state.stage = EvaluationStage::THREADED_EVALUATION;
396  schedule_graph(&state, schedule_node_to_pool, task_pool);
399 
400  if (state.need_single_thread_pass) {
401  state.stage = EvaluationStage::SINGLE_THREADED_WORKAROUND;
402  evaluate_graph_single_threaded(&state);
403  }
404 
405  /* Finalize statistics gathering. This is because we only gather single
406  * operation timing here, without aggregating anything to avoid any extra
407  * synchronization. */
408  if (state.do_stats) {
410  }
411  /* Clear any uncleared tags - just in case. */
413  graph->is_evaluating = false;
414 
416 }
417 
418 } // namespace blender::deg
@ G_DEBUG_DEPSGRAPH_NO_THREADS
Definition: BKE_global.h:145
#define BLI_assert(a)
Definition: BLI_assert.h:58
void BLI_gsqueue_free(GSQueue *queue)
Definition: gsqueue.c:107
void BLI_gsqueue_push(GSQueue *queue, const void *item)
Definition: gsqueue.c:122
GSQueue * BLI_gsqueue_new(const size_t elem_size)
Definition: gsqueue.c:83
void BLI_gsqueue_pop(GSQueue *queue, void *r_item)
Definition: gsqueue.c:162
bool BLI_gsqueue_is_empty(const GSQueue *queue)
Definition: gsqueue.c:193
void * BLI_task_pool_user_data(TaskPool *pool)
Definition: task_pool.cc:541
void BLI_task_pool_work_and_wait(TaskPool *pool)
Definition: task_pool.cc:496
TaskPool * BLI_task_pool_create_no_threads(void *userdata)
Definition: task_pool.cc:442
TaskPool * BLI_task_pool_create_suspended(void *userdata, TaskPriority priority)
Definition: task_pool.cc:433
void BLI_task_pool_free(TaskPool *pool)
Definition: task_pool.cc:456
@ TASK_PRIORITY_HIGH
Definition: BLI_task.h:67
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
Definition: task_pool.cc:475
#define UNUSED(x)
struct Depsgraph Depsgraph
Definition: DEG_depsgraph.h:51
@ ID_RECALC_COPY_ON_WRITE
Definition: DNA_ID.h:654
@ ID_OB
Definition: DNA_ID_enums.h:59
Object is a sort of wrapper for general info.
@ OB_MBALL
Platform independent time functions.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
OperationNode * node
StackEntry * from
const IDNode * id_node
bool do_stats
Definition: deg_eval.cc:95
bool need_single_thread_pass
Definition: deg_eval.cc:97
EvaluationStage stage
Definition: deg_eval.cc:96
Depsgraph * graph
Definition: deg_eval.cc:94
const Depsgraph * depsgraph
TaskPool * task_pool
#define GS(x)
Definition: iris.c:241
static ulong state[N]
void deg_eval_stats_aggregate(Depsgraph *graph)
ID * deg_update_copy_on_write_datablock(const Depsgraph *depsgraph, const IDNode *id_node)
void deg_graph_clear_tags(Depsgraph *graph)
static TaskPool * deg_evaluate_task_pool_create(DepsgraphEvalState *state)
Definition: deg_eval.cc:350
void deg_evaluate_on_refresh(Depsgraph *graph)
Definition: deg_eval.cc:366
bool deg_copy_on_write_is_expanded(const ID *id_cow)
unsigned char uint8_t
Definition: stdint.h:81
int recalc
Definition: DNA_ID.h:295
char name[66]
Definition: DNA_ID.h:283
IDNode * find_id_node(const ID *id) const
Definition: depsgraph.cc:112
OperationNodes operations
Definition: depsgraph.h:126
DepsgraphDebug debug
Definition: depsgraph.h:154
Set< OperationNode * > entry_tags
Definition: depsgraph.h:120
Relations inlinks
Definition: deg_node.h:175
Relations outlinks
Definition: deg_node.h:176
double PIL_check_seconds_timer(void)
Definition: time.c:80
#define G(x, y, z)