Blender  V2.93
gpu_codegen.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2005 Blender Foundation.
17  * All rights reserved.
18  */
19 
26 #include "MEM_guardedalloc.h"
27 
28 #include "DNA_customdata_types.h"
29 #include "DNA_image_types.h"
30 
31 #include "BLI_blenlib.h"
32 #include "BLI_dynstr.h"
33 #include "BLI_ghash.h"
34 #include "BLI_hash_mm2a.h"
35 #include "BLI_link_utils.h"
36 #include "BLI_threads.h"
37 #include "BLI_utildefines.h"
38 
39 #include "PIL_time.h"
40 
41 #include "BKE_material.h"
42 
43 #include "GPU_capabilities.h"
44 #include "GPU_material.h"
45 #include "GPU_shader.h"
46 #include "GPU_uniform_buffer.h"
47 #include "GPU_vertex_format.h"
48 
49 #include "BLI_sys_types.h" /* for intptr_t support */
50 
51 #include "gpu_codegen.h"
52 #include "gpu_material_library.h"
53 #include "gpu_node_graph.h"
54 
55 #include <stdarg.h>
56 #include <string.h>
57 
60 
61 /* -------------------- GPUPass Cache ------------------ */
68 /* Only use one linklist that contains the GPUPasses grouped by hash. */
71 
72 static uint32_t gpu_pass_hash(const char *frag_gen, const char *defs, ListBase *attributes)
73 {
74  BLI_HashMurmur2A hm2a;
75  BLI_hash_mm2a_init(&hm2a, 0);
76  BLI_hash_mm2a_add(&hm2a, (uchar *)frag_gen, strlen(frag_gen));
77  LISTBASE_FOREACH (GPUMaterialAttribute *, attr, attributes) {
78  BLI_hash_mm2a_add(&hm2a, (uchar *)attr->name, strlen(attr->name));
79  }
80  if (defs) {
81  BLI_hash_mm2a_add(&hm2a, (uchar *)defs, strlen(defs));
82  }
83 
84  return BLI_hash_mm2a_end(&hm2a);
85 }
86 
87 /* Search by hash only. Return first pass with the same hash.
88  * There is hash collision if (pass->next && pass->next->hash == hash) */
/* NOTE(review): the signature line is missing from this capture; presumably
 * `static GPUPass *gpu_pass_cache_lookup(uint32_t hash)` — confirm against the
 * full source. Linearly scans the global `pass_cache` linked list. */
90 {
92  /* Could be optimized with a Lookup table. */
93  for (GPUPass *pass = pass_cache; pass; pass = pass->next) {
94  if (pass->hash == hash) {
96  return pass;
97  }
98  }
/* No pass with this hash is cached. */
100  return NULL;
101 }
102 
103 /* Check all possible passes with the same hash. */
/* NOTE(review): the first signature line is missing from this capture;
 * presumably `static GPUPass *gpu_pass_cache_resolve_collision(GPUPass *pass,`
 * where `pass` is the first cache entry with a matching hash. Returns the pass
 * whose shader sources match exactly, or NULL if none does. */
105  const char *vert,
106  const char *geom,
107  const char *frag,
108  const char *defs,
109  uint32_t hash)
110 {
112  /* Collision, need to `strcmp` the whole shader. */
113  for (; pass && (pass->hash == hash); pass = pass->next) {
/* A differing defines string or geometry code means a different pass: skip. */
114  if ((defs != NULL) && (!STREQ(pass->defines, defs))) { /* Pass */
115  }
116  else if ((geom != NULL) && (!STREQ(pass->geometrycode, geom))) { /* Pass */
117  }
/* NOTE(review): `(!STREQ(a, b) == 0)` is a convoluted spelling of
 * `STREQ(a, b)` — the branch is taken only when both the fragment and the
 * vertex code compare equal. Consider simplifying in the full source. */
118  else if ((!STREQ(pass->fragmentcode, frag) == 0) && (STREQ(pass->vertexcode, vert))) {
120  return pass;
121  }
122  }
/* Same hash but no identical sources: genuine hash collision. */
124  return NULL;
125 }
126 
127 /* GLSL code generation */
128 
129 static void codegen_convert_datatype(DynStr *ds, int from, int to, const char *tmp, int id)
130 {
131  char name[1024];
132 
133  BLI_snprintf(name, sizeof(name), "%s%d", tmp, id);
134 
135  if (from == to) {
136  BLI_dynstr_append(ds, name);
137  }
138  else if (to == GPU_FLOAT) {
139  if (from == GPU_VEC4) {
140  BLI_dynstr_appendf(ds, "dot(%s.rgb, vec3(0.2126, 0.7152, 0.0722))", name);
141  }
142  else if (from == GPU_VEC3) {
143  BLI_dynstr_appendf(ds, "(%s.r + %s.g + %s.b) / 3.0", name, name, name);
144  }
145  else if (from == GPU_VEC2) {
146  BLI_dynstr_appendf(ds, "%s.r", name);
147  }
148  }
149  else if (to == GPU_VEC2) {
150  if (from == GPU_VEC4) {
151  BLI_dynstr_appendf(ds, "vec2((%s.r + %s.g + %s.b) / 3.0, %s.a)", name, name, name, name);
152  }
153  else if (from == GPU_VEC3) {
154  BLI_dynstr_appendf(ds, "vec2((%s.r + %s.g + %s.b) / 3.0, 1.0)", name, name, name);
155  }
156  else if (from == GPU_FLOAT) {
157  BLI_dynstr_appendf(ds, "vec2(%s, 1.0)", name);
158  }
159  }
160  else if (to == GPU_VEC3) {
161  if (from == GPU_VEC4) {
162  BLI_dynstr_appendf(ds, "%s.rgb", name);
163  }
164  else if (from == GPU_VEC2) {
165  BLI_dynstr_appendf(ds, "vec3(%s.r, %s.r, %s.r)", name, name, name);
166  }
167  else if (from == GPU_FLOAT) {
168  BLI_dynstr_appendf(ds, "vec3(%s, %s, %s)", name, name, name);
169  }
170  }
171  else if (to == GPU_VEC4) {
172  if (from == GPU_VEC3) {
173  BLI_dynstr_appendf(ds, "vec4(%s, 1.0)", name);
174  }
175  else if (from == GPU_VEC2) {
176  BLI_dynstr_appendf(ds, "vec4(%s.r, %s.r, %s.r, %s.g)", name, name, name, name);
177  }
178  else if (from == GPU_FLOAT) {
179  BLI_dynstr_appendf(ds, "vec4(%s, %s, %s, 1.0)", name, name, name);
180  }
181  }
182  else if (to == GPU_CLOSURE) {
183  if (from == GPU_VEC4) {
184  BLI_dynstr_appendf(ds, "closure_emission(%s.rgb)", name);
185  }
186  else if (from == GPU_VEC3) {
187  BLI_dynstr_appendf(ds, "closure_emission(%s.rgb)", name);
188  }
189  else if (from == GPU_VEC2) {
190  BLI_dynstr_appendf(ds, "closure_emission(%s.rrr)", name);
191  }
192  else if (from == GPU_FLOAT) {
193  BLI_dynstr_appendf(ds, "closure_emission(vec3(%s, %s, %s))", name, name, name);
194  }
195  }
196  else {
197  BLI_dynstr_append(ds, name);
198  }
199 }
200 
/* Emit a GLSL constant of the given type into `ds`, printing `type` float
 * components from `data` (the loop below treats the eGPUType value as the
 * component count). */
201 static void codegen_print_datatype(DynStr *ds, const eGPUType type, float *data)
202 {
203  int i;
204 
/* NOTE(review): a line is missing from this capture here; it presumably
 * emits the GLSL constructor prefix, e.g. `vec3(` — confirm against the
 * full source (the loop below closes it with ")"). */
206 
207  for (i = 0; i < type; i++) {
208  BLI_dynstr_appendf(ds, "%.12f", data[i]);
209  if (i == type - 1) {
/* Last component: close the constructor. */
210  BLI_dynstr_append(ds, ")");
211  }
212  else {
213  BLI_dynstr_append(ds, ", ");
214  }
215  }
216 }
217 
218 static const char *gpu_builtin_name(eGPUBuiltin builtin)
219 {
220  if (builtin == GPU_VIEW_MATRIX) {
221  return "unfviewmat";
222  }
223  if (builtin == GPU_OBJECT_MATRIX) {
224  return "unfobmat";
225  }
226  if (builtin == GPU_INVERSE_VIEW_MATRIX) {
227  return "unfinvviewmat";
228  }
229  if (builtin == GPU_INVERSE_OBJECT_MATRIX) {
230  return "unfinvobmat";
231  }
232  if (builtin == GPU_LOC_TO_VIEW_MATRIX) {
233  return "unflocaltoviewmat";
234  }
235  if (builtin == GPU_INVERSE_LOC_TO_VIEW_MATRIX) {
236  return "unfinvlocaltoviewmat";
237  }
238  if (builtin == GPU_VIEW_POSITION) {
239  return "varposition";
240  }
241  if (builtin == GPU_WORLD_NORMAL) {
242  return "varwnormal";
243  }
244  if (builtin == GPU_VIEW_NORMAL) {
245  return "varnormal";
246  }
247  if (builtin == GPU_OBJECT_COLOR) {
248  return "unfobjectcolor";
249  }
250  if (builtin == GPU_AUTO_BUMPSCALE) {
251  return "unfobautobumpscale";
252  }
253  if (builtin == GPU_CAMERA_TEXCO_FACTORS) {
254  return "unfcameratexfactors";
255  }
256  if (builtin == GPU_PARTICLE_SCALAR_PROPS) {
257  return "unfparticlescalarprops";
258  }
259  if (builtin == GPU_PARTICLE_LOCATION) {
260  return "unfparticleco";
261  }
262  if (builtin == GPU_PARTICLE_VELOCITY) {
263  return "unfparticlevel";
264  }
265  if (builtin == GPU_PARTICLE_ANG_VELOCITY) {
266  return "unfparticleangvel";
267  }
268  if (builtin == GPU_OBJECT_INFO) {
269  return "unfobjectinfo";
270  }
271  if (builtin == GPU_BARYCENTRIC_TEXCO) {
272  return "unfbarycentrictex";
273  }
274  if (builtin == GPU_BARYCENTRIC_DIST) {
275  return "unfbarycentricdist";
276  }
277  return "";
278 }
279 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `static void codegen_set_unique_ids(GPUNodeGraph *graph)` — confirm against
 * the full source. Assigns a strictly increasing id to every node input and
 * output so generated GLSL names (unf%d, cons%d, tmp%d, ...) never clash. */
281 {
282  int id = 1;
283 
284  LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {
285  LISTBASE_FOREACH (GPUInput *, input, &node->inputs) {
286  /* set id for unique names of uniform variables */
287  input->id = id++;
288  }
289 
290  LISTBASE_FOREACH (GPUOutput *, output, &node->outputs) {
291  /* set id for unique names of tmp variables storing output */
292  output->id = id++;
293  }
294  }
295 }
296 
/* NOTE(review): the first and last signature lines are missing from this
 * capture; presumably `static int codegen_process_uniforms_functions(
 * GPUMaterial *material, DynStr *ds, GPUNodeGraph *graph)` — confirm against
 * the full source. Emits all uniform/sampler/varying declarations needed by
 * the node graph and returns the bitmask of builtins encountered. */
301  DynStr *ds,
303 {
304  const char *name;
305  int builtins = 0;
306  ListBase ubo_inputs = {NULL, NULL};
307 
308  /* Textures */
309  LISTBASE_FOREACH (GPUMaterialTexture *, tex, &graph->textures) {
310  if (tex->colorband) {
311  BLI_dynstr_appendf(ds, "uniform sampler1DArray %s;\n", tex->sampler_name);
312  }
313  else if (tex->tiled_mapping_name[0]) {
/* UDIM tiles: the image data plus a 1D lookup for the tile mapping. */
314  BLI_dynstr_appendf(ds, "uniform sampler2DArray %s;\n", tex->sampler_name);
315  BLI_dynstr_appendf(ds, "uniform sampler1DArray %s;\n", tex->tiled_mapping_name);
316  }
317  else {
318  BLI_dynstr_appendf(ds, "uniform sampler2D %s;\n", tex->sampler_name);
319  }
320  }
321 
322  /* Volume Grids */
323  LISTBASE_FOREACH (GPUMaterialVolumeGrid *, grid, &graph->volume_grids) {
324  BLI_dynstr_appendf(ds, "uniform sampler3D %s;\n", grid->sampler_name);
325  BLI_dynstr_appendf(ds, "uniform mat4 %s = mat4(0.0);\n", grid->transform_name);
326  }
327 
328  /* Print other uniforms */
329 
330  LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {
331  LISTBASE_FOREACH (GPUInput *, input, &node->inputs) {
332  if (input->source == GPU_SOURCE_BUILTIN) {
333  /* only define each builtin uniform/varying once */
334  if (!(builtins & input->builtin)) {
335  builtins |= input->builtin;
336  name = gpu_builtin_name(input->builtin);
337 
/* "unf*" names are uniforms, everything else is a stage input. */
338  if (BLI_str_startswith(name, "unf")) {
339  BLI_dynstr_appendf(ds, "uniform %s %s;\n", gpu_data_type_to_string(input->type), name);
340  }
341  else {
342  BLI_dynstr_appendf(ds, "in %s %s;\n", gpu_data_type_to_string(input->type), name);
343  }
344  }
345  }
346  else if (input->source == GPU_SOURCE_STRUCT) {
347  /* Add other struct here if needed. */
348  BLI_dynstr_appendf(ds, "Closure strct%d = CLOSURE_DEFAULT;\n", input->id);
349  }
350  else if (input->source == GPU_SOURCE_UNIFORM) {
351  if (!input->link) {
352  /* We handle the UBOuniforms separately. */
353  BLI_addtail(&ubo_inputs, BLI_genericNodeN(input));
354  }
355  }
356  else if (input->source == GPU_SOURCE_CONSTANT) {
/* NOTE(review): the opening `BLI_dynstr_appendf(` line is missing from this
 * capture — line below is its continuation; confirm against the full source. */
358  ds, "const %s cons%d = ", gpu_data_type_to_string(input->type), input->id);
359  codegen_print_datatype(ds, input->type, input->vec);
360  BLI_dynstr_append(ds, ";\n");
361  }
362  }
363  }
364 
365  /* Handle the UBO block separately. */
366  if ((material != NULL) && !BLI_listbase_is_empty(&ubo_inputs)) {
/* NOTE(review): a line is missing from this capture here; presumably the call
 * that creates/sorts the material UBO from `ubo_inputs` — confirm. */
368 
369  /* Inputs are sorted */
370  BLI_dynstr_appendf(ds, "\nlayout (std140) uniform %s {\n", GPU_UBO_BLOCK_NAME);
371 
372  LISTBASE_FOREACH (LinkData *, link, &ubo_inputs) {
373  GPUInput *input = (GPUInput *)(link->data);
374  BLI_dynstr_appendf(ds, " %s unf%d;\n", gpu_data_type_to_string(input->type), input->id);
375  }
376  BLI_dynstr_append(ds, "};\n");
377  BLI_freelistN(&ubo_inputs);
378  }
379 
380  /* Generate the uniform attribute UBO if necessary. */
381  if (!BLI_listbase_is_empty(&graph->uniform_attrs.list)) {
382  BLI_dynstr_append(ds, "\nstruct UniformAttributes {\n");
383  LISTBASE_FOREACH (GPUUniformAttr *, attr, &graph->uniform_attrs.list) {
384  BLI_dynstr_appendf(ds, " vec4 attr%d;\n", attr->id);
385  }
386  BLI_dynstr_append(ds, "};\n");
387  BLI_dynstr_appendf(ds, "layout (std140) uniform %s {\n", GPU_ATTRIBUTE_UBO_BLOCK_NAME);
388  BLI_dynstr_append(ds, " UniformAttributes uniform_attrs[DRW_RESOURCE_CHUNK_LEN];\n");
389  BLI_dynstr_append(ds, "};\n");
390  BLI_dynstr_append(ds, "#define GET_UNIFORM_ATTR(name) (uniform_attrs[resource_id].name)\n");
391  }
392 
393  BLI_dynstr_append(ds, "\n");
394 
395  return builtins;
396 }
397 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `static void codegen_declare_tmps(DynStr *ds, GPUNodeGraph *graph)` —
 * confirm against the full source. Declares one `tmp%d` local per node
 * output, typed per the output's eGPUType (Closure for closure sockets). */
399 {
400  LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {
401  /* declare temporary variables for node output storage */
402  LISTBASE_FOREACH (GPUOutput *, output, &node->outputs) {
403  if (output->type == GPU_CLOSURE) {
404  BLI_dynstr_appendf(ds, " Closure tmp%d;\n", output->id);
405  }
406  else {
407  BLI_dynstr_appendf(ds, " %s tmp%d;\n", gpu_data_type_to_string(output->type), output->id);
408  }
409  }
410  }
411  BLI_dynstr_append(ds, "\n");
412 }
413 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `static void codegen_call_functions(DynStr *ds, GPUNodeGraph *graph)` —
 * confirm against the full source. Emits one GLSL call per node, passing
 * each input by its generated name and each output as a `tmp%d` argument. */
415 {
416  LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {
417  BLI_dynstr_appendf(ds, " %s(", node->name);
418 
419  LISTBASE_FOREACH (GPUInput *, input, &node->inputs) {
420  if (input->source == GPU_SOURCE_TEX) {
421  BLI_dynstr_append(ds, input->texture->sampler_name);
422  }
423  else if (input->source == GPU_SOURCE_TEX_TILED_MAPPING) {
424  BLI_dynstr_append(ds, input->texture->tiled_mapping_name);
425  }
426  else if (input->source == GPU_SOURCE_VOLUME_GRID) {
427  BLI_dynstr_append(ds, input->volume_grid->sampler_name);
428  }
429  else if (input->source == GPU_SOURCE_VOLUME_GRID_TRANSFORM) {
430  BLI_dynstr_append(ds, input->volume_grid->transform_name);
431  }
432  else if (input->source == GPU_SOURCE_OUTPUT) {
/* NOTE(review): the `codegen_convert_datatype(` opening line is missing from
 * this capture — the line below is its argument list; confirm. Converts the
 * linked output's tmp variable to this input's type. */
434  ds, input->link->output->type, input->type, "tmp", input->link->output->id);
435  }
436  else if (input->source == GPU_SOURCE_BUILTIN) {
437  /* TODO(fclem): get rid of that. */
438  if (input->builtin == GPU_INVERSE_VIEW_MATRIX) {
439  BLI_dynstr_append(ds, "viewinv");
440  }
441  else if (input->builtin == GPU_VIEW_MATRIX) {
442  BLI_dynstr_append(ds, "viewmat");
443  }
444  else if (input->builtin == GPU_CAMERA_TEXCO_FACTORS) {
445  BLI_dynstr_append(ds, "camtexfac");
446  }
447  else if (input->builtin == GPU_LOC_TO_VIEW_MATRIX) {
448  BLI_dynstr_append(ds, "localtoviewmat");
449  }
450  else if (input->builtin == GPU_INVERSE_LOC_TO_VIEW_MATRIX) {
451  BLI_dynstr_append(ds, "invlocaltoviewmat");
452  }
453  else if (input->builtin == GPU_BARYCENTRIC_DIST) {
454  BLI_dynstr_append(ds, "barycentricDist");
455  }
456  else if (input->builtin == GPU_BARYCENTRIC_TEXCO) {
457  BLI_dynstr_append(ds, "barytexco");
458  }
459  else if (input->builtin == GPU_OBJECT_MATRIX) {
460  BLI_dynstr_append(ds, "objmat");
461  }
462  else if (input->builtin == GPU_OBJECT_INFO) {
463  BLI_dynstr_append(ds, "ObjectInfo");
464  }
465  else if (input->builtin == GPU_OBJECT_COLOR) {
466  BLI_dynstr_append(ds, "ObjectColor");
467  }
468  else if (input->builtin == GPU_INVERSE_OBJECT_MATRIX) {
469  BLI_dynstr_append(ds, "objinv");
470  }
471  else if (input->builtin == GPU_VIEW_POSITION) {
472  BLI_dynstr_append(ds, "viewposition");
473  }
474  else if (input->builtin == GPU_VIEW_NORMAL) {
475  BLI_dynstr_append(ds, "facingnormal");
476  }
477  else if (input->builtin == GPU_WORLD_NORMAL) {
478  BLI_dynstr_append(ds, "facingwnormal");
479  }
480  else {
/* Remaining builtins use their declared uniform/varying name directly. */
481  BLI_dynstr_append(ds, gpu_builtin_name(input->builtin));
482  }
483  }
484  else if (input->source == GPU_SOURCE_STRUCT) {
485  BLI_dynstr_appendf(ds, "strct%d", input->id);
486  }
487  else if (input->source == GPU_SOURCE_UNIFORM) {
488  BLI_dynstr_appendf(ds, "unf%d", input->id);
489  }
490  else if (input->source == GPU_SOURCE_CONSTANT) {
491  BLI_dynstr_appendf(ds, "cons%d", input->id);
492  }
493  else if (input->source == GPU_SOURCE_ATTR) {
494  codegen_convert_datatype(ds, input->attr->gputype, input->type, "var", input->attr->id);
495  }
496  else if (input->source == GPU_SOURCE_UNIFORM_ATTR) {
497  BLI_dynstr_appendf(ds, "GET_UNIFORM_ATTR(attr%d)", input->uniform_attr->id);
498  }
499 
500  BLI_dynstr_append(ds, ", ");
501  }
502 
/* Outputs come last in the argument list, after all inputs. */
503  LISTBASE_FOREACH (GPUOutput *, output, &node->outputs) {
504  BLI_dynstr_appendf(ds, "tmp%d", output->id);
505  if (output->next) {
506  BLI_dynstr_append(ds, ", ");
507  }
508  }
509 
510  BLI_dynstr_append(ds, ");\n");
511  }
512 }
513 
514 static void codegen_final_output(DynStr *ds, GPUOutput *finaloutput)
515 {
516  BLI_dynstr_appendf(ds, "return tmp%d;\n", finaloutput->id);
517 }
518 
/* NOTE(review): the first signature lines are missing from this capture;
 * presumably `static char *code_generate_fragment(GPUMaterial *material,
 * GPUNodeGraph *graph,` — confirm against the full source. Generates the
 * fragment-stage GLSL (declarations plus the nodetree_exec() body) and
 * returns a newly allocated string the caller must free. */
521  const char *interface_str)
522 {
523  DynStr *ds = BLI_dynstr_new();
524  char *code;
525  int builtins;
526 
/* NOTE(review): a line is missing from this capture here; presumably the
 * call assigning unique ids to node inputs/outputs — confirm. */
528 
529  /* Attributes, Shader stage interface. */
530  if (interface_str) {
531  BLI_dynstr_appendf(ds, "in codegenInterface {%s};\n\n", interface_str);
532  }
533 
/* NOTE(review): a line is missing here; presumably
 * `builtins = codegen_process_uniforms_functions(material, ds, graph);`. */
535 
536  if (builtins & (GPU_OBJECT_INFO | GPU_OBJECT_COLOR)) {
/* NOTE(review): the body line of this branch is missing from this capture. */
538  }
539 
540  if (builtins & GPU_BARYCENTRIC_TEXCO) {
/* NOTE(review): the body line of this branch is missing from this capture. */
542  }
543 
544  BLI_dynstr_append(ds, "Closure nodetree_exec(void)\n{\n");
545 
546  if (builtins & GPU_BARYCENTRIC_TEXCO) {
547  BLI_dynstr_append(ds, " vec2 barytexco = barycentric_resolve(barycentricTexCo);\n");
548  }
549  /* TODO(fclem): get rid of that. */
550  if (builtins & GPU_VIEW_MATRIX) {
551  BLI_dynstr_append(ds, " #define viewmat ViewMatrix\n");
552  }
553  if (builtins & GPU_CAMERA_TEXCO_FACTORS) {
554  BLI_dynstr_append(ds, " #define camtexfac CameraTexCoFactors\n");
555  }
556  if (builtins & GPU_OBJECT_MATRIX) {
557  BLI_dynstr_append(ds, " #define objmat ModelMatrix\n");
558  }
559  if (builtins & GPU_INVERSE_OBJECT_MATRIX) {
560  BLI_dynstr_append(ds, " #define objinv ModelMatrixInverse\n");
561  }
562  if (builtins & GPU_INVERSE_VIEW_MATRIX) {
563  BLI_dynstr_append(ds, " #define viewinv ViewMatrixInverse\n");
564  }
565  if (builtins & GPU_LOC_TO_VIEW_MATRIX) {
566  BLI_dynstr_append(ds, " #define localtoviewmat (ViewMatrix * ModelMatrix)\n");
567  }
568  if (builtins & GPU_INVERSE_LOC_TO_VIEW_MATRIX) {
/* NOTE(review): the `BLI_dynstr_append(ds,` opening line is missing from this
 * capture — the line below is its string argument. */
570  " #define invlocaltoviewmat (ModelMatrixInverse * ViewMatrixInverse)\n");
571  }
572  if (builtins & GPU_VIEW_NORMAL) {
573  BLI_dynstr_append(ds, "#ifdef HAIR_SHADER\n");
574  BLI_dynstr_append(ds, " vec3 n;\n");
575  BLI_dynstr_append(ds, " world_normals_get(n);\n");
576  BLI_dynstr_append(ds, " vec3 facingnormal = transform_direction(ViewMatrix, n);\n");
577  BLI_dynstr_append(ds, "#else\n");
578  BLI_dynstr_append(ds, " vec3 facingnormal = gl_FrontFacing ? viewNormal: -viewNormal;\n");
579  BLI_dynstr_append(ds, "#endif\n");
580  }
581  if (builtins & GPU_WORLD_NORMAL) {
582  BLI_dynstr_append(ds, " vec3 facingwnormal;\n");
583  if (builtins & GPU_VIEW_NORMAL) {
/* Reuse `n` computed above for hair; otherwise fetch world normals. */
584  BLI_dynstr_append(ds, "#ifdef HAIR_SHADER\n");
585  BLI_dynstr_append(ds, " facingwnormal = n;\n");
586  BLI_dynstr_append(ds, "#else\n");
587  BLI_dynstr_append(ds, " world_normals_get(facingwnormal);\n");
588  BLI_dynstr_append(ds, "#endif\n");
589  }
590  else {
591  BLI_dynstr_append(ds, " world_normals_get(facingwnormal);\n");
592  }
593  }
594  if (builtins & GPU_VIEW_POSITION) {
595  BLI_dynstr_append(ds, " #define viewposition viewPosition\n");
596  }
597 
/* NOTE(review): lines missing from this capture here; presumably the calls
 * declaring tmp variables and emitting the node function calls — confirm. */
600 
/* AOV output dispatch: each requested AOV hash returns its own output. */
601  BLI_dynstr_append(ds, " #ifndef VOLUMETRICS\n");
602  BLI_dynstr_append(ds, " if (renderPassAOV) {\n");
603  BLI_dynstr_append(ds, " switch (render_pass_aov_hash()) {\n");
604  GSet *aovhashes_added = BLI_gset_int_new(__func__);
605  LISTBASE_FOREACH (GPUNodeGraphOutputLink *, aovlink, &graph->outlink_aovs) {
606  void *aov_key = POINTER_FROM_INT(aovlink->hash);
/* Skip duplicate AOV hashes to avoid duplicate case labels. */
607  if (BLI_gset_haskey(aovhashes_added, aov_key)) {
608  continue;
609  }
610  BLI_dynstr_appendf(ds, " case %d: {\n ", aovlink->hash);
611  codegen_final_output(ds, aovlink->outlink->output);
612  BLI_dynstr_append(ds, " }\n");
613  BLI_gset_add(aovhashes_added, aov_key);
614  }
615  BLI_gset_free(aovhashes_added, NULL);
616  BLI_dynstr_append(ds, " default: {\n");
617  BLI_dynstr_append(ds, " Closure no_aov = CLOSURE_DEFAULT;\n");
618  BLI_dynstr_append(ds, " no_aov.holdout = 1.0;\n");
619  BLI_dynstr_append(ds, " return no_aov;\n");
620  BLI_dynstr_append(ds, " }\n");
621  BLI_dynstr_append(ds, " }\n");
622  BLI_dynstr_append(ds, " } else {\n");
623  BLI_dynstr_append(ds, " #else /* VOLUMETRICS */\n");
624  BLI_dynstr_append(ds, " {\n");
625  BLI_dynstr_append(ds, " #endif /* VOLUMETRICS */\n ");
626  codegen_final_output(ds, graph->outlink->output);
627  BLI_dynstr_append(ds, " }\n");
628 
629  BLI_dynstr_append(ds, "}\n");
630 
631  /* create shader */
632  code = BLI_dynstr_get_cstring(ds);
633  BLI_dynstr_free(ds);
634 
635 #if 0
636  if (G.debug & G_DEBUG) {
637  printf("%s\n", code);
638  }
639 #endif
640 
641  return code;
642 }
643 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `static const char *attr_prefix_get(CustomDataType type)` — confirm against
 * the full source. Maps a custom-data type to the short prefix used when
 * naming vertex attributes in generated GLSL. */
645 {
646  switch (type) {
647  case CD_ORCO:
648  return "orco";
649  case CD_MTFACE:
650  return "u";
651  case CD_TANGENT:
652  return "t";
/* Byte colors and float colors intentionally share the "c" prefix. */
653  case CD_MCOL:
654  return "c";
655  case CD_PROP_COLOR:
656  return "c";
657  case CD_AUTO_FROM_NAME:
658  return "a";
659  default:
660  BLI_assert(false && "GPUVertAttr Prefix type not found : This should not happen!");
661  return "";
662  }
663 }
664 
665 /* We talk about shader stage interface, not to be mistaken with GPUShaderInterface. */
666 static char *code_generate_interface(GPUNodeGraph *graph, int builtins)
667 {
668  if (BLI_listbase_is_empty(&graph->attributes) &&
669  (builtins & (GPU_BARYCENTRIC_DIST | GPU_BARYCENTRIC_TEXCO)) == 0) {
670  return NULL;
671  }
672 
673  DynStr *ds = BLI_dynstr_new();
674 
675  BLI_dynstr_append(ds, "\n");
676 
677  LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &graph->attributes) {
678  BLI_dynstr_appendf(ds, "%s var%d;\n", gpu_data_type_to_string(attr->gputype), attr->id);
679  }
680  if (builtins & GPU_BARYCENTRIC_TEXCO) {
681  BLI_dynstr_append(ds, "vec2 barycentricTexCo;\n");
682  }
683  if (builtins & GPU_BARYCENTRIC_DIST) {
684  BLI_dynstr_append(ds, "vec3 barycentricDist;\n");
685  }
686 
687  char *code = BLI_dynstr_get_cstring(ds);
688 
689  BLI_dynstr_free(ds);
690 
691  return code;
692 }
693 
/* NOTE(review): the first signature line is missing from this capture;
 * presumably `static char *code_generate_vertex(GPUNodeGraph *graph,` —
 * confirm against the full source. Generates the vertex-stage GLSL:
 * attribute declarations, the interface block, and the pass_attr() body.
 * Returns a newly allocated string the caller must free. */
695  const char *interface_str,
696  const char *vert_code,
697  int builtins)
698 {
699  DynStr *ds = BLI_dynstr_new();
700 
/* NOTE(review): a line is missing from this capture here — confirm against
 * the full source. */
702 
703  /* Inputs */
704  LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &graph->attributes) {
705  const char *type_str = gpu_data_type_to_string(attr->gputype);
706  const char *prefix = attr_prefix_get(attr->type);
707  /* XXX FIXME : see notes in mesh_render_data_create() */
708  /* NOTE : Replicate changes to mesh_render_data_create() in draw_cache_impl_mesh.c */
709  if (attr->type == CD_ORCO) {
710  /* OPTI : orco is computed from local positions, but only if no modifier is present. */
/* NOTE(review): a line is missing from this capture here; presumably the
 * OrcoTexCoFactors uniform declaration — confirm. */
712  BLI_dynstr_append(ds, "DEFINE_ATTR(vec4, orco);\n");
713  }
714  else if (attr->name[0] == '\0') {
/* Unnamed attribute: use the bare prefix as the attribute name. */
715  BLI_dynstr_appendf(ds, "DEFINE_ATTR(%s, %s);\n", type_str, prefix);
716  BLI_dynstr_appendf(ds, "#define att%d %s\n", attr->id, prefix);
717  }
718  else {
719  char attr_safe_name[GPU_MAX_SAFE_ATTR_NAME];
720  GPU_vertformat_safe_attr_name(attr->name, attr_safe_name, GPU_MAX_SAFE_ATTR_NAME);
721  BLI_dynstr_appendf(ds, "DEFINE_ATTR(%s, %s%s);\n", type_str, prefix, attr_safe_name);
722  BLI_dynstr_appendf(ds, "#define att%d %s%s\n", attr->id, prefix, attr_safe_name);
723  }
724  }
725 
726  /* Outputs interface */
727  if (interface_str) {
728  BLI_dynstr_appendf(ds, "out codegenInterface {%s};\n\n", interface_str);
729  }
730 
731  /* Prototype. Needed for hair functions. */
732  BLI_dynstr_append(ds, "void pass_attr(vec3 position, mat3 normalmat, mat4 modelmatinv);\n");
733  BLI_dynstr_append(ds, "#define USE_ATTR\n\n");
734 
735  BLI_dynstr_append(ds, vert_code);
736  BLI_dynstr_append(ds, "\n\n");
737 
738  BLI_dynstr_append(ds, "void pass_attr(vec3 position, mat3 normalmat, mat4 modelmatinv) {\n");
739 
740  /* GPU_BARYCENTRIC_TEXCO cannot be computed based on gl_VertexID
741  * for MESH_SHADER because of indexed drawing. In this case a
742  * geometry shader is needed. */
743  if (builtins & GPU_BARYCENTRIC_TEXCO) {
744  BLI_dynstr_appendf(ds, " barycentricTexCo = barycentric_get();\n");
745  }
746  if (builtins & GPU_BARYCENTRIC_DIST) {
747  BLI_dynstr_appendf(ds, " barycentricDist = vec3(0);\n");
748  }
749 
750  LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &graph->attributes) {
751  if (attr->type == CD_TANGENT) { /* silly exception */
752  BLI_dynstr_appendf(ds, " var%d = tangent_get(att%d, normalmat);\n", attr->id, attr->id);
753  }
754  else if (attr->type == CD_ORCO) {
/* NOTE(review): the `BLI_dynstr_appendf(` opening line is missing from this
 * capture — the line below is its argument list; confirm. */
756  ds, " var%d = orco_get(position, modelmatinv, OrcoTexCoFactors, orco);\n", attr->id);
757  }
758  else {
759  const char *type_str = gpu_data_type_to_string(attr->gputype);
760  BLI_dynstr_appendf(ds, " var%d = GET_ATTR(%s, att%d);\n", attr->id, type_str, attr->id);
761  }
762  }
763 
764  BLI_dynstr_append(ds, "}\n");
765 
766  char *code = BLI_dynstr_get_cstring(ds);
767 
768  BLI_dynstr_free(ds);
769 
770 #if 0
771  if (G.debug & G_DEBUG) {
772  printf("%s\n", code);
773  }
774 #endif
775 
776  return code;
777 }
778 
/* NOTE(review): the first signature line is missing from this capture;
 * presumably `static char *code_generate_geometry(GPUNodeGraph *graph,` —
 * confirm against the full source. Wraps the caller-supplied geometry shader
 * with the interface blocks and a pass_attr() macro that forwards varyings.
 * Returns NULL when no geometry code is supplied. */
780  const char *interface_str,
781  const char *geom_code,
782  int builtins)
783 {
784  if (!geom_code) {
785  return NULL;
786  }
787 
788  DynStr *ds = BLI_dynstr_new();
789 
790  /* Attributes, Shader interface; */
791  if (interface_str) {
792  BLI_dynstr_appendf(ds, "in codegenInterface {%s} dataAttrIn[];\n\n", interface_str);
793  BLI_dynstr_appendf(ds, "out codegenInterface {%s} dataAttrOut;\n\n", interface_str);
794  }
795 
/* NOTE(review): a line is missing from this capture here — confirm against
 * the full source. */
797 
798  if (builtins & GPU_BARYCENTRIC_DIST) {
799  /* geom_code should do something with this, but may not. */
800  BLI_dynstr_append(ds, "#define DO_BARYCENTRIC_DISTANCES\n");
801  }
802 
803  /* Generate varying assignments. */
804  BLI_dynstr_append(ds, "#define USE_ATTR\n");
805  /* This needs to be a define. Some drivers don't like variable vert index inside dataAttrIn. */
806  BLI_dynstr_append(ds, "#define pass_attr(vert) {\\\n");
807 
808  if (builtins & GPU_BARYCENTRIC_TEXCO) {
809  BLI_dynstr_append(ds, "dataAttrOut.barycentricTexCo = calc_barycentric_co(vert);\\\n");
810  }
811 
812  LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &graph->attributes) {
813  /* TODO let shader choose what to do depending on what the attribute is. */
814  BLI_dynstr_appendf(ds, "dataAttrOut.var%d = dataAttrIn[vert].var%d;\\\n", attr->id, attr->id);
815  }
816  BLI_dynstr_append(ds, "}\n\n");
817 
818  BLI_dynstr_append(ds, geom_code);
819 
820  char *code = BLI_dynstr_get_cstring(ds);
821  BLI_dynstr_free(ds);
822 
823  return code;
824 }
825 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `GPUShader *GPU_pass_shader_get(GPUPass *pass)` — confirm against the full
 * source. Accessor for the pass's compiled shader; NULL when not yet
 * compiled or when compilation failed (see gpu_pass_is_valid). */
827 {
828  return pass->shader;
829 }
830 
831 /* Pass create/free */
832 
833 static bool gpu_pass_is_valid(GPUPass *pass)
834 {
835  /* Shader is not null if compilation is successful. */
836  return (pass->compiled == false || pass->shader != NULL);
837 }
838 
/* NOTE(review): the first signature lines are missing from this capture;
 * presumably `GPUPass *GPU_generate_pass(GPUMaterial *material,
 * GPUNodeGraph *graph,` — confirm against the full source. Generates the
 * shader sources for a node graph and returns a (possibly cached, refcounted)
 * GPUPass; returns NULL when a cached pass is known to fail compilation. */
841  const char *vert_code,
842  const char *geom_code,
843  const char *frag_lib,
844  const char *defines)
845 {
846  /* Prune the unused nodes and extract attributes before compiling so the
847  * generated VBOs are ready to accept the future shader. */
/* NOTE(review): lines missing from this capture here; presumably the graph
 * pruning / uniform-attribute finalization calls — confirm. */
850 
/* Gather the builtins used by the remaining nodes. */
851  int builtins = 0;
852  LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {
853  LISTBASE_FOREACH (GPUInput *, input, &node->inputs) {
854  if (input->source == GPU_SOURCE_BUILTIN) {
855  builtins |= input->builtin;
856  }
857  }
858  }
859  /* generate code */
860  char *interface_str = code_generate_interface(graph, builtins);
861  char *fragmentgen = code_generate_fragment(material, graph, interface_str);
862 
863  /* Cache lookup: Reuse shaders already compiled */
864  uint32_t hash = gpu_pass_hash(fragmentgen, defines, &graph->attributes);
865  GPUPass *pass_hash = gpu_pass_cache_lookup(hash);
866 
867  if (pass_hash && (pass_hash->next == NULL || pass_hash->next->hash != hash)) {
868  /* No collision, just return the pass. */
869  MEM_SAFE_FREE(interface_str);
870  MEM_freeN(fragmentgen);
871  if (!gpu_pass_is_valid(pass_hash)) {
872  /* Shader has already been created but failed to compile. */
873  return NULL;
874  }
875  pass_hash->refcount += 1;
876  return pass_hash;
877  }
878 
879  /* Either the shader is not compiled or there is a hash collision...
880  * continue generating the shader strings. */
881  GSet *used_libraries = gpu_material_used_libraries(material);
882  char *tmp = gpu_material_library_generate_code(used_libraries, frag_lib);
883 
884  char *geometrycode = code_generate_geometry(graph, interface_str, geom_code, builtins);
885  char *vertexcode = code_generate_vertex(graph, interface_str, vert_code, builtins);
886  char *fragmentcode = BLI_strdupcat(tmp, fragmentgen);
887 
888  MEM_SAFE_FREE(interface_str);
889  MEM_freeN(fragmentgen);
890  MEM_freeN(tmp);
891 
892  GPUPass *pass = NULL;
893  if (pass_hash) {
894  /* Cache lookup: Reuse shaders already compiled */
/* NOTE(review): the `pass = gpu_pass_cache_resolve_collision(` opening line
 * is missing from this capture — the line below is its argument list. */
896  pass_hash, vertexcode, geometrycode, fragmentcode, defines, hash);
897  }
898 
899  if (pass) {
900  MEM_SAFE_FREE(vertexcode);
901  MEM_SAFE_FREE(fragmentcode);
902  MEM_SAFE_FREE(geometrycode);
903 
904  /* Cache hit. Reuse the same GPUPass and GPUShader. */
905  if (!gpu_pass_is_valid(pass)) {
906  /* Shader has already been created but failed to compile. */
907  return NULL;
908  }
909 
910  pass->refcount += 1;
911  }
912  else {
913  /* We still create a pass even if shader compilation
914  * fails to avoid trying to compile again and again. */
915  pass = MEM_callocN(sizeof(GPUPass), "GPUPass");
916  pass->shader = NULL;
917  pass->refcount = 1;
918  pass->hash = hash;
/* The new pass takes ownership of the generated source strings. */
919  pass->vertexcode = vertexcode;
920  pass->fragmentcode = fragmentcode;
921  pass->geometrycode = geometrycode;
922  pass->defines = (defines) ? BLI_strdup(defines) : NULL;
923  pass->compiled = false;
924 
/* NOTE(review): a line is missing from this capture here; presumably the
 * pass-cache lock — confirm. */
926  if (pass_hash != NULL) {
927  /* Add after the first pass having the same hash. */
928  pass->next = pass_hash->next;
929  pass_hash->next = pass;
930  }
931  else {
932  /* No other pass have same hash, just prepend to the list. */
/* NOTE(review): the prepend line is missing from this capture — confirm. */
934  }
/* NOTE(review): the matching unlock line is missing from this capture. */
936  }
937 
938  return pass;
939 }
940 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `static int count_active_texture_sampler(GPUShader *shader, char *source)` —
 * confirm against the full source. Scans GLSL source text for `uniform
 * [i]sampler*` declarations and counts how many correspond to an active
 * uniform of `shader`, deduplicated by uniform location. */
942 {
943  char *code = source;
944 
945  /* Remember this is per stage. */
946  GSet *sampler_ids = BLI_gset_int_new(__func__);
947  int num_samplers = 0;
948 
949  while ((code = strstr(code, "uniform "))) {
950  /* Move past "uniform". */
951  code += 7;
952  /* Skip following spaces. */
953  while (*code == ' ') {
954  code++;
955  }
956  /* Skip "i" from potential isamplers. */
957  if (*code == 'i') {
958  code++;
959  }
960  /* Check for a "sampler*" type (the leading 'i' of "isampler" was skipped above). */
961  if (BLI_str_startswith(code, "sampler")) {
962  /* Move past "sampler". */
963  code += 7;
964  /* Skip sampler type suffix. */
965  while (!ELEM(*code, ' ', '\0')) {
966  code++;
967  }
968  /* Skip following spaces. */
969  while (*code == ' ') {
970  code++;
971  }
972 
973  if (*code != '\0') {
974  char sampler_name[64];
975  code = gpu_str_skip_token(code, sampler_name, sizeof(sampler_name));
976  int id = GPU_shader_get_uniform(shader, sampler_name);
977 
/* -1 means the uniform was optimized out / inactive: do not count it. */
978  if (id == -1) {
979  continue;
980  }
981  /* Catch duplicates. */
982  if (BLI_gset_add(sampler_ids, POINTER_FROM_INT(id))) {
983  num_samplers++;
984  }
985  }
986  }
987  }
988 
989  BLI_gset_free(sampler_ids, NULL);
990 
991  return num_samplers;
992 }
993 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `static bool gpu_pass_shader_validate(GPUPass *pass, GPUShader *shader)` —
 * confirm against the full source. Returns false when the shader is NULL or
 * any stage exceeds the driver's active-sampler limits. */
995 {
996  if (shader == NULL) {
997  return false;
998  }
999 
1000  /* NOTE: The only drawback of this method is that it will count a sampler
1001  * used in the fragment shader and only declared (but not used) in the vertex
1002  * shader as used by both. But this corner case is not happening for now. */
1003  int vert_samplers_len = count_active_texture_sampler(shader, pass->vertexcode);
1004  int frag_samplers_len = count_active_texture_sampler(shader, pass->fragmentcode);
1005 
1006  int total_samplers_len = vert_samplers_len + frag_samplers_len;
1007 
1008  /* Validate against opengl limit. */
1009  if ((frag_samplers_len > GPU_max_textures_frag()) ||
1010  (vert_samplers_len > GPU_max_textures_vert())) {
1011  return false;
1012  }
1013 
1014  if (pass->geometrycode) {
1015  int geom_samplers_len = count_active_texture_sampler(shader, pass->geometrycode);
1016  total_samplers_len += geom_samplers_len;
1017  if (geom_samplers_len > GPU_max_textures_geom()) {
1018  return false;
1019  }
1020  }
1021 
/* Per-stage limits passed; also enforce the combined limit. */
1022  return (total_samplers_len <= GPU_max_textures());
1023 }
1024 
/* Compile the pass's shader sources (once). Returns false on failure;
 * the pass is still marked compiled so the failure is not retried. */
1025 bool GPU_pass_compile(GPUPass *pass, const char *shname)
1026 {
1027  bool success = true;
1028  if (!pass->compiled) {
/* NOTE(review): the `GPUShader *shader = GPU_shader_create(` opening line is
 * missing from this capture — the line below is its argument list; confirm. */
1030  pass->vertexcode, pass->fragmentcode, pass->geometrycode, NULL, pass->defines, shname);
1031 
1032  /* NOTE: Some drivers / gpu allows more active samplers than the opengl limit.
1033  * We need to make sure to count active samplers to avoid undefined behavior. */
1034  if (!gpu_pass_shader_validate(pass, shader)) {
1035  success = false;
1036  if (shader != NULL) {
1037  fprintf(stderr, "GPUShader: error: too many samplers in shader.\n");
/* NOTE(review): a line is missing from this capture here; presumably the
 * `GPU_shader_free(shader);` call before discarding the handle — confirm. */
1039  shader = NULL;
1040  }
1041  }
1042  pass->shader = shader;
1043  pass->compiled = true;
1044  }
1045 
1046  return success;
1047 }
1048 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `void GPU_pass_release(GPUPass *pass)` — confirm against the full source.
 * Drops one reference; actual freeing happens later in the cache garbage
 * collector once refcount reaches zero. */
1050 {
1051  BLI_assert(pass->refcount > 0);
1052  pass->refcount--;
1053 }
1054 
1055 static void gpu_pass_free(GPUPass *pass)
1056 {
1057  BLI_assert(pass->refcount == 0);
1058  if (pass->shader) {
1059  GPU_shader_free(pass->shader);
1060  }
1061  MEM_SAFE_FREE(pass->fragmentcode);
1062  MEM_SAFE_FREE(pass->geometrycode);
1063  MEM_SAFE_FREE(pass->vertexcode);
1064  MEM_SAFE_FREE(pass->defines);
1065  MEM_freeN(pass);
1066 }
1067 
/* NOTE(review): the signature line is missing from this capture; presumably
 * `void GPU_pass_cache_garbage_collect(void)` — confirm against the full
 * source. Rate-limited sweep of the pass cache that frees unreferenced
 * passes. */
1069 {
1070  static int lasttime = 0;
1071  const int shadercollectrate = 60; /* hardcoded for now. */
1072  int ctime = (int)PIL_check_seconds_timer();
1073 
/* Only collect once per `shadercollectrate` seconds. */
1074  if (ctime < shadercollectrate + lasttime) {
1075  return;
1076  }
1077 
1078  lasttime = ctime;
1079 
/* NOTE(review): a line is missing from this capture here; presumably the
 * pass-cache lock — confirm. */
1081  GPUPass *next, **prev_pass = &pass_cache;
1082  for (GPUPass *pass = pass_cache; pass; pass = next) {
1083  next = pass->next;
1084  if (pass->refcount == 0) {
1085  /* Remove from list */
1086  *prev_pass = next;
1087  gpu_pass_free(pass);
1088  }
1089  else {
1090  prev_pass = &pass->next;
1091  }
1092  }
/* NOTE(review): the matching unlock line is missing from this capture. */
1094 }
1095 
1097 {
1099 }
1100 
1102 {
1104  while (pass_cache) {
1107  pass_cache = next;
1108  }
1110 
1112 }
1113 
1114 /* Module */
1115 
1117 {
1118 }
1119 
1121 {
1124 }
@ G_DEBUG
Definition: BKE_global.h:133
General operations, lookup, etc. for materials.
void BKE_material_defaults_free_gpu(void)
Definition: material.c:1844
#define BLI_assert(a)
Definition: BLI_assert.h:58
A dynamically sized string ADT.
DynStr * BLI_dynstr_new(void) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
Definition: BLI_dynstr.c:71
void BLI_dynstr_free(DynStr *ds) ATTR_NONNULL()
Definition: BLI_dynstr.c:358
void BLI_dynstr_appendf(DynStr *__restrict ds, const char *__restrict format,...) ATTR_PRINTF_FORMAT(2
char * BLI_dynstr_get_cstring(DynStr *ds) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: BLI_dynstr.c:323
void BLI_dynstr_append(DynStr *__restrict ds, const char *cstr) ATTR_NONNULL()
Definition: BLI_dynstr.c:107
struct GSet GSet
Definition: BLI_ghash.h:189
GSet * BLI_gset_int_new(const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
bool BLI_gset_haskey(GSet *gs, const void *key) ATTR_WARN_UNUSED_RESULT
Definition: BLI_ghash.c:1216
void BLI_gset_free(GSet *gs, GSetKeyFreeFP keyfreefp)
Definition: BLI_ghash.c:1253
bool BLI_gset_add(GSet *gs, void *key)
Definition: BLI_ghash.c:1160
void BLI_hash_mm2a_init(BLI_HashMurmur2A *mm2, uint32_t seed)
Definition: hash_mm2a.c:75
void BLI_hash_mm2a_add(BLI_HashMurmur2A *mm2, const unsigned char *data, size_t len)
Definition: hash_mm2a.c:83
uint32_t BLI_hash_mm2a_end(BLI_HashMurmur2A *mm2)
Definition: hash_mm2a.c:103
BLI_INLINE bool BLI_listbase_is_empty(const struct ListBase *lb)
Definition: BLI_listbase.h:124
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
struct LinkData * BLI_genericNodeN(void *data)
Definition: listbase.c:923
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:547
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:110
bool BLI_str_startswith(const char *__restrict str, const char *__restrict start) ATTR_NONNULL()
Definition: string.c:1006
char * BLI_strdup(const char *str) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL() ATTR_MALLOC
Definition: string.c:70
char * BLI_strdupcat(const char *__restrict str1, const char *__restrict str2) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL() ATTR_MALLOC
Definition: string.c:81
size_t BLI_snprintf(char *__restrict dst, size_t maxncpy, const char *__restrict format,...) ATTR_NONNULL(1
unsigned char uchar
Definition: BLI_sys_types.h:86
pthread_spinlock_t SpinLock
Definition: BLI_threads.h:111
void BLI_spin_init(SpinLock *spin)
Definition: threads.cc:447
void BLI_spin_unlock(SpinLock *spin)
Definition: threads.cc:480
void BLI_spin_lock(SpinLock *spin)
Definition: threads.cc:461
void BLI_spin_end(SpinLock *spin)
Definition: threads.cc:495
#define POINTER_FROM_INT(i)
#define ELEM(...)
#define STREQ(a, b)
CustomDataType
@ CD_PROP_COLOR
@ CD_AUTO_FROM_NAME
@ CD_TANGENT
int GPU_max_textures_vert(void)
int GPU_max_textures_frag(void)
int GPU_max_textures_geom(void)
int GPU_max_textures(void)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
eGPUBuiltin
Definition: GPU_material.h:86
@ GPU_WORLD_NORMAL
Definition: GPU_material.h:105
@ GPU_PARTICLE_ANG_VELOCITY
Definition: GPU_material.h:99
@ GPU_OBJECT_COLOR
Definition: GPU_material.h:93
@ GPU_VIEW_POSITION
Definition: GPU_material.h:91
@ GPU_BARYCENTRIC_TEXCO
Definition: GPU_material.h:103
@ GPU_INVERSE_OBJECT_MATRIX
Definition: GPU_material.h:90
@ GPU_OBJECT_MATRIX
Definition: GPU_material.h:88
@ GPU_INVERSE_VIEW_MATRIX
Definition: GPU_material.h:89
@ GPU_PARTICLE_SCALAR_PROPS
Definition: GPU_material.h:96
@ GPU_PARTICLE_VELOCITY
Definition: GPU_material.h:98
@ GPU_PARTICLE_LOCATION
Definition: GPU_material.h:97
@ GPU_CAMERA_TEXCO_FACTORS
Definition: GPU_material.h:95
@ GPU_AUTO_BUMPSCALE
Definition: GPU_material.h:94
@ GPU_OBJECT_INFO
Definition: GPU_material.h:102
@ GPU_INVERSE_LOC_TO_VIEW_MATRIX
Definition: GPU_material.h:101
@ GPU_LOC_TO_VIEW_MATRIX
Definition: GPU_material.h:100
@ GPU_VIEW_NORMAL
Definition: GPU_material.h:92
@ GPU_VIEW_MATRIX
Definition: GPU_material.h:87
@ GPU_BARYCENTRIC_DIST
Definition: GPU_material.h:104
eGPUType
Definition: GPU_material.h:59
@ GPU_VEC2
Definition: GPU_material.h:64
@ GPU_VEC4
Definition: GPU_material.h:66
@ GPU_CLOSURE
Definition: GPU_material.h:80
@ GPU_VEC3
Definition: GPU_material.h:65
@ GPU_FLOAT
Definition: GPU_material.h:63
void GPU_material_uniform_buffer_create(GPUMaterial *material, ListBase *inputs)
Definition: gpu_material.c:233
GPUShader * GPU_shader_create(const char *vertcode, const char *fragcode, const char *geomcode, const char *libcode, const char *defines, const char *shname)
Definition: gpu_shader.cc:376
int GPU_shader_get_uniform(GPUShader *shader, const char *name)
Definition: gpu_shader.cc:551
struct GPUShader GPUShader
Definition: GPU_shader.h:33
void GPU_shader_free_builtin_shaders(void)
void GPU_shader_free(GPUShader *shader)
Definition: gpu_shader.cc:365
#define GPU_UBO_BLOCK_NAME
#define GPU_ATTRIBUTE_UBO_BLOCK_NAME
void GPU_vertformat_safe_attr_name(const char *attr_name, char *r_safe_name, uint max_len)
#define GPU_MAX_SAFE_ATTR_NAME
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
Platform independent time functions.
#define output
OperationNode * node
Depsgraph * graph
StackEntry * from
Material material
char datatoc_gpu_shader_common_obinfos_lib_glsl[]
char datatoc_gpu_shader_codegen_lib_glsl[]
static GPUPass * gpu_pass_cache_lookup(uint32_t hash)
Definition: gpu_codegen.c:89
static void codegen_call_functions(DynStr *ds, GPUNodeGraph *graph)
Definition: gpu_codegen.c:414
static SpinLock pass_cache_spin
Definition: gpu_codegen.c:70
static void codegen_final_output(DynStr *ds, GPUOutput *finaloutput)
Definition: gpu_codegen.c:514
static void gpu_pass_free(GPUPass *pass)
Definition: gpu_codegen.c:1055
static char * code_generate_interface(GPUNodeGraph *graph, int builtins)
Definition: gpu_codegen.c:666
static bool gpu_pass_is_valid(GPUPass *pass)
Definition: gpu_codegen.c:833
static const char * attr_prefix_get(CustomDataType type)
Definition: gpu_codegen.c:644
static GPUPass * pass_cache
Definition: gpu_codegen.c:69
void GPU_pass_cache_free(void)
Definition: gpu_codegen.c:1101
static void codegen_declare_tmps(DynStr *ds, GPUNodeGraph *graph)
Definition: gpu_codegen.c:398
void GPU_pass_release(GPUPass *pass)
Definition: gpu_codegen.c:1049
static int codegen_process_uniforms_functions(GPUMaterial *material, DynStr *ds, GPUNodeGraph *graph)
Definition: gpu_codegen.c:300
static void codegen_convert_datatype(DynStr *ds, int from, int to, const char *tmp, int id)
Definition: gpu_codegen.c:129
void gpu_codegen_init(void)
Definition: gpu_codegen.c:1116
static void codegen_set_unique_ids(GPUNodeGraph *graph)
Definition: gpu_codegen.c:280
static void codegen_print_datatype(DynStr *ds, const eGPUType type, float *data)
Definition: gpu_codegen.c:201
GPUPass * GPU_generate_pass(GPUMaterial *material, GPUNodeGraph *graph, const char *vert_code, const char *geom_code, const char *frag_lib, const char *defines)
Definition: gpu_codegen.c:839
static char * code_generate_vertex(GPUNodeGraph *graph, const char *interface_str, const char *vert_code, int builtins)
Definition: gpu_codegen.c:694
static const char * gpu_builtin_name(eGPUBuiltin builtin)
Definition: gpu_codegen.c:218
bool GPU_pass_compile(GPUPass *pass, const char *shname)
Definition: gpu_codegen.c:1025
static bool gpu_pass_shader_validate(GPUPass *pass, GPUShader *shader)
Definition: gpu_codegen.c:994
static char * code_generate_geometry(GPUNodeGraph *graph, const char *interface_str, const char *geom_code, int builtins)
Definition: gpu_codegen.c:779
void GPU_pass_cache_garbage_collect(void)
Definition: gpu_codegen.c:1068
static uint32_t gpu_pass_hash(const char *frag_gen, const char *defs, ListBase *attributes)
Definition: gpu_codegen.c:72
void GPU_pass_cache_init(void)
Definition: gpu_codegen.c:1096
static GPUPass * gpu_pass_cache_resolve_collision(GPUPass *pass, const char *vert, const char *geom, const char *frag, const char *defs, uint32_t hash)
Definition: gpu_codegen.c:104
static int count_active_texture_sampler(GPUShader *shader, char *source)
Definition: gpu_codegen.c:941
static char * code_generate_fragment(GPUMaterial *material, GPUNodeGraph *graph, const char *interface_str)
Definition: gpu_codegen.c:519
GPUShader * GPU_pass_shader_get(GPUPass *pass)
Definition: gpu_codegen.c:826
void gpu_codegen_exit(void)
Definition: gpu_codegen.c:1120
GSet * gpu_material_used_libraries(GPUMaterial *material)
Definition: gpu_material.c:613
const char * gpu_data_type_to_string(const eGPUType type)
char * gpu_str_skip_token(char *str, char *token, int max)
char * gpu_material_library_generate_code(GSet *used_libraries, const char *frag_lib)
void gpu_node_graph_finalize_uniform_attrs(GPUNodeGraph *graph)
void gpu_node_graph_prune_unused(GPUNodeGraph *graph)
@ GPU_SOURCE_CONSTANT
@ GPU_SOURCE_ATTR
@ GPU_SOURCE_VOLUME_GRID_TRANSFORM
@ GPU_SOURCE_UNIFORM
@ GPU_SOURCE_VOLUME_GRID
@ GPU_SOURCE_OUTPUT
@ GPU_SOURCE_TEX_TILED_MAPPING
@ GPU_SOURCE_BUILTIN
@ GPU_SOURCE_UNIFORM_ATTR
@ GPU_SOURCE_STRUCT
@ GPU_SOURCE_TEX
void KERNEL_FUNCTION_FULL_NAME() shader(KernelGlobals *kg, uint4 *input, float4 *output, int type, int filter, int i, int offset, int sample)
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
static ulong * next
#define hash
Definition: noise.c:169
unsigned int uint32_t
Definition: stdint.h:83
eGPUType type
bool compiled
Definition: gpu_codegen.h:46
uint32_t hash
Definition: gpu_codegen.h:45
char * vertexcode
Definition: gpu_codegen.h:42
struct GPUShader * shader
Definition: gpu_codegen.h:39
uint refcount
Definition: gpu_codegen.h:44
char * geometrycode
Definition: gpu_codegen.h:41
char * defines
Definition: gpu_codegen.h:43
struct GPUPass * next
Definition: gpu_codegen.h:37
char * fragmentcode
Definition: gpu_codegen.h:40
double PIL_check_seconds_timer(void)
Definition: time.c:80
#define G(x, y, z)