Blender  V2.93
workbench_shader.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2020, Blender Foundation.
17  */
18 
23 #include "DRW_render.h"
24 
25 #include "BLI_dynstr.h"
26 #include "BLI_string_utils.h"
27 
28 #include "workbench_engine.h"
29 #include "workbench_private.h"
30 
31 extern char datatoc_common_math_lib_glsl[];
33 extern char datatoc_common_hair_lib_glsl[];
35 extern char datatoc_common_view_lib_glsl[];
36 extern char datatoc_common_smaa_lib_glsl[];
37 
42 
49 
51 
54 
56 
61 
64 
74 
77 
78 /* Maximum number of variations. */
79 #define MAX_LIGHTING 3
80 #define MAX_COLOR 3
81 
/* NOTE(review): the enumerators are elided from this dump — per the symbol
 * cross-references they include VOLUME_SH_SLICE, VOLUME_SH_COBA and
 * VOLUME_SH_CUBIC; confirm against the original source. */
82 enum {
86 };
87 
88 #define VOLUME_SH_MAX (1 << (VOLUME_SH_CUBIC + 1))
89 
/* Module-wide cache of every compiled workbench shader. Entries start NULL
 * (zero-initialized below), are lazily compiled by the getters in this file,
 * and are released in workbench_shader_free().
 * NOTE(review): several struct fields are elided from this dump (prepass
 * caches, composite/shadow/dof/aa shaders per the cross-references). */
90 static struct {
94 
99 
102 
103  struct GPUShader *cavity_sh[2][2];
104 
110 
112  struct GPUShader *smaa_sh[3];
113 
114  struct GPUShader *volume_sh[2][2][3][2];
115 
117 } e_data = {{{{NULL}}}};
118 
120 {
121  if (e_data.lib == NULL) {
  /* Populate the shared DRW shader library once with every GLSL snippet the
   * workbench shaders may include. NOTE(review): the allocation of e_data.lib
   * (presumably DRW_shader_library_create()) is on a line elided from this
   * dump — confirm against the original source. */
123  /* NOTE: These need to be ordered by dependencies. */
124  DRW_SHADER_LIB_ADD(e_data.lib, common_math_lib);
125  DRW_SHADER_LIB_ADD(e_data.lib, common_math_geom_lib);
126  DRW_SHADER_LIB_ADD(e_data.lib, common_hair_lib);
127  DRW_SHADER_LIB_ADD(e_data.lib, common_view_lib);
128  DRW_SHADER_LIB_ADD(e_data.lib, common_pointcloud_lib);
129  DRW_SHADER_LIB_ADD(e_data.lib, gpu_shader_common_obinfos_lib);
130  DRW_SHADER_LIB_ADD(e_data.lib, workbench_shader_interface_lib);
131  DRW_SHADER_LIB_ADD(e_data.lib, workbench_common_lib);
132  DRW_SHADER_LIB_ADD(e_data.lib, workbench_image_lib);
133  DRW_SHADER_LIB_ADD(e_data.lib, workbench_material_lib);
134  DRW_SHADER_LIB_ADD(e_data.lib, workbench_data_lib);
135  DRW_SHADER_LIB_ADD(e_data.lib, workbench_matcap_lib);
136  DRW_SHADER_LIB_ADD(e_data.lib, workbench_cavity_lib);
137  DRW_SHADER_LIB_ADD(e_data.lib, workbench_curvature_lib);
138  DRW_SHADER_LIB_ADD(e_data.lib, workbench_world_light_lib);
139  }
140 }
141 
143  WORKBENCH_PrivateData *wpd, bool textured, bool tiled, bool cavity, bool curvature)
144 {
  /* Build a heap-allocated "#define ..." string selecting the shading
   * variation: lighting mode (studio/matcap from wpd, flat otherwise —
   * including when wpd is NULL), normal encoding, texture color, tiled image
   * arrays, and the cavity/curvature effects. Caller owns the returned string
   * and must MEM_freeN() it. */
145  char *str = NULL;
146 
147  DynStr *ds = BLI_dynstr_new();
148 
149  if (wpd && wpd->shading.light == V3D_LIGHTING_STUDIO) {
150  BLI_dynstr_append(ds, "#define V3D_LIGHTING_STUDIO\n");
151  }
152  else if (wpd && wpd->shading.light == V3D_LIGHTING_MATCAP) {
153  BLI_dynstr_append(ds, "#define V3D_LIGHTING_MATCAP\n");
154  }
155  else {
156  BLI_dynstr_append(ds, "#define V3D_LIGHTING_FLAT\n");
157  }
158 
159  if (NORMAL_ENCODING_ENABLED()) {
160  BLI_dynstr_append(ds, "#define WORKBENCH_ENCODE_NORMALS\n");
161  }
162 
163  if (textured) {
164  BLI_dynstr_append(ds, "#define V3D_SHADING_TEXTURE_COLOR\n");
165  }
166  if (tiled) {
167  BLI_dynstr_append(ds, "#define TEXTURE_IMAGE_ARRAY\n");
168  }
169  if (cavity) {
170  BLI_dynstr_append(ds, "#define USE_CAVITY\n");
171  }
172  if (curvature) {
173  BLI_dynstr_append(ds, "#define USE_CURVATURE\n");
174  }
175 
  /* NOTE(review): a line is elided here in this dump — presumably
   * `str = BLI_dynstr_get_cstring(ds);` given that `str` is returned below.
   * Confirm against the original source. */
177  BLI_dynstr_free(ds);
178  return str;
179 }
180 
181 static int workbench_color_index(WORKBENCH_PrivateData *UNUSED(wpd), bool textured, bool tiled)
182 {
183  BLI_assert(2 < MAX_COLOR);
184  return (textured) ? (tiled ? 2 : 1) : 0;
185 }
186 
188  bool transp,
189  eWORKBENCH_DataType datatype,
190  bool textured,
191  bool tiled)
192 {
  /* Fetch-or-compile the prepass shader variation for the given pipeline
   * (opaque vs. transparent accumulation), geometry datatype, and texturing
   * flags. Compiled shaders are cached in e_data, keyed by GPU shader config,
   * datatype, lighting (transparent only) and color variation. */
193  int color = workbench_color_index(wpd, textured, tiled);
194  int light = wpd->shading.light;
195  BLI_assert(light < MAX_LIGHTING);
196  struct GPUShader **shader =
197  (transp) ? &e_data.transp_prepass_sh_cache[wpd->sh_cfg][datatype][light][color] :
198  &e_data.opaque_prepass_sh_cache[wpd->sh_cfg][datatype][color];
199 
200  if (*shader == NULL) {
201  char *defines = workbench_build_defines(wpd, textured, tiled, false, false);
202 
  /* NOTE(review): the opaque branch of this ternary (presumably the prepass
   * fragment source) is on a line elided from this dump — confirm. */
203  char *frag_file = transp ? datatoc_workbench_transparent_accum_frag_glsl :
205  char *frag_src = DRW_shader_library_create_shader_string(e_data.lib, frag_file);
206 
  /* NOTE(review): the hair/pointcloud/mesh vertex-source alternatives of this
   * nested ternary are partly elided from this dump. */
207  char *vert_file = (datatype == WORKBENCH_DATATYPE_HAIR) ?
209  ((datatype == WORKBENCH_DATATYPE_POINTCLOUD) ?
212  char *vert_src = DRW_shader_library_create_shader_string(e_data.lib, vert_file);
213 
214  const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[wpd->sh_cfg];
215 
  /* NOTE(review): the GPU_shader_create_from_arrays(...) call head is elided
   * from this dump; the designated initializers below are its arguments. */
217  .vert = (const char *[]){sh_cfg_data->lib, vert_src, NULL},
218  .frag = (const char *[]){frag_src, NULL},
219  .defs = (const char *[]){sh_cfg_data->def,
220  defines,
221  transp ? "#define TRANSPARENT_MATERIAL\n" :
222  "#define OPAQUE_MATERIAL\n",
223  (datatype == WORKBENCH_DATATYPE_POINTCLOUD) ?
224  "#define UNIFORM_RESOURCE_ID\n"
225  "#define INSTANCED_ATTR\n" :
226  NULL,
227  NULL},
228  });
229 
  /* The intermediate source strings are owned here; free after compilation. */
230  MEM_freeN(defines);
231  MEM_freeN(frag_src);
232  MEM_freeN(vert_src);
233  }
234  return *shader;
235 }
236 
238 {
  /* Opaque prepass shader, untextured variation. */
239  return workbench_shader_get_ex(wpd, false, datatype, false, false);
240 }
241 
243  eWORKBENCH_DataType datatype,
244  bool tiled)
245 {
  /* Opaque prepass shader, textured variation (optionally a tiled image
   * array). */
246  return workbench_shader_get_ex(wpd, false, datatype, true, tiled);
247 }
248 
250  eWORKBENCH_DataType datatype)
251 {
  /* Transparent-accumulation prepass shader, untextured variation. */
252  return workbench_shader_get_ex(wpd, true, datatype, false, false);
253 }
254 
256  eWORKBENCH_DataType datatype,
257  bool tiled)
258 {
  /* Transparent-accumulation prepass shader, textured variation (optionally a
   * tiled image array). */
259  return workbench_shader_get_ex(wpd, true, datatype, true, tiled);
260 }
261 
263 {
  /* Deferred composite full-screen shader; one cached variation per lighting
   * mode (indexed by wpd->shading.light, bounded by MAX_LIGHTING). */
264  int light = wpd->shading.light;
265  struct GPUShader **shader = &e_data.opaque_composite_sh[light];
266  BLI_assert(light < MAX_LIGHTING);
267 
268  if (*shader == NULL) {
269  char *defines = workbench_build_defines(wpd, false, false, false, false);
  /* NOTE(review): the creation of `frag` (the composite fragment source
   * string) is on lines elided from this dump — confirm. */
272 
273  *shader = DRW_shader_create_fullscreen(frag, defines);
274 
275  MEM_freeN(defines);
276  MEM_freeN(frag);
277  }
278  return *shader;
279 }
280 
282 {
  /* Full-screen shader merging the "in front" pass; lazily compiled and
   * cached for the whole session. */
283  if (e_data.merge_infront_sh == NULL) {
  /* NOTE(review): the creation of `frag` is on lines elided from this dump —
   * confirm. */
286 
287  e_data.merge_infront_sh = DRW_shader_create_fullscreen(frag, NULL);
288 
289  MEM_freeN(frag);
290  }
291  return e_data.merge_infront_sh;
292 }
293 
295 {
  /* Order-independent-transparency resolve shader (full-screen), compiled
   * once with defines derived from the current shading settings and cached. */
296  if (e_data.oit_resolve_sh == NULL) {
297  char *defines = workbench_build_defines(wpd, false, false, false, false);
298 
299  e_data.oit_resolve_sh = DRW_shader_create_fullscreen(
  /* NOTE(review): the fragment-source argument of this call is on a line
   * elided from this dump — confirm. */
301 
302  MEM_freeN(defines);
303  }
304  return e_data.oit_resolve_sh;
305 }
306 
307 static GPUShader *workbench_shader_shadow_pass_get_ex(bool depth_pass, bool manifold, bool cap)
308 {
  /* Shadow-volume shader variations: depth-pass vs. depth-fail technique,
   * manifold vs. non-manifold geometry, and (depth-fail only) capped volumes.
   * Compiled lazily and cached in e_data. */
309  struct GPUShader **shader = (depth_pass) ? &e_data.shadow_depth_pass_sh[manifold] :
310  &e_data.shadow_depth_fail_sh[manifold][cap];
311 
312  if (*shader == NULL) {
  /* Debug builds visualize the volumes instead of writing depth only. */
313 #if DEBUG_SHADOW_VOLUME
314  const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
315 #else
316  const char *shadow_frag = datatoc_gpu_shader_depth_only_frag_glsl;
317 #endif
318 
  /* NOTE(review): the GPU_shader_create_from_arrays(...) call head and some
   * source-array entries are elided from this dump — confirm. */
320  .vert = (const char *[]){datatoc_common_view_lib_glsl,
322  NULL},
323  .geom = (const char *[]){(cap) ? datatoc_workbench_shadow_caps_geom_glsl :
325  NULL},
326  .frag = (const char *[]){shadow_frag, NULL},
327  .defs = (const char *[]){(depth_pass) ? "#define SHADOW_PASS\n" : "#define SHADOW_FAIL\n",
  /* DOUBLE_MANIFOLD is only defined for non-manifold input. */
328  (manifold) ? "" : "#define DOUBLE_MANIFOLD\n",
329  NULL},
330  });
331  }
332  return *shader;
333 }
334 
336 {
  /* Depth-pass shadow volume shader; the cap variation is unused here. */
337  return workbench_shader_shadow_pass_get_ex(true, manifold, false);
338 }
339 
341 {
  /* Depth-fail shadow volume shader; `cap` selects the capped-volume
   * geometry variant. */
342  return workbench_shader_shadow_pass_get_ex(false, manifold, cap);
343 }
344 
345 GPUShader *workbench_shader_cavity_get(bool cavity, bool curvature)
346 {
  /* Full-screen cavity/curvature effect shader; at least one of the two
   * effects must be requested. Cached per (cavity, curvature) pair. */
347  BLI_assert(cavity || curvature);
348  struct GPUShader **shader = &e_data.cavity_sh[cavity][curvature];
349 
350  if (*shader == NULL) {
  /* A NULL wpd yields the flat-lighting define set; only the cavity and
   * curvature flags matter for this shader. */
351  char *defines = workbench_build_defines(NULL, false, false, cavity, curvature);
  /* NOTE(review): the creation of `frag` is on lines elided from this dump —
   * confirm. */
354 
355  *shader = DRW_shader_create_fullscreen(frag, defines);
356 
357  MEM_freeN(defines);
358  MEM_freeN(frag);
359  }
360  return *shader;
361 }
362 
364 {
  /* Full-screen object-outline effect shader; compiled once and cached. */
365  if (e_data.outline_sh == NULL) {
  /* NOTE(review): the creation of `frag` is on lines elided from this dump —
   * confirm. */
368 
369  e_data.outline_sh = DRW_shader_create_fullscreen(frag, NULL);
370 
371  MEM_freeN(frag);
372  }
373  return e_data.outline_sh;
374 }
375 
378  GPUShader **blur1_sh,
379  GPUShader **blur2_sh,
380  GPUShader **resolve_sh)
381 {
  /* Return (through the out-parameters) all depth-of-field stage shaders,
   * compiling and caching the whole set on first call. Each stage reuses the
   * same DoF fragment source specialized by a stage define (PREPARE,
   * DOWNSAMPLE, BLUR1, BLUR2, RESOLVE). */
382  if (e_data.dof_prepare_sh == NULL) {
  /* NOTE(review): the assignment heads (e_data.dof_*_sh = ...) are elided
   * from this dump; only the argument lines remain — confirm. */
384  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define PREPARE\n");
386  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define DOWNSAMPLE\n");
387 #if 0 /* TODO(fclem): finish COC min_max optimization */
389  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define FLATTEN_VERTICAL\n");
391  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define FLATTEN_HORIZONTAL\n");
393  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define DILATE_VERTICAL\n");
395  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define DILATE_HORIZONTAL\n");
396 #endif
398  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define BLUR1\n");
400  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define BLUR2\n");
402  datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define RESOLVE\n");
403  }
404 
405  *prepare_sh = e_data.dof_prepare_sh;
406  *downsample_sh = e_data.dof_downsample_sh;
407  *blur1_sh = e_data.dof_blur1_sh;
408  *blur2_sh = e_data.dof_blur2_sh;
409  *resolve_sh = e_data.dof_resolve_sh;
410 }
411 
413 {
  /* Anti-aliasing accumulation full-screen shader; compiled once and
   * cached. */
414  if (e_data.aa_accum_sh == NULL) {
  /* NOTE(review): the creation of `frag` is on lines elided from this dump —
   * confirm. */
417 
418  e_data.aa_accum_sh = DRW_shader_create_fullscreen(frag, NULL);
419 
420  MEM_freeN(frag);
421  }
422  return e_data.aa_accum_sh;
423 }
424 
426 {
  /* SMAA full-screen shader for the given stage (0..2), cached per stage.
   * NOTE(review): per SMAA convention the stages are edge detection, blend
   * weight calculation and neighborhood blending — confirm against the
   * SMAA library sources. */
427  BLI_assert(stage < 3);
428  if (!e_data.smaa_sh[stage]) {
  /* The stage number is injected as a define so one source serves all
   * three passes. */
429  char stage_define[32];
430  BLI_snprintf(stage_define, sizeof(stage_define), "#define SMAA_STAGE %d\n", stage);
431 
  /* NOTE(review): the GPU_shader_create_from_arrays(...) call head and the
   * SMAA library/source entries are elided from this dump — confirm. */
433  .vert =
434  (const char *[]){
435  "#define SMAA_INCLUDE_VS 1\n",
436  "#define SMAA_INCLUDE_PS 0\n",
437  "uniform vec4 viewportMetrics;\n",
440  NULL,
441  },
442  .frag =
443  (const char *[]){
444  "#define SMAA_INCLUDE_VS 0\n",
445  "#define SMAA_INCLUDE_PS 1\n",
446  "uniform vec4 viewportMetrics;\n",
449  NULL,
450  },
451  .defs =
452  (const char *[]){
453  "#define SMAA_GLSL_3\n",
454  "#define SMAA_RT_METRICS viewportMetrics\n",
455  "#define SMAA_PRESET_HIGH\n",
456  "#define SMAA_LUMA_WEIGHT float4(1.0, 1.0, 1.0, 1.0)\n",
457  "#define SMAA_NO_DISCARD\n",
458  stage_define,
459  NULL,
460  },
461  });
462  }
463  return e_data.smaa_sh[stage];
464 }
465 
467  bool coba,
468  eWORKBENCH_VolumeInterpType interp_type,
469  bool smoke)
470 {
  /* Volume rendering shader variation: slice mode, optional color-band
   * (coba) transfer function, texture interpolation mode, and smoke-specific
   * sampling. Compiled lazily; cached in e_data.volume_sh keyed by all four
   * parameters. */
471  GPUShader **shader = &e_data.volume_sh[slice][coba][interp_type][smoke];
472 
473  if (*shader == NULL) {
474  DynStr *ds = BLI_dynstr_new();
475 
476  if (slice) {
477  BLI_dynstr_append(ds, "#define VOLUME_SLICE\n");
478  }
479  if (coba) {
480  BLI_dynstr_append(ds, "#define USE_COBA\n");
481  }
  /* NOTE(review): the case labels (WORKBENCH_VOLUME_INTERP_*) are elided
   * from this dump; each branch appends its interpolation define —
   * confirm which label pairs with which define. */
482  switch (interp_type) {
484  BLI_dynstr_append(ds, "#define USE_TRILINEAR\n");
485  break;
487  BLI_dynstr_append(ds, "#define USE_TRICUBIC\n");
488  break;
490  BLI_dynstr_append(ds, "#define USE_CLOSEST\n");
491  break;
492  }
493  if (smoke) {
494  BLI_dynstr_append(ds, "#define VOLUME_SMOKE\n");
495  }
496 
497  char *defines = BLI_dynstr_get_cstring(ds);
498  BLI_dynstr_free(ds);
499 
  /* NOTE(review): the creation of the `vert` and `frag` source strings is on
   * lines elided from this dump — confirm. */
504 
505  *shader = DRW_shader_create(vert, NULL, frag, defines);
506 
507  MEM_freeN(vert);
508  MEM_freeN(frag);
509  MEM_freeN(defines);
510  }
511  return *shader;
512 }
513 
515 {
  /* Free every cached shader held in e_data. Multi-dimensional caches are
   * walked as flat arrays of GPUShader pointers, sized via
   * sizeof(cache) / sizeof(void *). */
516  for (int j = 0; j < sizeof(e_data.opaque_prepass_sh_cache) / sizeof(void *); j++) {
517  struct GPUShader **sh_array = &e_data.opaque_prepass_sh_cache[0][0][0];
518  DRW_SHADER_FREE_SAFE(sh_array[j]);
519  }
520  for (int j = 0; j < sizeof(e_data.transp_prepass_sh_cache) / sizeof(void *); j++) {
521  struct GPUShader **sh_array = &e_data.transp_prepass_sh_cache[0][0][0][0];
522  DRW_SHADER_FREE_SAFE(sh_array[j]);
523  }
524  for (int j = 0; j < ARRAY_SIZE(e_data.opaque_composite_sh); j++) {
525  struct GPUShader **sh_array = &e_data.opaque_composite_sh[0];
526  DRW_SHADER_FREE_SAFE(sh_array[j]);
527  }
528  for (int j = 0; j < ARRAY_SIZE(e_data.shadow_depth_pass_sh); j++) {
529  struct GPUShader **sh_array = &e_data.shadow_depth_pass_sh[0];
530  DRW_SHADER_FREE_SAFE(sh_array[j]);
531  }
532  for (int j = 0; j < sizeof(e_data.shadow_depth_fail_sh) / sizeof(void *); j++) {
533  struct GPUShader **sh_array = &e_data.shadow_depth_fail_sh[0][0];
534  DRW_SHADER_FREE_SAFE(sh_array[j]);
535  }
536  for (int j = 0; j < sizeof(e_data.cavity_sh) / sizeof(void *); j++) {
537  struct GPUShader **sh_array = &e_data.cavity_sh[0][0];
538  DRW_SHADER_FREE_SAFE(sh_array[j]);
539  }
540  for (int j = 0; j < ARRAY_SIZE(e_data.smaa_sh); j++) {
541  struct GPUShader **sh_array = &e_data.smaa_sh[0];
542  DRW_SHADER_FREE_SAFE(sh_array[j]);
543  }
544  for (int j = 0; j < sizeof(e_data.volume_sh) / sizeof(void *); j++) {
545  struct GPUShader **sh_array = &e_data.volume_sh[0][0][0][0];
546  DRW_SHADER_FREE_SAFE(sh_array[j]);
547  }
548 
  /* Singleton (non-array) shaders. */
549  DRW_SHADER_FREE_SAFE(e_data.oit_resolve_sh);
550  DRW_SHADER_FREE_SAFE(e_data.outline_sh);
551  DRW_SHADER_FREE_SAFE(e_data.merge_infront_sh);
552 
553  DRW_SHADER_FREE_SAFE(e_data.dof_prepare_sh);
554  DRW_SHADER_FREE_SAFE(e_data.dof_downsample_sh);
555  DRW_SHADER_FREE_SAFE(e_data.dof_blur1_sh);
556  DRW_SHADER_FREE_SAFE(e_data.dof_blur2_sh);
557  DRW_SHADER_FREE_SAFE(e_data.dof_resolve_sh);
558 
559  DRW_SHADER_FREE_SAFE(e_data.aa_accum_sh);
560 
  /* NOTE(review): a line is elided here in this dump — presumably freeing
   * the shader library, DRW_SHADER_LIB_FREE_SAFE(e_data.lib); confirm. */
562 }
#define BLI_assert(a)
Definition: BLI_assert.h:58
A dynamically sized string ADT.
DynStr * BLI_dynstr_new(void) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
Definition: BLI_dynstr.c:71
void BLI_dynstr_free(DynStr *ds) ATTR_NONNULL()
Definition: BLI_dynstr.c:358
char * BLI_dynstr_get_cstring(DynStr *ds) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: BLI_dynstr.c:323
void BLI_dynstr_append(DynStr *__restrict ds, const char *cstr) ATTR_NONNULL()
Definition: BLI_dynstr.c:107
size_t BLI_snprintf(char *__restrict dst, size_t maxncpy, const char *__restrict format,...) ATTR_NONNULL(1
#define ARRAY_SIZE(arr)
#define UNUSED(x)
@ V3D_LIGHTING_STUDIO
@ V3D_LIGHTING_MATCAP
#define DRW_SHADER_LIB_FREE_SAFE(lib)
Definition: DRW_render.h:298
#define DRW_shader_create_fullscreen(frag, defines)
Definition: DRW_render.h:241
#define DRW_SHADER_LIB_ADD(lib, lib_name)
Definition: DRW_render.h:291
#define DRW_SHADER_FREE_SAFE(shader)
Definition: DRW_render.h:279
#define DRW_shader_create(vert, geom, frag, defines)
Definition: DRW_render.h:235
#define DRW_shader_create_fullscreen_with_shaderlib(frag, lib, defines)
Definition: DRW_render.h:243
struct GPUShader GPUShader
Definition: GPU_shader.h:33
const GPUShaderConfigData GPU_shader_cfg_data[GPU_SHADER_CFG_LEN]
#define GPU_shader_create_from_arrays(...)
Definition: GPU_shader.h:69
#define GPU_SHADER_CFG_LEN
Definition: GPU_shader.h:395
EvaluationStage stage
Definition: deg_eval.cc:96
DRWShaderLibrary * DRW_shader_library_create(void)
char * DRW_shader_library_create_shader_string(const DRWShaderLibrary *lib, const char *shader_code)
struct GPUShader * downsample_sh
#define str(s)
void KERNEL_FUNCTION_FULL_NAME() shader(KernelGlobals *kg, uint4 *input, float4 *output, int type, int filter, int i, int offset, int sample)
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
const char * lib
Definition: GPU_shader.h:398
const char * def
Definition: GPU_shader.h:399
eGPUShaderConfig sh_cfg
#define NORMAL_ENCODING_ENABLED()
eWORKBENCH_VolumeInterpType
@ WORKBENCH_VOLUME_INTERP_CUBIC
@ WORKBENCH_VOLUME_INTERP_LINEAR
@ WORKBENCH_VOLUME_INTERP_CLOSEST
eWORKBENCH_DataType
@ WORKBENCH_DATATYPE_MAX
@ WORKBENCH_DATATYPE_HAIR
@ WORKBENCH_DATATYPE_POINTCLOUD
struct GPUShader * dof_blur2_sh
#define MAX_COLOR
char datatoc_workbench_shader_interface_lib_glsl[]
char datatoc_gpu_shader_common_obinfos_lib_glsl[]
char datatoc_workbench_material_lib_glsl[]
GPUShader * workbench_shader_transparent_resolve_get(WORKBENCH_PrivateData *wpd)
char datatoc_common_pointcloud_lib_glsl[]
char datatoc_workbench_volume_frag_glsl[]
char datatoc_workbench_effect_smaa_frag_glsl[]
char datatoc_workbench_prepass_frag_glsl[]
@ VOLUME_SH_SLICE
@ VOLUME_SH_COBA
@ VOLUME_SH_CUBIC
void workbench_shader_library_ensure(void)
static char * workbench_build_defines(WORKBENCH_PrivateData *wpd, bool textured, bool tiled, bool cavity, bool curvature)
struct GPUShader * outline_sh
struct GPUShader * dof_downsample_sh
GPUShader * workbench_shader_shadow_fail_get(bool manifold, bool cap)
char datatoc_workbench_composite_frag_glsl[]
GPUShader * workbench_shader_merge_infront_get(WORKBENCH_PrivateData *UNUSED(wpd))
char datatoc_workbench_prepass_pointcloud_vert_glsl[]
char datatoc_gpu_shader_depth_only_frag_glsl[]
char datatoc_workbench_prepass_vert_glsl[]
GPUShader * workbench_shader_shadow_pass_get(bool manifold)
struct GPUShader * shadow_depth_fail_sh[2][2]
static GPUShader * workbench_shader_shadow_pass_get_ex(bool depth_pass, bool manifold, bool cap)
char datatoc_workbench_shadow_caps_geom_glsl[]
char datatoc_common_smaa_lib_glsl[]
GPUShader * workbench_shader_composite_get(WORKBENCH_PrivateData *wpd)
struct GPUShader * shadow_depth_pass_sh[2]
struct GPUShader * aa_accum_sh
GPUShader * workbench_shader_volume_get(bool slice, bool coba, eWORKBENCH_VolumeInterpType interp_type, bool smoke)
char datatoc_workbench_shadow_geom_glsl[]
void workbench_shader_free(void)
char datatoc_workbench_data_lib_glsl[]
char datatoc_workbench_shadow_vert_glsl[]
struct GPUShader * transp_prepass_sh_cache[GPU_SHADER_CFG_LEN][WORKBENCH_DATATYPE_MAX][MAX_LIGHTING][MAX_COLOR]
char datatoc_workbench_effect_cavity_frag_glsl[]
GPUShader * workbench_shader_antialiasing_get(int stage)
struct GPUShader * merge_infront_sh
struct GPUShader * cavity_sh[2][2]
char datatoc_workbench_curvature_lib_glsl[]
struct GPUShader * dof_resolve_sh
#define MAX_LIGHTING
char datatoc_common_hair_lib_glsl[]
GPUShader * workbench_shader_antialiasing_accumulation_get(void)
char datatoc_common_view_lib_glsl[]
GPUShader * workbench_shader_opaque_image_get(WORKBENCH_PrivateData *wpd, eWORKBENCH_DataType datatype, bool tiled)
char datatoc_workbench_merge_infront_frag_glsl[]
struct GPUShader * opaque_prepass_sh_cache[GPU_SHADER_CFG_LEN][WORKBENCH_DATATYPE_MAX][MAX_COLOR]
char datatoc_workbench_cavity_lib_glsl[]
char datatoc_workbench_effect_smaa_vert_glsl[]
static int workbench_color_index(WORKBENCH_PrivateData *UNUSED(wpd), bool textured, bool tiled)
char datatoc_workbench_transparent_resolve_frag_glsl[]
struct DRWShaderLibrary * lib
struct GPUShader * oit_resolve_sh
struct GPUShader * volume_sh[2][2][3][2]
struct GPUShader * dof_prepare_sh
struct GPUShader * smaa_sh[3]
char datatoc_workbench_image_lib_glsl[]
char datatoc_common_math_lib_glsl[]
char datatoc_workbench_volume_vert_glsl[]
GPUShader * workbench_shader_cavity_get(bool cavity, bool curvature)
char datatoc_workbench_effect_dof_frag_glsl[]
char datatoc_workbench_prepass_hair_vert_glsl[]
char datatoc_workbench_effect_taa_frag_glsl[]
char datatoc_workbench_world_light_lib_glsl[]
char datatoc_workbench_shadow_debug_frag_glsl[]
static struct @258 e_data
struct GPUShader * dof_blur1_sh
GPUShader * workbench_shader_opaque_get(WORKBENCH_PrivateData *wpd, eWORKBENCH_DataType datatype)
char datatoc_workbench_matcap_lib_glsl[]
char datatoc_workbench_transparent_accum_frag_glsl[]
char datatoc_workbench_effect_outline_frag_glsl[]
GPUShader * workbench_shader_transparent_get(WORKBENCH_PrivateData *wpd, eWORKBENCH_DataType datatype)
struct GPUShader * opaque_composite_sh[MAX_LIGHTING]
char datatoc_common_math_geom_lib_glsl[]
GPUShader * workbench_shader_transparent_image_get(WORKBENCH_PrivateData *wpd, eWORKBENCH_DataType datatype, bool tiled)
GPUShader * workbench_shader_outline_get(void)
void workbench_shader_depth_of_field_get(GPUShader **prepare_sh, GPUShader **downsample_sh, GPUShader **blur1_sh, GPUShader **blur2_sh, GPUShader **resolve_sh)
char datatoc_workbench_common_lib_glsl[]
static GPUShader * workbench_shader_get_ex(WORKBENCH_PrivateData *wpd, bool transp, eWORKBENCH_DataType datatype, bool textured, bool tiled)