Blender V4.5
gpu_vertex_format.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2016 by Mike Erwin. All rights reserved.
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include "GPU_vertex_format.hh"
12#include "BLI_assert.h"
13#include "BLI_math_base.h"
14#include "GPU_capabilities.hh"
15
17#include "gpu_shader_private.hh"
19
20#include <cstddef>
21#include <cstring>
22
23#include "BLI_hash_mm2a.hh"
24#include "BLI_string.h"
25#include "BLI_utildefines.h"
26
27#define PACK_DEBUG 0
28
29#if PACK_DEBUG
30# include <stdio.h>
31#endif
32
33namespace blender::gpu {
34
35/* Used to combine legacy enums into new vertex attribute type. */
37 GPUVertFetchMode fetch_mode,
38 uint32_t component_len)
39{
40 switch (component_type) {
41 case GPU_COMP_I8: {
42 switch (fetch_mode) {
44 switch (component_len) {
45 case 1:
46 return VertAttrType::SNORM_8_DEPRECATED;
47 case 2:
48 return VertAttrType::SNORM_8_8_DEPRECATED;
49 case 3:
50 return VertAttrType::SNORM_8_8_8_DEPRECATED;
51 case 4:
52 return VertAttrType::SNORM_8_8_8_8;
53 }
54 break;
55 case GPU_FETCH_INT:
56 switch (component_len) {
57 case 1:
58 return VertAttrType::SINT_8_DEPRECATED;
59 case 2:
60 return VertAttrType::SINT_8_8_DEPRECATED;
61 case 3:
62 return VertAttrType::SINT_8_8_8_DEPRECATED;
63 case 4:
64 return VertAttrType::SINT_8_8_8_8;
65 }
66 break;
67 default:
68 break;
69 }
70 break;
71 }
72 case GPU_COMP_U8: {
73 switch (fetch_mode) {
75 switch (component_len) {
76 case 1:
77 return VertAttrType::UNORM_8_DEPRECATED;
78 case 2:
79 return VertAttrType::UNORM_8_8_DEPRECATED;
80 case 3:
81 return VertAttrType::UNORM_8_8_8_DEPRECATED;
82 case 4:
83 return VertAttrType::UNORM_8_8_8_8;
84 }
85 break;
86 case GPU_FETCH_INT:
87 switch (component_len) {
88 case 1:
89 return VertAttrType::UINT_8_DEPRECATED;
90 case 2:
91 return VertAttrType::UINT_8_8_DEPRECATED;
92 case 3:
93 return VertAttrType::UINT_8_8_8_DEPRECATED;
94 case 4:
95 return VertAttrType::UINT_8_8_8_8;
96 }
97 break;
98 default:
99 break;
100 }
101 break;
102 }
103 case GPU_COMP_I16: {
104 switch (fetch_mode) {
106 switch (component_len) {
107 case 1:
108 return VertAttrType::SNORM_16_DEPRECATED;
109 case 2:
110 return VertAttrType::SNORM_16_16;
111 case 3:
112 return VertAttrType::SNORM_16_16_16_DEPRECATED;
113 case 4:
114 return VertAttrType::SNORM_16_16_16_16;
115 }
116 break;
117 case GPU_FETCH_INT:
118 switch (component_len) {
119 case 1:
120 return VertAttrType::SINT_16_DEPRECATED;
121 case 2:
122 return VertAttrType::SINT_16_16;
123 case 3:
124 return VertAttrType::SINT_16_16_16_DEPRECATED;
125 case 4:
126 return VertAttrType::SINT_16_16_16_16;
127 }
128 break;
129 default:
130 break;
131 }
132 break;
133 }
134 case GPU_COMP_U16: {
135 switch (fetch_mode) {
137 switch (component_len) {
138 case 1:
139 return VertAttrType::UNORM_16_DEPRECATED;
140 case 2:
141 return VertAttrType::UNORM_16_16;
142 case 3:
143 return VertAttrType::UNORM_16_16_16_DEPRECATED;
144 case 4:
145 return VertAttrType::UNORM_16_16_16_16;
146 }
147 break;
148 case GPU_FETCH_INT:
149 switch (component_len) {
150 case 1:
151 return VertAttrType::UINT_16_DEPRECATED;
152 case 2:
153 return VertAttrType::UINT_16_16;
154 case 3:
155 return VertAttrType::UINT_16_16_16_DEPRECATED;
156 case 4:
157 return VertAttrType::UINT_16_16_16_16;
158 }
159 break;
160 default:
161 break;
162 }
163 break;
164 }
165 case GPU_COMP_I32: {
166 switch (fetch_mode) {
167 case GPU_FETCH_INT:
168 switch (component_len) {
169 case 1:
170 return VertAttrType::SINT_32;
171 case 2:
172 return VertAttrType::SINT_32_32;
173 case 3:
174 return VertAttrType::SINT_32_32_32;
175 case 4:
176 return VertAttrType::SINT_32_32_32_32;
177 }
178 break;
179 default:
180 break;
181 }
182 break;
183 }
184 case GPU_COMP_U32: {
185 switch (fetch_mode) {
186 case GPU_FETCH_INT:
187 switch (component_len) {
188 case 1:
189 return VertAttrType::UINT_32;
190 case 2:
191 return VertAttrType::UINT_32_32;
192 case 3:
193 return VertAttrType::UINT_32_32_32;
194 case 4:
195 return VertAttrType::UINT_32_32_32_32;
196 }
197 break;
198 default:
199 break;
200 }
201 break;
202 }
203 case GPU_COMP_F32: {
204 switch (fetch_mode) {
205 case GPU_FETCH_FLOAT:
206 switch (component_len) {
207 case 1:
208 return VertAttrType::SFLOAT_32;
209 case 2:
210 return VertAttrType::SFLOAT_32_32;
211 case 3:
212 return VertAttrType::SFLOAT_32_32_32;
213 case 4:
214 return VertAttrType::SFLOAT_32_32_32_32;
215 }
216 break;
217 default:
218 break;
219 }
220 break;
221 }
222 case GPU_COMP_I10: {
223 switch (fetch_mode) {
225 return VertAttrType::SNORM_10_10_10_2;
226 default:
227 break;
228 }
229 break;
230 }
231 case GPU_COMP_MAX:
232 break;
233 }
234
236};
237
239{
240 switch (attr_type) {
241 case VertAttrType::SNORM_8_8_8_8:
242 case VertAttrType::SNORM_16_16:
243 case VertAttrType::SNORM_16_16_16_16:
244 case VertAttrType::UNORM_8_8_8_8:
245 case VertAttrType::UNORM_16_16:
246 case VertAttrType::UNORM_16_16_16_16:
247 case VertAttrType::SNORM_10_10_10_2:
248 case VertAttrType::UNORM_10_10_10_2:
249 return true;
250 default:
251 return false;
252 }
253};
254
256{
257 switch (attr_type) {
258 case VertAttrType::SINT_TO_FLT_32:
259 case VertAttrType::SINT_TO_FLT_32_32:
260 case VertAttrType::SINT_TO_FLT_32_32_32:
261 case VertAttrType::SINT_TO_FLT_32_32_32_32:
262 return true;
263 default:
264 return false;
265 }
266};
267
269{
270 switch (attr_type) {
271 case VertAttrType::SFLOAT_32:
272 case VertAttrType::SFLOAT_32_32:
273 case VertAttrType::SFLOAT_32_32_32:
274 case VertAttrType::SFLOAT_32_32_32_32:
275 return true;
276 default:
277 return false;
278 }
279};
280
281} // namespace blender::gpu
282
284using namespace blender::gpu;
285using namespace blender::gpu::shader;
286
288{
289#ifndef NDEBUG
290 memset(format, 0, sizeof(GPUVertFormat));
291#else
292 format->attr_len = 0;
293 format->packed = false;
294 format->name_offset = 0;
295 format->name_len = 0;
296 format->deinterleaved = false;
297
298 for (uint i = 0; i < GPU_VERT_ATTR_MAX_LEN; i++) {
299 format->attrs[i].name_len = 0;
300 }
301#endif
302}
303
305{
306 /* copy regular struct fields */
307 memcpy(dest, &src, sizeof(GPUVertFormat));
308}
309
311{
312 BLI_assert(type <= GPU_COMP_F32); /* other types have irregular sizes (not bytes) */
313 const uint sizes[] = {1, 1, 2, 2, 4, 4, 4};
314 return sizes[type];
315}
316
317static uint attr_size(const GPUVertAttr *a)
318{
319 if (a->comp_type == GPU_COMP_I10) {
320 return 4; /* always packed as 10_10_10_2 */
321 }
322 return a->comp_len * comp_size(static_cast<GPUVertCompType>(a->comp_type));
323}
324
325static uint attr_align(const GPUVertAttr *a, uint minimum_stride)
326{
327 if (a->comp_type == GPU_COMP_I10) {
328 return 4; /* always packed as 10_10_10_2 */
329 }
330 uint c = comp_size(static_cast<GPUVertCompType>(a->comp_type));
331 if (a->comp_len == 3 && c <= 2) {
332 return 4 * c; /* AMD HW can't fetch these well, so pad it out (other vendors too?) */
333 }
334
335 /* Most fetches are ok if components are naturally aligned.
336 * However, in Metal,the minimum supported per-vertex stride is 4,
337 * so we must query the GPU and pad out the size accordingly. */
338 return max_ii(minimum_stride, c);
339}
340
342{
343 BLI_assert(format->packed && format->stride > 0);
344 return format->stride * vertex_len;
345}
346
348{
349 const uchar name_offset = format->name_offset;
350 /* Subtract one to make sure there's enough space for the last null terminator. */
351 const int64_t available = GPU_VERT_ATTR_NAMES_BUF_LEN - name_offset - 1;
352 const int64_t chars_to_copy = std::min(name.size(), available);
353
354 name.substr(0, available).copy_unsafe(format->names + name_offset);
355 format->name_offset += chars_to_copy + 1;
356
358 return name_offset;
359}
360
362 const StringRef name,
363 GPUVertCompType comp_type,
364 uint comp_len,
365 GPUVertFetchMode fetch_mode)
366{
367 BLI_assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
368 BLI_assert(format->attr_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
369 BLI_assert(!format->packed); /* packed means frozen/locked */
370 BLI_assert((comp_len >= 1 && comp_len <= 4) || comp_len == 8 || comp_len == 12 ||
371 comp_len == 16);
372
373 switch (comp_type) {
374 case GPU_COMP_F32:
375 /* float type can only kept as float */
376 BLI_assert(fetch_mode == GPU_FETCH_FLOAT);
377 break;
378 case GPU_COMP_I10:
379 /* 10_10_10 format intended for normals (XYZ) or colors (RGB)
380 * extra component packed.w can be manually set to { -2, -1, 0, 1 } */
381 BLI_assert(ELEM(comp_len, 3, 4));
382
383 /* Not strictly required, may relax later. */
385
386 break;
387 default:
388 /* integer types can be kept as int or converted/normalized to float */
389 BLI_assert(fetch_mode != GPU_FETCH_FLOAT);
390 /* only support float matrices (see Batch_update_program_bindings) */
391 BLI_assert(!ELEM(comp_len, 8, 12, 16));
392 }
393
394 format->name_len++; /* Multi-name support. */
395
396 const uint attr_id = format->attr_len++;
397 GPUVertAttr *attr = &format->attrs[attr_id];
398
399 attr->names[attr->name_len++] = copy_attr_name(format, name);
400 attr->comp_type = comp_type;
401 attr->comp_len = (comp_type == GPU_COMP_I10) ?
402 4 :
403 comp_len; /* system needs 10_10_10_2 to be 4 or BGRA */
404 attr->size = attr_size(attr);
405 attr->offset = 0; /* offsets & stride are calculated later (during pack) */
406 attr->fetch_mode = fetch_mode;
407 attr->format = vertex_format_combine(comp_type, fetch_mode, comp_len);
409
410 return attr_id;
411}
412
414{
415 GPUVertAttr *attr = &format->attrs[format->attr_len - 1];
416 BLI_assert(format->name_len < GPU_VERT_FORMAT_MAX_NAMES); /* there's room for more */
418 format->name_len++; /* Multi-name support. */
419 attr->names[attr->name_len++] = copy_attr_name(format, alias);
420}
421
423 const GPUVertCompType comp_type,
424 const uint comp_len,
425 const GPUVertFetchMode fetch_mode)
426{
428 GPU_vertformat_attr_add(&format, name, comp_type, comp_len, fetch_mode);
429 return format;
430}
431
433{
434 /* Sanity check. Maximum can be upgraded if needed. */
435 BLI_assert(load_count > 1 && load_count < 5);
436 /* We need a packed format because of format->stride. */
437 if (!format->packed) {
439 }
440
441 BLI_assert((format->name_len + 1) * load_count < GPU_VERT_FORMAT_MAX_NAMES);
442 BLI_assert(format->attr_len * load_count <= GPU_VERT_ATTR_MAX_LEN);
443 BLI_assert(format->name_offset * load_count < GPU_VERT_ATTR_NAMES_BUF_LEN);
444
445 const GPUVertAttr *attr = format->attrs;
446 int attr_len = format->attr_len;
447 for (int i = 0; i < attr_len; i++, attr++) {
448 const char *attr_name = GPU_vertformat_attr_name_get(format, attr, 0);
449 for (int j = 1; j < load_count; j++) {
450 char load_name[68 /* MAX_CUSTOMDATA_LAYER_NAME */];
451 SNPRINTF(load_name, "%s%d", attr_name, j);
452 GPUVertAttr *dst_attr = &format->attrs[format->attr_len++];
453 *dst_attr = *attr;
454
455 dst_attr->names[0] = copy_attr_name(format, load_name);
456 dst_attr->name_len = 1;
457 dst_attr->offset += format->stride * j;
458 }
459 }
460}
461
463{
464 for (int i = 0; i < format->attr_len; i++) {
465 const GPUVertAttr *attr = &format->attrs[i];
466 for (int j = 0; j < attr->name_len; j++) {
467 const char *attr_name = GPU_vertformat_attr_name_get(format, attr, j);
468 if (name == attr_name) {
469 return i;
470 }
471 }
472 }
473 return -1;
474}
475
477{
478 BLI_assert(attr_id > -1 && attr_id < format->attr_len);
479 GPUVertAttr *attr = &format->attrs[attr_id];
480 char *attr_name = (char *)GPU_vertformat_attr_name_get(format, attr, 0);
481 BLI_assert(strlen(attr_name) == strlen(new_name));
482 int i = 0;
483 while (attr_name[i] != '\0') {
484 attr_name[i] = new_name[i];
485 i++;
486 }
487 attr->name_len = 1;
488}
489
/* Encode 8 original bytes into 11 safe bytes. */
static void safe_bytes(char out[11], const char data[8])
{
  const char safe_chars[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";

  /* Load via memcpy: `data` has no alignment guarantee and the previous `*(uint64_t *)data`
   * cast was an unaligned, strict-aliasing-violating read (UB). */
  uint64_t in;
  memcpy(&in, data, sizeof(in));
  /* Base-62: 62^11 > 2^64, so 11 digits always cover the full 64-bit value. */
  for (int i = 0; i < 11; i++) {
    out[i] = safe_chars[in % 62lu];
    in /= 62lu;
  }
}
501
502void GPU_vertformat_safe_attr_name(const StringRef attr_name, char *r_safe_name, uint /*max_len*/)
503{
504 char data[8] = {0};
505 uint len = attr_name.size();
506
507 if (len > 8) {
508 /* Start with the first 4 chars of the name. */
509 memcpy(data, attr_name.data(), 4);
510 /* We use a hash to identify each data layer based on its name.
511 * NOTE: This is still prone to hash collision but the risks are very low. */
512 /* Start hashing after the first 2 chars. */
513 const StringRef to_hash = attr_name.drop_prefix(4);
514 *(uint *)&data[4] = BLI_hash_mm2(
515 reinterpret_cast<const uchar *>(to_hash.data()), to_hash.size(), 0);
516 }
517 else {
518 /* Copy the whole name. Collision is barely possible
519 * (hash would have to be equal to the last 4 bytes). */
520 memcpy(data, attr_name.data(), std::min<int>(8, len));
521 }
522 /* Convert to safe bytes characters. */
523 safe_bytes(r_safe_name, data);
524 /* End the string */
525 r_safe_name[11] = '\0';
526
528#if 0 /* For debugging */
529 printf("%s > %lx > %s\n", attr_name, *(uint64_t *)data, r_safe_name);
530#endif
531}
532
534{
535 /* Ideally we should change the stride and offset here. This would allow
536 * us to use GPU_vertbuf_attr_set / GPU_vertbuf_attr_fill. But since
537 * we use only 11 bits for attr->offset this limits the size of the
538 * buffer considerably. So instead we do the conversion when creating
539 * bindings in create_bindings(). */
540 format->deinterleaved = true;
541}
542
/* Number of filler bytes needed to advance `offset` to the next multiple of `alignment`. */
uint padding(uint offset, uint alignment)
{
  const uint remainder = offset % alignment;
  return (remainder != 0) ? alignment - remainder : 0;
}
548
#if PACK_DEBUG
/* Debug visualization of one packed attribute: '-' per padding byte, a letter per data byte. */
static void show_pack(uint a_idx, uint size, uint pad)
{
  const char glyph = 'A' + a_idx;
  for (uint n = 0; n < pad; n++) {
    putchar('-');
  }
  for (uint n = 0; n < size; n++) {
    putchar(glyph);
  }
}
#endif
561
/* Compute each attribute's byte offset and the final vertex stride for a sequential
 * (interleaved) layout, then mark the format as packed. Attributes are placed in declaration
 * order, each aligned per `attr_align()`; the vertex end is padded so the next vertex's first
 * attribute stays aligned. */
static void VertexFormat_pack_impl(GPUVertFormat *format, uint minimum_stride)
{
  /* First attribute always starts at offset 0. */
  GPUVertAttr *a0 = &format->attrs[0];
  a0->offset = 0;
  uint offset = a0->size;

#if PACK_DEBUG
  show_pack(0, a0->size, 0);
#endif

  for (uint a_idx = 1; a_idx < format->attr_len; a_idx++) {
    GPUVertAttr *a = &format->attrs[a_idx];
    /* Filler bytes between the previous attribute and this one. */
    uint mid_padding = padding(offset, attr_align(a, minimum_stride));
    offset += mid_padding;
    a->offset = offset;
    offset += a->size;

#if PACK_DEBUG
    show_pack(a_idx, a->size, mid_padding);
#endif
  }

  /* Trailing padding: align the end of the vertex to the first attribute's alignment. */
  uint end_padding = padding(offset, attr_align(a0, minimum_stride));

#if PACK_DEBUG
  show_pack(0, 0, end_padding);
  putchar('\n');
#endif
  format->stride = offset + end_padding;
  format->packed = true;
}
593
595{
596 /* Perform standard vertex packing, ensuring vertex format satisfies
597 * minimum stride requirements for vertex assembly. */
599}
600
602{
603 /* Validates packing for vertex formats used with texture buffers.
604 * In these cases, there must only be a single vertex attribute.
605 * This attribute should be tightly packed without padding, to ensure
606 * it aligns with the backing texture data format, skipping
607 * minimum per-vertex stride, which mandates 4-byte alignment in Metal.
608 * This additional alignment padding caused smaller data types, e.g. U16,
609 * to mis-align. */
610 for (int i = 0; i < format->attr_len; i++) {
611 /* The buffer texture setup uses the first attribute for type and size.
612 * Make sure all attributes use the same size. */
613 BLI_assert_msg(format->attrs[i].size == format->attrs[0].size,
614 "Texture buffer mode should only use a attributes with the same size.");
615 }
616
617 /* Pack vertex format without minimum stride, as this is not required by texture buffers. */
619}
620
621static uint component_size_get(const Type gpu_type)
622{
623 switch (gpu_type) {
624 case Type::float2_t:
625 case Type::int2_t:
626 case Type::uint2_t:
627 return 2;
628 case Type::float3_t:
629 case Type::int3_t:
630 case Type::uint3_t:
631 return 3;
632 case Type::float4_t:
633 case Type::int4_t:
634 case Type::uint4_t:
635 return 4;
636 case Type::float3x3_t:
637 return 12;
638 case Type::float4x4_t:
639 return 16;
640 default:
641 return 1;
642 }
643}
644
646 GPUVertCompType *r_comp_type,
647 GPUVertFetchMode *r_fetch_mode)
648{
649 switch (gpu_type) {
650 case Type::float_t:
651 case Type::float2_t:
652 case Type::float3_t:
653 case Type::float4_t:
654 case Type::float3x3_t:
655 case Type::float4x4_t:
656 *r_comp_type = GPU_COMP_F32;
657 *r_fetch_mode = GPU_FETCH_FLOAT;
658 break;
659 case Type::int_t:
660 case Type::int2_t:
661 case Type::int3_t:
662 case Type::int4_t:
663 *r_comp_type = GPU_COMP_I32;
664 *r_fetch_mode = GPU_FETCH_INT;
665 break;
666 case Type::uint_t:
667 case Type::uint2_t:
668 case Type::uint3_t:
669 case Type::uint4_t:
670 *r_comp_type = GPU_COMP_U32;
671 *r_fetch_mode = GPU_FETCH_INT;
672 break;
673 default:
674 BLI_assert(0);
675 }
676}
677
679{
681
683 int location_test = 0, attrs_added = 0;
684 while (attrs_added < attr_len) {
685 char name[256];
686 Type gpu_type;
687 if (!GPU_shader_get_attribute_info(shader, location_test++, name, (int *)&gpu_type)) {
688 continue;
689 }
690
691 GPUVertCompType comp_type;
692 GPUVertFetchMode fetch_mode;
693 recommended_fetch_mode_and_comp_type(gpu_type, &comp_type, &fetch_mode);
694
695 int comp_len = component_size_get(gpu_type);
696
697 GPU_vertformat_attr_add(format, name, comp_type, comp_len, fetch_mode);
698 attrs_added++;
699 }
700}
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
uint32_t BLI_hash_mm2(const unsigned char *data, size_t len, uint32_t seed)
Definition hash_mm2a.cc:100
MINLINE int max_ii(int a, int b)
#define SNPRINTF(dst, format,...)
Definition BLI_string.h:599
unsigned char uchar
unsigned int uint
#define ELEM(...)
int GPU_minimum_per_vertex_stride()
bool GPU_shader_get_attribute_info(const GPUShader *shader, int attr_location, char r_name[256], int *r_type)
uint GPU_shader_get_attribute_len(const GPUShader *shader)
static constexpr int GPU_VERT_ATTR_MAX_LEN
BLI_INLINE const char * GPU_vertformat_attr_name_get(const GPUVertFormat *format, const GPUVertAttr *attr, uint n_idx)
static constexpr int GPU_VERT_ATTR_MAX_NAMES
GPUVertFetchMode
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
@ GPU_FETCH_INT
static constexpr int GPU_VERT_FORMAT_MAX_NAMES
static constexpr int GPU_VERT_ATTR_NAMES_BUF_LEN
static constexpr int GPU_MAX_SAFE_ATTR_NAME
GPUVertCompType
@ GPU_COMP_U16
@ GPU_COMP_MAX
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I32
@ GPU_COMP_I8
@ GPU_COMP_U32
@ GPU_COMP_I16
@ GPU_COMP_U8
int pad[32 - sizeof(int)]
BMesh const char void * data
long long int int64_t
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
void copy_unsafe(char *dst) const
constexpr StringRef substr(int64_t start, int64_t size) const
constexpr int64_t size() const
constexpr const char * data() const
constexpr StringRef drop_prefix(int64_t n) const
struct @064345207361167251075330302113175271221317160336::@201157344026354305110036153026103256267276205234 attr_id
#define in
VecBase< float, D > constexpr mod(VecOp< float, D >, VecOp< float, D >) RET
#define out
#define printf(...)
static void safe_bytes(char out[11], const char data[8])
static void recommended_fetch_mode_and_comp_type(Type gpu_type, GPUVertCompType *r_comp_type, GPUVertFetchMode *r_fetch_mode)
static uchar copy_attr_name(GPUVertFormat *format, const StringRef name)
void GPU_vertformat_alias_add(GPUVertFormat *format, const StringRef alias)
static uint attr_align(const GPUVertAttr *a, uint minimum_stride)
static uint component_size_get(const Type gpu_type)
GPUVertFormat GPU_vertformat_from_attribute(const StringRef name, const GPUVertCompType comp_type, const uint comp_len, const GPUVertFetchMode fetch_mode)
uint vertex_buffer_size(const GPUVertFormat *format, uint vertex_len)
void GPU_vertformat_attr_rename(GPUVertFormat *format, int attr_id, const char *new_name)
uint padding(uint offset, uint alignment)
uint GPU_vertformat_attr_add(GPUVertFormat *format, const StringRef name, GPUVertCompType comp_type, uint comp_len, GPUVertFetchMode fetch_mode)
void GPU_vertformat_multiload_enable(GPUVertFormat *format, int load_count)
void GPU_vertformat_safe_attr_name(const StringRef attr_name, char *r_safe_name, uint)
void GPU_vertformat_copy(GPUVertFormat *dest, const GPUVertFormat &src)
void GPU_vertformat_clear(GPUVertFormat *format)
static uint attr_size(const GPUVertAttr *a)
static void VertexFormat_pack_impl(GPUVertFormat *format, uint minimum_stride)
void VertexFormat_texture_buffer_pack(GPUVertFormat *format)
void GPU_vertformat_deinterleave(GPUVertFormat *format)
void VertexFormat_pack(GPUVertFormat *format)
int GPU_vertformat_attr_id_get(const GPUVertFormat *format, const StringRef name)
static uint comp_size(GPUVertCompType type)
void GPU_vertformat_from_shader(GPUVertFormat *format, const GPUShader *shader)
format
static VertAttrType vertex_format_combine(GPUVertCompType component_type, GPUVertFetchMode fetch_mode, uint32_t component_len)
bool is_fetch_int_to_float(VertAttrType attr_type)
bool is_fetch_normalized(VertAttrType attr_type)
bool is_fetch_float(VertAttrType attr_type)
uchar names[GPU_VERT_ATTR_MAX_NAMES]
blender::gpu::VertAttrType format
i
Definition text_draw.cc:230
uint len