Blender  V2.93
gpu_py_vertex_buffer.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  */
16 
24 #include <Python.h>
25 
26 #include "GPU_vertex_buffer.h"
27 
28 #include "BLI_math.h"
29 
30 #include "MEM_guardedalloc.h"
31 
32 #include "../generic/py_capi_utils.h"
33 #include "../generic/python_utildefines.h"
34 
35 #include "gpu_py_vertex_buffer.h" /* own include */
36 #include "gpu_py_vertex_format.h"
37 
38 /* -------------------------------------------------------------------- */
/* Dispatch on `attr->comp_type` and expand `PY_AS_NATIVE(dst_type, converter)`
 * for the matching fixed-width C type. Callers must #define PY_AS_NATIVE
 * before expanding this and #undef it afterwards. No Python error checking is
 * done by the converters; callers must run PyErr_Occurred(). */
#define PYGPU_AS_NATIVE_SWITCH(attr) \
  switch (attr->comp_type) { \
    case GPU_COMP_I8: { \
      PY_AS_NATIVE(int8_t, PyC_Long_AsI8); \
      break; \
    } \
    case GPU_COMP_U8: { \
      PY_AS_NATIVE(uint8_t, PyC_Long_AsU8); \
      break; \
    } \
    case GPU_COMP_I16: { \
      PY_AS_NATIVE(int16_t, PyC_Long_AsI16); \
      break; \
    } \
    case GPU_COMP_U16: { \
      PY_AS_NATIVE(uint16_t, PyC_Long_AsU16); \
      break; \
    } \
    case GPU_COMP_I32: { \
      PY_AS_NATIVE(int32_t, PyC_Long_AsI32); \
      break; \
    } \
    case GPU_COMP_U32: { \
      PY_AS_NATIVE(uint32_t, PyC_Long_AsU32); \
      break; \
    } \
    case GPU_COMP_F32: { \
      PY_AS_NATIVE(float, PyFloat_AsDouble); \
      break; \
    } \
    default: \
      BLI_assert_unreachable(); \
      break; \
  } \
  ((void)0)
77 
78 /* No error checking, callers must run PyErr_Occurred */
79 static void pygpu_fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
80 {
81 #define PY_AS_NATIVE(ty_dst, py_as_native) \
82  { \
83  ty_dst *data_dst = data_dst_void; \
84  *data_dst = py_as_native(py_src); \
85  } \
86  ((void)0)
87 
89 
90 #undef PY_AS_NATIVE
91 }
92 
93 /* No error checking, callers must run PyErr_Occurred */
94 static void pygpu_fill_format_sequence(void *data_dst_void,
95  PyObject *py_seq_fast,
96  const GPUVertAttr *attr)
97 {
98  const uint len = attr->comp_len;
99  PyObject **value_fast_items = PySequence_Fast_ITEMS(py_seq_fast);
100 
104 #define PY_AS_NATIVE(ty_dst, py_as_native) \
105  ty_dst *data_dst = data_dst_void; \
106  for (uint i = 0; i < len; i++) { \
107  data_dst[i] = py_as_native(value_fast_items[i]); \
108  } \
109  ((void)0)
110 
112 
113 #undef PY_AS_NATIVE
114 }
115 
116 #undef PYGPU_AS_NATIVE_SWITCH
117 #undef WARN_TYPE_LIMIT_PUSH
118 #undef WARN_TYPE_LIMIT_POP
119 
121  uint data_id,
122  PyObject *seq,
123  const char *error_prefix)
124 {
125  const char *exc_str_size_mismatch = "Expected a %s of size %d, got %u";
126 
127  bool ok = true;
128  const GPUVertAttr *attr = &GPU_vertbuf_get_format(vbo)->attrs[data_id];
129  uint vert_len = GPU_vertbuf_get_vertex_len(vbo);
130 
131  if (PyObject_CheckBuffer(seq)) {
132  Py_buffer pybuffer;
133 
134  if (PyObject_GetBuffer(seq, &pybuffer, PyBUF_STRIDES | PyBUF_ND) == -1) {
135  /* PyObject_GetBuffer raise a PyExc_BufferError */
136  return false;
137  }
138 
139  const uint comp_len = pybuffer.ndim == 1 ? 1 : (uint)pybuffer.shape[1];
140 
141  if (pybuffer.shape[0] != vert_len) {
142  PyErr_Format(
143  PyExc_ValueError, exc_str_size_mismatch, "sequence", vert_len, pybuffer.shape[0]);
144  ok = false;
145  }
146  else if (comp_len != attr->comp_len) {
147  PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "component", attr->comp_len, comp_len);
148  ok = false;
149  }
150  else {
151  GPU_vertbuf_attr_fill_stride(vbo, data_id, pybuffer.strides[0], pybuffer.buf);
152  }
153 
154  PyBuffer_Release(&pybuffer);
155  }
156  else {
157  GPUVertBufRaw data_step;
158  GPU_vertbuf_attr_get_raw_data(vbo, data_id, &data_step);
159 
160  PyObject *seq_fast = PySequence_Fast(seq, "Vertex buffer fill");
161  if (seq_fast == NULL) {
162  return false;
163  }
164 
165  const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);
166 
167  if (seq_len != vert_len) {
168  PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "sequence", vert_len, seq_len);
169  }
170 
171  PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);
172 
173  if (attr->comp_len == 1) {
174  for (uint i = 0; i < seq_len; i++) {
175  uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
176  PyObject *item = seq_items[i];
177  pygpu_fill_format_elem(data, item, attr);
178  }
179  }
180  else {
181  for (uint i = 0; i < seq_len; i++) {
182  uchar *data = (uchar *)GPU_vertbuf_raw_step(&data_step);
183  PyObject *seq_fast_item = PySequence_Fast(seq_items[i], error_prefix);
184 
185  if (seq_fast_item == NULL) {
186  ok = false;
187  goto finally;
188  }
189  if (PySequence_Fast_GET_SIZE(seq_fast_item) != attr->comp_len) {
190  PyErr_Format(PyExc_ValueError,
191  exc_str_size_mismatch,
192  "sequence",
193  attr->comp_len,
194  PySequence_Fast_GET_SIZE(seq_fast_item));
195  ok = false;
196  Py_DECREF(seq_fast_item);
197  goto finally;
198  }
199 
200  /* May trigger error, check below */
201  pygpu_fill_format_sequence(data, seq_fast_item, attr);
202  Py_DECREF(seq_fast_item);
203  }
204  }
205 
206  if (PyErr_Occurred()) {
207  ok = false;
208  }
209 
210  finally:
211 
212  Py_DECREF(seq_fast);
213  }
214  return ok;
215 }
216 
218  int id,
219  PyObject *py_seq_data,
220  const char *error_prefix)
221 {
222  if (id < 0 || id >= GPU_vertbuf_get_format(buf)->attr_len) {
223  PyErr_Format(PyExc_ValueError, "Format id %d out of range", id);
224  return 0;
225  }
226 
227  if (GPU_vertbuf_get_data(buf) == NULL) {
228  PyErr_SetString(PyExc_ValueError, "Can't fill, static buffer already in use");
229  return 0;
230  }
231 
232  if (!pygpu_vertbuf_fill_impl(buf, (uint)id, py_seq_data, error_prefix)) {
233  return 0;
234  }
235 
236  return 1;
237 }
238 
241 /* -------------------------------------------------------------------- */
245 static PyObject *pygpu_vertbuf__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
246 {
247  struct {
248  PyObject *py_fmt;
249  uint len;
250  } params;
251 
252  static const char *_keywords[] = {"format", "len", NULL};
253  static _PyArg_Parser _parser = {"O!I:GPUVertBuf.__new__", _keywords, 0};
254  if (!_PyArg_ParseTupleAndKeywordsFast(
255  args, kwds, &_parser, &BPyGPUVertFormat_Type, &params.py_fmt, &params.len)) {
256  return NULL;
257  }
258 
259  const GPUVertFormat *fmt = &((BPyGPUVertFormat *)params.py_fmt)->fmt;
261 
262  GPU_vertbuf_data_alloc(vbo, params.len);
263 
264  return BPyGPUVertBuf_CreatePyObject(vbo);
265 }
266 
/* Docstring for GPUVertBuf.attr_fill (indentation inside the literal
 * reconstructed; the rendering collapsed leading whitespace — verify against
 * upstream source). */
PyDoc_STRVAR(pygpu_vertbuf_attr_fill_doc,
             ".. method:: attr_fill(id, data)\n"
             "\n"
             "   Insert data into the buffer for a single attribute.\n"
             "\n"
             "   :param id: Either the name or the id of the attribute.\n"
             "   :type id: int or str\n"
             "   :param data: Sequence of data that should be stored in the buffer\n"
             "   :type data: sequence of floats, ints, vectors or matrices\n");
276 static PyObject *pygpu_vertbuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
277 {
278  PyObject *data;
279  PyObject *identifier;
280 
281  static const char *_keywords[] = {"id", "data", NULL};
282  static _PyArg_Parser _parser = {"OO:attr_fill", _keywords, 0};
283  if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &identifier, &data)) {
284  return NULL;
285  }
286 
287  int id;
288 
289  if (PyLong_Check(identifier)) {
290  id = PyLong_AsLong(identifier);
291  }
292  else if (PyUnicode_Check(identifier)) {
294  const char *name = PyUnicode_AsUTF8(identifier);
296  if (id == -1) {
297  PyErr_SetString(PyExc_ValueError, "Unknown attribute name");
298  return NULL;
299  }
300  }
301  else {
302  PyErr_SetString(PyExc_TypeError, "expected int or str type as identifier");
303  return NULL;
304  }
305 
306  if (!pygpu_vertbuf_fill(self->buf, id, data, "GPUVertBuf.attr_fill")) {
307  return NULL;
308  }
309 
310  Py_RETURN_NONE;
311 }
312 
/* Method table for the GPUVertBuf Python type. */
static struct PyMethodDef pygpu_vertbuf__tp_methods[] = {
    {"attr_fill",
     (PyCFunction)pygpu_vertbuf_attr_fill,
     METH_VARARGS | METH_KEYWORDS,
     pygpu_vertbuf_attr_fill_doc},
    {NULL, NULL, 0, NULL},
};
320 
322 {
324  Py_TYPE(self)->tp_free(self);
325 }
326 
327 PyDoc_STRVAR(pygpu_vertbuf__tp_doc,
328  ".. class:: GPUVertBuf(len, format)\n"
329  "\n"
330  " Contains a VBO.\n"
331  "\n"
332  " :param len: Amount of vertices that will fit into this buffer.\n"
333  " :type type: `int`\n"
334  " :param format: Vertex format.\n"
335  " :type buf: :class:`gpu.types.GPUVertFormat`\n");
/* Python type object for `gpu.types.GPUVertBuf`. */
PyTypeObject BPyGPUVertBuf_Type = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUVertBuf",
    .tp_basicsize = sizeof(BPyGPUVertBuf),
    .tp_dealloc = (destructor)pygpu_vertbuf__tp_dealloc,
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_doc = pygpu_vertbuf__tp_doc,
    .tp_methods = pygpu_vertbuf__tp_methods,
    .tp_new = pygpu_vertbuf__tp_new,
};
345 
348 /* -------------------------------------------------------------------- */
353 {
354  BPyGPUVertBuf *self;
355 
356  self = PyObject_New(BPyGPUVertBuf, &BPyGPUVertBuf_Type);
357  self->buf = buf;
358 
359  return (PyObject *)self;
360 }
361 
unsigned char uchar
Definition: BLI_sys_types.h:86
unsigned int uint
Definition: BLI_sys_types.h:83
#define UNUSED(x)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
const GPUVertFormat * GPU_vertbuf_get_format(const GPUVertBuf *verts)
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
#define GPU_vertbuf_create_with_format(format)
void GPU_vertbuf_discard(GPUVertBuf *)
struct GPUVertBuf GPUVertBuf
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len)
void GPU_vertbuf_attr_fill_stride(GPUVertBuf *, uint a_idx, uint stride, const void *data)
void * GPU_vertbuf_get_data(const GPUVertBuf *verts)
GPU_INLINE void * GPU_vertbuf_raw_step(GPUVertBufRaw *a)
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *, uint a_idx, GPUVertBufRaw *access)
int GPU_vertformat_attr_id_get(const GPUVertFormat *, const char *name)
Read Guarded memory(de)allocation.
PyObject * self
Definition: bpy_driver.c:185
PyTypeObject BPyGPUVertBuf_Type
static PyObject * pygpu_vertbuf__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
static int pygpu_vertbuf_fill(GPUVertBuf *buf, int id, PyObject *py_seq_data, const char *error_prefix)
static struct PyMethodDef pygpu_vertbuf__tp_methods[]
PyObject * BPyGPUVertBuf_CreatePyObject(GPUVertBuf *buf)
#define PYGPU_AS_NATIVE_SWITCH(attr)
static void pygpu_fill_format_sequence(void *data_dst_void, PyObject *py_seq_fast, const GPUVertAttr *attr)
static PyObject * pygpu_vertbuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, PyObject *kwds)
static void pygpu_fill_format_elem(void *data_dst_void, PyObject *py_src, const GPUVertAttr *attr)
PyDoc_STRVAR(pygpu_vertbuf_attr_fill_doc, ".. method:: attr_fill(id, data)\n" "\n" " Insert data into the buffer for a single attribute.\n" "\n" " :param id: Either the name or the id of the attribute.\n" " :type id: int or str\n" " :param data: Sequence of data that should be stored in the buffer\n" " :type data: sequence of floats, ints, vectors or matrices\n")
static void pygpu_vertbuf__tp_dealloc(BPyGPUVertBuf *self)
static bool pygpu_vertbuf_fill_impl(GPUVertBuf *vbo, uint data_id, PyObject *seq, const char *error_prefix)
struct BPyGPUVertBuf BPyGPUVertBuf
PyTypeObject BPyGPUVertFormat_Type
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
format
Definition: logImageCore.h:47
GPUVertAttr attrs[GPU_VERT_ATTR_MAX_LEN]
uint len