/* Blender V2.93 — util_guarded_allocator.h
 * (Doxygen page header from the extracted documentation of this file.) */
1 /*
2  * Copyright 2011-2015 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef __UTIL_GUARDED_ALLOCATOR_H__
18 #define __UTIL_GUARDED_ALLOCATOR_H__
19 
#include <cstddef>
#include <cstdlib>
#include <memory>
#include <new>
23 
24 #ifdef WITH_BLENDER_GUARDEDALLOC
25 # include "../../guardedalloc/MEM_guardedalloc.h"
26 #endif
27 
29 
/* Internal use only: statistics hooks (implemented in the corresponding
 * source file) that track the number of bytes allocated and freed through
 * this allocator. */
void util_guarded_mem_alloc(size_t n);
void util_guarded_mem_free(size_t n);

/* Guarded allocator for the use with STL.
 *
 * Satisfies the classic (C++03-style) Allocator requirements. Every
 * allocation and deallocation reports its byte size to the
 * util_guarded_mem_* hooks. The actual memory comes from Blender's guarded
 * allocator when WITH_BLENDER_GUARDEDALLOC is defined, and from plain
 * malloc()/free() otherwise. The allocator is stateless, so all instances
 * compare equal and copies/assignments are no-ops. */
template<typename T> class GuardedAllocator {
 public:
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef T *pointer;
  typedef const T *const_pointer;
  typedef T &reference;
  typedef const T &const_reference;
  typedef T value_type;

  GuardedAllocator()
  {
  }

  GuardedAllocator(const GuardedAllocator &)
  {
  }

  /* Allocate raw storage for n objects of type T.
   *
   * The requested size is recorded in the statistics before the n == 0
   * early-out, mirroring the unconditional recording in deallocate().
   * The hint parameter is accepted for interface compatibility and ignored.
   * Throws std::bad_alloc when the underlying allocation fails. */
  T *allocate(size_t n, const void *hint = 0)
  {
    (void)hint;
    size_t size = n * sizeof(T);
    util_guarded_mem_alloc(size);
    if (n == 0) {
      return NULL;
    }
    T *mem;
#ifdef WITH_BLENDER_GUARDEDALLOC
    /* C++ standard requires allocation functions to allocate memory suitably
     * aligned for any standard type. This is 16 bytes for 64 bit platform as
     * far as i concerned. We might over-align on 32bit here, but that should
     * be all safe actually.
     */
    mem = (T *)MEM_mallocN_aligned(size, 16, "Cycles Alloc");
#else
    mem = (T *)malloc(size);
#endif
    if (mem == NULL) {
      throw std::bad_alloc();
    }
    return mem;
  }

  /* Release storage previously obtained from allocate() and update the
   * usage statistics. A NULL pointer is tolerated; only the statistics
   * are adjusted in that case. */
  void deallocate(T *p, size_t n)
  {
    util_guarded_mem_free(n * sizeof(T));
    if (p != NULL) {
#ifdef WITH_BLENDER_GUARDEDALLOC
      MEM_freeN(p);
#else
      free(p);
#endif
    }
  }

  T *address(T &x) const
  {
    return &x;
  }

  const T *address(const T &x) const
  {
    return &x;
  }

  /* Stateless allocator: assignment has nothing to copy. */
  GuardedAllocator<T> &operator=(const GuardedAllocator &)
  {
    return *this;
  }

  size_t max_size() const
  {
    return size_t(-1);
  }

  /* Standard rebind mechanism so containers can allocate their internal
   * node types through the same allocator. */
  template<class U> struct rebind {
    typedef GuardedAllocator<U> other;
  };

  template<class U> GuardedAllocator(const GuardedAllocator<U> &)
  {
  }

  template<class U> GuardedAllocator &operator=(const GuardedAllocator<U> &)
  {
    return *this;
  }

  /* All instances are interchangeable, hence always equal. */
  inline bool operator==(GuardedAllocator const & /*other*/) const
  {
    return true;
  }
  inline bool operator!=(GuardedAllocator const &other) const
  {
    return !operator==(other);
  }

#ifdef _MSC_VER
  /* Welcome to the black magic here.
   *
   * The issue is that MSVC C++ allocates container proxy on any
   * vector initialization, including static vectors which don't
   * have any data yet. This leads to several issues:
   *
   * - Static objects initialization fiasco (global_stats from
   *   util_stats.h might not be initialized yet).
   * - If main() function changes allocator type (for example,
   *   this might happen with `blender --debug-memory`) nobody
   *   will know how to convert already allocated memory to a new
   *   guarded allocator.
   *
   * Here we work this around by making it so container proxy does
   * not use guarded allocation. A bit fragile, unfortunately.
   */
  template<> struct rebind<std::_Container_proxy> {
    typedef std::allocator<std::_Container_proxy> other;
  };

  operator std::allocator<std::_Container_proxy>() const
  {
    return std::allocator<std::_Container_proxy>();
  }
#endif
};
158 
/* Get memory usage and peak from the guarded STL allocator.
 * Implemented in the corresponding source file; values reflect the sizes
 * reported via util_guarded_mem_alloc()/util_guarded_mem_free(). */
size_t util_guarded_get_mem_used();
size_t util_guarded_get_mem_peak();
/* Call given function and keep track if it runs out of memory.
 *
 * If it does run out of memory, stop execution and set progress
 * to do a global cancel.
 *
 * It's not fully robust, but good enough to catch obvious issues
 * when running out of memory.
 *
 * `progress` must be a pointer to an object providing set_error(); `func`
 * is invoked with the remaining arguments and any std::bad_alloc it throws
 * is caught, logged to stderr, and reported through the progress object. */
#define MEM_GUARDED_CALL(progress, func, ...) \
  do { \
    try { \
      (func)(__VA_ARGS__); \
    } \
    catch (std::bad_alloc &) { \
      fprintf(stderr, "Error: run out of memory!\n"); \
      fflush(stderr); \
      (progress)->set_error("Out of memory"); \
    } \
  } while (false)
182 
184 
185 #endif /* __UTIL_GUARDED_ALLOCATOR_H__ */
/* Doxygen cross-reference index (extraction residue, not part of the header):
void BLI_kdtree_nd_() free(KDTree *tree)
Definition: kdtree_impl.h:116
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition: btDbvt.cpp:52
T * allocate(size_t n, const void *hint=0)
const T * address(const T &x) const
GuardedAllocator(const GuardedAllocator &)
bool operator==(GuardedAllocator const &) const
bool operator!=(GuardedAllocator const &other) const
GuardedAllocator & operator=(const GuardedAllocator< U > &)
GuardedAllocator(const GuardedAllocator< U > &)
GuardedAllocator< T > & operator=(const GuardedAllocator &)
void deallocate(T *p, size_t n)
T * address(T &x) const
#define CCL_NAMESPACE_END
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_mallocN_aligned)(size_t len, size_t alignment, const char *str)
Definition: mallocn.c:49
#define T
size_t util_guarded_get_mem_used()
size_t util_guarded_get_mem_peak()
void util_guarded_mem_free(size_t n)
CCL_NAMESPACE_BEGIN void util_guarded_mem_alloc(size_t n)
*/