Blender V2.93
BLI_linear_allocator.hh
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/** \file
 * \ingroup bli
 *
 * A `LinearAllocator` hands out consecutive chunks of memory from a few internally managed
 * buffers. Individual allocations cannot be freed; all memory is released at once when the
 * allocator is destructed.
 */

#pragma once

#include "BLI_string_ref.hh"
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"

namespace blender {

template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopyable, NonMovable {
 private:
  Allocator allocator_;
  Vector<void *> owned_buffers_;
  Vector<Span<char>> unused_borrowed_buffers_;

  uintptr_t current_begin_;
  uintptr_t current_end_;

#ifdef DEBUG
  int64_t debug_allocated_amount_ = 0;
#endif

  /* Buffers larger than this threshold are not packed together with smaller allocations, to
   * avoid wasting memory. */
  constexpr static inline int64_t large_buffer_threshold = 4096;

 public:
  LinearAllocator()
  {
    current_begin_ = 0;
    current_end_ = 0;
  }

  ~LinearAllocator()
  {
    for (void *ptr : owned_buffers_) {
      allocator_.deallocate(ptr);
    }
  }

  /**
   * Get a pointer to an uninitialized memory buffer with the given size and alignment. No
   * separate deallocation is needed; the buffer lives until the allocator is destructed.
   * The alignment has to be a power of 2.
   */
  void *allocate(const int64_t size, const int64_t alignment)
  {
    BLI_assert(size >= 0);
    BLI_assert(alignment >= 1);
    BLI_assert(is_power_of_2_i(alignment));

    const uintptr_t alignment_mask = alignment - 1;
    const uintptr_t potential_allocation_begin = (current_begin_ + alignment_mask) &
                                                 ~alignment_mask;
    const uintptr_t potential_allocation_end = potential_allocation_begin + size;

    if (potential_allocation_end <= current_end_) {
#ifdef DEBUG
      debug_allocated_amount_ += size;
#endif
      current_begin_ = potential_allocation_end;
      return reinterpret_cast<void *>(potential_allocation_begin);
    }
    if (size <= large_buffer_threshold) {
      this->allocate_new_buffer(size + alignment, alignment);
      return this->allocate(size, alignment);
    }
    return this->allocator_large_buffer(size, alignment);
  }

  /**
   * Allocate an uninitialized buffer that can hold a value of type T.
   */
  template<typename T> T *allocate()
  {
    return static_cast<T *>(this->allocate(sizeof(T), alignof(T)));
  }

  /**
   * Allocate an uninitialized array of the given size.
   */
  template<typename T> MutableSpan<T> allocate_array(const int64_t size)
  {
    T *array = static_cast<T *>(this->allocate(sizeof(T) * size, alignof(T)));
    return MutableSpan<T>(array, size);
  }

  /**
   * Construct an object of type T in memory provided by this allocator. The returned
   * #destruct_ptr calls the destructor when it goes out of scope, but does not free the memory.
   */
  template<typename T, typename... Args> destruct_ptr<T> construct(Args &&... args)
  {
    void *buffer = this->allocate(sizeof(T), alignof(T));
    T *value = new (buffer) T(std::forward<Args>(args)...);
    return destruct_ptr<T>(value);
  }

  /**
   * Copy the given array into memory provided by this allocator.
   */
  template<typename T> MutableSpan<T> construct_array_copy(Span<T> src)
  {
    if (src.is_empty()) {
      return {};
    }
    MutableSpan<T> dst = this->allocate_array<T>(src.size());
    uninitialized_copy_n(src.data(), src.size(), dst.data());
    return dst;
  }

  /**
   * Copy the given string into memory provided by this allocator. The returned string is
   * null-terminated.
   */
  StringRefNull copy_string(StringRef str)
  {
    const int64_t alloc_size = str.size() + 1;
    char *buffer = static_cast<char *>(this->allocate(alloc_size, 1));
    str.copy(buffer, alloc_size);
    return StringRefNull(static_cast<const char *>(buffer));
  }

  /**
   * Allocate an array of pointers together with a buffer of elements, where the i-th pointer
   * points at the i-th element.
   */
  MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
                                                          int64_t element_size,
                                                          int64_t element_alignment)
  {
    void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *));
    void *elements_buffer = this->allocate(element_amount * element_size, element_alignment);

    MutableSpan<void *> pointers((void **)pointer_buffer, element_amount);
    void *next_element_buffer = elements_buffer;
    for (int64_t i : IndexRange(element_amount)) {
      pointers[i] = next_element_buffer;
      next_element_buffer = POINTER_OFFSET(next_element_buffer, element_size);
    }

    return pointers;
  }

  /**
   * Construct `n` objects of type T and return pointers to them. The pointer array and the
   * elements are both allocated from this allocator.
   */
  template<typename T, typename... Args>
  Span<T *> construct_elements_and_pointer_array(int64_t n, Args &&... args)
  {
    MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array(
        n, sizeof(T), alignof(T));
    MutableSpan<T *> pointers = void_pointers.cast<T *>();

    for (int64_t i : IndexRange(n)) {
      new (static_cast<void *>(pointers[i])) T(std::forward<Args>(args)...);
    }

    return pointers;
  }

  /**
   * Tell the allocator to use up the given memory buffer before allocating new memory from the
   * system.
   */
  void provide_buffer(void *buffer, uint size)
  {
    unused_borrowed_buffers_.append(Span<char>(static_cast<char *>(buffer), size));
  }

  template<size_t Size, size_t Alignment>
  void provide_buffer(AlignedBuffer<Size, Alignment> &aligned_buffer)
  {
    this->provide_buffer(aligned_buffer.ptr(), Size);
  }

 private:
  void allocate_new_buffer(int64_t min_allocation_size, int64_t min_alignment)
  {
    for (int64_t i : unused_borrowed_buffers_.index_range()) {
      Span<char> buffer = unused_borrowed_buffers_[i];
      if (buffer.size() >= min_allocation_size) {
        unused_borrowed_buffers_.remove_and_reorder(i);
        current_begin_ = (uintptr_t)buffer.begin();
        current_end_ = (uintptr_t)buffer.end();
        return;
      }
    }

    /* Possibly allocate more bytes than necessary for the current allocation. This way more
     * small allocations can be packed together. Large buffers are allocated exactly to avoid
     * wasting too much memory. */
    int64_t size_in_bytes = min_allocation_size;
    if (size_in_bytes <= large_buffer_threshold) {
      /* Gradually grow the buffer size with each allocation (starting at 64 bytes and doubling),
       * up to the large buffer threshold. */
      const int grow_size = 1 << std::min<int>(owned_buffers_.size() + 6, 20);
      size_in_bytes = std::min(large_buffer_threshold,
                               std::max<int64_t>(size_in_bytes, grow_size));
    }

    void *buffer = allocator_.allocate(size_in_bytes, min_alignment, __func__);
    owned_buffers_.append(buffer);
    current_begin_ = (uintptr_t)buffer;
    current_end_ = current_begin_ + size_in_bytes;
  }

  void *allocator_large_buffer(const int64_t size, const int64_t alignment)
  {
    void *buffer = allocator_.allocate(size, alignment, __func__);
    owned_buffers_.append(buffer);
    return buffer;
  }
};

}  // namespace blender
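
The listing above is the header only. The sketch below is not part of the file; it shows one way the allocator is typically used, under the assumption that the header is on the include path. The struct and variable names (`IllustrativePoint`, `weights`, `point`, `name`) are illustrative only.

#include "BLI_linear_allocator.hh"

struct IllustrativePoint {
  float x, y, z;
};

static void linear_allocator_example()
{
  blender::LinearAllocator<> allocator;

  /* Uninitialized array of 10 floats; it stays valid until `allocator` is destructed. */
  blender::MutableSpan<float> weights = allocator.allocate_array<float>(10);
  weights.fill(0.0f);

  /* Construct a value in allocator-owned memory. The returned destruct_ptr runs the
   * destructor, while the memory itself is only reclaimed by the allocator. */
  blender::destruct_ptr<IllustrativePoint> point = allocator.construct<IllustrativePoint>();
  point->x = 1.0f;

  /* Copy a string into a null-terminated buffer owned by the allocator. */
  blender::StringRefNull name = allocator.copy_string("Suzanne");
  (void)name;

  /* All buffers are released here, when `allocator` goes out of scope. */
}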