Line data Source code
1 : /**
2 : Copyright (c) 2020-2022 Roman Katuntsev <sbkarr@stappler.org>
3 : Copyright (c) 2023 Stappler LLC <admin@stappler.dev>
4 :
5 : Permission is hereby granted, free of charge, to any person obtaining a copy
6 : of this software and associated documentation files (the "Software"), to deal
7 : in the Software without restriction, including without limitation the rights
8 : to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 : copies of the Software, and to permit persons to whom the Software is
10 : furnished to do so, subject to the following conditions:
11 :
12 : The above copyright notice and this permission notice shall be included in
13 : all copies or substantial portions of the Software.
14 :
15 : THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 : IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 : FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 : AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 : LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 : OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 : THE SOFTWARE.
22 : **/
23 :
24 : #include "SPMemPoolInterface.h"
25 : #include "SPMemPoolStruct.h"
26 : #include "SPMemPoolApi.h"
27 :
namespace STAPPLER_VERSIONIZED stappler::mempool::base::pool {

// Forward declaration: defined elsewhere in the module; called from
// Pool::~Pool below to detach per-pool debug/tracking info.
SPUNUSED static void popPoolInfo(pool_t *pool);

}
33 :
namespace STAPPLER_VERSIONIZED stappler::mempool::custom {

// Process-wide allocator and root pool, created by the first
// initialize() call; s_global_init reference-counts paired
// initialize()/terminate() invocations.
SPUNUSED static Allocator *s_global_allocator = nullptr;
SPUNUSED static Pool *s_global_pool = nullptr;
SPUNUSED static std::atomic<int> s_global_init = 0;

// Diagnostic counter of live Pool objects (see Pool::getPoolsCount).
static std::atomic<size_t> s_nPools = 0;
41 :
42 22501437 : void *Pool::alloc(size_t &sizeInBytes) {
43 22501437 : std::unique_lock<Pool> lock(*this);
44 22501714 : if (sizeInBytes >= BlockThreshold) {
45 5517775 : return allocmngr.alloc(sizeInBytes, [] (void *p, size_t s) { return((Pool *)p)->palloc(s); });
46 : }
47 :
48 19502979 : allocmngr.increment_alloc(sizeInBytes);
49 19504803 : return palloc(sizeInBytes);
50 22503565 : }
51 :
52 3976885 : void Pool::free(void *ptr, size_t sizeInBytes) {
53 3976885 : if (sizeInBytes >= BlockThreshold) {
54 1456205 : std::unique_lock<Pool> lock(*this);
55 2427915 : allocmngr.free(ptr, sizeInBytes, [] (void *p, size_t s) { return((Pool *)p)->palloc_self(s); });
56 1455884 : }
57 3976578 : }
58 :
// Core bump allocator (APR pool style). Rounds `in_size` up to the
// default alignment and carves it out of the active MemNode; when the
// active node is full, it reuses the next node in the ring if it fits,
// otherwise asks the allocator for a fresh node. The ring is kept
// ordered by descending free_index so the roomiest node is checked
// first. Returns nullptr if the alignment round-up overflows or the
// allocator fails. Caller is responsible for locking on shared pools.
void *Pool::palloc(size_t in_size) {
	MemNode *active, *node;
	void *mem;
	size_t size, free_index;

	size = SPALIGN_DEFAULT(in_size);
	if (size < in_size) {
		// alignment round-up overflowed size_t
		return nullptr;
	}
	active = this->active;

	/* If the active node has enough bytes left, use it. */
	if (size <= active->free_space()) {
		mem = active->first_avail;
		active->first_avail += size;
		return mem;
	}

	node = active->next;
	if (size <= node->free_space()) {
		// the next node in the ring can satisfy the request: detach it
		node->remove();
	} else {
		// nothing in the ring fits: allocate a fresh node
		if ((node = allocator->alloc(size)) == NULL) {
			return nullptr;
		}
	}

	node->free_index = 0;

	mem = node->first_avail;
	node->first_avail += size;

	node->insert(active);

	this->active = node;

	// Re-rank the previously active node (local `active` still points at
	// it) by its remaining space in BOUNDARY_SIZE units...
	free_index = (SPALIGN(active->endp - active->first_avail + 1, BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

	active->free_index = (uint32_t)free_index;
	node = active->next;
	if (free_index >= node->free_index) {
		// ...already in order: nothing to move
		return mem;
	}

	// ...otherwise walk it down the ring until descending free_index
	// order is restored.
	do {
		node = node->next;
	} while (free_index < node->free_index);

	active->remove();
	active->insert(node);

	return mem;
}
112 :
113 971747 : void *Pool::palloc_self(size_t in_size) {
114 : void *mem;
115 971747 : auto size = SPALIGN_DEFAULT(in_size);
116 971842 : if (size < in_size) {
117 0 : return nullptr;
118 : }
119 :
120 : /* If the active node has enough bytes left, use it. */
121 971842 : if (size <= self->free_space()) {
122 867516 : mem = self->first_avail;
123 867516 : self->first_avail += size;
124 867516 : return mem;
125 : }
126 :
127 104305 : return palloc(in_size);
128 : }
129 :
130 21889 : void *Pool::calloc(size_t count, size_t eltsize) {
131 21889 : size_t s = count * eltsize;
132 21889 : auto ptr = alloc(s);
133 21892 : memset(ptr, 0, s);
134 21892 : return ptr;
135 : }
136 :
137 18564 : void *Pool::pmemdup(const void *m, size_t n) {
138 18564 : if (m == nullptr) {
139 0 : return nullptr;
140 : }
141 18564 : void *res = palloc(n);
142 18558 : memcpy(res, m, n);
143 18558 : return res;
144 : }
145 :
146 18564 : char *Pool::pstrdup(const char *s) {
147 18564 : if (s == nullptr) {
148 0 : return nullptr;
149 : }
150 18564 : size_t len = strlen(s) + 1;
151 18564 : char *res = (char *)pmemdup(s, len);
152 18555 : return res;
153 : }
154 :
// Reset the pool to its initial empty state without destroying it:
// runs pre-cleanups, destroys child pools, runs cleanups, then releases
// every MemNode except the pool's own self node, which becomes the
// active node again. All pool memory is reclaimed wholesale.
void Pool::clear() {
	// Pre-cleanups run with this pool pushed as the current context.
	stappler::memory::pool::push((stappler::memory::pool_t *)this);
	Cleanup::run(&this->pre_cleanups);
	stappler::memory::pool::pop();
	this->pre_cleanups = nullptr;

	// Each child's destructor unlinks it from this->child, so the
	// loop advances even though the condition looks static.
	while (this->child) {
		this->child->~Pool();
	}

	/* Run cleanups */
	stappler::memory::pool::push((stappler::memory::pool_t *)this);
	Cleanup::run(&this->cleanups);
	stappler::memory::pool::pop();
	this->cleanups = nullptr;
	this->free_cleanups = nullptr;
	this->user_data = nullptr;

	/* Find the node attached to the pool structure, reset it, make
	 * it the active node and free the rest of the nodes.
	 */
	MemNode *active = this->active = this->self;
	active->first_avail = this->self_first_avail;

	if (active->next == active) {
		// self is the only node in the ring — nothing else to free
		this->allocmngr.reset(this);
		return;
	}

	// Detach the rest of the ring and hand it back to the allocator
	// in a single call, then re-close the ring around self.
	*active->ref = nullptr;
	if (active->next) {
		this->allocator->free(active->next);
	}
	active->next = active;
	active->ref = &active->next;
	this->allocmngr.reset(this);
}
192 :
193 6598 : Pool *Pool::create(Allocator *alloc, PoolFlags flags) {
194 6598 : Allocator *allocator = alloc;
195 6598 : if (allocator == nullptr) {
196 6299 : allocator = new Allocator((flags & PoolFlags::ThreadSafeAllocator) != PoolFlags::None);
197 : }
198 :
199 6599 : auto node = allocator->alloc(MIN_ALLOC - SIZEOF_MEMNODE);
200 6599 : node->next = node;
201 6599 : node->ref = &node->next;
202 :
203 6599 : Pool *pool = new (node->first_avail) Pool(allocator, node, (flags & PoolFlags::ThreadSafePool) == PoolFlags::ThreadSafePool);
204 6600 : node->first_avail = pool->self_first_avail = (uint8_t *)pool + SIZEOF_POOL;
205 :
206 6600 : if (!alloc) {
207 6300 : allocator->owner = pool;
208 : }
209 :
210 6600 : return pool;
211 : }
212 :
// Destroy a pool produced by Pool::create()/make_child(). The Pool
// object lives inside its own first MemNode, so an explicit destructor
// call (not `delete`) is required; ~Pool returns the nodes — including
// the one holding the object — to the allocator.
void Pool::destroy(Pool *pool) {
	// SP_POOL_LOG("destroy %p %s", pool, pool->tag);
	pool->~Pool();
}
217 :
218 0 : size_t Pool::getPoolsCount() {
219 0 : return s_nPools.load();
220 : }
221 :
// Default constructor: wires the allocation manager back to this pool
// and bumps the global live-pool counter.
Pool::Pool() : allocmngr{this} { ++ s_nPools; }
223 :
224 6599 : Pool::Pool(Allocator *alloc, MemNode *node, bool threadSafe)
225 6599 : : allocator(alloc), active(node), self(node), allocmngr{this}, threadSafe(threadSafe) {
226 6599 : ++ s_nPools;
227 6600 : }
228 :
// Child-pool constructor: links this pool at the head of the parent's
// intrusive child list under the allocator's lock. `ref` always points
// at whichever pointer references this pool (parent->child or a
// sibling's `sibling` field), enabling O(1) unlink in ~Pool.
Pool::Pool(Pool *p, Allocator *alloc, MemNode *node, bool threadSafe)
: allocator(alloc), active(node), self(node), allocmngr{this}, threadSafe(threadSafe) {
	if ((parent = p) != nullptr) {
		std::unique_lock<Allocator> lock(*allocator);
		sibling = parent->child;
		if (sibling != nullptr) {
			// former list head is now referenced by our sibling field
			sibling->ref = &sibling;
		}

		parent->child = this;
		ref = &parent->child;
	}
	++ s_nPools;
}
243 :
// Destructor: runs pre-cleanups, recursively destroys children, runs
// cleanups, unlinks this pool from its parent's child list, then
// returns all MemNodes — including the node that holds this Pool
// object — to the allocator. If the pool owns its allocator, the
// allocator is deleted last.
Pool::~Pool() {
	stappler::memory::pool::push((stappler::memory::pool_t *)this);
	Cleanup::run(&this->pre_cleanups);
	stappler::memory::pool::pop();
	this->pre_cleanups = nullptr;

	// Each child's destructor unlinks it, so the loop advances.
	while (this->child) {
		this->child->~Pool();
	}

	memory::pool::popPoolInfo((memory::pool_t *)this);

	stappler::memory::pool::push((stappler::memory::pool_t *)this);
	Cleanup::run(&this->cleanups);
	stappler::memory::pool::pop();
	this->cleanups = nullptr;
	this->free_cleanups = nullptr;
	this->user_data = nullptr;

	/* Remove the pool from the parents child list */
	if (this->parent) {
		std::unique_lock<Allocator> lock(*allocator);
		auto sib = this->sibling;
		*this->ref = this->sibling;
		if (sib != nullptr) {
			sib->ref = this->ref;
		}
	}

	// Copy the allocator pointer into a local: freeing `active` releases
	// the memory this Pool object lives in, so members must not be
	// touched after the free() below.
	Allocator *allocator = this->allocator;
	MemNode *active = this->self;
	*active->ref = NULL;

	allocator->free(active);
	if (allocator->owner == this) {
		delete allocator;
	}

	-- s_nPools;
}
284 :
285 :
286 188903 : Pool *Pool::make_child() {
287 188903 : return make_child(allocator);
288 : }
289 :
290 188897 : Pool *Pool::make_child(Allocator *allocator) {
291 188897 : Pool *parent = this;
292 188897 : if (allocator == nullptr) {
293 0 : allocator = parent->allocator;
294 : }
295 :
296 : MemNode *node;
297 188897 : if ((node = allocator->alloc(MIN_ALLOC - SIZEOF_MEMNODE)) == nullptr) {
298 0 : return nullptr;
299 : }
300 :
301 188828 : node->next = node;
302 188828 : node->ref = &node->next;
303 :
304 188828 : Pool *pool = new (node->first_avail) Pool(parent, allocator, node, threadSafe);
305 188861 : node->first_avail = pool->self_first_avail = (uint8_t *)pool + SIZEOF_POOL;
306 188861 : return pool;
307 : }
308 :
309 17960 : void Pool::cleanup_register(const void *data, Cleanup::Callback cb) {
310 : Cleanup *c;
311 :
312 17960 : if (free_cleanups) {
313 : /* reuse a cleanup structure */
314 0 : c = free_cleanups;
315 0 : free_cleanups = c->next;
316 : } else {
317 17960 : c = (Cleanup *)palloc(sizeof(Cleanup));
318 : }
319 :
320 17961 : c->data = data;
321 17961 : c->fn = cb;
322 17961 : c->next = cleanups;
323 17961 : cleanups = c;
324 17961 : }
325 :
326 304 : void Pool::pre_cleanup_register(const void *data, Cleanup::Callback cb) {
327 : Cleanup *c;
328 :
329 304 : if (free_cleanups) {
330 : /* reuse a cleanup structure */
331 0 : c = free_cleanups;
332 0 : free_cleanups = c->next;
333 : } else {
334 304 : c = (Cleanup *)palloc(sizeof(Cleanup));
335 : }
336 304 : c->data = data;
337 304 : c->fn = cb;
338 304 : c->next = pre_cleanups;
339 304 : pre_cleanups = c;
340 304 : }
341 :
342 0 : void Pool::cleanup_kill(void *data, Cleanup::Callback cb) {
343 : Cleanup *c, **lastp;
344 :
345 0 : c = cleanups;
346 0 : lastp = &cleanups;
347 0 : while (c) {
348 0 : if (c->data == data && c->fn == cb) {
349 0 : *lastp = c->next;
350 : /* move to freelist */
351 0 : c->next = free_cleanups;
352 0 : free_cleanups = c;
353 0 : break;
354 : }
355 :
356 0 : lastp = &c->next;
357 0 : c = c->next;
358 : }
359 :
360 : /* Remove any pre-cleanup as well */
361 0 : c = pre_cleanups;
362 0 : lastp = &pre_cleanups;
363 0 : while (c) {
364 0 : if (c->data == data && c->fn == cb) {
365 0 : *lastp = c->next;
366 : /* move to freelist */
367 0 : c->next = free_cleanups;
368 0 : free_cleanups = c;
369 0 : break;
370 : }
371 :
372 0 : lastp = &c->next;
373 0 : c = c->next;
374 : }
375 0 : }
376 :
377 0 : void Pool::cleanup_run(void *data, Cleanup::Callback cb) {
378 0 : cleanup_kill(data, cb);
379 0 : (*cb)(data);
380 0 : }
381 :
382 24183 : Status Pool::userdata_set(const void *data, const char *key, Cleanup::Callback cleanup) {
383 24183 : if (user_data == nullptr) {
384 6146 : user_data = HashTable::make(this);
385 : }
386 :
387 24180 : if (user_data->get(key, -1) == NULL) {
388 18564 : char *new_key = pstrdup(key);
389 18555 : user_data->set(new_key, -1, data);
390 : } else {
391 5624 : user_data->set(key, -1, data);
392 : }
393 :
394 24184 : if (cleanup) {
395 15015 : cleanup_register(data, cleanup);
396 : }
397 24181 : return SUCCESS;
398 : }
399 :
400 0 : Status Pool::userdata_setn(const void *data, const char *key, Cleanup::Callback cleanup) {
401 0 : if (user_data == nullptr) {
402 0 : user_data = HashTable::make(this);
403 : }
404 :
405 0 : user_data->set(key, -1, data);
406 :
407 0 : if (cleanup) {
408 0 : cleanup_register(data, cleanup);
409 : }
410 0 : return SUCCESS;
411 : }
412 :
413 82874 : Status Pool::userdata_get(void **data, const char *key) {
414 82874 : if (user_data == nullptr) {
415 574 : *data = nullptr;
416 : } else {
417 82300 : *data = user_data->get(key, -1);
418 : }
419 82874 : return SUCCESS;
420 : }
421 :
422 108203 : Status Pool::userdata_get(void **data, const char *key, size_t klen) {
423 108203 : if (user_data == nullptr) {
424 6398 : *data = nullptr;
425 : } else {
426 101805 : *data = user_data->get(key, klen);
427 : }
428 108215 : return SUCCESS;
429 : }
430 :
431 23954901 : void Pool::lock() {
432 23954901 : if (threadSafe && allocator->mutex) {
433 0 : allocator->mutex->lock();
434 : }
435 23954901 : }
436 :
437 23954099 : void Pool::unlock() {
438 23954099 : if (threadSafe && allocator->mutex) {
439 0 : allocator->mutex->unlock();
440 : }
441 23954099 : }
442 :
443 : struct StaticHolder {
444 25 : StaticHolder() {
445 25 : initialize();
446 25 : }
447 :
448 25 : ~StaticHolder() {
449 25 : terminate();
450 25 : }
451 : } s_global_holder;
452 :
// Reference-counted global initialization: the first caller creates the
// global allocator and root pool (and, when not layered over APR,
// pushes the pool as the default context). Each call must be paired
// with a terminate() call.
void initialize() {
	if (s_global_init.fetch_add(1) == 0) {
		if (!s_global_allocator) {
			s_global_allocator = new Allocator();
		}
		s_global_pool = Pool::create(s_global_allocator);
		s_global_pool->allocmngr.name = "Global";
#ifndef MODULE_STAPPLER_APR
		stappler::memory::pool::push(s_global_pool);
#endif
	}
}
465 :
// Counterpart of initialize(): the last caller tears down the global
// pool and allocator (and pops the default context when not on APR).
void terminate() {
	if (s_global_init.fetch_sub(1) == 1) {
#ifndef MODULE_STAPPLER_APR
		stappler::memory::pool::pop();
#endif
		Pool::destroy(s_global_pool);
		delete s_global_allocator;
	}
}
475 :
476 188902 : Pool *create(Pool *p) {
477 188902 : if (p) {
478 143660 : return p->make_child();
479 : } else {
480 45242 : return s_global_pool->make_child();
481 : }
482 : }
483 :
// Free-function wrapper over Pool::destroy() (explicit destructor
// invocation — the Pool lives inside its own first MemNode).
void destroy(Pool *p) {
	Pool::destroy(p);
}
487 :
488 : }
|