Line data Source code
1 : /**
2 : Copyright (c) 2020-2022 Roman Katuntsev <sbkarr@stappler.org>
3 : Copyright (c) 2023 Stappler LLC <admin@stappler.dev>
4 :
5 : Permission is hereby granted, free of charge, to any person obtaining a copy
6 : of this software and associated documentation files (the "Software"), to deal
7 : in the Software without restriction, including without limitation the rights
8 : to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 : copies of the Software, and to permit persons to whom the Software is
10 : furnished to do so, subject to the following conditions:
11 :
12 : The above copyright notice and this permission notice shall be included in
13 : all copies or substantial portions of the Software.
14 :
15 : THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 : IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 : FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 : AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 : LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 : OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 : THE SOFTWARE.
22 : **/
23 :
24 : #include "SPMemPoolStruct.h"
25 :
26 : #if LINUX
27 : #include "SPPlatformUnistd.h"
28 :
29 : namespace STAPPLER_VERSIONIZED stappler::mempool::base {
30 :
31 : static std::atomic<size_t> s_mappedRegions = 0;
32 :
33 0 : size_t get_mapped_regions_count() {
34 0 : return s_mappedRegions.load();
35 : }
36 :
37 0 : void *sp_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
38 0 : auto ret = ::mmap(addr, length, prot, flags, fd, offset);
39 0 : if (ret && ret != MAP_FAILED) {
40 0 : ++ s_mappedRegions;
41 0 : return ret;
42 : }
43 0 : return ret;
44 : }
45 :
46 0 : int sp_munmap(void *addr, size_t length) {
47 0 : auto ret = ::munmap(addr, length);
48 0 : if (ret == 0) {
49 0 : -- s_mappedRegions;
50 : }
51 0 : return ret;
52 : }
53 :
54 : }
55 : #else
56 :
57 : namespace STAPPLER_VERSIONIZED stappler::mempool::base {
58 :
// Non-Linux fallback: mmap tracking is unavailable, so there are never
// any tracked regions to report.
size_t get_mapped_regions_count() { return 0; }
62 :
// Non-Linux fallback: memory mapping is not supported on this platform;
// always reports failure by returning nullptr.
void *sp_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
	return nullptr;
}
66 :
// Non-Linux fallback: unmapping is not supported; always reports failure.
int sp_munmap(void *addr, size_t length) {
	return -1;
}
70 :
71 : }
72 :
73 : #endif
74 :
75 : namespace STAPPLER_VERSIONIZED stappler::mempool::custom {
76 :
77 : static std::atomic<size_t> s_nAllocators = 0;
78 :
79 : #if LINUX
80 0 : static uint64_t allocator_mmap_realloc(int filedes, void *ptr, uint64_t idx, uint64_t required) {
81 0 : auto oldSize = idx * BOUNDARY_SIZE;
82 0 : auto newSize = idx * 2 * BOUNDARY_SIZE;
83 0 : if (newSize / BOUNDARY_SIZE < required) {
84 0 : newSize = required * BOUNDARY_SIZE;
85 : }
86 :
87 0 : if (newSize > ALLOCATOR_MMAP_RESERVED) {
88 0 : perror("ALLOCATOR_MMAP_RESERVED exceeded");
89 0 : return 0;
90 : }
91 :
92 0 : if (lseek(filedes, newSize - 1, SEEK_SET) == -1) {
93 0 : close(filedes);
94 0 : perror("Error calling lseek() to 'stretch' the file");
95 0 : return 0;
96 : }
97 :
98 0 : if (write(filedes, "", 1) == -1) {
99 0 : close(filedes);
100 0 : perror("Error writing last byte of the file");
101 0 : return 0;
102 : }
103 :
104 0 : base::sp_munmap((char *)ptr + oldSize, newSize - oldSize);
105 0 : auto err = mremap(ptr, oldSize, newSize, 0);
106 0 : if (err != MAP_FAILED) {
107 0 : return newSize / BOUNDARY_SIZE;
108 : }
109 0 : auto memerr = errno;
110 0 : switch (memerr) {
111 0 : case EAGAIN: perror("EAGAIN"); break;
112 0 : case EFAULT: perror("EFAULT"); break;
113 0 : case EINVAL: perror("EINVAL"); break;
114 0 : case ENOMEM: perror("ENOMEM"); break;
115 0 : default: break;
116 : }
117 0 : return 0;
118 : }
119 :
120 0 : bool Allocator::run_mmap(uint64_t idx) {
121 0 : if (idx == 0) {
122 0 : idx = 1_KiB;
123 : }
124 :
125 0 : std::unique_lock<Allocator> lock(*this);
126 :
127 0 : if (mmapdes != -1) {
128 0 : return true;
129 : }
130 :
131 0 : char nameBuff[256] = { 0 };
132 0 : snprintf(nameBuff, 255, "/tmp/stappler.mmap.%d.%p.XXXXXX", getpid(), (void *)this);
133 :
134 0 : mmapdes = mkstemp(nameBuff);
135 0 : unlink(nameBuff);
136 :
137 0 : size_t size = BOUNDARY_SIZE * idx;
138 :
139 0 : if (lseek(mmapdes, size - 1, SEEK_SET) == -1) {
140 0 : close(mmapdes);
141 0 : perror("Error calling lseek() to 'stretch' the file");
142 0 : return false;
143 : }
144 :
145 0 : if (write(mmapdes, "", 1) == -1) {
146 0 : close(mmapdes);
147 0 : perror("Error writing last byte of the file");
148 0 : return false;
149 : }
150 :
151 0 : void *reserveMem = base::sp_mmap(NULL, ALLOCATOR_MMAP_RESERVED, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
152 :
153 : // Now the file is ready to be mmapped.
154 0 : void *map = base::sp_mmap(reserveMem, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED | MAP_NORESERVE, mmapdes, 0);
155 0 : if (map == MAP_FAILED) {
156 0 : close(mmapdes);
157 0 : perror("Error mmapping the file");
158 0 : return false;
159 : }
160 :
161 0 : mmapPtr = map;
162 0 : mmapMax = idx;
163 0 : return true;
164 0 : }
165 :
166 : #endif
167 :
168 0 : size_t Allocator::getAllocatorsCount() {
169 0 : return s_nAllocators.load();
170 : }
171 :
172 6599 : Allocator::Allocator(bool threadSafe) {
173 6600 : ++ s_nAllocators;
174 6599 : buf.fill(nullptr);
175 :
176 6598 : if (threadSafe) {
177 6348 : mutex = new AllocMutex;
178 : }
179 6599 : }
180 :
181 12433 : Allocator::~Allocator() {
182 : MemNode *node, **ref;
183 :
184 6192 : if (mutex) {
185 5950 : delete mutex;
186 : }
187 :
188 : #if LINUX
189 6212 : if (mmapPtr) {
190 0 : base::sp_munmap(mmapPtr, mmapMax * BOUNDARY_SIZE);
191 0 : close(mmapdes);
192 0 : return;
193 : }
194 : #endif
195 :
196 129354 : for (uint32_t index = 0; index < MAX_INDEX; index++) {
197 123170 : ref = &buf[index];
198 286396 : while ((node = *ref) != nullptr) {
199 163254 : *ref = node->next;
200 163254 : allocated -= node->endp - (uint8_t *)node;
201 163277 : ::free(node);
202 : }
203 : }
204 :
205 6184 : -- s_nAllocators;
206 6241 : }
207 :
// Sets the upper bound (converted to BOUNDARY_SIZE blocks) on memory the
// allocator keeps cached in its free lists; free() releases anything
// above this budget back to the system.
void Allocator::set_max(uint32_t size) {
	std::unique_lock<Allocator> lock(*this);

	uint32_t max_free_index = uint32_t(SPALIGN(size, BOUNDARY_SIZE) >> BOUNDARY_INDEX);
	// Rebase `current` against the new limit. Statement order matters:
	// the unsigned arithmetic may wrap when the limit shrinks, and the
	// clamp below restores a sane value afterwards.
	current += max_free_index;
	current -= max;
	max = max_free_index;
	if (current > max) {
		current = max;
	}
}
219 :
// Returns a MemNode with at least `in_size` usable bytes.
// Lookup order: exact-size free-list buckets (buf[index..last]), then
// the oversize sink (buf[0]), then a fresh allocation - from the mmap
// region when enabled (Linux), otherwise via malloc().
// Returns nullptr on size overflow or allocation failure.
MemNode *Allocator::alloc(uint32_t in_size) {
	std::unique_lock<Allocator> lock;

	// Round the request (including the node header) up to BOUNDARY_SIZE
	uint32_t size = uint32_t(SPALIGN(in_size + SIZEOF_MEMNODE, BOUNDARY_SIZE));
	if (size < in_size) {
		return nullptr; // arithmetic overflow in the size computation
	}
	if (size < MIN_ALLOC) {
		size = MIN_ALLOC;
	}

	// Bucket index: number of BOUNDARY_SIZE blocks minus one
	size_t index = (size >> BOUNDARY_INDEX) - 1;
	if (index > maxOf<uint32_t>()) {
		return nullptr; // must fit MemNode::index (uint32_t)
	}

	/* First see if there are any nodes in the area we know
	 * our node will fit into.
	 */
	lock = std::unique_lock<Allocator>(*this);
	if (index <= last) {
		/* Walk the free list to see if there are
		 * any nodes on it of the requested size
		 */
		uint32_t max_index = last;
		MemNode **ref = &buf[index];
		uint32_t i = index;
		// Accept the first non-empty bucket at or above the needed size
		while (*ref == nullptr && i < max_index) {
			ref++;
			i++;
		}

		MemNode *node = nullptr;
		if ((node = *ref) != nullptr) {
			/* If we have found a node and it doesn't have any
			 * nodes waiting in line behind it _and_ we are on
			 * the highest available index, find the new highest
			 * available index
			 */
			if ((*ref = node->next) == nullptr && i >= max_index) {
				do {
					ref--;
					max_index--;
				}
				while (*ref == NULL && max_index > 0);

				last = max_index;
			}

			// Blocks leaving the free list are credited back to the budget
			current += node->index + 1;
			if (current > max) {
				current = max;
			}

			node->next = nullptr;
			node->first_avail = (uint8_t *)node + SIZEOF_MEMNODE;

			return node;
		}
	} else if (buf[0]) {
		/* If we found nothing, seek the sink (at index 0), if
		 * it is not empty.
		 */

		/* Walk the free list to see if there are
		 * any nodes on it of the requested size
		 */
		MemNode *node = nullptr;
		MemNode **ref = &buf[0];
		// Sink is kept unsorted; take the first node that is large enough
		while ((node = *ref) != nullptr && index > node->index) {
			ref = &node->next;
		}

		if (node) {
			*ref = node->next;

			current += node->index + 1;
			if (current > max) {
				current = max;
			}

			node->next = nullptr;
			node->first_avail = (uint8_t *)node + SIZEOF_MEMNODE;

			return node;
		}
	}

	/* If we haven't got a suitable node, malloc a new one
	 * and initialize it.
	 */
	MemNode *node = nullptr;

#if LINUX
	if (mmapPtr) {
		// Carve the node out of the mmap region, growing it when needed
		if (mmapCurrent + (index + 1) > mmapMax) {
			auto newMax = allocator_mmap_realloc(mmapdes, mmapPtr, mmapMax, mmapCurrent + index + 1);
			if (!newMax) {
				return nullptr;
			} else {
				mmapMax = newMax;
			}
		}

		node = (MemNode *) ((char *)mmapPtr + mmapCurrent * BOUNDARY_SIZE);
		mmapCurrent += index + 1;

		if (lock.owns_lock()) {
			lock.unlock();
		}
	} else {
		// malloc() does not need the allocator lock held
		if (lock.owns_lock()) {
			lock.unlock();
		}

		if ((node = (MemNode *)malloc(size)) == nullptr) {
			return nullptr;
		}
		allocated += size;
		if (allocationTracker) {
			allocationTracker(node, size);
		}
	}
#else
	if (lock.owns_lock()) {
		lock.unlock();
	}

	if ((node = (MemNode *)malloc(size)) == nullptr) {
		return nullptr;
	}
	allocated += size;
	if (allocationTracker) {
		allocationTracker(node, size);
	}
#endif

	node->next = nullptr;
	node->index = (uint32_t)index;
	node->first_avail = (uint8_t *)node + SIZEOF_MEMNODE;
	node->endp = (uint8_t *)node + size;

	return node;
}
364 :
365 197590 : void Allocator::free(MemNode *node) {
366 197590 : MemNode *next, *freelist = nullptr;
367 :
368 197590 : std::unique_lock<Allocator> lock(*this);
369 :
370 197604 : uint32_t max_index = last;
371 197604 : uint32_t max_free_index = max;
372 197604 : uint32_t current_free_index = current;
373 :
374 : /* Walk the list of submitted nodes and free them one by one,
375 : * shoving them in the right 'size' buckets as we go.
376 : */
377 : do {
378 830487 : next = node->next;
379 830487 : uint32_t index = node->index;
380 :
381 830487 : if (max_free_index != ALLOCATOR_MAX_FREE_UNLIMITED && index + 1 > current_free_index) {
382 0 : node->next = freelist;
383 0 : freelist = node;
384 830487 : } else if (index < MAX_INDEX) {
385 : /* Add the node to the appropiate 'size' bucket. Adjust
386 : * the max_index when appropiate.
387 : */
388 819742 : if ((node->next = buf[index]) == nullptr && index > max_index) {
389 14620 : max_index = index;
390 : }
391 819677 : buf[index] = node;
392 819698 : if (current_free_index >= index + 1) {
393 3524 : current_free_index -= index + 1;
394 : } else {
395 816174 : current_free_index = 0;
396 : }
397 : } else {
398 : /* This node is too large to keep in a specific size bucket,
399 : * just add it to the sink (at index 0).
400 : */
401 10745 : node->next = buf[0];
402 10745 : buf[0] = node;
403 10745 : if (current_free_index >= index + 1) {
404 0 : current_free_index -= index + 1;
405 : } else {
406 10745 : current_free_index = 0;
407 : }
408 : }
409 830443 : } while ((node = next) != nullptr);
410 :
411 : #if DEBUG
412 197560 : int i = 0;
413 197560 : auto n = buf[1];
414 2582783 : while (n && i < 1024 * 16) {
415 2385233 : n = n->next;
416 2385233 : ++ i;
417 : }
418 :
419 197550 : if (i >= 1024 * 128) {
420 0 : printf("ERRER: pool double-free detected!\n");
421 0 : abort();
422 : }
423 : #endif
424 :
425 197550 : last = max_index;
426 197550 : current = current_free_index;
427 :
428 197550 : if (lock.owns_lock()) {
429 197686 : lock.unlock();
430 : }
431 :
432 : #if LINUX
433 197659 : if (mmapPtr) {
434 0 : return;
435 : }
436 : #endif
437 :
438 197659 : while (freelist != NULL) {
439 0 : node = freelist;
440 0 : freelist = node->next;
441 0 : allocated -= node->endp - (uint8_t *)node;
442 0 : ::free(node);
443 : }
444 197659 : }
445 :
446 1405969 : void Allocator::lock() {
447 1405969 : if (mutex) {
448 1396151 : mutex->lock();
449 : }
450 1406906 : }
451 :
452 1406493 : void Allocator::unlock() {
453 1406493 : if (mutex) {
454 1396682 : mutex->unlock();
455 : }
456 1406833 : }
457 :
458 : }
|