/* ----------------------------------------------------------------------------
Copyright (c) 2019-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* ----------------------------------------------------------------------------
"Arenas" are fixed areas of OS memory from which we can allocate
large blocks (>= MI_ARENA_MIN_OBJ_SIZE, 4MiB).
In contrast to the rest of mimalloc, the arenas are shared between
threads and need to be accessed using atomic operations.

Currently arenas are only used for huge OS page (1GiB) reservations
or direct OS memory reservations -- otherwise we delegate to direct allocation from the OS.
In the future, we can expose an API to manually add more kinds of arenas,
which is sometimes needed for embedded devices or shared memory, for example.
(We can also employ this with WASI or `sbrk` systems to reserve large arenas
 on demand and be able to reuse them efficiently.)

Arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/
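
/* ----------------------------------------------------------------------------
  Illustrative sketch (not part of this translation unit): how a client can use
  the public reservation API defined below to set up an exclusive arena and a
  heap that only allocates from it. The 256 MiB size is an arbitrary choice,
  and we assume the accompanying mimalloc.h exposes `mi_heap_new_in_arena`
  alongside the arena id API.

    #include <mimalloc.h>

    static void example_exclusive_arena(void) {
      mi_arena_id_t arena_id;
      // reserve 256 MiB of committed OS memory as an exclusive arena (no large OS pages)
      if (mi_reserve_os_memory_ex(256 * 1024 * 1024, true, false, true, &arena_id) == 0) {
        mi_heap_t* heap = mi_heap_new_in_arena(arena_id); // heap that allocates only from this arena
        void* p = mi_heap_malloc(heap, 1024);             // small allocation served from the reserved arena
        mi_free(p);
        mi_heap_delete(heap);
      }
    }
-----------------------------------------------------------------------------*/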
#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>   // memset
#include <errno.h>    // ENOMEM

#include "bitmap.h"   // atomic bitmap


// os.c
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* stats);
void  _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);

void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
void  _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);

bool  _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool  _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);


/* -----------------------------------------------------------
  Arena allocation
----------------------------------------------------------- */

// Block info: bit 0 contains the `in_use` bit, the upper bits the
// size in count of arena blocks.
typedef uintptr_t mi_block_info_t;
#define MI_ARENA_BLOCK_SIZE   (MI_SEGMENT_SIZE)        // 8MiB (must be at least MI_SEGMENT_ALIGN)
#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 4MiB
#define MI_MAX_ARENAS         (64)                     // not more than 126 (since we use 7 bits in the memid and an arena index + 1)

// A memory arena descriptor
typedef struct mi_arena_s {
  mi_arena_id_t id;                     // arena id; 0 for non-specific
  bool     exclusive;                   // only allow allocations if specifically for this arena
  _Atomic(uint8_t*) start;              // the start of the memory area
  size_t   block_count;                 // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
  size_t   field_count;                 // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
  int      numa_node;                   // associated NUMA node
  bool     is_zero_init;                // is the arena zero initialized?
  bool     allow_decommit;              // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
  bool     is_large;                    // large- or huge OS pages (always committed)
  _Atomic(size_t) search_idx;           // optimization to start the search for free blocks
  mi_bitmap_field_t* blocks_dirty;      // are the blocks potentially non-zero?
  mi_bitmap_field_t* blocks_committed;  // are the blocks committed? (can be NULL for memory that cannot be decommitted)
  mi_bitmap_field_t  blocks_inuse[1];   // in-place bitmap of in-use blocks (of size `field_count`)
} mi_arena_t;


// The available arenas
static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
static mi_decl_cache_align _Atomic(size_t)      mi_arena_count; // = 0

/* -----------------------------------------------------------
  Arena id's
  0 is used for non-arena memory (like OS-allocated memory)
  id = arena_index + 1
----------------------------------------------------------- */

static size_t mi_arena_id_index(mi_arena_id_t id) {
  return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
}
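// Note: `_mi_arena_id_none()` is 0 and valid arena id's start at 1, so a "none"
// id maps to index MI_MAX_ARENAS, which is out of range for `mi_arenas` and thus
// never matches a valid arena slot (for example, id 1 -> index 0, id 0 -> index 64).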

static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
  mi_assert_internal(arena_index < MI_MAX_ARENAS);
  mi_assert_internal(MI_MAX_ARENAS <= 126);
  int id = (int)arena_index + 1;
  mi_assert_internal(id >= 1 && id <= 127);
  return id;
}

mi_arena_id_t _mi_arena_id_none(void) {
  return 0;
}

static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
  return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
          (arena_id == req_arena_id));
}


/* -----------------------------------------------------------
  Arena allocations get a memory id where the lower 8 bits hold
  the arena id (7 bits) and the exclusive flag (bit 7), and the
  upper bits hold the block (bitmap) index.
----------------------------------------------------------- */

// Use `0` as a special id for direct OS allocated memory.
#define MI_MEMID_OS   0

static size_t mi_arena_memid_create(mi_arena_id_t id, bool exclusive, mi_bitmap_index_t bitmap_index) {
  mi_assert_internal(((bitmap_index << 8) >> 8) == bitmap_index); // no overflow?
  mi_assert_internal(id >= 0 && id <= 0x7F);
  return ((bitmap_index << 8) | ((uint8_t)id & 0x7F) | (exclusive ? 0x80 : 0));
}
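// Worked example (illustrative): for arena index 2 the arena id is 3, so a
// non-exclusive allocation at bitmap index 40 gets memid (40 << 8) | 3 = 0x2803.
// Decoding reverses this: 0x2803 & 0x7F = 3 (arena id -> index 2), 0x2803 >> 8 = 40
// (bitmap index), and bit 0x80 is clear, so the arena is not exclusive.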

static bool mi_arena_memid_indices(size_t arena_memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
  *bitmap_index = (arena_memid >> 8);
  mi_arena_id_t id = (int)(arena_memid & 0x7F);
  *arena_index = mi_arena_id_index(id);
  return ((arena_memid & 0x80) != 0);
}

bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) {
  mi_arena_id_t id = (int)(arena_memid & 0x7F);
  bool exclusive = ((arena_memid & 0x80) != 0);
  return mi_arena_id_is_suitable(id, exclusive, request_arena_id);
}

static size_t mi_block_count_of_size(size_t size) {
  return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
}

/* -----------------------------------------------------------
  Thread safe allocation in an arena
----------------------------------------------------------- */
static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
{
  size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
  if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
    mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
    return true;
  }
  return false;
}


/* -----------------------------------------------------------
  Arena Allocation
----------------------------------------------------------- */

static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
                                                  bool* commit, bool* large, bool* is_pinned, bool* is_zero,
                                                  mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
  MI_UNUSED(arena_index);
  mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
  if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;

  mi_bitmap_index_t bitmap_index;
  if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;

  // claimed it! set the dirty bits (todo: no need for an atomic op here?)
  void* p = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE);
  *memid = mi_arena_memid_create(arena->id, arena->exclusive, bitmap_index);
  *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
  *large = arena->is_large;
  *is_pinned = (arena->is_large || !arena->allow_decommit);
  if (arena->blocks_committed == NULL) {
    // always committed
    *commit = true;
  }
  else if (*commit) {
    // arena not committed as a whole, but commit requested: ensure commit now
    bool any_uncommitted;
    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
    if (any_uncommitted) {
      bool commit_zero;
      _mi_os_commit(p, needed_bcount * MI_ARENA_BLOCK_SIZE, &commit_zero, tld->stats);
      if (commit_zero) *is_zero = true;
    }
  }
  else {
    // no need to commit, but check if already fully committed
    *commit = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
  }
  return p;
}

static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
                                                bool* is_pinned, bool* is_zero,
                                                mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
  MI_UNUSED_RELEASE(alignment);
  mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
  const size_t bcount = mi_block_count_of_size(size);
  if mi_likely(max_arena == 0) return NULL;
  mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);

  size_t arena_index = mi_arena_id_index(req_arena_id);
  if (arena_index < MI_MAX_ARENAS) {
    // try a specific arena if requested
    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]);
    if (arena != NULL &&
        (arena->numa_node < 0 || arena->numa_node == numa_node) &&  // numa local?
        (*large || !arena->is_large))                               // large OS pages allowed, or arena is not large OS pages
    {
      void* p = mi_arena_alloc_from(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
      mi_assert_internal((uintptr_t)p % alignment == 0);
      if (p != NULL) return p;
    }
  }
  else {
    // try numa affine allocation
    for (size_t i = 0; i < max_arena; i++) {
      mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
      if (arena == NULL) break; // end reached
      if ((arena->numa_node < 0 || arena->numa_node == numa_node) &&  // numa local?
          (*large || !arena->is_large))                               // large OS pages allowed, or arena is not large OS pages
      {
        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
        mi_assert_internal((uintptr_t)p % alignment == 0);
        if (p != NULL) return p;
      }
    }

    // try from another numa node instead..
    for (size_t i = 0; i < max_arena; i++) {
      mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
      if (arena == NULL) break; // end reached
      if ((arena->numa_node >= 0 && arena->numa_node != numa_node) &&  // not numa local!
          (*large || !arena->is_large))                                // large OS pages allowed, or arena is not large OS pages
      {
        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
        mi_assert_internal((uintptr_t)p % alignment == 0);
        if (p != NULL) return p;
      }
    }
  }
  return NULL;
}


void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
                              mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
  mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
  mi_assert_internal(size > 0);
  *memid = MI_MEMID_OS;
  *is_zero = false;
  *is_pinned = false;

  bool default_large = false;
  if (large == NULL) large = &default_large;   // ensure `large != NULL`
  const int numa_node = _mi_os_numa_node(tld); // current numa node

  // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
  if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN) {
    void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
    if (p != NULL) return p;
  }

  // finally, fall back to the OS
  if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
    errno = ENOMEM;
    return NULL;
  }
  *is_zero = true;
  *memid = MI_MEMID_OS;
  void* p = _mi_os_alloc_aligned(size, alignment, *commit, large, tld->stats);
  if (p != NULL) *is_pinned = *large;
  return p;
}

void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
}
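
/* ----------------------------------------------------------------------------
  Illustrative sketch (assumptions noted): how an internal caller is expected
  to drive `_mi_arena_alloc_aligned`/`_mi_arena_free`. The in/out booleans
  request commit and large OS pages on input and report the actual state on
  output; `memid` must be passed back on free. The 32 MiB size and the `my_tld`
  variable (a valid `mi_os_tld_t*`) are assumptions for the example.

    size_t memid;
    bool   commit    = true;    // request committed memory
    bool   large     = false;   // do not require large OS pages
    bool   is_pinned = false;
    bool   is_zero   = false;
    void*  p = _mi_arena_alloc_aligned(32 * 1024 * 1024, MI_SEGMENT_ALIGN,
                                       &commit, &large, &is_pinned, &is_zero,
                                       _mi_arena_id_none(), &memid, my_tld);
    if (p != NULL) {
      // ... use the memory; `memid` records whether it came from an arena or the OS ...
      _mi_arena_free(p, 32 * 1024 * 1024, memid, commit, my_tld);
    }
-----------------------------------------------------------------------------*/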

void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
  if (size != NULL) *size = 0;
  size_t arena_index = mi_arena_id_index(arena_id);
  if (arena_index >= MI_MAX_ARENAS) return NULL;
  mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]);
  if (arena == NULL) return NULL;
  if (size != NULL) *size = arena->block_count * MI_ARENA_BLOCK_SIZE;
  return arena->start;
}

/* -----------------------------------------------------------
  Arena free
----------------------------------------------------------- */

void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_os_tld_t* tld) {
  mi_assert_internal(size > 0 && tld->stats != NULL);
  if (p == NULL) return;
  if (size == 0) return;

  if (memid == MI_MEMID_OS) {
    // was a direct OS allocation, pass through
    _mi_os_free_ex(p, size, all_committed, tld->stats);
  }
  else {
    // allocated in an arena
    size_t arena_idx;
    size_t bitmap_idx;
    mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
    mi_assert_internal(arena_idx < MI_MAX_ARENAS);
    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_idx]);
    mi_assert_internal(arena != NULL);
    const size_t blocks = mi_block_count_of_size(size);
    // checks
    if (arena == NULL) {
      _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }
    mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
    if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
      _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }
    // potentially decommit
    if (!arena->allow_decommit || arena->blocks_committed == NULL) {
      mi_assert_internal(all_committed); // note: this may not hold as we sometimes "pretend" blocks are not committed (in segment.c)
    }
    else {
      mi_assert_internal(arena->blocks_committed != NULL);
      _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, tld->stats); // ok if this fails
      _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
    }
    // and make it available to others again
    bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
    if (!all_inuse) {
      _mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size);
      return;
    }
  }
}

/* -----------------------------------------------------------
  Add an arena.
----------------------------------------------------------- */

static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) {
  mi_assert_internal(arena != NULL);
  mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t, &arena->start) % MI_SEGMENT_ALIGN == 0);
  mi_assert_internal(arena->block_count > 0);
  if (arena_id != NULL) *arena_id = -1;

  size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
  if (i >= MI_MAX_ARENAS) {
    mi_atomic_decrement_acq_rel(&mi_arena_count);
    return false;
  }
  mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], arena);
  arena->id = mi_arena_id_create(i);
  if (arena_id != NULL) *arena_id = arena->id;
  return true;
}

bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
{
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
  if (size < MI_ARENA_BLOCK_SIZE) return false;

  if (is_large) {
    mi_assert_internal(is_committed);
    is_committed = true;
  }

  const size_t bcount  = size / MI_ARENA_BLOCK_SIZE;
  const size_t fields  = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
  const size_t bitmaps = (is_committed ? 2 : 3);
  const size_t asize   = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
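  // Worked example (illustrative): managing a 512 MiB committed region on a
  // 64-bit system gives bcount = 512MiB/8MiB = 64 blocks, fields = _mi_divide_up(64,64) = 1,
  // and bitmaps = 2 (inuse and dirty; no committed bitmap needed), so
  // asize = sizeof(mi_arena_t) + 2*1*sizeof(mi_bitmap_field_t).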
  mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
  if (arena == NULL) return false;

  arena->id = _mi_arena_id_none();
  arena->exclusive = exclusive;
  arena->block_count = bcount;
  arena->field_count = fields;
  arena->start = (uint8_t*)start;
  arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
  arena->is_large = is_large;
  arena->is_zero_init = is_zero;
  arena->allow_decommit = !is_large && !is_committed; // only allow decommit for initially uncommitted memory
  arena->search_idx = 0;
  arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
  arena->blocks_committed = (!arena->allow_decommit ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
  // the bitmaps are already zero initialized due to os_alloc
  // initialize committed bitmap?
  if (arena->blocks_committed != NULL && is_committed) {
    memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
  }
  // and claim leftover blocks if needed (so we never allocate there)
  ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
  mi_assert_internal(post >= 0);
  if (post > 0) {
    // don't use leftover bits at the end
    mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
    _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
  }

  return mi_arena_add(arena, arena_id);
}

// Reserve a range of regular OS memory
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
{
  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
  size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
  bool large = allow_large;
  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &_mi_stats_main);
  if (start == NULL) return ENOMEM;
  if (!mi_manage_os_memory_ex(start, size, (large || commit), large, true, -1, exclusive, arena_id)) {
    _mi_os_free_ex(start, size, commit, &_mi_stats_main);
    _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
    return ENOMEM;
  }
  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), large ? " (in large os pages)" : "");
  return 0;
}
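
/* ----------------------------------------------------------------------------
  Illustrative sketch (assumptions noted): handing an externally obtained,
  suitably aligned region to mimalloc to manage as a regular (non-exclusive)
  arena. `mi_manage_os_memory_ex` expects the start address to be
  MI_SEGMENT_ALIGN aligned and the size to be at least MI_ARENA_BLOCK_SIZE;
  how `aligned_start` is obtained (for example an over-allocated anonymous
  mmap) is left out of the sketch.

    #include <mimalloc.h>

    static bool example_manage_region(void* aligned_start, size_t size) {
      mi_arena_id_t arena_id;
      return mi_manage_os_memory_ex(aligned_start, size,
                                    true,   // is_committed
                                    false,  // is_large (no huge OS pages)
                                    false,  // is_zero (be conservative)
                                    -1,     // numa node unknown
                                    false,  // not exclusive
                                    &arena_id);
    }
-----------------------------------------------------------------------------*/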

bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
  return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false, NULL);
}

int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
  return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
}


/* -----------------------------------------------------------
  Debugging
----------------------------------------------------------- */

static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count) {
  size_t inuse_count = 0;
  for (size_t i = 0; i < field_count; i++) {
    char buf[MI_BITMAP_FIELD_BITS + 1];
    uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
    for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++) {
      bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
      if (inuse) inuse_count++;
      buf[MI_BITMAP_FIELD_BITS - 1 - bit] = (inuse ? 'x' : '.');
    }
    buf[MI_BITMAP_FIELD_BITS] = 0;
    _mi_verbose_message("%s%s\n", prefix, buf);
  }
  return inuse_count;
}

void mi_debug_show_arenas(void) mi_attr_noexcept {
  size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
  for (size_t i = 0; i < max_arenas; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
    if (arena == NULL) break;
    size_t inuse_count = 0;
    _mi_verbose_message("arena %zu: %zu blocks with %zu fields\n", i, arena->block_count, arena->field_count);
    inuse_count += mi_debug_show_bitmap(" ", arena->blocks_inuse, arena->field_count);
    _mi_verbose_message(" blocks in use ('x'): %zu\n", inuse_count);
  }
}
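// Example (illustrative): a single 64-block arena whose first three blocks are in
// use produces verbose output of roughly this shape; note that within a bitmap
// field the lowest block index is printed as the right-most character:
//
//   arena 0: 64 blocks with 1 fields
//    .............................................................xxx
//    blocks in use ('x'): 3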


/* -----------------------------------------------------------
  Reserve a huge page arena.
----------------------------------------------------------- */
// reserve at a specific numa node
int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  if (arena_id != NULL) *arena_id = -1;
  if (pages == 0) return 0;
  if (numa_node < -1) numa_node = -1;
  if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
  size_t hsize = 0;
  size_t pages_reserved = 0;
  void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize);
  if (p == NULL || pages_reserved == 0) {
    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
    return ENOMEM;
  }
  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);

  if (!mi_manage_os_memory_ex(p, hsize, true, true, true, numa_node, exclusive, arena_id)) {
    _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
    return ENOMEM;
  }
  return 0;
}

int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
  return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
}
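
// For example (illustrative), `mi_reserve_huge_os_pages_at(4, 0, 2000)` tries to
// reserve 4 one-GiB huge pages on NUMA node 0, giving up after roughly 2 seconds;
// on success the 4 GiB area is registered as an arena that is always committed
// and pinned (never decommitted).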

// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
  if (pages == 0) return 0;

  // pages per numa node
  size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
  if (numa_count <= 0) numa_count = 1;
  const size_t pages_per = pages / numa_count;
  const size_t pages_mod = pages % numa_count;
  const size_t timeout_per = (timeout_msecs == 0 ? 0 : (timeout_msecs / numa_count) + 50);

  // reserve evenly among numa nodes
  for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
    size_t node_pages = pages_per; // can be 0
    if (numa_node < pages_mod) node_pages++;
    int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
    if (err) return err;
    if (pages < node_pages) {
      pages = 0;
    }
    else {
      pages -= node_pages;
    }
  }

  return 0;
}
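
// Worked example (illustrative): reserving 10 pages over 4 NUMA nodes gives
// pages_per = 2 and pages_mod = 2, so nodes 0 and 1 receive 3 pages each and
// nodes 2 and 3 receive 2 pages each (3+3+2+2 = 10). With a total timeout of
// 4000 msecs, each node gets a per-node timeout of 4000/4 + 50 = 1050 msecs.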

int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
  _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
  if (pages_reserved != NULL) *pages_reserved = 0;
  int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
  if (err == 0 && pages_reserved != NULL) *pages_reserved = pages;
  return err;
}