/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset, memcpy

#if defined(_MSC_VER) && (_MSC_VER < 1920)
#pragma warning(disable:4204)  // non-constant aggregate initializer
#endif

/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

// return `true` if ok, `false` to break
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);

// Visit all pages in a heap; returns `false` if break was called.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{
  if (heap==NULL || heap->page_count==0) return true;  // nothing to visit; no break occurred

  // visit all pages
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  #endif
  size_t count = 0;
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_t* page = pq->first;
    while (page != NULL) {
      mi_page_t* next = page->next;  // save next in case the page gets removed from the queue
      mi_assert_internal(mi_page_heap(page) == heap);
      count++;
      if (!fn(heap, pq, page, arg1, arg2)) return false;
      page = next;  // and continue
    }
  }
  mi_assert_internal(count == total);
  return true;
}
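
// For illustration, a hypothetical page-counting visitor could be written as
// follows (a sketch only; `mi_heap_visit_pages` is internal to this file):
//
//   static bool mi_count_pages_visitor(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
//     MI_UNUSED(heap); MI_UNUSED(pq); MI_UNUSED(page); MI_UNUSED(arg2);
//     (*(size_t*)arg1)++;
//     return true;  // keep visiting (`false` would break)
//   }
//
//   size_t count = 0;
//   mi_heap_visit_pages(heap, &mi_count_pages_visitor, &count, NULL);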


#if MI_DEBUG>=2
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(pq);
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}
#endif
#if MI_DEBUG>=3
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif




/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */

typedef enum mi_collect_e {
  MI_NORMAL,
  MI_FORCE,
  MI_ABANDON
} mi_collect_t;


static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2) {
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.
    _mi_page_free(page, pq, collect >= MI_FORCE);
  }
  else if (collect == MI_ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true;  // don't break
}

static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
  return true;  // don't break
}

static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  const bool force = collect >= MI_FORCE;
  _mi_deferred_free(heap, force);

  // note: never reclaim abandoned segments on a collect; leave reclaiming to threads that actually need the storage
  const bool force_main =
    #ifdef NDEBUG
      collect == MI_FORCE
    #else
      collect >= MI_FORCE
    #endif
      && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned at end-of-program; try to reclaim all abandoned segments.
    // if all memory was freed by now, all segments should be freed as well.
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == MI_ABANDON) {
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free all current thread delayed blocks.
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
  _mi_heap_delayed_free_all(heap);

  // collect retired pages
  _mi_heap_collect_retired(heap, force);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal(collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t, &heap->thread_delayed_free) == NULL);

  // collect abandoned segments (in particular, decommit expired parts of segments in the abandoned segment list)
  // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // collect segment local caches
  if (force) {
    _mi_segment_thread_collect(&heap->tld->segments);
  }

  // decommit in global segment caches
  // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_segment_cache_collect(collect == MI_FORCE, &heap->tld->os);

  // collect regions on program-exit (or shared library unload)
  if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
    //_mi_mem_collect(&heap->tld->os);
  }
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {
  mi_heap_collect_ex(heap, MI_ABANDON);
}

void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
}

void mi_collect(bool force) mi_attr_noexcept {
  mi_heap_collect(mi_get_default_heap(), force);
}
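
// Illustrative usage: a forced collect fits a phase boundary (e.g. after a
// large batch of work completes) since it also collects the segment caches
// and decommits memory where possible, while a normal collect is much cheaper:
//
//   mi_collect(true);   // force: full collect, return memory to the OS where possible
//   mi_collect(false);  // normal: mainly migrate free lists and free empty pages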


/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_get_default_heap();
}

mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
  if (heap==NULL) return NULL;
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = bheap->tld;
  heap->thread_id = _mi_thread_id();
  heap->arena_id = arena_id;
  _mi_random_split(&bheap->random, &heap->random);
  heap->cookie = _mi_heap_random_next(heap) | 1;
  heap->keys[0] = _mi_heap_random_next(heap);
  heap->keys[1] = _mi_heap_random_next(heap);
  heap->no_reclaim = true;  // don't reclaim abandoned pages (otherwise `mi_heap_destroy` would be unsafe)
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
  return heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  return mi_heap_new_in_arena(_mi_arena_id_none());
}
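
// Illustrative usage of a fresh heap (`mi_heap_malloc` is the public per-heap
// allocation API from `mimalloc.h`); blocks are freed with the regular `mi_free`:
//
//   mi_heap_t* h = mi_heap_new();
//   if (h != NULL) {
//     void* p = mi_heap_malloc(h, 64);
//     mi_free(p);
//     mi_heap_delete(h);  // remaining pages migrate to the backing heap
//   }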

bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid) {
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}

uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
  return _mi_random_next(&heap->random);
}

// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  #ifdef MI_MEDIUM_DIRECT
  memset(&heap->pages_free_medium, 0, sizeof(heap->pages_free_medium));
  #endif
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (mi_heap_is_backing(heap)) return;  // don't free the backing heap

  // reset default
  if (mi_heap_is_default(heap)) {
    _mi_heap_set_default_direct(heap->tld->heap_backing);
  }

  // remove ourselves from the thread local heaps list
  // linear search but we expect the number of heaps to be relatively small
  mi_heap_t* prev = NULL;
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != heap && curr != NULL) {
    prev = curr;
    curr = curr->next;
  }
  mi_assert_internal(curr == heap);
  if (curr == heap) {
    if (prev != NULL) { prev->next = heap->next; }
    else { heap->tld->heaps = heap->next; }
  }
  mi_assert_internal(heap->tld->heaps != NULL);

  // and free the used memory
  mi_free(heap);
}


/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */

static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_decrease(heap, large, bsize);
    }
    else {
      mi_heap_stat_decrease(heap, huge, bsize);
    }
  }
  #if (MI_STAT)
  _mi_page_free_collect(page, false);  // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse);  // todo: off for aligned blocks...
  #endif

  // pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
  page->used = 0;

  // and free the page
  // mi_page_free(page,false);
  page->next = NULL;
  page->prev = NULL;
  _mi_segment_page_free(page, false /* no force? */, &heap->tld->segments);

  return true;  // keep going
}

void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}

void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't destroy the heap as it may contain reclaimed pages; delete it instead
    mi_heap_delete(heap);
  }
  else {
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}
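
// Since `mi_heap_destroy` frees all pages of the heap outright, it enables an
// arena-style usage (illustrative sketch) where many short-lived allocations
// are released in one call without freeing each block individually:
//
//   mi_heap_t* scratch = mi_heap_new();
//   for (int i = 0; i < 1000; i++) {
//     (void)mi_heap_malloc(scratch, 128);  // no individual `mi_free` needed
//   }
//   mi_heap_destroy(scratch);              // releases all blocks at once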


/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free_partial(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
  // so after this only the new heap will get delayed frees
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer points to `from`;
  // this turns out to be ok as `_mi_heap_delayed_free_all` only visits the list and calls
  // the regular `_mi_free_delayed_block` which is safe.
  _mi_heap_delayed_free_all(from);
  #if !defined(_MSC_VER) || (_MSC_VER > 1900)  // somehow the following line gives an error in VS2015, issue #353
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t, &from->thread_delayed_free) == NULL);
  #endif

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

// Safely delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}
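
// In contrast to `mi_heap_destroy`, blocks outlive `mi_heap_delete` as their
// pages are absorbed into the backing heap (illustrative sketch):
//
//   mi_heap_t* h = mi_heap_new();
//   char* s = (char*)mi_heap_malloc(h, 32);
//   mi_heap_delete(h);  // `s` stays valid; its page now belongs to the backing heap
//   mi_free(s);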

mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}
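
// Illustrative save/restore pattern: temporarily route plain `mi_malloc` calls
// into a custom heap and restore the previous default afterwards:
//
//   mi_heap_t* h   = mi_heap_new();
//   mi_heap_t* old = mi_heap_set_default(h);
//   void* p = mi_malloc(100);  // allocated in `h`
//   mi_heap_set_default(old);
//   mi_free(p);
//   mi_heap_delete(h);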



/* -----------------------------------------------------------
  Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(valid);
  if mi_unlikely(!valid) return NULL;
  return mi_page_heap(_mi_segment_page_of(segment, p));
}

bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}
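
// For example (sketch):
//
//   mi_heap_t* h = mi_heap_new();
//   void* p = mi_heap_malloc(h, 16);
//   mi_assert(mi_heap_contains_block(h, p));                       // `p` was allocated in `h`
//   mi_assert(!mi_heap_contains_block(mi_heap_get_backing(), p));  // and not in the backing heap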


static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  bool* found = (bool*)vfound;
  mi_segment_t* segment = _mi_page_segment(page);
  void* start = _mi_page_start(segment, page, NULL);
  void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
  *found = (p >= start && p < end);
  return (!*found);  // continue if not found
}

bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}

bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_get_default_heap(), p);
}
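
// Illustrative check (note the pointer alignment requirement above):
//
//   void* p = mi_malloc(40);
//   mi_assert(mi_check_owned(p));  // `p` lies in a page of the default heap
//   mi_free(p);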

/* -----------------------------------------------------------
  Visit all heap blocks and areas
  Todo: enable visiting abandoned pages, and
        enable visiting all blocks of all heaps across threads
----------------------------------------------------------- */

// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;
  mi_page_t*     page;
} mi_heap_area_ex_t;

static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(xarea != NULL);
  if (xarea==NULL) return true;
  const mi_heap_area_t* area = &xarea->area;
  mi_page_t* page = xarea->page;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  _mi_page_free_collect(page, true);
  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);  // without padding
  size_t psize;
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);

  if (page->capacity == 1) {
    // optimize for a page with just one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(mi_page_heap(page), area, pstart, ubsize, arg);
  }

  // create a bitmap of free blocks; each `uintptr_t` word covers MI_INTPTR_BITS blocks
  #define MI_MAX_BLOCKS   (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
  memset(free_map, 0, sizeof(free_map));

  size_t free_count = 0;
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
    free_count++;
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % bsize == 0);
    size_t blockidx = offset / bsize;  // Todo: avoid division?
    mi_assert_internal(blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / MI_INTPTR_BITS);
    size_t bit = blockidx - (bitidx * MI_INTPTR_BITS);
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  size_t used_count = 0;
  for (size_t i = 0; i < page->capacity; i++) {
    size_t bitidx = (i / MI_INTPTR_BITS);
    size_t bit = i - (bitidx * MI_INTPTR_BITS);
    uintptr_t m = free_map[bitidx];
    if (bit == 0 && m == UINTPTR_MAX) {
      i += (MI_INTPTR_BITS - 1);  // skip a whole word's run of free blocks
    }
    else if ((m & ((uintptr_t)1 << bit)) == 0) {
      used_count++;
      uint8_t* block = pstart + (i * bsize);
      if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}

typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);


static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);
  xarea.page = page;
  xarea.area.reserved = page->reserved * bsize;
  xarea.area.committed = page->capacity * bsize;
  xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
  xarea.area.used = page->used;  // number of blocks in use (#553)
  xarea.area.block_size = ubsize;
  xarea.area.full_block_size = bsize;
  return fun(heap, &xarea, arg);
}

// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg);  // note: function pointer to void* :-{
}

// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool  visit_blocks;
  mi_block_visit_fun* visitor;
  void* arg;
} mi_visit_blocks_args_t;

static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg);
  }
  else {
    return true;
  }
}

// Visit all blocks in a heap
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
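
// An illustrative `mi_block_visit_fun` (the hypothetical `mi_sum_block_visitor`)
// that sums the sizes of all live blocks; note that the visitor is called once
// per area with `block == NULL`, and then once per block when `visit_blocks` is true:
//
//   static bool mi_sum_block_visitor(const mi_heap_t* heap, const mi_heap_area_t* area,
//                                    void* block, size_t block_size, void* arg) {
//     MI_UNUSED(heap); MI_UNUSED(area);
//     if (block != NULL) { *(size_t*)arg += block_size; }
//     return true;  // continue visiting
//   }
//
//   size_t total = 0;
//   mi_heap_visit_blocks(heap, true /* visit individual blocks */, &mi_sum_block_visitor, &total);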