/*----------------------------------------------------------------------------
Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
#endif

/* -----------------------------------------------------------
  Minimal alignment in machine words (i.e. `sizeof(void*)`)
----------------------------------------------------------- */

#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE)
  #error "define alignment for more than 4x word size for this platform"
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE)
  #define MI_ALIGN4W   // 4 machine words minimal alignment
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE)
  #define MI_ALIGN2W   // 2 machine words minimal alignment
#else
  // ok, default alignment is 1 word
#endif
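
// For example, on a typical 64-bit platform `MI_INTPTR_SIZE` is 8 while
// `MI_MAX_ALIGN_SIZE` (the alignment of `max_align_t`) is commonly 16, so
// `MI_ALIGN2W` gets defined and the smallest bins are rounded up to an even
// number of machine words (the exact values depend on the platform and
// configuration).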


/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */


static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
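
// Note: the heap's `pages` array has one queue per bin; the last two queues
// are special, one reserved for huge blocks and one for pages that are
// completely full. Their (otherwise unused) `block_size` fields act as
// sentinel values, which is what the checks above test for (this relies on
// the standard queue initialization of the empty heap).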

/* -----------------------------------------------------------
  Bins
----------------------------------------------------------- */

// Return the bin for a given size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline uint8_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  uint8_t bin;
  if (wsize <= 1) {
    bin = 1;
  }
  #if defined(MI_ALIGN4W)
  else if (wsize <= 4) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #elif defined(MI_ALIGN2W)
  else if (wsize <= 8) {
    bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
  }
  #else
  else if (wsize <= 8) {
    bin = (uint8_t)wsize;
  }
  #endif
  else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
    bin = MI_BIN_HUGE;
  }
  else {
    #if defined(MI_ALIGN4W)
    if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
    #endif
    wsize--;
    // find the highest bit
    uint8_t b = (uint8_t)mi_bsr(wsize);  // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
    // - adjust with 3 because we do not round the first 8 sizes
    //   which each get an exact bin
    bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
    mi_assert_internal(bin < MI_BIN_HUGE);
  }
  mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE);
  return bin;
}
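
// Worked example (assuming the default 1-word minimal alignment): word sizes
// 1..8 map directly to bins 1..8; above that each bin covers a small range,
// e.g.
//   wsize  9..10 -> bin  9,   wsize 11..12 -> bin 10,
//   wsize 13..14 -> bin 11,   wsize 15..16 -> bin 12,
//   wsize 17..20 -> bin 13,   wsize 21..24 -> bin 14, ...
// and the ranges double in width at every power of two, which bounds the
// rounding waste as noted in the comment above.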



/* -----------------------------------------------------------
  Queue of pages with free blocks
----------------------------------------------------------- */

uint8_t _mi_bin(size_t size) {
  return mi_bin(size);
}

size_t _mi_bin_size(uint8_t bin) {
  return _mi_heap_empty.pages[bin].block_size;
}

// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    return _mi_bin_size(mi_bin(size));
  }
  else {
    return _mi_align_up(size,_mi_os_page_size());
  }
}
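
// For instance, on a typical 64-bit build a request of 100 bytes is 13 machine
// words; with the standard bin sizes that falls into the 14-word bin, so
// `mi_good_size(100)` would return 112 (the exact value can shift with the
// alignment settings above).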

#if (MI_DEBUG>1)
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_page_t* list = queue->first;
  while (list != NULL) {
    mi_assert_internal(list->next == NULL || list->next->prev == list);
    mi_assert_internal(list->prev == NULL || list->prev->next == list);
    if (list == page) break;
    list = list->next;
  }
  return (list == page);
}

#endif

#if (MI_DEBUG>1)
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) {
  return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]);
}
#endif

static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_heap_t* heap = mi_page_heap(page);
  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size);
  mi_assert_expensive(mi_page_queue_contains(pq, page));
  return pq;
}

static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
  mi_assert_internal(bin <= MI_BIN_FULL);
  mi_page_queue_t* pq = &heap->pages[bin];
  mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
  return pq;
}

// The `pages_free_direct` array is for efficiency: for each small size
// (up to `MI_SMALL_SIZE_MAX`) it points directly to the page for that size
// without having to compute the bin. This means that when the first page
// of a small-bin queue changes, we need to update a whole range of entries
// in `heap->pages_free_direct`.
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  size_t size = pq->block_size;
  if (size > MI_SMALL_SIZE_MAX) return;

  mi_page_t* page = pq->first;
  if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty;

  // find index in the right direct page array
  size_t start;
  size_t idx = _mi_wsize_from_size(size);
  mi_page_t** pages_free = heap->pages_free_direct;

  if (pages_free[idx] == page) return;  // already set

  // find start slot
  if (idx<=1) {
    start = 0;
  }
  else {
    // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
    uint8_t bin = mi_bin(size);
    const mi_page_queue_t* prev = pq - 1;
    while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
      prev--;
    }
    start = 1 + _mi_wsize_from_size(prev->block_size);
    if (start > idx) start = idx;
  }

  // set size range to the right page
  mi_assert(start <= idx);
  for (size_t sz = start; sz <= idx; sz++) {
    pages_free[sz] = page;
  }
}
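
// A minimal sketch of how this array is used (illustrative only; the real
// fast path lives in the allocation code): a small allocation can find a
// candidate page with a single indexed load instead of computing the bin:
//
//   mi_page_t* page = heap->pages_free_direct[_mi_wsize_from_size(size)];
//   if (page->free != NULL) { /* pop a block from `page->free` */ }
//   else { /* fall back to the generic allocation path */ }
//
// `mi_heap_queue_first_update` keeps these entries in sync whenever the
// first page of a small queue changes.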

/*
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
  return (queue->first == NULL);
}
*/

static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(queue, page));
  mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
  mi_heap_t* heap = mi_page_heap(page);

  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == queue->last)  queue->last = page->prev;
  if (page == queue->first) {
    queue->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, queue));
    mi_heap_queue_first_update(heap,queue);
  }
  heap->page_count--;
  page->next = NULL;
  page->prev = NULL;
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL);
  mi_page_set_in_full(page,false);
}


static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(!mi_page_queue_contains(queue, page));

  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
  mi_assert_internal(page->xblock_size == queue->block_size ||
                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
  // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) {
    mi_assert_internal(queue->first->prev == NULL);
    queue->first->prev = page;
    queue->first = page;
  }
  else {
    queue->first = queue->last = page;
  }

  // update direct
  mi_heap_queue_first_update(heap, queue);
  heap->page_count++;
}


static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(mi_page_queue_contains(from, page));
  mi_assert_expensive(!mi_page_queue_contains(to, page));

  mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
                     (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
                     (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));

  mi_heap_t* heap = mi_page_heap(page);
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (page == from->last)  from->last = page->prev;
  if (page == from->first) {
    from->first = page->next;
    // update first
    mi_assert_internal(mi_heap_contains_queue(heap, from));
    mi_heap_queue_first_update(heap, from);
  }

  page->prev = to->last;
  page->next = NULL;
  if (to->last != NULL) {
    mi_assert_internal(heap == mi_page_heap(to->last));
    to->last->next = page;
    to->last = page;
  }
  else {
    to->first = page;
    to->last = page;
    mi_heap_queue_first_update(heap, to);
  }

  mi_page_set_in_full(page, mi_page_queue_is_full(to));
}
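
// Note: moving a page between queues like this is how a page transitions, for
// example, between its regular size queue and the full queue as it fills up
// or regains free blocks (see the callers in `page.c`).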

// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
  mi_assert_internal(mi_heap_contains_queue(heap,pq));
  mi_assert_internal(pq->block_size == append->block_size);

  if (append->first==NULL) return 0;

  // set append pages to new heap and count
  size_t count = 0;
  for (mi_page_t* page = append->first; page != NULL; page = page->next) {
    // inline `mi_page_set_heap` to avoid a wrong assertion during absorption;
    // in this case it is ok for the pages to be in the delayed-free state since
    // both the "to" and "from" heap are still alive.
    mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
    // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
    // side effect that it spins until any DELAYED_FREEING is finished. This ensures
    // that after appending only the new heap will be used for delayed free operations.
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
    count++;
  }

  if (pq->last==NULL) {
    // take over afresh
    mi_assert_internal(pq->first==NULL);
    pq->first = append->first;
    pq->last = append->last;
    mi_heap_queue_first_update(heap, pq);
  }
  else {
    // append to end
    mi_assert_internal(pq->last!=NULL);
    mi_assert_internal(append->first!=NULL);
    pq->last->next = append->first;
    append->first->prev = pq->last;
    pq->last = append->last;
  }
  return count;
}
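
// A rough sketch of the (single) caller, assuming the shape of `mi_heap_absorb`
// in `heap.c`: when a heap is deleted without destroying its pages, its queues
// are appended bin-by-bin onto the target heap:
//
//   for (size_t bin = 0; bin <= MI_BIN_FULL; bin++) {
//     size_t pcount = _mi_page_queue_append(heap, &heap->pages[bin], &from->pages[bin]);
//     heap->page_count += pcount;
//     from->page_count -= pcount;
//   }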