/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc-internal.h"

#include <string.h>  // memset

// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------

// Fallback primitive aligned allocation -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  mi_assert_internal(size <= PTRDIFF_MAX);
  mi_assert_internal(alignment!=0 && _mi_is_power_of_two(alignment) && alignment <= MI_ALIGNMENT_MAX);

  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
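  // For example (illustrative): with alignment 16 the mask is 0xF, and for
  // x = 0x1234 we get `x & 0xF == 0x4 == 0x1234 % 16`. This equivalence only
  // holds because the alignment is a power of two (checked above).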
  const size_t padsize = size + MI_PADDING_SIZE;

  // use regular allocation if it is guaranteed to fit the alignment constraints
  if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
    void* p = _mi_heap_malloc_zero(heap, size, zero);
    mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
    return p;
  }
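  // Illustrative case (assuming a build where MI_PADDING_SIZE is 0 and
  // MI_MAX_ALIGN_GUARANTEE is at least 64): a request of size 64 with
  // alignment 64 and offset 0 takes the shortcut above, since the padded size
  // is a multiple of the alignment and small enough that the regular size
  // class already guarantees the alignment; no over-allocation is needed.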

  // otherwise over-allocate
  const size_t oversize = size + alignment - 1;
  void* p = _mi_heap_malloc_zero(heap, oversize, zero);
  if (p == NULL) return NULL;

  // .. and align within the allocation
  uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
  mi_assert_internal(adjust <= alignment);
  void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
  if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true);
  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
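  // Worked example (illustrative): for alignment 64 and offset 0, if the
  // over-allocated block starts at p = 0x10010 then
  //   adjust    = 64 - (0x10010 & 0x3F) = 64 - 0x10 = 0x30
  //   aligned_p = 0x10010 + 0x30 = 0x10040, which is 64-aligned.
  // When p is already aligned, adjust equals the full alignment and p is kept,
  // so at most `alignment - 1` bytes are skipped, exactly the over-allocation.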

  #if MI_TRACK_ENABLED
  if (p != aligned_p) {
    mi_track_free(p);
    mi_track_malloc(aligned_p,size,zero);
  }
  else {
    mi_track_resize(aligned_p,oversize,size);
  }
  #endif
  return aligned_p;
}

// Primitive aligned allocation
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  // note: we don't require `size > offset`; we just guarantee that the address at `offset` is aligned regardless of the allocated size.
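  // For example (illustrative; `payload_size` is a caller-chosen value, not a
  // name used in this file): a caller that stores an 8-byte header in front of
  // a payload that must be 64-byte aligned could request
  //
  //   void* p = mi_malloc_aligned_at(8 + payload_size, 64, 8);
  //   // now ((uintptr_t)p + 8) % 64 == 0, i.e. the payload is 64-byte aligned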
  mi_assert(alignment > 0);
  if mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }
  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
    #endif
    return NULL;
  }
  if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }
  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
  const size_t padsize = size + MI_PADDING_SIZE;  // note: cannot overflow due to earlier size > PTRDIFF_MAX check

  // try first if there happens to be a small block available with just the right alignment
  if mi_likely(padsize <= MI_SMALL_SIZE_MAX) {
    mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
    const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
    if mi_likely(page->free != NULL && is_aligned)
    {
      #if MI_STAT>1
      mi_heap_stat_increase(heap, malloc, size);
      #endif
      void* p = _mi_page_malloc(heap, page, padsize, zero);  // TODO: inline _mi_page_malloc
      mi_assert_internal(p != NULL);
      mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
      mi_track_malloc(p,size,zero);
      return p;
    }
  }
  // fallback
  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
}


// ------------------------------------------------------
// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
// ------------------------------------------------------

mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  #if !MI_PADDING
  // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
  if (!_mi_is_power_of_two(alignment)) return NULL;
  if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
  #else
  // with padding, we can only guarantee this for fixed alignments
  if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
                && size <= MI_SMALL_SIZE_MAX)
  #endif
  {
    // fast path for common alignment and size
    return mi_heap_malloc_small(heap, size);
  }
  else {
    return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
  }
}
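
// Illustrative usage (a sketch, not part of the library): both calls below
// return 32-byte aligned memory, but in a build without padding the first can
// take the small-object fast path since the size is a power of two and at
// least the alignment, while the second goes through the `_at` path:
//
//   void* a = mi_malloc_aligned(32, 32);   // (uintptr_t)a % 32 == 0
//   void* b = mi_malloc_aligned(100, 32);  // same guarantee, slower path
//   mi_free(a); mi_free(b);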

// ------------------------------------------------------
// Aligned zalloc / calloc / default-heap wrappers
// ------------------------------------------------------

mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}
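
// For example (illustrative): on a 64-bit target a call such as
// `mi_heap_calloc_aligned_at(heap, SIZE_MAX/2, 4, 16, 0)` overflows the
// `count * size` multiplication, so `mi_count_size_overflow` reports it and
// NULL is returned instead of a too-small allocation.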

mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}

mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}

mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
}

mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}

mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
}

mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
}

mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
}


// ------------------------------------------------------
// Aligned re-allocation
// ------------------------------------------------------

static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
  size_t size = mi_usable_size(p);
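  // Illustrative: if the usable size of `p` is 256, then any `newsize` in
  // [128, 256] that keeps `p + offset` aligned reuses the existing block (at
  // most 50% of it is wasted); anything else reallocates below.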
  if (newsize <= size && newsize >= (size - (size / 2))
      && (((uintptr_t)p + offset) % alignment) == 0) {
    return p;  // reallocation still fits, is aligned and not more than 50% waste
  }
  else {
    void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
    if (newp != NULL) {
      if (zero && newsize > size) {
        const mi_page_t* page = _mi_ptr_page(newp);
        if (page->is_zero) {
          // already zero initialized
          mi_assert_expensive(mi_mem_is_zero(newp,newsize));
        }
        else {
          // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
          size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
          memset((uint8_t*)newp + start, 0, newsize - start);
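          // illustrative arithmetic: on a 64-bit target with old usable size
          // 20, `start` is 20 - 8 = 12, so the memset covers bytes
          // [12, newsize) rather than just [20, newsize)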
        }
      }
      _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
      mi_free(p);  // only free if successful
    }
    return newp;
  }
}

static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  size_t offset = ((uintptr_t)p % alignment);  // use offset of previous allocation (p can be NULL)
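  // Illustrative: with alignment 64 and `p` at an address where
  // `(uintptr_t)p % 64 == 56`, the carried offset is 56, so the result below
  // satisfies `((uintptr_t)newp + 56) % 64 == 0`; a NULL `p` gives offset 0,
  // i.e. a plain aligned allocation.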
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}

mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}

mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}

mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}

mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}

mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
  return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}

mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
  return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}

mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}

mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}

mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}

mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}

mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}

mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}