1 | #ifndef JEMALLOC_INTERNAL_PAI_H |
2 | #define JEMALLOC_INTERNAL_PAI_H |
3 | |
/*
 * An interface for page allocation.  Each hook takes a
 * deferred_work_generated out-parameter, through which it reports whether
 * the call generated work (e.g. purging) that should later be done
 * asynchronously.
 */
5 | |
6 | typedef struct pai_s pai_t; |
7 | struct pai_s { |
8 | /* Returns NULL on failure. */ |
9 | edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size, |
10 | size_t alignment, bool zero, bool guarded, bool frequent_reuse, |
11 | bool *deferred_work_generated); |
12 | /* |
13 | * Returns the number of extents added to the list (which may be fewer |
14 | * than requested, in case of OOM). The list should already be |
15 | * initialized. The only alignment guarantee is page-alignment, and |
16 | * the results are not necessarily zeroed. |
17 | */ |
18 | size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size, |
19 | size_t nallocs, edata_list_active_t *results, |
20 | bool *deferred_work_generated); |
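	/*
	 * Expands the extent in place from old_size to new_size.  Returns
	 * true on failure.
	 */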
21 | bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata, |
22 | size_t old_size, size_t new_size, bool zero, |
23 | bool *deferred_work_generated); |
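	/* Shrinks the extent in place; returns true on failure. */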
24 | bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata, |
25 | size_t old_size, size_t new_size, bool *deferred_work_generated); |
26 | void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata, |
27 | bool *deferred_work_generated); |
	/* Empties the list as a side effect of being called. */
29 | void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self, |
30 | edata_list_active_t *list, bool *deferred_work_generated); |
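	/*
	 * Returns the number of nanoseconds until deferred work (e.g.
	 * purging) needs to be done.
	 */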
31 | uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self); |
32 | }; |
33 | |
34 | /* |
35 | * These are just simple convenience functions to avoid having to reference the |
36 | * same pai_t twice on every invocation. |
37 | */ |
38 | |
39 | static inline edata_t * |
40 | pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, |
41 | bool zero, bool guarded, bool frequent_reuse, |
42 | bool *deferred_work_generated) { |
43 | return self->alloc(tsdn, self, size, alignment, zero, guarded, |
44 | frequent_reuse, deferred_work_generated); |
45 | } |
46 | |
47 | static inline size_t |
48 | pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, |
49 | edata_list_active_t *results, bool *deferred_work_generated) { |
50 | return self->alloc_batch(tsdn, self, size, nallocs, results, |
51 | deferred_work_generated); |
52 | } |
53 | |
54 | static inline bool |
55 | pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, |
56 | size_t new_size, bool zero, bool *deferred_work_generated) { |
57 | return self->expand(tsdn, self, edata, old_size, new_size, zero, |
58 | deferred_work_generated); |
59 | } |
60 | |
61 | static inline bool |
62 | pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, |
63 | size_t new_size, bool *deferred_work_generated) { |
64 | return self->shrink(tsdn, self, edata, old_size, new_size, |
65 | deferred_work_generated); |
66 | } |
67 | |
68 | static inline void |
69 | pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, |
70 | bool *deferred_work_generated) { |
71 | self->dalloc(tsdn, self, edata, deferred_work_generated); |
72 | } |
73 | |
74 | static inline void |
75 | pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list, |
76 | bool *deferred_work_generated) { |
77 | self->dalloc_batch(tsdn, self, list, deferred_work_generated); |
78 | } |
79 | |
80 | static inline uint64_t |
81 | pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { |
82 | return self->time_until_deferred_work(tsdn, self); |
83 | } |
84 | |
85 | /* |
86 | * An implementation of batch allocation that simply calls alloc once for |
87 | * each item in the list. |
88 | */ |
89 | size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, |
90 | size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated); |
91 | /* Ditto, for dalloc. */ |
92 | void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self, |
93 | edata_list_active_t *list, bool *deferred_work_generated); |
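
/*
 * Conceptually, the default batch allocation is a loop like the following
 * sketch (not the exact definition; see pai.c), which stops early and
 * returns the count so far if an individual allocation fails:
 *
 *	for (size_t i = 0; i < nallocs; i++) {
 *		bool deferred = false;
 *		edata_t *edata = pai_alloc(tsdn, self, size, PAGE, false,
 *		    false, false, &deferred);
 *		*deferred_work_generated |= deferred;
 *		if (edata == NULL) {
 *			return i;
 *		}
 *		edata_list_active_append(results, edata);
 *	}
 *	return nallocs;
 */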
94 | |
95 | #endif /* JEMALLOC_INTERNAL_PAI_H */ |
96 | |