#ifndef JEMALLOC_INTERNAL_PAC_H
#define JEMALLOC_INTERNAL_PAC_H

#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/san_bump.h"

/*
 * Page allocator classic; an implementation of the PAI interface that:
 * - Can be used for arenas with custom extent hooks.
 * - Can always satisfy any allocation request (including highly-fragmentary
 *   ones).
 * - Can use efficient OS-level zeroing primitives for demand-filled pages.
 */

/* How "eager" decay/purging should be. */
enum pac_purge_eagerness_e {
	PAC_PURGE_ALWAYS,
	PAC_PURGE_NEVER,
	PAC_PURGE_ON_EPOCH_ADVANCE
};
typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;
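/*
 * Illustrative mapping (a sketch of how pac_maybe_decay_purge(), declared
 * below, is expected to interpret these values):
 *   PAC_PURGE_ALWAYS           -- purge on every opportunity.
 *   PAC_PURGE_NEVER            -- update decay state, but never purge here.
 *   PAC_PURGE_ON_EPOCH_ADVANCE -- purge only when the decay epoch advances.
 */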

typedef struct pac_decay_stats_s pac_decay_stats_t;
struct pac_decay_stats_s {
	/* Total number of purge sweeps. */
	locked_u64_t npurge;
	/* Total number of madvise calls made. */
	locked_u64_t nmadvise;
	/* Total number of pages purged. */
	locked_u64_t purged;
};

typedef struct pac_estats_s pac_estats_t;
struct pac_estats_s {
	/*
	 * Stats for a given index in the range [0, SC_NPSIZES] in the various
	 * ecache_ts.
	 * We track both bytes and # of extents: two extents in the same bucket
	 * may have different sizes if adjacent size classes differ by more than
	 * a page, so bytes cannot always be derived from # of extents (see the
	 * worked example after this struct).
	 */
	size_t ndirty;
	size_t dirty_bytes;
	size_t nmuzzy;
	size_t muzzy_bytes;
	size_t nretained;
	size_t retained_bytes;
};
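/*
 * Worked example (sizes are illustrative, assuming 4 KiB pages): if adjacent
 * page size classes are 32 KiB and 40 KiB, a 32 KiB extent and a 36 KiB
 * extent both land in the 32 KiB bucket, so ndirty == 2 is compatible with
 * dirty_bytes == 68 KiB; the byte counts must be tracked separately.
 */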

typedef struct pac_stats_s pac_stats_t;
struct pac_stats_s {
	pac_decay_stats_t decay_dirty;
	pac_decay_stats_t decay_muzzy;

	/*
	 * Number of unused virtual memory bytes currently retained. Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the pac_mapped statistic (below); see the
	 * illustration following this struct.
	 */
	size_t retained; /* Derived. */

	/*
	 * Number of bytes currently mapped, excluding retained memory (and any
	 * base-allocated memory, which is tracked by the arena stats).
	 *
	 * We name this "pac_mapped" to avoid confusion with the arena_stats
	 * "mapped".
	 */
	atomic_zu_t pac_mapped;

	/* VM space that had to be leaked (undocumented). Normally 0. */
	atomic_zu_t abandoned_vm;
};
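/*
 * Illustrative accounting (hypothetical numbers): if this pac has 64 MiB of
 * VM mapped in total and 16 MiB of it is currently retained, pac_mapped
 * reports 48 MiB and retained reports 16 MiB; per the comments above, the
 * two counts are disjoint.
 */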

typedef struct pac_s pac_t;
struct pac_s {
	/*
	 * The handle to the allocation interface. Must be the first member:
	 * we recover the pac_t from a pai_t pointer by casting (see the
	 * downcast sketch after this struct).
	 */
	pai_t pai;
	/*
	 * Collections of extents that were previously allocated. These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	ecache_t ecache_dirty;
	ecache_t ecache_muzzy;
	ecache_t ecache_retained;

	base_t *base;
	emap_t *emap;
	edata_cache_t *edata_cache;

	/* The grow info for the retained ecache. */
	exp_grow_t exp_grow;
	malloc_mutex_t grow_mtx;

	/* Special allocator for guarded, frequently reused extents. */
	san_bump_alloc_t sba;

	/* How large extents should be before getting auto-purged. */
	atomic_zu_t oversize_threshold;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: via the internal mutex.
	 */
	decay_t decay_dirty; /* dirty --> muzzy */
	decay_t decay_muzzy; /* muzzy --> retained */

	malloc_mutex_t *stats_mtx;
	pac_stats_t *stats;

	/* Extent serial number generator state. */
	atomic_zu_t extent_sn_next;
};
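/*
 * A minimal sketch of the first-member downcast mentioned above. The helper
 * name pac_from_pai is hypothetical (the real conversions live in pac.c);
 * the cast is valid only because pai is the first member of struct pac_s.
 */
static inline pac_t *
pac_from_pai(pai_t *self) {
	return (pac_t *)self;
}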

bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
    malloc_mutex_t *stats_mtx);

static inline size_t
pac_mapped(pac_t *pac) {
	return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
}

static inline ehooks_t *
pac_ehooks_get(pac_t *pac) {
	return base_ehooks_get(pac->base);
}

/*
 * All purging functions require holding decay->mtx. This is one of the few
 * places external modules are allowed to peek inside pa_shard_t internals.
 */

/*
 * Decays the number of pages currently in the ecache. This might not leave
 * the ecache empty if other threads are inserting dirty objects into it
 * concurrently with the call.
 */
void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
/*
 * Updates decay settings for the current time, and conditionally purges in
 * response (depending on the eagerness setting). Returns whether or not the
 * epoch advanced.
 */
bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache,
    pac_purge_eagerness_t eagerness);
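/*
 * Illustrative call pattern for the dirty ecache (a sketch, not part of the
 * API; decay->mtx must be held, per the comment above):
 *
 *	malloc_mutex_lock(tsdn, &pac->decay_dirty.mtx);
 *	bool advanced = pac_maybe_decay_purge(tsdn, pac, &pac->decay_dirty,
 *	    &pac->stats->decay_dirty, &pac->ecache_dirty,
 *	    PAC_PURGE_ON_EPOCH_ADVANCE);
 *	malloc_mutex_unlock(tsdn, &pac->decay_dirty.mtx);
 */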

/*
 * Gets / sets the maximum amount that we'll grow an arena down the
 * grow-retained pathways (unless forced to by an allocation request).
 *
 * Set new_limit to NULL if it's just a query, or old_limit to NULL if you
 * don't care about the previous value.
 *
 * Returns true on error (if the new limit is not valid).
 */
bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
    size_t *new_limit);
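/*
 * Usage sketch (illustrative): query the current limit without changing it.
 *
 *	size_t cur_limit;
 *	bool err = pac_retain_grow_limit_get_set(tsdn, pac, &cur_limit, NULL);
 */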

bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
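/*
 * Illustrative (the state argument selects which decay_t applies:
 * extent_state_dirty or extent_state_muzzy; the values here are
 * hypothetical): set dirty decay to 10 seconds, purging whenever the
 * decay epoch advances.
 *
 *	bool err = pac_decay_ms_set(tsdn, pac, extent_state_dirty,
 *	    10 * 1000, PAC_PURGE_ON_EPOCH_ADVANCE);
 */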

void pac_reset(tsdn_t *tsdn, pac_t *pac);
void pac_destroy(tsdn_t *tsdn, pac_t *pac);

#endif /* JEMALLOC_INTERNAL_PAC_H */