#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
#define JEMALLOC_INTERNAL_HPA_OPTS_H

#include "jemalloc/internal/fxp.h"

/*
 * This file is morally part of hpa.h, but is split out for header-ordering
 * reasons.
 */

typedef struct hpa_shard_opts_s hpa_shard_opts_t;
struct hpa_shard_opts_s {
	/*
	 * The largest size we'll allocate out of the shard. For those
	 * allocations refused, the caller (in practice, the PA module) will
	 * fall back to the more general (for now) PAC, which can always handle
	 * any allocation request.
	 */
	size_t slab_max_alloc;

	/*
	 * When the number of active bytes in a hugepage is >=
	 * hugification_threshold, we force hugify it.
	 */
	size_t hugification_threshold;
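	/*
	 * For example, assuming a 2 MiB hugepage, the default threshold of
	 * HUGEPAGE * 95 / 100 below works out to roughly 1.9 MiB of active
	 * bytes.
	 */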

	/*
	 * The HPA purges whenever the number of dirty pages exceeds
	 * dirty_mult * active_pages. This may be set to (fxp_t)-1 to disable
	 * purging.
	 */
	fxp_t dirty_mult;
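	/*
	 * For example, with the default FXP_INIT_PERCENT(25) below, purging
	 * is triggered once dirty pages exceed 25% of active pages.
	 */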

	/*
	 * Whether or not the PAI methods are allowed to defer work to a
	 * subsequent hpa_shard_do_deferred_work() call. Practically, this
	 * corresponds to background threads being enabled. We track this
	 * ourselves for encapsulation purposes.
	 */
	bool deferral_allowed;

	/*
	 * How long a hugepage has to be a hugification candidate before it
	 * will actually get hugified.
	 */
	uint64_t hugify_delay_ms;

	/*
	 * Minimum amount of time between purges.
	 */
	uint64_t min_purge_interval_ms;
};

#define HPA_SHARD_OPTS_DEFAULT {					\
	/* slab_max_alloc */						\
	64 * 1024,							\
	/* hugification_threshold */					\
	HUGEPAGE * 95 / 100,						\
	/* dirty_mult */						\
	FXP_INIT_PERCENT(25),						\
	/*								\
	 * deferral_allowed						\
	 *								\
	 * Really, this is always set by the arena during creation	\
	 * or by an hpa_shard_set_deferral_allowed call, so the value	\
	 * we put here doesn't matter.					\
	 */								\
	false,								\
	/* hugify_delay_ms */						\
	10 * 1000,							\
	/* min_purge_interval_ms */					\
	5 * 1000							\
}
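
/*
 * Illustrative usage (a sketch, not part of this header): callers that want
 * mostly-default settings can copy the initializer and then override
 * individual fields; the override value below is arbitrary.
 *
 *   hpa_shard_opts_t opts = HPA_SHARD_OPTS_DEFAULT;
 *   opts.slab_max_alloc = 32 * 1024;
 */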

#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */