#ifndef JEMALLOC_INTERNAL_TYPES_H
#define JEMALLOC_INTERNAL_TYPES_H

#include "jemalloc/internal/quantum.h"

/* Page size index type. */
typedef unsigned pszind_t;

/* Size class index type. */
typedef unsigned szind_t;

/* Processor / core id type. */
typedef int malloc_cpuid_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define MALLOCX_ARENA_BITS 12
#define MALLOCX_TCACHE_BITS 12
#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_ARENA_SHIFT 20
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
    (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
#define MALLOCX_TCACHE_MASK \
    (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
    ((bool)(flags & MALLOCX_ZERO))

#define MALLOCX_TCACHE_GET(flags) \
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
#define MALLOCX_ARENA_GET(flags) \
    (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
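
/*
 * Illustrative decoding sketch (not part of the original header): the
 * *_GET() macros above assume that the public encoders in jemalloc.h
 * (MALLOCX_LG_ALIGN(), MALLOCX_ZERO, MALLOCX_TCACHE(), MALLOCX_ARENA())
 * store the arena index biased by +1 and the tcache index biased by +2,
 * so that an all-zero field means "unspecified"; the subtractions above
 * undo those biases.  For flags built as
 *
 *     int flags = MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO |
 *         MALLOCX_TCACHE(tc) | MALLOCX_ARENA(a);
 *
 * one would expect MALLOCX_ALIGN_GET(flags) == 64, MALLOCX_ZERO_GET(flags)
 * == true, MALLOCX_TCACHE_GET(flags) == tc, and MALLOCX_ARENA_GET(flags)
 * == a.  When no alignment is encoded the low six bits are zero, so
 * MALLOCX_ALIGN_GET_SPECIFIED() yields 1; the `& (SIZE_T_MAX-1)` in
 * MALLOCX_ALIGN_GET() clears that stray bit and returns 0 instead.
 */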

/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)
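
/*
 * Worked example (illustrative; assumes a 64-bit target where
 * LG_SIZEOF_LONG == 3 and LG_SIZEOF_PTR == 3, so LONG == SIZEOF_PTR == 8):
 *
 *     LONG_CEILING(13) == (13 + 7) & ~7 == 16
 *     PTR_CEILING(24)  == (24 + 7) & ~7 == 24
 *
 * Adding the mask and then clearing the low bits rounds up to the next
 * multiple of a power-of-two size without branching; the same idiom is
 * reused for the cacheline and alignment ceilings below.
 */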

/*
 * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
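
/*
 * Usage sketch (illustrative; the struct name is hypothetical): rounding an
 * object size up to a cacheline multiple, e.g. CACHELINE_CEILING(40) == 64,
 * keeps adjacent objects carved from one allocation from sharing a cache
 * line:
 *
 *     size_t slot_sz = CACHELINE_CEILING(sizeof(struct per_thread_stats));
 *     struct per_thread_stats *slot =
 *         (struct per_thread_stats *)((char *)base + i * slot_sz);
 */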

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & ((~(alignment)) + 1))
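
/*
 * Worked example (illustrative): ((~(alignment)) + 1) is the two's-complement
 * negation of the (power-of-two) alignment, i.e. a mask with the low
 * lg(alignment) bits cleared.  With a == (void *)0x1234 and a 64-byte
 * alignment:
 *
 *     ALIGNMENT_ADDR2BASE(a, 64)    == (void *)0x1200
 *     ALIGNMENT_ADDR2OFFSET(a, 64)  == 0x34
 *     ALIGNMENT_CEILING(0x1234, 64) == 0x1240
 */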

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
    type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
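
/*
 * Usage sketch (illustrative; the element type and count are hypothetical):
 *
 *     VARIABLE_ARRAY(bool, cache_full, nbins);
 *
 * Under C99 this expands to a true VLA (bool cache_full[nbins];); otherwise
 * it falls back to alloca(), so the storage is automatic in both cases
 * (block-scoped for the VLA, function-scoped for the alloca() fallback).
 */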

#endif /* JEMALLOC_INTERNAL_TYPES_H */