1 | #ifndef Py_INTERNAL_GC_H |
2 | #define Py_INTERNAL_GC_H |
3 | #ifdef __cplusplus |
4 | extern "C" { |
5 | #endif |
6 | |
7 | #ifndef Py_BUILD_CORE |
8 | # error "this header requires Py_BUILD_CORE define" |
9 | #endif |
10 | |
/* GC information is stored BEFORE the object structure: every GC-tracked
   object is allocated with a PyGC_Head immediately preceding it, and
   _Py_AS_GC() recovers the header from an object pointer.  The two fields
   form a doubly-linked list; they are declared uintptr_t rather than
   PyGC_Head* because their low bits double as flag storage (see the
   _PyGC_PREV_MASK_* flags below). */
typedef struct {
    // Pointer to next object in the list.
    // 0 means the object is not tracked
    uintptr_t _gc_next;

    // Pointer to previous object in the list.
    // Lowest two bits are used for flags documented later.
    uintptr_t _gc_prev;
} PyGC_Head;
21 | |
/* Map an object pointer to the PyGC_Head allocated immediately before it.
   Only meaningful for objects with GC support. */
#define _Py_AS_GC(o) ((PyGC_Head *)(o)-1)

/* True if the object is currently tracked by the GC.
   Relies on _gc_next being 0 for untracked objects (see PyGC_Head). */
#define _PyObject_GC_IS_TRACKED(o) (_Py_AS_GC(o)->_gc_next != 0)

/* True if the object may be tracked by the GC in the future, or already is.
   This can be useful to implement some optimizations.
   Per the untracking notes below, exact tuples are the case that can be
   untracked during collection, so an untracked exact tuple is excluded;
   any other GC-capable object counts as "may be tracked". */
#define _PyObject_GC_MAY_BE_TRACKED(obj) \
    (PyObject_IS_GC(obj) && \
        (!PyTuple_CheckExact(obj) || _PyObject_GC_IS_TRACKED(obj)))
32 | |
33 | |
/* Bit flags for _gc_prev */
/* Bit 0 is set when tp_finalize is called */
#define _PyGC_PREV_MASK_FINALIZED (1)
/* Bit 1 is set while the object is in the generation currently being
   collected. */
#define _PyGC_PREV_MASK_COLLECTING (2)
/* The (N-2) most significant bits contain the real address. */
#define _PyGC_PREV_SHIFT (2)
#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)

// Lowest bit of _gc_next is used for flags only in GC.
// But it is always 0 for normal code.
#define _PyGCHead_NEXT(g) ((PyGC_Head*)(g)->_gc_next)
#define _PyGCHead_SET_NEXT(g, p) ((g)->_gc_next = (uintptr_t)(p))

// Lowest two bits of _gc_prev is used for _PyGC_PREV_MASK_* flags.
#define _PyGCHead_PREV(g) ((PyGC_Head*)((g)->_gc_prev & _PyGC_PREV_MASK))

// Store `p` as the previous-object pointer of `g`, preserving the two low
// flag bits already present in _gc_prev.  `p` must be aligned so that its
// low _PyGC_PREV_SHIFT bits are zero; the assert enforces this.
// Note: `p` is parenthesized before the cast so that an argument containing
// a low-precedence operator (e.g. `a + b`) expands correctly.
#define _PyGCHead_SET_PREV(g, p) do { \
    assert(((uintptr_t)(p) & ~_PyGC_PREV_MASK) == 0); \
    (g)->_gc_prev = ((g)->_gc_prev & ~_PyGC_PREV_MASK) \
        | ((uintptr_t)(p)); \
    } while (0)
55 | |
/* Has tp_finalize already been called for this object?
   (Reads flag bit 0 of _gc_prev; see _PyGC_PREV_MASK_FINALIZED.) */
#define _PyGCHead_FINALIZED(g) \
    (((g)->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0)
/* Mark the object as finalized.  No macro here clears the bit. */
#define _PyGCHead_SET_FINALIZED(g) \
    ((g)->_gc_prev |= _PyGC_PREV_MASK_FINALIZED)

/* PyObject* variants of the two macros above, converting via _Py_AS_GC(). */
#define _PyGC_FINALIZED(o) \
    _PyGCHead_FINALIZED(_Py_AS_GC(o))
#define _PyGC_SET_FINALIZED(o) \
    _PyGCHead_SET_FINALIZED(_Py_AS_GC(o))
65 | |
66 | |
/* GC runtime state */

/* Number of collection generations (sized by object age).
   If we change this, we need to change the default value in the
   signature of gc.collect. */
#define NUM_GENERATIONS 3
72 | /* |
73 | NOTE: about untracking of mutable objects. |
74 | |
75 | Certain types of container cannot participate in a reference cycle, and |
76 | so do not need to be tracked by the garbage collector. Untracking these |
77 | objects reduces the cost of garbage collections. However, determining |
78 | which objects may be untracked is not free, and the costs must be |
79 | weighed against the benefits for garbage collection. |
80 | |
81 | There are two possible strategies for when to untrack a container: |
82 | |
83 | i) When the container is created. |
84 | ii) When the container is examined by the garbage collector. |
85 | |
86 | Tuples containing only immutable objects (integers, strings etc, and |
87 | recursively, tuples of immutable objects) do not need to be tracked. |
88 | The interpreter creates a large number of tuples, many of which will |
89 | not survive until garbage collection. It is therefore not worthwhile |
90 | to untrack eligible tuples at creation time. |
91 | |
92 | Instead, all tuples except the empty tuple are tracked when created. |
93 | During garbage collection it is determined whether any surviving tuples |
94 | can be untracked. A tuple can be untracked if all of its contents are |
95 | already not tracked. Tuples are examined for untracking in all garbage |
96 | collection cycles. It may take more than one cycle to untrack a tuple. |
97 | |
98 | Dictionaries containing only immutable objects also do not need to be |
99 | tracked. Dictionaries are untracked when created. If a tracked item is |
100 | inserted into a dictionary (either as a key or value), the dictionary |
101 | becomes tracked. During a full garbage collection (all generations), |
102 | the collector will untrack any dictionaries whose contents are not |
103 | tracked. |
104 | |
The gc module provides the Python function is_tracked(obj), which returns
106 | the CURRENT tracking status of the object. Subsequent garbage |
107 | collections may change the tracking status of the object. |
108 | |
109 | Untracking of certain containers was introduced in issue #4688, and |
110 | the algorithm was refined in response to issue #14775. |
111 | */ |
112 | |
/* One GC generation: the head of its doubly-linked object list plus the
   counters that decide when the generation is collected. */
struct gc_generation {
    PyGC_Head head;
    int threshold; /* collection threshold; collection is triggered
                      when `count` exceeds this value */
    int count; /* count of allocations or collections of younger
                  generations */
};
119 | |
/* Running stats per generation, accumulated across collections.
   NOTE(review): presumably surfaced to Python via gc.get_stats() —
   confirm against the gc module implementation. */
struct gc_generation_stats {
    /* total number of collections */
    Py_ssize_t collections;
    /* total number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};
129 | |
/* Complete GC runtime state: trash-can machinery, the generation lists,
   per-generation stats, and long-lived-object accounting. */
struct _gc_runtime_state {
    /* List of objects that still need to be cleaned up, singly linked
     * via their gc headers' gc_prev pointers. */
    PyObject *trash_delete_later;
    /* Current call-stack depth of tp_dealloc calls. */
    int trash_delete_nesting;

    /* Non-zero if automatic collection is enabled. */
    int enabled;
    /* Debug flag bits controlling collector diagnostics. */
    int debug;
    /* linked lists of container objects */
    struct gc_generation generations[NUM_GENERATIONS];
    /* Shortcut to &generations[0].head — presumably; confirm in
       the initialization code. */
    PyGC_Head *generation0;
    /* a permanent generation which won't be collected */
    struct gc_generation permanent_generation;
    struct gc_generation_stats generation_stats[NUM_GENERATIONS];
    /* true if we are currently running the collector */
    int collecting;
    /* list of uncollectable objects */
    PyObject *garbage;
    /* a list of callbacks to be invoked when collection is performed */
    PyObject *callbacks;
    /* This is the number of objects that survived the last full
       collection. It approximates the number of long lived objects
       tracked by the GC.

       (by "full collection", we mean a collection of the oldest
       generation). */
    Py_ssize_t long_lived_total;
    /* This is the number of objects that survived all "non-full"
       collections, and are awaiting to undergo a full collection for
       the first time. */
    Py_ssize_t long_lived_pending;
};
163 | |
/* Initialize *state to its startup configuration. */
extern void _PyGC_InitState(struct _gc_runtime_state *);

/* Run a collection and return the number of objects handled.
   NOTE(review): "NoFail" presumably means errors are swallowed rather
   than propagated (e.g. for interpreter shutdown) — confirm at the
   definition site. */
extern Py_ssize_t _PyGC_CollectNoFail(PyThreadState *tstate);


// Functions to clear types free lists
extern void _PyFrame_ClearFreeList(PyInterpreterState *interp);
extern void _PyTuple_ClearFreeList(PyInterpreterState *interp);
extern void _PyFloat_ClearFreeList(PyInterpreterState *interp);
extern void _PyList_ClearFreeList(PyInterpreterState *interp);
extern void _PyDict_ClearFreeList(PyInterpreterState *interp);
extern void _PyAsyncGen_ClearFreeLists(PyInterpreterState *interp);
extern void _PyContext_ClearFreeList(PyInterpreterState *interp);
177 | |
178 | #ifdef __cplusplus |
179 | } |
180 | #endif |
181 | #endif /* !Py_INTERNAL_GC_H */ |
182 | |