#ifndef JEMALLOC_INTERNAL_COUNTER_H
#define JEMALLOC_INTERNAL_COUNTER_H

#include "jemalloc/internal/mutex.h"

typedef struct counter_accum_s {
	LOCKEDINT_MTX_DECLARE(mtx)
	locked_u64_t accumbytes;
	uint64_t interval;
} counter_accum_t;

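/*
 * Accumulate bytes into the counter under its mutex.  Returns true when the
 * accumulated total reaches interval (the accumulator is kept reduced modulo
 * the interval), i.e. when the caller should fire the event associated with
 * this counter.
 */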
JEMALLOC_ALWAYS_INLINE bool
counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
	uint64_t interval = counter->interval;
	assert(interval > 0);
	LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
	/*
	 * If the event moves fast enough (and/or if the event handling is slow
	 * enough), extreme overflow can cause counter trigger coalescing.
	 * This is an intentional mechanism that avoids rate-limiting
	 * allocation.
	 */
	bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
	    &counter->accumbytes, bytes, interval);
	LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
	return overflow;
}

bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);

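/*
 * Usage sketch (illustrative only; the names event_counter, event_interval,
 * and handle_trigger below are hypothetical, not part of jemalloc).  A caller
 * initializes the counter with the byte interval at which it wants triggers,
 * feeds event sizes through counter_accum(), and runs its handler whenever
 * the return value is true:
 *
 *	static counter_accum_t event_counter;
 *
 *	bool
 *	event_init(uint64_t event_interval) {
 *		// A true return indicates initialization failure.
 *		return counter_accum_init(&event_counter, event_interval);
 *	}
 *
 *	void
 *	event_record(tsdn_t *tsdn, uint64_t bytes) {
 *		if (counter_accum(tsdn, &event_counter, bytes)) {
 *			// Accumulated bytes reached the interval; several
 *			// intervals' worth may have been coalesced into
 *			// this single trigger.
 *			handle_trigger(tsdn);
 *		}
 *	}
 */
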
#endif /* JEMALLOC_INTERNAL_COUNTER_H */