1 | #ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H |
2 | #define JEMALLOC_INTERNAL_MUTEX_PROF_H |
3 | |
4 | #include "jemalloc/internal/atomic.h" |
5 | #include "jemalloc/internal/nstime.h" |
6 | #include "jemalloc/internal/tsd_types.h" |
7 | |
/*
 * X-macro list of the profiled global (non-arena) mutexes. Define OP(mtx)
 * before expanding this macro to generate per-mutex code; see
 * mutex_prof_global_ind_t below for an example.
 */
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
OP(max_per_bg_thd) \
OP(ctl) \
OP(prof) \
OP(prof_thds_data) \
OP(prof_dump) \
OP(prof_recent_alloc) \
OP(prof_recent_dump) \
OP(prof_stats)
18 | |
/*
 * Index of each global mutex (global_prof_mutex_<name>), generated from the
 * X-macro list above; mutex_prof_num_global_mutexes is the total count.
 */
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
25 | |
/*
 * X-macro list of the profiled per-arena mutexes. Define OP(mtx) before
 * expanding; see mutex_prof_arena_ind_t below.
 */
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list) \
OP(hpa_shard) \
OP(hpa_shard_grow) \
OP(hpa_sec)
39 | |
/*
 * Index of each per-arena mutex (arena_prof_mutex_<name>), generated from the
 * X-macro list above; mutex_prof_num_arena_mutexes is the total count.
 */
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
46 | |
47 | /* |
48 | * The forth parameter is a boolean value that is true for derived rate counters |
49 | * and false for real ones. |
50 | */ |
51 | #define MUTEX_PROF_UINT64_COUNTERS \ |
52 | OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \ |
53 | OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \ |
54 | OP(num_wait, uint64_t, "n_waiting", false, num_wait) \ |
55 | OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \ |
56 | OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \ |
57 | OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \ |
58 | OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \ |
59 | OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \ |
60 | OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \ |
61 | OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \ |
62 | OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time) |
63 | |
/* X-macro list of the uint32_t mutex counters; same OP signature as above. */
#define MUTEX_PROF_UINT32_COUNTERS \
OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
66 | |
/* All mutex counters, of both widths, as a single X-macro list. */
#define MUTEX_PROF_COUNTERS \
MUTEX_PROF_UINT64_COUNTERS \
MUTEX_PROF_UINT32_COUNTERS
70 | |
/*
 * Generate one enum type per counter width: each counter gets a
 * mutex_counter_<name> index, and mutex_prof_num_<type>_counters is the
 * count. OP is defined here and undefined after both expansions.
 */
#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,

#define COUNTER_ENUM(counter_list, t) \
typedef enum { \
counter_list \
mutex_prof_num_##t##_counters \
} mutex_prof_##t##_counter_ind_t;

COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)

#undef COUNTER_ENUM
#undef OP
84 | |
/*
 * Per-mutex profiling statistics. Field order is deliberate: see the
 * fast-path comment below before reordering anything.
 */
typedef struct {
/*
 * Counters touched on the slow path, i.e. when there is lock
 * contention. We update them once we have the lock.
 */
/* Total time (in nanoseconds) spent waiting on this mutex. */
nstime_t tot_wait_time;
/* Max time (in nanoseconds) spent on a single lock operation. */
nstime_t max_wait_time;
/* # of times have to wait for this mutex (after spinning). */
uint64_t n_wait_times;
/* # of times acquired the mutex through local spinning. */
uint64_t n_spin_acquired;
/* Max # of threads waiting for the mutex at the same time. */
uint32_t max_n_thds;
/* Current # of threads waiting on the lock. Atomic synced. */
atomic_u32_t n_waiting_thds;

/*
 * Data touched on the fast path. These are modified right after we
 * grab the lock, so it's placed closest to the end (i.e. right before
 * the lock) so that we have a higher chance of them being on the same
 * cacheline.
 */
/* # of times the mutex holder is different than the previous one. */
uint64_t n_owner_switches;
/* Previous mutex holder, to facilitate n_owner_switches. */
tsdn_t *prev_owner;
/* # of lock() operations in total. */
uint64_t n_lock_ops;
} mutex_prof_data_t;
116 | |
117 | #endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */ |
118 | |