1 | /* zmalloc - total amount of allocated memory aware version of malloc() |
2 | * |
3 | * Copyright (c) 2009-2010, Salvatore Sanfilippo <antirez at gmail dot com> |
4 | * All rights reserved. |
5 | * |
6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions are met: |
8 | * |
9 | * * Redistributions of source code must retain the above copyright notice, |
10 | * this list of conditions and the following disclaimer. |
11 | * * Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
14 | * * Neither the name of Redis nor the names of its contributors may be used |
15 | * to endorse or promote products derived from this software without |
16 | * specific prior written permission. |
17 | * |
18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
28 | * POSSIBILITY OF SUCH DAMAGE. |
29 | */ |
30 | |
31 | #include "fmacros.h" |
32 | #include "config.h" |
33 | #include "solarisfixes.h" |
34 | |
35 | #include <stdio.h> |
36 | #include <stdlib.h> |
37 | #include <stdint.h> |
38 | #include <unistd.h> |
39 | #include <assert.h> |
40 | |
41 | #ifdef __linux__ |
42 | #include <sys/mman.h> |
43 | #endif |
44 | |
45 | /* This function provide us access to the original libc free(). This is useful |
46 | * for instance to free results obtained by backtrace_symbols(). We need |
47 | * to define this function before including zmalloc.h that may shadow the |
48 | * free implementation if we use jemalloc or another non standard allocator. */ |
void zlibc_free(void *ptr) {
    /* Forward straight to the libc allocator, bypassing any zmalloc
     * accounting or allocator override macros defined below. */
    free(ptr);
}
52 | |
53 | #include <string.h> |
54 | #include <pthread.h> |
55 | #include "zmalloc.h" |
56 | #include "atomicvar.h" |
57 | |
58 | #define UNUSED(x) ((void)(x)) |
59 | |
60 | #ifdef HAVE_MALLOC_SIZE |
61 | #define PREFIX_SIZE (0) |
62 | #define ASSERT_NO_SIZE_OVERFLOW(sz) |
63 | #else |
64 | #if defined(__sun) || defined(__sparc) || defined(__sparc__) |
65 | #define PREFIX_SIZE (sizeof(long long)) |
66 | #else |
67 | #define PREFIX_SIZE (sizeof(size_t)) |
68 | #endif |
69 | #define ASSERT_NO_SIZE_OVERFLOW(sz) assert((sz) + PREFIX_SIZE > (sz)) |
70 | #endif |
71 | |
72 | /* When using the libc allocator, use a minimum allocation size to match the |
73 | * jemalloc behavior that doesn't return NULL in this case. |
74 | */ |
75 | #define MALLOC_MIN_SIZE(x) ((x) > 0 ? (x) : sizeof(long)) |
76 | |
77 | /* Explicitly override malloc/free etc when using tcmalloc. */ |
78 | #if defined(USE_TCMALLOC) |
79 | #define malloc(size) tc_malloc(size) |
80 | #define calloc(count,size) tc_calloc(count,size) |
81 | #define realloc(ptr,size) tc_realloc(ptr,size) |
82 | #define free(ptr) tc_free(ptr) |
83 | #elif defined(USE_JEMALLOC) |
84 | #define malloc(size) je_malloc(size) |
85 | #define calloc(count,size) je_calloc(count,size) |
86 | #define realloc(ptr,size) je_realloc(ptr,size) |
87 | #define free(ptr) je_free(ptr) |
88 | #define mallocx(size,flags) je_mallocx(size,flags) |
89 | #define dallocx(ptr,flags) je_dallocx(ptr,flags) |
90 | #endif |
91 | |
92 | #define update_zmalloc_stat_alloc(__n) atomicIncr(used_memory,(__n)) |
93 | #define update_zmalloc_stat_free(__n) atomicDecr(used_memory,(__n)) |
94 | |
95 | static redisAtomic size_t used_memory = 0; |
96 | |
/* Default out-of-memory handler: report the size of the failed request on
 * stderr and abort the process. Can be replaced via zmalloc_set_oom_handler(). */
static void zmalloc_default_oom(size_t size) {
    fprintf(stderr,
            "zmalloc: Out of memory trying to allocate %zu bytes\n", size);
    fflush(stderr);
    abort();
}
103 | |
104 | static void (*zmalloc_oom_handler)(size_t) = zmalloc_default_oom; |
105 | |
106 | /* Try allocating memory, and return NULL if failed. |
107 | * '*usable' is set to the usable size if non NULL. */ |
void *ztrymalloc_usable(size_t size, size_t *usable) {
    ASSERT_NO_SIZE_OVERFLOW(size);
    /* Request at least MALLOC_MIN_SIZE bytes so that, like jemalloc, a
     * zero-length request never returns NULL. */
    void *ptr = malloc(MALLOC_MIN_SIZE(size)+PREFIX_SIZE);

    if (!ptr) return NULL;
#ifdef HAVE_MALLOC_SIZE
    /* The allocator can report the real (possibly larger) allocation size,
     * so account for that instead of the requested size. */
    size = zmalloc_size(ptr);
    update_zmalloc_stat_alloc(size);
    if (usable) *usable = size;
    return ptr;
#else
    /* No allocator introspection: store the requested size in a PREFIX_SIZE
     * header and hand the caller the memory just after it. */
    *((size_t*)ptr) = size;
    update_zmalloc_stat_alloc(size+PREFIX_SIZE);
    if (usable) *usable = size;
    return (char*)ptr+PREFIX_SIZE;
#endif
}
125 | |
126 | /* Allocate memory or panic */ |
/* Allocate 'size' bytes; on failure invoke the OOM handler, which by
 * default prints an error and aborts. */
void *zmalloc(size_t size) {
    void *result = ztrymalloc_usable(size, NULL);
    if (result == NULL) zmalloc_oom_handler(size);
    return result;
}
132 | |
133 | /* Try allocating memory, and return NULL if failed. */ |
/* Attempt an allocation of 'size' bytes; NULL on failure, no panic. */
void *ztrymalloc(size_t size) {
    return ztrymalloc_usable(size, NULL);
}
138 | |
139 | /* Allocate memory or panic. |
140 | * '*usable' is set to the usable size if non NULL. */ |
/* Allocate 'size' bytes or panic via the OOM handler. When 'usable' is
 * non NULL it receives the usable size of the allocation. */
void *zmalloc_usable(size_t size, size_t *usable) {
    void *result = ztrymalloc_usable(size, usable);
    if (result == NULL) zmalloc_oom_handler(size);
    return result;
}
146 | |
147 | /* Allocation and free functions that bypass the thread cache |
148 | * and go straight to the allocator arena bins. |
149 | * Currently implemented only for jemalloc. Used for online defragmentation. */ |
150 | #ifdef HAVE_DEFRAG |
void *zmalloc_no_tcache(size_t size) {
    ASSERT_NO_SIZE_OVERFLOW(size);
    /* MALLOCX_TCACHE_NONE bypasses jemalloc's per-thread cache so the
     * allocation comes straight from the arena bins. */
    void *ptr = mallocx(size+PREFIX_SIZE, MALLOCX_TCACHE_NONE);
    if (!ptr) zmalloc_oom_handler(size);
    /* jemalloc provides zmalloc_size(), so account the real size. */
    update_zmalloc_stat_alloc(zmalloc_size(ptr));
    return ptr;
}
158 | |
void zfree_no_tcache(void *ptr) {
    /* Free a pointer obtained from zmalloc_no_tcache(), returning it
     * directly to the arena (no thread cache) and updating accounting. */
    if (ptr == NULL) return;
    update_zmalloc_stat_free(zmalloc_size(ptr));
    dallocx(ptr, MALLOCX_TCACHE_NONE);
}
164 | #endif |
165 | |
166 | /* Try allocating memory and zero it, and return NULL if failed. |
167 | * '*usable' is set to the usable size if non NULL. */ |
void *ztrycalloc_usable(size_t size, size_t *usable) {
    ASSERT_NO_SIZE_OVERFLOW(size);
    /* Request at least MALLOC_MIN_SIZE bytes so a zero-length request
     * never yields NULL (matching jemalloc behavior). */
    void *ptr = calloc(1, MALLOC_MIN_SIZE(size)+PREFIX_SIZE);
    if (ptr == NULL) return NULL;

#ifdef HAVE_MALLOC_SIZE
    /* Account for the real allocation size reported by the allocator. */
    size = zmalloc_size(ptr);
    update_zmalloc_stat_alloc(size);
    if (usable) *usable = size;
    return ptr;
#else
    /* Store the requested size in a header, return the memory after it. */
    *((size_t*)ptr) = size;
    update_zmalloc_stat_alloc(size+PREFIX_SIZE);
    if (usable) *usable = size;
    return (char*)ptr+PREFIX_SIZE;
#endif
}
185 | |
186 | /* Allocate memory and zero it or panic. |
187 | * We need this wrapper to have a calloc compatible signature */ |
/* calloc()-compatible wrapper: allocate num*size zeroed bytes or panic.
 * Exists because the rest of the zmalloc API takes a single size argument. */
void *zcalloc_num(size_t num, size_t size) {
    /* Reject a zero element size (the division below would fault) and any
     * num*size product that would wrap past SIZE_MAX. */
    if (size == 0 || num > SIZE_MAX / size) {
        zmalloc_oom_handler(SIZE_MAX);
        return NULL;
    }
    size_t total = num * size;
    void *result = ztrycalloc_usable(total, NULL);
    if (result == NULL) zmalloc_oom_handler(total);
    return result;
}
199 | |
200 | /* Allocate memory and zero it or panic */ |
/* Allocate 'size' zeroed bytes or panic via the OOM handler. */
void *zcalloc(size_t size) {
    void *result = ztrycalloc_usable(size, NULL);
    if (result == NULL) zmalloc_oom_handler(size);
    return result;
}
206 | |
207 | /* Try allocating memory, and return NULL if failed. */ |
/* Attempt a zeroed allocation of 'size' bytes; NULL on failure, no panic. */
void *ztrycalloc(size_t size) {
    return ztrycalloc_usable(size, NULL);
}
212 | |
213 | /* Allocate memory or panic. |
214 | * '*usable' is set to the usable size if non NULL. */ |
/* Zeroed allocation that panics on failure. When 'usable' is non NULL it
 * receives the usable size of the allocation. */
void *zcalloc_usable(size_t size, size_t *usable) {
    void *result = ztrycalloc_usable(size, usable);
    if (result == NULL) zmalloc_oom_handler(size);
    return result;
}
220 | |
221 | /* Try reallocating memory, and return NULL if failed. |
222 | * '*usable' is set to the usable size if non NULL. */ |
void *ztryrealloc_usable(void *ptr, size_t size, size_t *usable) {
    ASSERT_NO_SIZE_OVERFLOW(size);
#ifndef HAVE_MALLOC_SIZE
    void *realptr;
#endif
    size_t oldsize;
    void *newptr;

    /* not allocating anything, just redirect to free. */
    if (size == 0 && ptr != NULL) {
        zfree(ptr);
        if (usable) *usable = 0;
        return NULL;
    }
    /* Not freeing anything, just redirect to malloc. */
    if (ptr == NULL)
        return ztrymalloc_usable(size, usable);

#ifdef HAVE_MALLOC_SIZE
    oldsize = zmalloc_size(ptr);
    newptr = realloc(ptr,size);
    if (newptr == NULL) {
        /* On failure the original block is untouched; report zero usable. */
        if (usable) *usable = 0;
        return NULL;
    }

    /* Swap accounting from the old real size to the new real size. */
    update_zmalloc_stat_free(oldsize);
    size = zmalloc_size(newptr);
    update_zmalloc_stat_alloc(size);
    if (usable) *usable = size;
    return newptr;
#else
    /* Step back to the real allocation start to read the size header. */
    realptr = (char*)ptr-PREFIX_SIZE;
    oldsize = *((size_t*)realptr);
    newptr = realloc(realptr,size+PREFIX_SIZE);
    if (newptr == NULL) {
        if (usable) *usable = 0;
        return NULL;
    }

    /* Rewrite the header with the new size and update accounting.
     * NOTE(review): the free/alloc stats here use the raw sizes without
     * PREFIX_SIZE on both sides, so the header bytes cancel out. */
    *((size_t*)newptr) = size;
    update_zmalloc_stat_free(oldsize);
    update_zmalloc_stat_alloc(size);
    if (usable) *usable = size;
    return (char*)newptr+PREFIX_SIZE;
#endif
}
270 | |
271 | /* Reallocate memory and zero it or panic */ |
/* Reallocate 'ptr' to 'size' bytes or panic. A size of 0 frees the
 * pointer and returns NULL without invoking the OOM handler. */
void *zrealloc(void *ptr, size_t size) {
    void *newptr = ztryrealloc_usable(ptr, size, NULL);
    if (newptr == NULL && size != 0) zmalloc_oom_handler(size);
    return newptr;
}
277 | |
278 | /* Try Reallocating memory, and return NULL if failed. */ |
/* Attempt to reallocate 'ptr' to 'size' bytes; NULL on failure, no panic. */
void *ztryrealloc(void *ptr, size_t size) {
    return ztryrealloc_usable(ptr, size, NULL);
}
283 | |
284 | /* Reallocate memory or panic. |
285 | * '*usable' is set to the usable size if non NULL. */ |
/* Reallocate or panic; '*usable' (when non NULL) receives the usable size.
 * A size of 0 frees the pointer and returns NULL without panicking. */
void *zrealloc_usable(void *ptr, size_t size, size_t *usable) {
    void *newptr = ztryrealloc_usable(ptr, size, usable);
    if (newptr == NULL && size != 0) zmalloc_oom_handler(size);
    return newptr;
}
291 | |
292 | /* Provide zmalloc_size() for systems where this function is not provided by |
293 | * malloc itself, given that in that case we store a header with this |
294 | * information as the first bytes of every allocation. */ |
295 | #ifndef HAVE_MALLOC_SIZE |
296 | size_t zmalloc_size(void *ptr) { |
297 | void *realptr = (char*)ptr-PREFIX_SIZE; |
298 | size_t size = *((size_t*)realptr); |
299 | return size+PREFIX_SIZE; |
300 | } |
301 | size_t zmalloc_usable_size(void *ptr) { |
302 | return zmalloc_size(ptr)-PREFIX_SIZE; |
303 | } |
304 | #endif |
305 | |
/* Free a pointer returned by the zmalloc family and update the memory
 * accounting. Passing NULL is a no-op. */
void zfree(void *ptr) {
#ifndef HAVE_MALLOC_SIZE
    void *realptr;
    size_t oldsize;
#endif

    if (ptr == NULL) return;
#ifdef HAVE_MALLOC_SIZE
    update_zmalloc_stat_free(zmalloc_size(ptr));
    free(ptr);
#else
    /* Step back over the size header to recover the real allocation start. */
    realptr = (char*)ptr-PREFIX_SIZE;
    oldsize = *((size_t*)realptr);
    update_zmalloc_stat_free(oldsize+PREFIX_SIZE);
    free(realptr);
#endif
}
323 | |
324 | /* Similar to zfree, '*usable' is set to the usable size being freed. */ |
void zfree_usable(void *ptr, size_t *usable) {
#ifndef HAVE_MALLOC_SIZE
    void *realptr;
    size_t oldsize;
#endif

    /* NOTE(review): unlike zfree(), 'usable' is dereferenced unconditionally
     * below, so callers must pass a non-NULL pointer. Also note that when
     * ptr is NULL we return without writing '*usable'. */
    if (ptr == NULL) return;
#ifdef HAVE_MALLOC_SIZE
    update_zmalloc_stat_free(*usable = zmalloc_size(ptr));
    free(ptr);
#else
    /* Recover the real allocation start and report the stored size. */
    realptr = (char*)ptr-PREFIX_SIZE;
    *usable = oldsize = *((size_t*)realptr);
    update_zmalloc_stat_free(oldsize+PREFIX_SIZE);
    free(realptr);
#endif
}
342 | |
/* Duplicate a C string into zmalloc-managed memory. Panics on OOM via
 * zmalloc(); caller owns the result and must zfree() it. */
char *zstrdup(const char *s) {
    size_t len = strlen(s)+1;          /* include the NUL terminator */
    char *copy = zmalloc(len);
    memcpy(copy,s,len);
    return copy;
}
350 | |
/* Return the total bytes currently allocated through the zmalloc family,
 * read atomically from the 'used_memory' counter. */
size_t zmalloc_used_memory(void) {
    size_t um;
    atomicGet(used_memory,um);
    return um;
}
356 | |
/* Install a custom out-of-memory handler, replacing the default one that
 * prints an error and aborts. */
void zmalloc_set_oom_handler(void (*oom_handler)(size_t)) {
    zmalloc_oom_handler = oom_handler;
}
360 | |
361 | /* Use 'MADV_DONTNEED' to release memory to operating system quickly. |
362 | * We do that in a fork child process to avoid CoW when the parent modifies |
363 | * these shared pages. */ |
void zmadvise_dontneed(void *ptr) {
#if defined(USE_JEMALLOC) && defined(__linux__)
    /* Page size never changes at runtime; cache it after the first call. */
    static size_t page_size = 0;
    if (page_size == 0) page_size = sysconf(_SC_PAGESIZE);
    size_t page_size_mask = page_size - 1;

    size_t real_size = zmalloc_size(ptr);
    if (real_size < page_size) return;

    /* We need to align the pointer upwards according to page size, because
     * the memory address is increased upwards and we only can free memory
     * based on page. */
    char *aligned_ptr = (char *)(((size_t)ptr+page_size_mask) & ~page_size_mask);
    real_size -= (aligned_ptr-(char*)ptr);
    if (real_size >= page_size) {
        /* Round the length down to whole pages; the kernel may reclaim
         * them immediately with MADV_DONTNEED. */
        madvise((void *)aligned_ptr, real_size&~page_size_mask, MADV_DONTNEED);
    }
#else
    (void)(ptr);
#endif
}
385 | |
386 | /* Get the RSS information in an OS-specific way. |
387 | * |
388 | * WARNING: the function zmalloc_get_rss() is not designed to be fast |
389 | * and may not be called in the busy loops where Redis tries to release |
390 | * memory expiring or swapping out objects. |
391 | * |
392 | * For this kind of "fast RSS reporting" usages use instead the |
393 | * function RedisEstimateRSS() that is a much faster (and less precise) |
394 | * version of the function. */ |
395 | |
396 | #if defined(HAVE_PROC_STAT) |
397 | #include <sys/types.h> |
398 | #include <sys/stat.h> |
399 | #include <fcntl.h> |
400 | #endif |
401 | |
402 | /* Get the i'th field from "/proc/self/stats" note i is 1 based as appears in the 'proc' man page */ |
/* Parse field 'i' (1-based, per proc(5)) of /proc/self/stat into '*res'.
 * Returns 1 on success, 0 on any failure. */
int get_proc_stat_ll(int i, long long *res) {
#if defined(HAVE_PROC_STAT)
    char buf[4096];
    int fd, l;
    char *p, *x;

    if ((fd = open("/proc/self/stat" ,O_RDONLY)) == -1) return 0;
    if ((l = read(fd,buf,sizeof(buf)-1)) <= 0) {
        close(fd);
        return 0;
    }
    close(fd);
    buf[l] = '\0';
    if (buf[l-1] == '\n') buf[l-1] = '\0';

    /* Skip pid and process name (surrounded with parentheses) */
    /* strrchr is used because the command name itself may contain ')'. */
    p = strrchr(buf, ')');
    if (!p) return 0;
    p++;
    while (*p == ' ') p++;
    if (*p == '\0') return 0;
    /* p now points at field 3; convert the 1-based field index into the
     * number of space-separated fields still to skip. */
    i -= 3;
    if (i < 0) return 0;

    while (p && i--) {
        p = strchr(p, ' ');
        if (p) p++;
        else return 0;
    }
    /* Terminate the field so strtoll() sees exactly one token. */
    x = strchr(p,' ');
    if (x) *x = '\0';

    *res = strtoll(p,&x,10);
    if (*x != '\0') return 0;
    return 1;
#else
    UNUSED(i);
    UNUSED(res);
    return 0;
#endif
}
444 | |
445 | #if defined(HAVE_PROC_STAT) |
/* Return the Resident Set Size of the current process in bytes, obtained
 * by parsing /proc/self/stat. Returns 0 if the value cannot be read.
 *
 * NOTE: the original text dropped the function name and the 'rss' local
 * ("size_t (void)" / "long long ;"), which cannot compile; restored here. */
size_t zmalloc_get_rss(void) {
    int page = sysconf(_SC_PAGESIZE);
    long long rss;

    /* RSS is the 24th field in /proc/<pid>/stat */
    if (!get_proc_stat_ll(24, &rss)) return 0;
    /* The field is expressed in pages; convert to bytes. */
    rss *= page;
    return rss;
}
455 | #elif defined(HAVE_TASKINFO) |
456 | #include <sys/types.h> |
457 | #include <sys/sysctl.h> |
458 | #include <mach/task.h> |
459 | #include <mach/mach_init.h> |
460 | |
/* macOS: query the Mach task info for the resident size, in bytes.
 * Returns 0 if the task port cannot be obtained. */
size_t zmalloc_get_rss(void) {
    task_t task = MACH_PORT_NULL;
    struct task_basic_info t_info;
    mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;

    if (task_for_pid(current_task(), getpid(), &task) != KERN_SUCCESS)
        return 0;
    task_info(task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);

    return t_info.resident_size;
}
472 | #elif defined(__FreeBSD__) || defined(__DragonFly__) |
473 | #include <sys/types.h> |
474 | #include <sys/sysctl.h> |
475 | #include <sys/user.h> |
476 | |
/* FreeBSD / DragonFly: fetch kinfo_proc via sysctl and convert the RSS
 * page count to bytes. Returns 0 on sysctl failure. */
size_t zmalloc_get_rss(void) {
    struct kinfo_proc info;
    size_t infolen = sizeof(info);
    int mib[4];
    mib[0] = CTL_KERN;
    mib[1] = KERN_PROC;
    mib[2] = KERN_PROC_PID;
    mib[3] = getpid();

    /* The RSS field name differs between the two kernels. */
    if (sysctl(mib, 4, &info, &infolen, NULL, 0) == 0)
#if defined(__FreeBSD__)
        return (size_t)info.ki_rssize * getpagesize();
#else
        return (size_t)info.kp_vm_rssize * getpagesize();
#endif

    return 0L;
}
495 | #elif defined(__NetBSD__) || defined(__OpenBSD__) |
496 | #include <sys/types.h> |
497 | #include <sys/sysctl.h> |
498 | |
499 | #if defined(__OpenBSD__) |
500 | #define kinfo_proc2 kinfo_proc |
501 | #define KERN_PROC2 KERN_PROC |
502 | #define __arraycount(a) (sizeof(a) / sizeof(a[0])) |
503 | #endif |
504 | |
/* NetBSD / OpenBSD: fetch process info via the KERN_PROC2 sysctl (aliased
 * to KERN_PROC on OpenBSD above) and convert RSS pages to bytes. */
size_t zmalloc_get_rss(void) {
    struct kinfo_proc2 info;
    size_t infolen = sizeof(info);
    int mib[6];
    mib[0] = CTL_KERN;
    mib[1] = KERN_PROC2;
    mib[2] = KERN_PROC_PID;
    mib[3] = getpid();
    mib[4] = sizeof(info);     /* size of each returned structure */
    mib[5] = 1;                /* number of structures requested */
    if (sysctl(mib, __arraycount(mib), &info, &infolen, NULL, 0) == 0)
        return (size_t)info.p_vm_rssize * getpagesize();

    return 0L;
}
520 | #elif defined(__HAIKU__) |
521 | #include <OS.h> |
522 | |
/* Haiku: sum the RAM size of every area owned by this team (process). */
size_t zmalloc_get_rss(void) {
    area_info info;
    thread_info th;
    size_t rss = 0;
    ssize_t cookie = 0;

    /* find_thread(0) returns the calling thread; its team id identifies
     * the current process. */
    if (get_thread_info(find_thread(0), &th) != B_OK)
        return 0;

    while (get_next_area_info(th.team, &cookie, &info) == B_OK)
        rss += info.ram_size;

    return rss;
}
537 | #elif defined(HAVE_PSINFO) |
538 | #include <unistd.h> |
539 | #include <sys/procfs.h> |
540 | #include <fcntl.h> |
541 | |
/* Solaris-style /proc: read prpsinfo via the PIOCPSINFO ioctl.
 * NOTE(review): pr_rssize is returned as-is; on this interface it appears
 * to be in kilobytes rather than bytes — verify against consumers. */
size_t zmalloc_get_rss(void) {
    struct prpsinfo info;
    char filename[256];
    int fd;

    snprintf(filename,256,"/proc/%ld/psinfo" ,(long) getpid());

    if ((fd = open(filename,O_RDONLY)) == -1) return 0;
    if (ioctl(fd, PIOCPSINFO, &info) == -1) {
        close(fd);
        return 0;
    }

    close(fd);
    return info.pr_rssize;
}
558 | #else |
size_t zmalloc_get_rss(void) {
    /* No OS-specific way to read the RSS on this platform, so fall back
     * to the allocator's own accounting from zmalloc().
     *
     * As a consequence fragmentation will always appear to be 1 (i.e. no
     * fragmentation at all). */
    size_t estimated = zmalloc_used_memory();
    return estimated;
}
567 | #endif |
568 | |
569 | #if defined(USE_JEMALLOC) |
570 | |
/* Fill jemalloc's allocated/active/resident statistics. All three outputs
 * are zeroed first; stats that cannot be read stay 0. Always returns 1. */
int zmalloc_get_allocator_info(size_t *allocated,
                               size_t *active,
                               size_t *resident) {
    uint64_t epoch = 1;
    size_t sz;
    *allocated = *resident = *active = 0;
    /* Update the statistics cached by mallctl. */
    sz = sizeof(epoch);
    je_mallctl("epoch" , &epoch, &sz, &epoch, sz);
    sz = sizeof(size_t);
    /* Unlike RSS, this does not include RSS from shared libraries and other non
     * heap mappings. */
    je_mallctl("stats.resident" , resident, &sz, NULL, 0);
    /* Unlike resident, this doesn't not include the pages jemalloc reserves
     * for re-use (purge will clean that). */
    je_mallctl("stats.active" , active, &sz, NULL, 0);
    /* Unlike zmalloc_used_memory, this matches the stats.resident by taking
     * into account all allocations done by this process (not only zmalloc). */
    je_mallctl("stats.allocated" , allocated, &sz, NULL, 0);
    return 1;
}
592 | |
/* Enable or disable jemalloc's background purging thread. */
void set_jemalloc_bg_thread(int enable) {
    /* let jemalloc do purging asynchronously, required when there's no traffic
     * after flushdb */
    char val = !!enable;    /* mallctl expects a boolean-valued char */
    je_mallctl("background_thread" , NULL, 0, &val, 1);
}
599 | |
/* Ask jemalloc to return all unused (reserved) dirty pages to the OS.
 * Returns 0 on success, -1 on error. */
int jemalloc_purge() {
    char tmp[32];
    unsigned narenas = 0;
    size_t sz = sizeof(unsigned);
    if (!je_mallctl("arenas.narenas" , &narenas, &sz, NULL, 0)) {
        /* "arena.<narenas>.purge" addresses the special all-arenas index.
         * narenas is unsigned, so use %u (the previous %d mismatched the
         * argument type); snprintf bounds the write defensively. */
        snprintf(tmp, sizeof(tmp), "arena.%u.purge", narenas);
        if (!je_mallctl(tmp, NULL, 0, NULL, 0))
            return 0;
    }
    return -1;
}
612 | |
613 | #else |
614 | |
/* Stub for non-jemalloc builds: no allocator statistics are available,
 * so all three outputs are reported as zero. Always returns 1. */
int zmalloc_get_allocator_info(size_t *allocated,
                               size_t *active,
                               size_t *resident) {
    *allocated = 0;
    *active = 0;
    *resident = 0;
    return 1;
}
621 | |
/* Stub for non-jemalloc builds: background purge threads are a
 * jemalloc-only feature, so this is a no-op. */
void set_jemalloc_bg_thread(int enable) {
    (void)enable;
}
625 | |
/* Stub for non-jemalloc builds: nothing to purge, report success. */
int jemalloc_purge() {
    return 0;
}
629 | |
630 | #endif |
631 | |
632 | #if defined(__APPLE__) |
633 | /* For proc_pidinfo() used later in zmalloc_get_smap_bytes_by_field(). |
634 | * Note that this file cannot be included in zmalloc.h because it includes |
635 | * a Darwin queue.h file where there is a "LIST_HEAD" macro (!) defined |
636 | * conficting with Redis user code. */ |
637 | #include <libproc.h> |
638 | #endif |
639 | |
640 | /* Get the sum of the specified field (converted form kb to bytes) in |
641 | * /proc/self/smaps. The field must be specified with trailing ":" as it |
642 | * apperas in the smaps output. |
643 | * |
644 | * If a pid is specified, the information is extracted for such a pid, |
645 | * otherwise if pid is -1 the information is reported is about the |
646 | * current process. |
647 | * |
648 | * Example: zmalloc_get_smap_bytes_by_field("Rss:",-1); |
649 | */ |
650 | #if defined(HAVE_PROC_SMAPS) |
size_t zmalloc_get_smap_bytes_by_field(char *field, long pid) {
    char line[1024];
    size_t bytes = 0;
    int flen = strlen(field);
    FILE *fp;

    if (pid == -1) {
        fp = fopen("/proc/self/smaps" ,"r" );
    } else {
        char filename[128];
        snprintf(filename,sizeof(filename),"/proc/%ld/smaps" ,pid);
        fp = fopen(filename,"r" );
    }

    if (!fp) return 0;
    while(fgets(line,sizeof(line),fp) != NULL) {
        if (strncmp(line,field,flen) == 0) {
            /* Matching lines look like "Rss:  1234 kB"; cut at the 'k' of
             * "kB" so strtol() parses just the number, then scale to bytes. */
            char *p = strchr(line,'k');
            if (p) {
                *p = '\0';
                bytes += strtol(line+flen,NULL,10) * 1024;
            }
        }
    }
    fclose(fp);
    return bytes;
}
678 | #else |
679 | /* Get sum of the specified field from libproc api call. |
680 | * As there are per page value basis we need to convert |
681 | * them accordingly. |
682 | * |
683 | * Note that AnonHugePages is a no-op as THP feature |
684 | * is not supported in this platform |
685 | */ |
size_t zmalloc_get_smap_bytes_by_field(char *field, long pid) {
#if defined(__APPLE__)
    struct proc_regioninfo pri;
    if (pid == -1) pid = getpid();
    if (proc_pidinfo(pid, PROC_PIDREGIONINFO, 0, &pri,
                     PROC_PIDREGIONINFO_SIZE) == PROC_PIDREGIONINFO_SIZE)
    {
        /* Map the Linux smaps field names onto the libproc per-page
         * counters, scaling by the page size. */
        int pagesize = getpagesize();
        if (!strcmp(field, "Private_Dirty:" )) {
            return (size_t)pri.pri_pages_dirtied * pagesize;
        } else if (!strcmp(field, "Rss:" )) {
            return (size_t)pri.pri_pages_resident * pagesize;
        } else if (!strcmp(field, "AnonHugePages:" )) {
            /* THP is not supported on this platform. */
            return 0;
        }
    }
    return 0;
#endif
    /* Non-Apple platforms without /proc/smaps: nothing we can measure. */
    ((void) field);
    ((void) pid);
    return 0;
}
708 | #endif |
709 | |
710 | /* Return the total number bytes in pages marked as Private Dirty. |
711 | * |
712 | * Note: depending on the platform and memory footprint of the process, this |
713 | * call can be slow, exceeding 1000ms! |
714 | */ |
size_t zmalloc_get_private_dirty(long pid) {
    /* Delegate to the smaps scanner; pid == -1 means the current process. */
    size_t dirty_bytes = zmalloc_get_smap_bytes_by_field("Private_Dirty:", pid);
    return dirty_bytes;
}
718 | |
719 | /* Returns the size of physical memory (RAM) in bytes. |
720 | * It looks ugly, but this is the cleanest way to achieve cross platform results. |
721 | * Cleaned up from: |
722 | * |
723 | * http://nadeausoftware.com/articles/2012/09/c_c_tip_how_get_physical_memory_size_system |
724 | * |
725 | * Note that this function: |
726 | * 1) Was released under the following CC attribution license: |
727 | * http://creativecommons.org/licenses/by/3.0/deed.en_US. |
728 | * 2) Was originally implemented by David Robert Nadeau. |
729 | * 3) Was modified for Redis by Matt Stancliff. |
730 | * 4) This note exists in order to comply with the original license. |
731 | */ |
size_t zmalloc_get_memory_size(void) {
#if defined(__unix__) || defined(__unix) || defined(unix) || \
    (defined(__APPLE__) && defined(__MACH__))
#if defined(CTL_HW) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM64))
    /* Preferred sysctl path: these MIBs report a 64-bit byte count. */
    int mib[2];
    mib[0] = CTL_HW;
#if defined(HW_MEMSIZE)
    mib[1] = HW_MEMSIZE;            /* OSX. --------------------- */
#elif defined(HW_PHYSMEM64)
    mib[1] = HW_PHYSMEM64;          /* NetBSD, OpenBSD. --------- */
#endif
    int64_t size = 0;               /* 64-bit */
    size_t len = sizeof(size);
    if (sysctl( mib, 2, &size, &len, NULL, 0) == 0)
        return (size_t)size;
    return 0L;          /* Failed? */

#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
    /* FreeBSD, Linux, OpenBSD, and Solaris. -------------------- */
    return (size_t)sysconf(_SC_PHYS_PAGES) * (size_t)sysconf(_SC_PAGESIZE);

#elif defined(CTL_HW) && (defined(HW_PHYSMEM) || defined(HW_REALMEM))
    /* DragonFly BSD, FreeBSD, NetBSD, OpenBSD, and OSX. -------- */
    /* Legacy fallback: these MIBs are only 32-bit, so they saturate on
     * machines with more than 4GB of RAM. */
    int mib[2];
    mib[0] = CTL_HW;
#if defined(HW_REALMEM)
    mib[1] = HW_REALMEM;        /* FreeBSD. ----------------- */
#elif defined(HW_PHYSMEM)
    mib[1] = HW_PHYSMEM;        /* Others. ------------------ */
#endif
    unsigned int size = 0;      /* 32-bit */
    size_t len = sizeof(size);
    if (sysctl(mib, 2, &size, &len, NULL, 0) == 0)
        return (size_t)size;
    return 0L;          /* Failed? */
#else
    return 0L;          /* Unknown method to get the data. */
#endif
#else
    return 0L;          /* Unknown OS. */
#endif
}
774 | |
775 | #ifdef REDIS_TEST |
/* Smoke test for the zmalloc accounting: allocate, reallocate and free a
 * buffer, printing used_memory after each step. Built only with REDIS_TEST. */
int zmalloc_test(int argc, char **argv, int flags) {
    void *ptr;

    UNUSED(argc);
    UNUSED(argv);
    UNUSED(flags);
    printf("Malloc prefix size: %d\n" , (int) PREFIX_SIZE);
    printf("Initial used memory: %zu\n" , zmalloc_used_memory());
    ptr = zmalloc(123);
    printf("Allocated 123 bytes; used: %zu\n" , zmalloc_used_memory());
    ptr = zrealloc(ptr, 456);
    printf("Reallocated to 456 bytes; used: %zu\n" , zmalloc_used_memory());
    zfree(ptr);
    printf("Freed pointer; used: %zu\n" , zmalloc_used_memory());
    return 0;
}
792 | #endif |
793 | |