
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#ifndef _NXT_UNIX_MALLOC_H_INCLUDED_
#define _NXT_UNIX_MALLOC_H_INCLUDED_


NXT_EXPORT void *nxt_malloc(size_t size)
    NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_zalloc(size_t size)
    NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_realloc(void *p, size_t size)
    NXT_MALLOC_LIKE;
NXT_EXPORT void *nxt_memalign(size_t alignment, size_t size)
    NXT_MALLOC_LIKE;


#if (NXT_DEBUG)

NXT_EXPORT void nxt_free(void *p);

#else

#define nxt_free(p)                                                           \
    free(p)

#endif


#if (NXT_HAVE_MALLOC_USABLE_SIZE)

/*
 * Due to allocation strategies malloc() allocators may allocate more
 * memory than is requested, so malloc_usable_size() allows using all
 * of the allocated memory.  This is helpful for socket buffers or
 * unaligned disk file I/O.  However, it may be suboptimal for aligned
 * disk file I/O.
 */

#if (NXT_LINUX)

/*
 * Linux glibc stores bookkeeping information together with the allocated
 * memory itself.  The bookkeeping information takes 12 or 24 bytes on
 * 32-bit and 64-bit platforms respectively.  Due to alignment there are
 * usually 4 or 8 spare bytes respectively.  However, if an allocation is
 * larger than about 128K, the spare size may be up to one page: glibc
 * aligns the sum of the allocation and bookkeeping sizes to a page.  So
 * if the large allocation size is not a strict requirement, it is better
 * to allocate with a small cutback and then adjust the size with
 * malloc_usable_size().  Glibc malloc_usable_size() is a fast operation.
 */

#define nxt_malloc_usable_size(p, size)                                       \
    size = malloc_usable_size(p)

#define nxt_malloc_cutback(cutback, size)                                     \
    size = ((cutback) && size > 127 * 1024) ? size - 32 : size

#elif (NXT_FREEBSD)

/*
 * FreeBSD prior to 7.0 (phkmalloc) aligns sizes to:
 *      16 - 2048   a power of two
 *    2049 - ...    aligned to 4K
 *
 * FreeBSD 7.0 (jemalloc) aligns sizes to:
 *       2 - 8      a power of two
 *       9 - 512    aligned to 16
 *     513 - 2048   a power of two, i.e. aligned to 1K
 *    2049 - 1M     aligned to 4K
 *      1M - ...    aligned to 1M
 *
 * See the table in src/lib/libc/stdlib/malloc.c.
 *
 * FreeBSD 7.0 malloc_usable_size() is fast for allocations that are
 * smaller than 1M.  Larger allocations require acquiring a mutex.
 */

#define nxt_malloc_usable_size(p, size)                                       \
    size = malloc_usable_size(p)

#define nxt_malloc_cutback(cutback, size)

#endif

#elif (NXT_HAVE_MALLOC_GOOD_SIZE)

/*
 * MacOSX aligns sizes to:
 *         16 - 496    aligned to 16, 32-bit
 *         16 - 992    aligned to 16, 64-bit
 *    497/993 - 15K    aligned to 512, if there is less than 1G of RAM
 *    497/993 - 127K   aligned to 512, otherwise
 *   15K/127K - ...    aligned to 4K
 *
 * malloc_good_size() is faster than malloc_size().
 */

#define nxt_malloc_usable_size(p, size)                                       \
    size = malloc_good_size(size)

#define nxt_malloc_cutback(cutback, size)

#else

#define nxt_malloc_usable_size(p, size)

#define nxt_malloc_cutback(cutback, size)

#endif


#if (NXT_HAVE_POSIX_MEMALIGN || NXT_HAVE_MEMALIGN)
#define NXT_MAX_MEMALIGN_SHIFT  32

#elif (NXT_FREEBSD)
#define NXT_MAX_MEMALIGN_SHIFT  12

#else
#define NXT_MAX_MEMALIGN_SHIFT  3
#endif


#endif /* _NXT_UNIX_MALLOC_H_INCLUDED_ */
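
/*
 * A minimal usage sketch of the cutback pattern described in the glibc
 * comment above.  It is not part of the original header: the function
 * name nxt_buf_alloc_example() and its real_size parameter are
 * hypothetical, introduced only for illustration, and a real caller
 * would live in a source file rather than in this header.  For a buffer
 * whose size is not a strict requirement, the requested size is first
 * trimmed with nxt_malloc_cutback() so that glibc does not round the
 * allocation up by a whole extra page, and the size actually available
 * is then recovered with nxt_malloc_usable_size().
 */

static void *
nxt_buf_alloc_example(size_t size, size_t *real_size)
{
    void  *p;

    /* Trim non-strict sizes that fall near the ~128K page-rounding boundary. */
    nxt_malloc_cutback(1, size);

    p = nxt_malloc(size);

    if (p != NULL) {
        /* The allocator may have provided more memory than was requested. */
        nxt_malloc_usable_size(p, size);
        *real_size = size;
    }

    return p;
}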