/* xref: /unit/src/nxt_malloc.h (revision 2084:7d479274f334) */

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

70Sigor@sysoev.ru #ifndef _NXT_UNIX_MALLOC_H_INCLUDED_
80Sigor@sysoev.ru #define _NXT_UNIX_MALLOC_H_INCLUDED_
90Sigor@sysoev.ru 
100Sigor@sysoev.ru 
110Sigor@sysoev.ru NXT_EXPORT void *nxt_malloc(size_t size)
120Sigor@sysoev.ru     NXT_MALLOC_LIKE;
130Sigor@sysoev.ru NXT_EXPORT void *nxt_zalloc(size_t size)
140Sigor@sysoev.ru     NXT_MALLOC_LIKE;
150Sigor@sysoev.ru NXT_EXPORT void *nxt_realloc(void *p, size_t size)
160Sigor@sysoev.ru     NXT_MALLOC_LIKE;
170Sigor@sysoev.ru NXT_EXPORT void *nxt_memalign(size_t alignment, size_t size)
180Sigor@sysoev.ru     NXT_MALLOC_LIKE;
190Sigor@sysoev.ru 
200Sigor@sysoev.ru 
#if (NXT_DEBUG)

/*
 * In debug builds nxt_free() is a real function (implemented elsewhere),
 * presumably so debug bookkeeping/logging can be attached — confirm in
 * the implementation file.
 */
NXT_EXPORT void nxt_free(void *p);

#else

/* In non-debug builds nxt_free() maps directly onto libc free(). */
#define nxt_free(p)                                                           \
    free(p)

#endif


#if (NXT_HAVE_MALLOC_USABLE_SIZE)

/*
 * Due to allocation strategies malloc() allocators may allocate more
 * memory than is requested, so malloc_usable_size() allows to use all
 * allocated memory.  It is helpful for socket buffers or unaligned disk
 * file I/O.  However, they may be suboptimal for aligned disk file I/O.
 *
 * Both macros below are statement-like: they assign to "size" in place,
 * so "size" must be a plain modifiable lvalue at the call site.
 */

#if (NXT_LINUX)

/*
 * Linux glibc stores bookkeeping information together with allocated
 * memory itself.  Size of the bookkeeping information is 12 or 24 bytes
 * on 32-bit and 64-bit platforms respectively.  Due to alignment there
 * are usually 4 or 8 spare bytes respectively.  However, if allocation
 * is larger than about 128K, spare size may be up to one page: glibc aligns
 * sum of allocation and bookkeeping size to a page.  So if requirement
 * of the large allocation size is not strict it is better to allocate
 * with small cutback and then to adjust size with malloc_usable_size().
 * Glibc malloc_usable_size() is fast operation.
 */

#define nxt_malloc_usable_size(p, size)                                       \
    size = malloc_usable_size(p)

/*
 * Shave 32 bytes off large (>127K) requests so that glibc's page-rounded
 * allocation does not spill into one more page; see the comment above.
 */
#define nxt_malloc_cutback(cutback, size)                                     \
    size = ((cutback) && size > 127 * 1024) ? size - 32 : size

#elif (NXT_FREEBSD)

/*
 * FreeBSD prior to 7.0 (phkmalloc) aligns sizes to
 *        16 - 2048   a power of two
 *      2049 -  ...   aligned to 4K
 *
 * FreeBSD 7.0 (jemalloc) aligns sizes to:
 *         2 -    8   a power of two
 *         9 -  512   aligned to 16
 *       513 - 2048   a power of two, i.e. aligned to 1K
 *      2049 -    1M  aligned to 4K
 *         1M-  ...   aligned to 1M
 * See table in src/lib/libc/stdlib/malloc.c
 *
 * FreeBSD 7.0 malloc_usable_size() is fast for allocations, which
 * are lesser than 1M.  Larger allocations require mutex acquiring.
 */

#define nxt_malloc_usable_size(p, size)                                       \
    size = malloc_usable_size(p)

/* No cutback adjustment on FreeBSD — see the alignment table above. */
#define nxt_malloc_cutback(cutback, size)

#endif

#elif (NXT_HAVE_MALLOC_GOOD_SIZE)

/*
 * MacOSX aligns sizes to
 *        16 -  496   aligned to 16, 32-bit
 *        16 -  992   aligned to 16, 64-bit
 *   497/993 -   15K  aligned to 512, if lesser than 1G RAM
 *   497/993 -  127K  aligned to 512, otherwise
 *   15K/127K-  ...   aligned to 4K
 *
 * malloc_good_size() is faster than malloc_size()
 */

/*
 * Note: unlike the branches above, this intentionally derives the
 * usable size from the requested "size" rather than from the pointer
 * "p", because malloc_good_size() is cheaper than malloc_size()
 * (see the comment above); "p" is deliberately unused here.
 */
#define nxt_malloc_usable_size(p, size)                                       \
    size = malloc_good_size(size)

#define nxt_malloc_cutback(cutback, size)

#else

/* No way to query usable size on this platform: both macros are no-ops. */
#define nxt_malloc_usable_size(p, size)

#define nxt_malloc_cutback(cutback, size)

#endif


/*
 * Largest alignment, expressed as a power-of-two shift, that the
 * aligned-allocation path supports on this platform — presumably the
 * bound honored by nxt_memalign(); confirm against its implementation.
 * With posix_memalign()/memalign() any power-of-two alignment works;
 * otherwise only what plain malloc() guarantees is available.
 */
#if (NXT_HAVE_POSIX_MEMALIGN || NXT_HAVE_MEMALIGN)
#define NXT_MAX_MEMALIGN_SHIFT  32

#elif (NXT_FREEBSD)
#define NXT_MAX_MEMALIGN_SHIFT  12

#else
#define NXT_MAX_MEMALIGN_SHIFT  3
#endif


#endif /* _NXT_UNIX_MALLOC_H_INCLUDED_ */