xref: /unit/src/nxt_mp.c (revision 147:4ed5e68f196c)
1 
2 /*
3  * Copyright (C) Igor Sysoev
4  * Copyright (C) NGINX, Inc.
5  */
6 
7 #include <nxt_main.h>
8 
9 
10 /*
11  * A memory pool allocates memory in clusters of a specified size, aligned
12  * to page_alignment.  A cluster is divided into pages of a specified size.
13  * Page size must be a power of 2.  A page can be used entirely or divided
14  * into chunks of equal size.  Chunk size must be a power of 2.  Non-freeable
15  * memory is also allocated from pages.  A cluster can contain a mix of pages
16  * with different chunk sizes and non-freeable pages.  Cluster size must be
17  * a multiple of page size and need not be a power of 2.  Allocations larger
18  * than a page are made outside clusters.  Start addresses and sizes of
19  * the clusters and large allocations are stored in an rbtree of blocks so
20  * that they can be found on free operations.  The rbtree nodes are sorted
21  * by start address.  The rbtree is also used to destroy the memory pool.
22  */
23 
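/*
 * An illustrative usage sketch of the hierarchy described above; the sketch
 * function name and the sizes are hypothetical, but the sizes satisfy the
 * constraints checked by nxt_mp_test_sizes() below: 4096-byte clusters of
 * 512-byte pages with 16-byte minimum chunks.
 */

static void
nxt_mp_usage_sketch(void)
{
    void      *small, *large;
    nxt_mp_t  *mp;

    mp = nxt_mp_create(4096, 128, 512, 16);
    if (mp == NULL) {
        return;
    }

    small = nxt_mp_alloc(mp, 100);     /* Served from a 128-byte chunk. */
    large = nxt_mp_alloc(mp, 8192);    /* Larger than a page: outside clusters. */

    if (small != NULL) {
        nxt_mp_free(mp, small);
    }

    if (large != NULL) {
        nxt_mp_free(mp, large);
    }

    nxt_mp_destroy(mp);
}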
24 
25 typedef struct {
26     /*
27      * Used to link
28      *  *) pages with free chunks in pool chunk pages lists,
29      *  *) pages with free space for non-freeable allocations,
30      *  *) free pages in clusters.
31      */
32     nxt_queue_link_t     link;
33 
34     union {
35         /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
36         uint32_t         map;
37 
38         /* Size of taken non-freeable space. */
39         uint32_t         taken;
40     } u;
41 
42     /*
43      * Chunk or page size shifted by pool->chunk_size_shift.  Zero means
44      * the page is free, 0xFF marks a page with non-freeable allocations.
45      */
46     uint8_t              size;
47 
48     /* Number of free chunks of a chunked page. */
49     uint8_t              chunks;
50 
51     /*
52      * Number of failed allocation attempts due to insufficient free space
53      * in a non-freeable page.
54      */
55     uint8_t              fails;
56 
57     /*
58      * Page number in page cluster.
59      * There can be no more than 256 pages in a cluster.
60      */
61     uint8_t              number;
62 } nxt_mp_page_t;
63 
64 
65 /*
66  * Some malloc implementations (e.g. jemalloc) allocate sufficiently large
67  * blocks (e.g. larger than 4K) with 4K alignment.  So if a block
68  * descriptor were allocated together with the block, it would waste an
69  * excessive 4K of memory.  It is therefore better to allocate the block
70  * descriptor separately.
71  */
72 
73 typedef enum {
74     /* Block of a cluster.  The block is allocated apart from the cluster. */
75     NXT_MP_CLUSTER_BLOCK = 0,
76     /*
77      * Block of a large allocation.
78      * The block is allocated apart from the allocation.
79      */
80     NXT_MP_DISCRETE_BLOCK,
81     /*
82      * Block of a large allocation.
83      * The block is allocated immediately after the allocation.
84      */
85     NXT_MP_EMBEDDED_BLOCK,
86 } nxt_mp_block_type_t;
87 
88 
89 typedef struct {
90     NXT_RBTREE_NODE      (node);
91     nxt_mp_block_type_t  type:8;
92 
93     /* Block size must be less than 4G. */
94     uint32_t             size;
95 
96     u_char               *start;
97     nxt_mp_page_t        pages[];
98 } nxt_mp_block_t;
99 
100 
101 struct nxt_mp_s {
102     /* rbtree of nxt_mp_block_t. */
103     nxt_rbtree_t         blocks;
104 
105     uint8_t              chunk_size_shift;
106     uint8_t              page_size_shift;
107     uint32_t             page_size;
108     uint32_t             page_alignment;
109     uint32_t             cluster_size;
110     uint32_t             retain;
111 
112 #if (NXT_DEBUG)
113     nxt_pid_t            pid;
114     nxt_tid_t            tid;
115 #endif
116 
117     /* Lists of nxt_mp_page_t. */
118     nxt_queue_t          free_pages;
119     nxt_queue_t          nget_pages;
120     nxt_queue_t          get_pages;
121     nxt_queue_t          chunk_pages[];
122 };
123 
124 
125 #define nxt_mp_chunk_get_free(map)                                            \
126     (__builtin_ffs(map) - 1)
127 
128 
129 #define nxt_mp_chunk_is_free(map, chunk)                                      \
130     ((map & (1 << chunk)) != 0)
131 
132 
133 #define nxt_mp_chunk_set_busy(map, chunk)                                     \
134     map &= ~(1 << chunk)
135 
136 
137 #define nxt_mp_chunk_set_free(map, chunk)                                     \
138     map |= (1 << chunk)
139 
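/*
 * A hypothetical spot check of the chunk map macros above.  In
 * nxt_mp_alloc_small() below a fresh chunked page starts with map 0xFFFFFFFE:
 * all chunks free except chunk 0, which is handed out together with the page.
 */

static void
nxt_mp_chunk_map_sketch(void)
{
    int       n;
    uint32_t  map;

    map = 0xFFFFFFFE;                  /* Chunk 0 busy, chunks 1..31 free. */

    n = nxt_mp_chunk_get_free(map);    /* Lowest set bit is bit 1, so n == 1. */
    nxt_mp_chunk_set_busy(map, n);     /* map == 0xFFFFFFFC. */

    nxt_mp_chunk_set_free(map, 0);     /* map == 0xFFFFFFFD. */

    if (nxt_mp_chunk_is_free(map, 0)) {
        /* Chunk 0 is available again. */
    }
}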
140 
141 #define nxt_mp_free_junk(p, size)                                             \
142     memset((p), 0x5A, size)
143 
144 
145 #if !(NXT_DEBUG_MEMORY)
146 static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
147 static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
148 static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
149 static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
150 #endif
151 static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
152 static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
153     nxt_rbtree_node_t *node2);
154 static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
155 static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
156     u_char *p);
157 
158 
159 #if (NXT_HAVE_BUILTIN_CLZ)
160 
161 #define nxt_lg2(value)                                                        \
162     (31 - __builtin_clz(value))
163 
164 #else
165 
166 static const int nxt_lg2_tab64[64] = {
167     63,  0, 58,  1, 59, 47, 53,  2,
168     60, 39, 48, 27, 54, 33, 42,  3,
169     61, 51, 37, 40, 49, 18, 28, 20,
170     55, 30, 34, 11, 43, 14, 22,  4,
171     62, 57, 46, 52, 38, 26, 32, 41,
172     50, 36, 17, 19, 29, 10, 13, 21,
173     56, 45, 25, 31, 35, 16,  9, 12,
174     44, 24, 15,  8, 23,  7,  6,  5
175 };
176 
177 static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;
178 
179 static int
180 nxt_lg2(uint64_t v)
181 {
182     v |= v >> 1;
183     v |= v >> 2;
184     v |= v >> 4;
185     v |= v >> 8;
186     v |= v >> 16;
187     v |= v >> 32;
188     return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
189 }
190 
191 #endif
192 
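/*
 * Both variants return floor(log2(value)) for a non-zero value; the fallback
 * smears the highest set bit downwards, isolates it, and uses a de Bruijn
 * style multiplication to index the lookup table.  A few hypothetical spot
 * checks:
 */

static void
nxt_lg2_sketch(void)
{
    int  a, b, c;

    a = nxt_lg2(1);       /* 0 */
    b = nxt_lg2(4096);    /* 12 */
    c = nxt_lg2(4097);    /* Still 12: the result is never rounded up. */

    (void) a;
    (void) b;
    (void) c;
}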
193 
194 #if (NXT_DEBUG)
195 
196 nxt_inline void
197 nxt_mp_thread_assert(nxt_mp_t *mp)
198 {
199     nxt_tid_t     tid;
200     nxt_thread_t  *thread;
201 
202     thread = nxt_thread();
203     tid = nxt_thread_tid(thread);
204 
205     if (nxt_fast_path(mp->tid == tid)) {
206         return;
207     }
208 
209     if (nxt_slow_path(nxt_pid != mp->pid)) {
210         mp->pid = nxt_pid;
211         mp->tid = tid;
212 
213         return;
214     }
215 
216     nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
217     nxt_abort();
218 }
219 
220 #else
221 
222 #define nxt_mp_thread_assert(mp)
223 
224 #endif
225 
226 
227 void
228 nxt_mp_thread_adopt(nxt_mp_t *mp)
229 {
230 #if (NXT_DEBUG)
231     mp->pid = nxt_pid;
232     mp->tid = nxt_thread_tid(NULL);
233 #endif
234 }
235 
236 
237 nxt_mp_t *
238 nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
239     size_t min_chunk_size)
240 {
241     nxt_mp_t     *mp;
242     uint32_t     pages, chunk_size_shift, page_size_shift;
243     nxt_queue_t  *chunk_pages;
244 
245     chunk_size_shift = nxt_lg2(min_chunk_size);
246     page_size_shift = nxt_lg2(page_size);
247 
248     pages = page_size_shift - chunk_size_shift;
249 
250     mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));
251 
252     if (nxt_fast_path(mp != NULL)) {
253         mp->retain = 1;
254         mp->chunk_size_shift = chunk_size_shift;
255         mp->page_size_shift = page_size_shift;
256         mp->page_size = page_size;
257         mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
258         mp->cluster_size = cluster_size;
259 
260         chunk_pages = mp->chunk_pages;
261 
262         while (pages != 0) {
263             nxt_queue_init(chunk_pages);
264             chunk_pages++;
265             pages--;
266         }
267 
268         nxt_queue_init(&mp->free_pages);
269         nxt_queue_init(&mp->nget_pages);
270         nxt_queue_init(&mp->get_pages);
271 
272         nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
273     }
274 
275     nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size,
276                     page_alignment, page_size, min_chunk_size);
277 
278     return mp;
279 }
280 
281 
282 void
283 nxt_mp_destroy(nxt_mp_t *mp)
284 {
285     void               *p;
286     nxt_mp_block_t     *block;
287     nxt_rbtree_node_t  *node, *next;
288 
289     nxt_debug_alloc("mp %p destroy", mp);
290 
291     nxt_mp_thread_assert(mp);
292 
293     next = nxt_rbtree_root(&mp->blocks);
294 
295     while (next != nxt_rbtree_sentinel(&mp->blocks)) {
296 
297         node = nxt_rbtree_destroy_next(&mp->blocks, &next);
298         block = (nxt_mp_block_t *) node;
299 
300         p = block->start;
301 
302         if (block->type != NXT_MP_EMBEDDED_BLOCK) {
303             nxt_free(block);
304         }
305 
306         nxt_free(p);
307     }
308 
309     nxt_free(mp);
310 }
311 
312 
313 nxt_bool_t
314 nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
315     size_t min_chunk_size)
316 {
317     nxt_bool_t  valid;
318 
319     /* Alignment and sizes must be powers of 2. */
320 
321     valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
322                            && nxt_is_power_of_two(page_size)
323                            && nxt_is_power_of_two(min_chunk_size)));
324     if (!valid) {
325         return 0;
326     }
327 
328     page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
329 
330     valid = nxt_expect(1, (page_size >= 64
331                            && page_size >= page_alignment
332                            && page_size >= min_chunk_size
333                            && min_chunk_size * 32 >= page_size
334                            && cluster_size >= page_size
335                            && cluster_size / page_size <= 256
336                            && cluster_size % page_size == 0));
337     if (!valid) {
338         return 0;
339     }
340 
341     return 1;
342 }
343 
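/*
 * The min_chunk_size * 32 >= page_size requirement follows from the 32-bit
 * chunk map in nxt_mp_page_t: a page can never be split into more than 32
 * chunks.  A few hypothetical calls showing which combinations pass:
 */

static void
nxt_mp_test_sizes_sketch(void)
{
    /* Valid: 512-byte pages split into at most 512 / 16 == 32 chunks. */
    (void) nxt_mp_test_sizes(4096, 128, 512, 16);    /* Returns 1. */

    /* Invalid: 8-byte chunks would require 64 chunks per page. */
    (void) nxt_mp_test_sizes(4096, 128, 512, 8);     /* Returns 0. */

    /* Invalid: the cluster size is not a multiple of the page size. */
    (void) nxt_mp_test_sizes(4100, 128, 512, 16);    /* Returns 0. */
}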
344 
345 nxt_bool_t
346 nxt_mp_is_empty(nxt_mp_t *mp)
347 {
348     return (nxt_rbtree_is_empty(&mp->blocks)
349             && nxt_queue_is_empty(&mp->free_pages));
350 }
351 
352 
353 void *
354 nxt_mp_alloc(nxt_mp_t *mp, size_t size)
355 {
356     void  *p;
357 
358 #if !(NXT_DEBUG_MEMORY)
359 
360     if (size <= mp->page_size) {
361         p = nxt_mp_alloc_small(mp, size);
362 
363     } else {
364         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
365     }
366 
367 #else
368 
369     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
370 
371 #endif
372 
373     nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p);
374 
375     return p;
376 }
377 
378 
379 void *
380 nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
381 {
382     void  *p;
383 
384     p = nxt_mp_alloc(mp, size);
385 
386     if (nxt_fast_path(p != NULL)) {
387         memset(p, 0, size);
388     }
389 
390     return p;
391 }
392 
393 
394 void *
395 nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
396 {
397     void    *p;
398 
399     /* Alignment must be a power of 2. */
400 
401     if (nxt_fast_path(nxt_is_power_of_two(alignment))) {
402 
403 #if !(NXT_DEBUG_MEMORY)
404 
405         size_t  aligned_size;
406 
407         aligned_size = nxt_max(size, alignment);
408 
409         if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) {
410             p = nxt_mp_alloc_small(mp, aligned_size);
411 
412         } else {
413             p = nxt_mp_alloc_large(mp, alignment, size);
414         }
415 
416 #else
417 
418         p = nxt_mp_alloc_large(mp, alignment, size);
419 
420 #endif
421 
422     } else {
423         p = NULL;
424     }
425 
426     nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p);
427 
428     return p;
429 }
430 
431 
432 void *
433 nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
434 {
435     void  *p;
436 
437     p = nxt_mp_align(mp, alignment, size);
438 
439     if (nxt_fast_path(p != NULL)) {
440         memset(p, 0, size);
441     }
442 
443     return p;
444 }
445 
446 
447 nxt_inline nxt_uint_t
448 nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
449 {
450     nxt_int_t  n, index;
451 
452     index = 0;
453 
454     if (size > 1) {
455         n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;
456 
457         if (n > 0) {
458             index = n;
459         }
460     }
461 
462     return index;
463 }
464 
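/*
 * The index selects the chunk pages list whose chunk size is the smallest
 * power of 2, not below 1 << chunk_size_shift, that fits the request.  A
 * hypothetical sketch for a pool created with min_chunk_size == 16
 * (chunk_size_shift == 4):
 */

static void
nxt_mp_chunk_pages_index_sketch(nxt_mp_t *mp)
{
    nxt_uint_t  i;

    i = nxt_mp_chunk_pages_index(mp, 1);      /* 0: 16-byte chunks. */
    i = nxt_mp_chunk_pages_index(mp, 16);     /* 0: 16-byte chunks. */
    i = nxt_mp_chunk_pages_index(mp, 17);     /* 1: 32-byte chunks. */
    i = nxt_mp_chunk_pages_index(mp, 100);    /* 3: 128-byte chunks. */

    (void) i;
}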
465 
466 #if !(NXT_DEBUG_MEMORY)
467 
468 nxt_inline u_char *
469 nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
470 {
471     size_t          page_offset;
472     nxt_mp_block_t  *block;
473 
474     page_offset = page->number * sizeof(nxt_mp_page_t)
475                   + offsetof(nxt_mp_block_t, pages);
476 
477     block = (nxt_mp_block_t *) ((u_char *) page - page_offset);
478 
479     return block->start + (page->number << mp->page_size_shift);
480 }
481 
482 
483 static void *
484 nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
485 {
486     u_char            *p;
487     nxt_uint_t        n, index;
488     nxt_queue_t       *chunk_pages;
489     nxt_mp_page_t     *page;
490     nxt_queue_link_t  *link;
491 
492     nxt_mp_thread_assert(mp);
493 
494     p = NULL;
495 
496     if (size <= mp->page_size / 2) {
497 
498         index = nxt_mp_chunk_pages_index(mp, size);
499         chunk_pages = &mp->chunk_pages[index];
500 
501         if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {
502 
503             link = nxt_queue_first(chunk_pages);
504             page = nxt_queue_link_data(link, nxt_mp_page_t, link);
505 
506             p = nxt_mp_page_addr(mp, page);
507 
508             n = nxt_mp_chunk_get_free(page->u.map);
509             nxt_mp_chunk_set_busy(page->u.map, n);
510 
511             p += ((n << index) << mp->chunk_size_shift);
512 
513             page->chunks--;
514 
515             if (page->chunks == 0) {
516                 /*
517                  * Remove full page from the pool chunk pages list
518                  * of pages with free chunks.
519                  */
520                 nxt_queue_remove(&page->link);
521             }
522 
523         } else {
524             page = nxt_mp_alloc_page(mp);
525 
526             if (nxt_fast_path(page != NULL)) {
527                 page->size = (1 << index);
528 
529                 n = mp->page_size_shift - (index + mp->chunk_size_shift);
530                 page->chunks = (1 << n) - 1;
531 
532                 nxt_queue_insert_head(chunk_pages, &page->link);
533 
534                 /* Mark the first chunk as busy. */
535                 page->u.map = 0xFFFFFFFE;
536 
537                 p = nxt_mp_page_addr(mp, page);
538             }
539         }
540 
541     } else {
542         page = nxt_mp_alloc_page(mp);
543 
544         if (nxt_fast_path(page != NULL)) {
545             page->size = mp->page_size >> mp->chunk_size_shift;
546 
547             p = nxt_mp_page_addr(mp, page);
548         }
549     }
550 
551     nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp,
552                     page->size << mp->chunk_size_shift, p);
553 
554     return p;
555 }
556 
557 
558 static void *
559 nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
560 {
561     u_char            *p;
562     uint32_t          available;
563     nxt_mp_page_t     *page;
564     nxt_queue_link_t  *link, *next;
565 
566     nxt_mp_thread_assert(mp);
567 
568     for (link = nxt_queue_first(pages);
569          link != nxt_queue_tail(pages);
570          link = next)
571     {
572         next = nxt_queue_next(link);
573         page = nxt_queue_link_data(link, nxt_mp_page_t, link);
574 
575         available = mp->page_size - page->u.taken;
576 
577         if (size <= available) {
578             goto found;
579         }
580 
581         if (available == 0 || page->fails++ > 100) {
582             nxt_queue_remove(link);
583         }
584     }
585 
586     page = nxt_mp_alloc_page(mp);
587 
588     if (nxt_slow_path(page == NULL)) {
589         return page;
590     }
591 
592     nxt_queue_insert_head(pages, &page->link);
593 
594     page->size = 0xFF;
595     page->u.taken = 0;
596 
597 found:
598 
599     p = nxt_mp_page_addr(mp, page);
600 
601     p += page->u.taken;
602     page->u.taken += size;
603 
604     return p;
605 }
606 
607 
608 static nxt_mp_page_t *
609 nxt_mp_alloc_page(nxt_mp_t *mp)
610 {
611     nxt_mp_page_t     *page;
612     nxt_mp_block_t    *cluster;
613     nxt_queue_link_t  *link;
614 
615     if (nxt_queue_is_empty(&mp->free_pages)) {
616         cluster = nxt_mp_alloc_cluster(mp);
617         if (nxt_slow_path(cluster == NULL)) {
618             return NULL;
619         }
620     }
621 
622     link = nxt_queue_first(&mp->free_pages);
623     nxt_queue_remove(link);
624 
625     page = nxt_queue_link_data(link, nxt_mp_page_t, link);
626 
627     return page;
628 }
629 
630 
631 static nxt_mp_block_t *
632 nxt_mp_alloc_cluster(nxt_mp_t *mp)
633 {
634     nxt_uint_t      n;
635     nxt_mp_block_t  *cluster;
636 
637     n = mp->cluster_size >> mp->page_size_shift;
638 
639     cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));
640 
641     if (nxt_slow_path(cluster == NULL)) {
642         return NULL;
643     }
644 
645     /* NXT_MP_CLUSTER_BLOCK type is zero. */
646 
647     cluster->size = mp->cluster_size;
648 
649     cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
650     if (nxt_slow_path(cluster->start == NULL)) {
651         nxt_free(cluster);
652         return NULL;
653     }
654 
655     n--;
656     cluster->pages[n].number = n;
657     nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);
658 
659     while (n != 0) {
660         n--;
661         cluster->pages[n].number = n;
662         nxt_queue_insert_before(&cluster->pages[n + 1].link,
663                                 &cluster->pages[n].link);
664     }
665 
666     nxt_rbtree_insert(&mp->blocks, &cluster->node);
667 
668     return cluster;
669 }
670 
671 #endif
672 
673 
674 static void *
675 nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
676 {
677     u_char          *p;
678     size_t          aligned_size;
679     uint8_t         type;
680     nxt_mp_block_t  *block;
681 
682     nxt_mp_thread_assert(mp);
683 
684     /* Allocation must be less than 4G. */
685     if (nxt_slow_path(size >= 0xFFFFFFFF)) {
686         return NULL;
687     }
688 
689     if (nxt_is_power_of_two(size)) {
690         block = nxt_malloc(sizeof(nxt_mp_block_t));
691         if (nxt_slow_path(block == NULL)) {
692             return NULL;
693         }
694 
695         p = nxt_memalign(alignment, size);
696         if (nxt_slow_path(p == NULL)) {
697             nxt_free(block);
698             return NULL;
699         }
700 
701         type = NXT_MP_DISCRETE_BLOCK;
702 
703     } else {
704         aligned_size = nxt_align_size(size, sizeof(uintptr_t));
705 
706         p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
707         if (nxt_slow_path(p == NULL)) {
708             return NULL;
709         }
710 
711         block = (nxt_mp_block_t *) (p + aligned_size);
712         type = NXT_MP_EMBEDDED_BLOCK;
713     }
714 
715     block->type = type;
716     block->size = size;
717     block->start = p;
718 
719     nxt_rbtree_insert(&mp->blocks, &block->node);
720 
721     return p;
722 }
723 
724 
725 static intptr_t
726 nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
727 {
728     nxt_mp_block_t  *block1, *block2;
729 
730     block1 = (nxt_mp_block_t *) node1;
731     block2 = (nxt_mp_block_t *) node2;
732 
733     return (uintptr_t) block1->start - (uintptr_t) block2->start;
734 }
735 
736 
737 void
738 nxt_mp_free(nxt_mp_t *mp, void *p)
739 {
740     const char      *err;
741     nxt_thread_t    *thread;
742     nxt_mp_block_t  *block;
743 
744     nxt_mp_thread_assert(mp);
745 
746     nxt_debug_alloc("mp %p free(%p)", mp, p);
747 
748     block = nxt_mp_find_block(&mp->blocks, p);
749 
750     if (nxt_fast_path(block != NULL)) {
751 
752         if (block->type == NXT_MP_CLUSTER_BLOCK) {
753             err = nxt_mp_chunk_free(mp, block, p);
754 
755             if (nxt_fast_path(err == NULL)) {
756                 return;
757             }
758 
759         } else if (nxt_fast_path(p == block->start)) {
760             nxt_rbtree_delete(&mp->blocks, &block->node);
761 
762             if (block->type == NXT_MP_DISCRETE_BLOCK) {
763                 nxt_free(block);
764             }
765 
766             nxt_free(p);
767 
768             return;
769 
770         } else {
771             err = "freed pointer points to middle of block: %p";
772         }
773 
774     } else {
775         err = "freed pointer is out of pool: %p";
776     }
777 
778     thread = nxt_thread();
779 
780     nxt_log(thread->task, NXT_LOG_CRIT, err, p);
781 }
782 
783 
784 static nxt_mp_block_t *
785 nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
786 {
787     nxt_mp_block_t     *block;
788     nxt_rbtree_node_t  *node, *sentinel;
789 
790     node = nxt_rbtree_root(tree);
791     sentinel = nxt_rbtree_sentinel(tree);
792 
793     while (node != sentinel) {
794 
795         block = (nxt_mp_block_t *) node;
796 
797         if (p < block->start) {
798             node = node->left;
799 
800         } else if (p >= block->start + block->size) {
801             node = node->right;
802 
803         } else {
804             return block;
805         }
806     }
807 
808     return NULL;
809 }
810 
811 
812 static const char *
813 nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
814 {
815     u_char         *start;
816     uintptr_t      offset;
817     nxt_uint_t     n, size, chunk;
818     nxt_queue_t    *chunk_pages;
819     nxt_mp_page_t  *page;
820 
821     n = (p - cluster->start) >> mp->page_size_shift;
822     start = cluster->start + (n << mp->page_size_shift);
823 
824     page = &cluster->pages[n];
825 
826     if (nxt_slow_path(page->size == 0)) {
827         return "freed pointer points to already free page: %p";
828     }
829 
830     if (nxt_slow_path(page->size == 0xFF)) {
831         return "freed pointer points to non-freeable page: %p";
832     }
833 
834     size = page->size << mp->chunk_size_shift;
835 
836     if (size != mp->page_size) {
837 
838         offset = (uintptr_t) (p - start) & (mp->page_size - 1);
839         chunk = offset / size;
840 
841         if (nxt_slow_path(offset != chunk * size)) {
842             return "freed pointer points to wrong chunk: %p";
843         }
844 
845         if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
846             return "freed pointer points to already free chunk: %p";
847         }
848 
849         nxt_mp_chunk_set_free(page->u.map, chunk);
850 
851         if (page->u.map != 0xFFFFFFFF) {
852             page->chunks++;
853 
854             if (page->chunks == 1) {
855                 /*
856                  * Add the page to the head of pool chunk pages list
857                  * of pages with free chunks.
858                  */
859                 n = nxt_mp_chunk_pages_index(mp, size);
860                 chunk_pages = &mp->chunk_pages[n];
861 
862                 nxt_queue_insert_head(chunk_pages, &page->link);
863             }
864 
865             nxt_mp_free_junk(p, size);
866 
867             return NULL;
868 
869         } else {
870             /*
871              * All chunks are free, remove the page from pool
872              * chunk pages list of pages with free chunks.
873              */
874             nxt_queue_remove(&page->link);
875         }
876 
877     } else if (nxt_slow_path(p != start)) {
878         return "invalid pointer to chunk: %p";
879     }
880 
881     /* Add the free page to the pool's free pages list. */
882 
883     page->size = 0;
884     nxt_queue_insert_head(&mp->free_pages, &page->link);
885 
886     nxt_mp_free_junk(p, size);
887 
888     /* Test if all pages in the cluster are free. */
889 
890     n = mp->cluster_size >> mp->page_size_shift;
891     page = cluster->pages;
892 
893     do {
894         if (page->size != 0) {
895             return NULL;
896         }
897 
898         page++;
899         n--;
900     } while (n != 0);
901 
902     /* Free cluster. */
903 
904     n = mp->cluster_size >> mp->page_size_shift;
905     page = cluster->pages;
906 
907     do {
908         nxt_queue_remove(&page->link);
909         page++;
910         n--;
911     } while (n != 0);
912 
913     nxt_rbtree_delete(&mp->blocks, &cluster->node);
914 
915     p = cluster->start;
916 
917     nxt_free(cluster);
918     nxt_free(p);
919 
920     return NULL;
921 }
922 
923 
924 void *
925 nxt_mp_retain(nxt_mp_t *mp, size_t size)
926 {
927     void  *p;
928 
929     p = nxt_mp_alloc(mp, size);
930 
931     if (nxt_fast_path(p != NULL)) {
932         mp->retain++;
933         nxt_debug_alloc("mp %p retain: %uD", mp, mp->retain);
934     }
935 
936     return p;
937 }
938 
939 
940 uint32_t
941 nxt_mp_release(nxt_mp_t *mp, void *p)
942 {
943     nxt_mp_free(mp, p);
944 
945     mp->retain--;
946 
947     nxt_debug_alloc("mp %p release: %uD", mp, mp->retain);
948 
949     if (mp->retain == 0) {
950         nxt_mp_destroy(mp);
951 
952         return 0;
953     }
954 
955     return mp->retain;
956 }
957 
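/*
 * A hypothetical lifecycle: nxt_mp_retain() and nxt_mp_release() pair an
 * allocation with a reference on the pool itself, so the pool survives until
 * the last retained allocation has been released.  nxt_mp_create() starts the
 * counter at 1 for the creator; the sizes below are the same hypothetical
 * ones used in the earlier sketches.
 */

static void
nxt_mp_retain_sketch(void)
{
    void      *obj;
    nxt_mp_t  *mp;

    mp = nxt_mp_create(4096, 128, 512, 16);    /* retain == 1 */
    if (mp == NULL) {
        return;
    }

    obj = nxt_mp_retain(mp, 64);               /* On success retain == 2. */

    if (obj != NULL) {
        (void) nxt_mp_release(mp, obj);        /* Frees obj, retain == 1. */
    }

    nxt_mp_destroy(mp);
}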
958 
959 void *
960 nxt_mp_nget(nxt_mp_t *mp, size_t size)
961 {
962     void  *p;
963 
964 #if !(NXT_DEBUG_MEMORY)
965 
966     if (size <= mp->page_size) {
967         p = nxt_mp_get_small(mp, &mp->nget_pages, size);
968 
969     } else {
970         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
971     }
972 
973 #else
974 
975     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
976 
977 #endif
978 
979     nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p);
980 
981     return p;
982 }
983 
984 
985 void *
986 nxt_mp_get(nxt_mp_t *mp, size_t size)
987 {
988     void  *p;
989 
990 #if !(NXT_DEBUG_MEMORY)
991 
992     if (size <= mp->page_size) {
993         size = nxt_max(size, NXT_MAX_ALIGNMENT);
994         p = nxt_mp_get_small(mp, &mp->get_pages, size);
995 
996     } else {
997         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
998     }
999 
1000 #else
1001 
1002     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
1003 
1004 #endif
1005 
1006     nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p);
1007 
1008     return p;
1009 }
1010 
1011 
1012 void *
1013 nxt_mp_zget(nxt_mp_t *mp, size_t size)
1014 {
1015     void  *p;
1016 
1017     p = nxt_mp_get(mp, size);
1018 
1019     if (nxt_fast_path(p != NULL)) {
1020         memset(p, 0, size);
1021     }
1022 
1023     return p;
1024 }
1025
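/*
 * Unlike nxt_mp_alloc(), sub-page nxt_mp_get()/nxt_mp_nget()/nxt_mp_zget()
 * allocations come from non-freeable pages (page->size == 0xFF), so they
 * must not be passed to nxt_mp_free(); they are reclaimed only when the pool
 * itself is destroyed.  A hypothetical sketch:
 */

static void
nxt_mp_get_sketch(nxt_mp_t *mp)
{
    u_char  *p;

    p = nxt_mp_zget(mp, 32);    /* Zeroed, non-freeable. */

    if (p != NULL) {
        /* Use p until nxt_mp_destroy(mp); do not call nxt_mp_free() on it. */
    }
}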