xref: /unit/src/nxt_mp.c (revision 141:96a65c601420)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of specified size, aligned
 * to page_alignment.  A cluster is divided into pages of specified size.
 * Page size must be a power of 2.  A page can be used entirely or can be
 * divided into chunks of equal size.  Chunk size must be a power of 2.
 * Non-freeable memory is also allocated from pages.  A cluster can contain
 * a mix of pages with different chunk sizes and non-freeable pages.  Cluster
 * size must be a multiple of page size and need not be a power of 2.
 * Allocations greater than the page size are made outside clusters.  Start
 * addresses and sizes of the clusters and large allocations are stored in
 * an rbtree of blocks so that they can be found on free operations.  The
 * rbtree nodes are sorted by start addresses.  The rbtree is also used to
 * destroy the memory pool.
 */
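

/*
 * Usage sketch (illustrative parameters, not mandated by this file; the
 * values satisfy the nxt_mp_test_sizes() constraints below: a 1024-byte
 * cluster of four 256-byte pages with 32-byte minimum chunks):
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(1024, 128, 256, 32);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 64);
 *
 *         if (p != NULL) {
 *             ...
 *             nxt_mp_free(mp, p);
 *         }
 *
 *         nxt_mp_destroy(mp);
 *     }
 */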


typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of chunks, or of the whole page, shifted right by
     * pool->chunk_size_shift.  Zero means the page is free, 0xFF means
     * a page with non-freeable allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of failed allocation attempts due to insufficient free
     * space in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;


/*
 * Some malloc implementations (e.g. jemalloc) allocate large enough
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor were allocated together with the block, it would take an
 * excessive 4K of memory.  It is therefore better to allocate the block
 * descriptor apart.
 */

typedef enum {
    /* Block of cluster.  The block is allocated apart from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of large allocation.
     * The block is allocated apart from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

#if (NXT_DEBUG)
    nxt_pid_t            pid;
    nxt_tid_t            tid;
#endif

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[];
};


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                    \
    map |= (1 << chunk)
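

/*
 * Bitmap sketch: in page->u.map a set bit marks a free chunk, so a fresh
 * chunked page starts as 0xFFFFFFFE (chunk 0 busy, see nxt_mp_alloc_small).
 * For a map of 0xFFFFFFF8, chunks 0..2 are busy; __builtin_ffs() returns 4
 * (1-based), and nxt_mp_chunk_get_free() therefore yields chunk 3.
 */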


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)


#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


#if (NXT_HAVE_BUILTIN_CLZ)

#define nxt_lg2(value)                                                        \
    (31 - __builtin_clz(value))

#else

static const int nxt_lg2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5
};

static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;

static int
nxt_lg2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
}

#endif
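

/*
 * Worked example: nxt_lg2(0x1000) is 12 with either variant.  The fallback
 * first smears the highest set bit downwards, isolates it with v - (v >> 1),
 * and then uses the multiplication by nxt_lg2_magic (a de Bruijn-style
 * constant) as a perfect hash of the 64 possible bit positions into
 * nxt_lg2_tab64.
 */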


#if (NXT_DEBUG)

nxt_inline void
nxt_mp_thread_assert(nxt_mp_t *mp)
{
    nxt_tid_t     tid;
    nxt_thread_t  *thread;

    thread = nxt_thread();
    tid = nxt_thread_tid(thread);

    if (nxt_fast_path(mp->tid == tid)) {
        return;
    }

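    /*
     * A different pid means the pool was inherited from another process,
     * presumably across fork(); adopt it for the current process/thread.
     */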
    if (nxt_slow_path(nxt_pid != mp->pid)) {
        mp->pid = nxt_pid;
        mp->tid = tid;

        return;
    }

    nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
    nxt_abort();
}

#else

#define nxt_mp_thread_assert(mp)

#endif


void
nxt_mp_thread_adopt(nxt_mp_t *mp)
{
#if (NXT_DEBUG)
    mp->pid = nxt_pid;
    mp->tid = nxt_thread_tid(NULL);
#endif
}


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    uint32_t     pages, chunk_size_shift, page_size_shift;
    nxt_queue_t  *chunk_pages;

    chunk_size_shift = nxt_lg2(min_chunk_size);
    page_size_shift = nxt_lg2(page_size);

    pages = page_size_shift - chunk_size_shift;

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->chunk_size_shift = chunk_size_shift;
        mp->page_size_shift = page_size_shift;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        while (pages != 0) {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            pages--;
        }

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
    }

    return mp;
}


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp destroy");

    nxt_mp_thread_assert(mp);

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be powers of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}
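

/*
 * Note on the min_chunk_size * 32 check above: the uint32_t chunk bitmap
 * in nxt_mp_page_t can track at most 32 chunks per page, so e.g. a
 * 1024-byte page requires min_chunk_size >= 32.
 */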


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp alloc: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_alloc_small(mp, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        if (size <= mp->page_size && alignment <= mp->page_alignment) {
            size = nxt_max(size, alignment);

            if (size <= mp->page_size) {
                return nxt_mp_alloc_small(mp, size);
            }
        }

#endif

        return nxt_mp_alloc_large(mp, alignment, size);
    }

    return NULL;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}
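

/*
 * For instance, with chunk_size_shift == 5 (32-byte minimum chunks), sizes
 * 1..32 map to index 0, 33..64 to index 1, 65..128 to index 2, and so on:
 * the index selects the smallest power-of-two chunk size that fits.
 */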


#if !(NXT_DEBUG_MEMORY)

nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

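    /*
     * Page structs live in the pages[] flexible array at the end of the
     * cluster block, so the block header can be recovered by stepping
     * back over the preceding page structs and the header itself.
     */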
    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    nxt_mp_thread_assert(mp);

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

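            /* Chunk size is (1 << index) << chunk_size_shift bytes. */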
            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove the full page from the pool chunk pages list
                 * of pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    nxt_debug_alloc("mp chunk:%uz alloc: %p",
                    page->size << mp->chunk_size_shift, p);

    return p;
}


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    nxt_mp_thread_assert(mp);

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

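        /*
         * Evict pages that are exhausted or that have repeatedly failed
         * to satisfy requests, so the search list stays short.
         */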
        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;
    page->u.taken = 0;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    nxt_debug_alloc("mp get: %p", p);

    return p;
}


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

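    /*
     * As the comment before nxt_mp_block_type_t notes, embedding the
     * descriptor after a power-of-two allocation could push the request
     * into the allocator's next size class, so a discrete descriptor is
     * allocated instead.
     */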
    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    nxt_debug_alloc("mp free %p", p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of the pool chunk pages list
                 * of pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from the pool
             * chunk pages list of pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test if all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* Free the cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        mp->retain++;
        nxt_debug_alloc("mp retain: %uD", mp->retain);
    }

    return p;
}


uint32_t
nxt_mp_release(nxt_mp_t *mp, void *p)
{
    nxt_mp_free(mp, p);

    mp->retain--;

    nxt_debug_alloc("mp release: %uD", mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);

        return 0;
    }

    return mp->retain;
}
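

/*
 * Reference counting sketch: nxt_mp_create() initializes retain to 1,
 * nxt_mp_retain() above adds one reference per retained allocation, and
 * nxt_mp_release() drops one; the release that brings the count to zero
 * destroys the pool.
 */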


void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp nget: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_get_small(mp, &mp->nget_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp get: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        return nxt_mp_get_small(mp, &mp->get_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}
987