xref: /unit/src/nxt_mp.c (revision 129:0694be8daf90)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of a specified size, aligned
 * to page_alignment.  A cluster is divided into pages of a specified size.
 * Page size must be a power of 2.  A page can be used entirely or can be
 * divided into chunks of equal size.  Chunk size must be a power of 2.
 * Non-freeable memory is also allocated from pages.  A cluster can contain
 * a mix of pages with different chunk sizes and non-freeable pages.  Cluster
 * size must be a multiple of page size and need not be a power of 2.
 * Allocations larger than a page are allocated outside clusters.  Start
 * addresses and sizes of the clusters and large allocations are stored in
 * an rbtree of blocks so that they can be found on free operations.  The
 * rbtree nodes are sorted by start address.  The rbtree is also used to
 * destroy the memory pool.
 */
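
/*
 * Illustrative pool lifecycle (a sketch only; the sizes below are example
 * values and merely have to satisfy nxt_mp_test_sizes(), error checking
 * is omitted):
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 64);
 *
 *     p = nxt_mp_alloc(mp, 200);      (small: served from a chunked page)
 *     nxt_mp_free(mp, p);
 *
 *     p = nxt_mp_alloc(mp, 8192);     (large: allocated outside clusters)
 *     nxt_mp_free(mp, p);
 *
 *     nxt_mp_destroy(mp);
 */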


typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of chunks or of the whole page, shifted right by
     * pool->chunk_size_shift.  Zero means the page is free; 0xFF means
     * the page holds non-freeable allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of failed allocation attempts due to insufficient
     * free space in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;


/*
 * Some malloc implementations (e.g. jemalloc) allocate large enough
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor were allocated together with the block itself, it would
 * consume an excessive extra 4K of memory.  It is therefore better to
 * allocate the block descriptor separately.
 */

typedef enum {
    /*
     * Block of a cluster.
     * The descriptor is allocated separately from the cluster.
     */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of a large allocation.
     * The descriptor is allocated separately from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of a large allocation.
     * The descriptor is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

#if (NXT_DEBUG)
    nxt_pid_t            pid;
    nxt_tid_t            tid;
#endif

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[];
};


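/*
 * In a chunk map a set bit denotes a free chunk.  __builtin_ffs()
 * returns one plus the index of the least significant set bit, so,
 * for example, map 0xFFFFFFFE (only chunk 0 busy) yields free chunk 1.
 */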
#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                     \
    map |= (1 << chunk)


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)


#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


#if (NXT_HAVE_BUILTIN_CLZ)

#define nxt_lg2(value)                                                        \
    (31 - __builtin_clz(value))

#else

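/*
 * Fallback: branchless log2 via multiply-and-lookup (cf. the public
 * domain "Bit Twiddling Hacks" collection).  Smearing the value fills
 * all bits below the highest set bit, "v - (v >> 1)" then isolates that
 * bit, and multiplying by the magic constant shifts a unique 6-bit
 * pattern into the top bits, which indexes the table below.
 */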
static const int nxt_lg2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5
};

static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;

static int
nxt_lg2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    return nxt_lg2_tab64[((v - (v >> 1)) * nxt_lg2_magic) >> 58];
}

#endif


#if (NXT_DEBUG)

nxt_inline void
nxt_mp_thread_assert(nxt_mp_t *mp)
{
    nxt_tid_t     tid;
    nxt_thread_t  *thread;

    thread = nxt_thread();
    tid = nxt_thread_tid(thread);

    if (nxt_fast_path(mp->tid == tid)) {
        return;
    }

    if (nxt_slow_path(nxt_pid != mp->pid)) {
        mp->pid = nxt_pid;
        mp->tid = tid;

        return;
    }

    nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
    nxt_abort();
}

#else

#define nxt_mp_thread_assert(mp)

#endif


void
nxt_mp_thread_adopt(nxt_mp_t *mp)
{
#if (NXT_DEBUG)
    mp->pid = nxt_pid;
    mp->tid = nxt_thread_tid(NULL);
#endif
}


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    uint32_t     pages, chunk_size_shift, page_size_shift;
    nxt_queue_t  *chunk_pages;

    chunk_size_shift = nxt_lg2(min_chunk_size);
    page_size_shift = nxt_lg2(page_size);

    pages = page_size_shift - chunk_size_shift;

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->chunk_size_shift = chunk_size_shift;
        mp->page_size_shift = page_size_shift;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        while (pages != 0) {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            pages--;
        }

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
    }

    return mp;
}


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp destroy");

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

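        /*
         * An embedded block descriptor resides inside the allocation
         * itself, so only the allocation is freed; a discrete or cluster
         * block descriptor is freed separately.
         */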
        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be a power of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}
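
/*
 * For example, cluster_size = 4096, page_alignment = 128, page_size = 1024,
 * and min_chunk_size = 64 pass every check above: all values are powers
 * of 2, 64 * 32 = 2048 >= 1024, and 4096 is exactly four 1024-byte pages.
 */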


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp alloc: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_alloc_small(mp, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        if (size <= mp->page_size && alignment <= mp->page_alignment) {
            size = nxt_max(size, alignment);

            if (size <= mp->page_size) {
                return nxt_mp_alloc_small(mp, size);
            }
        }

#endif

        return nxt_mp_alloc_large(mp, alignment, size);
    }

    return NULL;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}
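
/*
 * For example, with chunk_size_shift = 4 (16-byte minimum chunks) a
 * request for 48 bytes yields nxt_lg2(47) + 1 - 4 = 2, i.e. the list
 * of pages divided into 64-byte chunks.
 */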


#if !(NXT_DEBUG_MEMORY)

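/*
 * Compute a page's data address: the owning cluster block is recovered
 * from the page descriptor's offset within block->pages[], and the data
 * lies page->number pages past the cluster start.
 */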
nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    nxt_mp_thread_assert(mp);

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove full page from the pool chunk pages list
                 * of pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    /* On page allocation failure "page" is NULL and must not be logged. */
    if (nxt_fast_path(page != NULL)) {
        nxt_debug_alloc("mp chunk:%uz alloc: %p",
                        page->size << mp->chunk_size_shift, p);
    }

    return p;
}


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    nxt_mp_thread_assert(mp);

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

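        /*
         * Evict a page from the list once it is completely full or has
         * failed to satisfy too many allocation requests.
         */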
        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;
    page->u.taken = 0;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    nxt_debug_alloc("mp get: %p", p);

    return p;
}


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

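    /*
     * Link every page of the cluster into the pool's free pages list,
     * preserving ascending page order.
     */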
    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}

#endif


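/*
 * If a large allocation's size is a power of 2, its block descriptor is
 * allocated separately so that the combined size does not spill over a
 * neatly aligned size class (see the note above nxt_mp_block_type_t);
 * otherwise the descriptor is embedded just after the aligned data.
 */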
static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    nxt_debug_alloc("mp free %p", p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of pool chunk pages list
                 * of pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from pool
             * chunk pages list of pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test if all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* Free the cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        mp->retain++;
        nxt_debug_alloc("mp retain: %uD", mp->retain);
    }

    return p;
}


void
nxt_mp_release(nxt_mp_t *mp, void *p)
{
    nxt_mp_free(mp, p);

    mp->retain--;

    nxt_debug_alloc("mp release: %uD", mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);
    }
}


void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp nget: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_get_small(mp, &mp->nget_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp get: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        return nxt_mp_get_small(mp, &mp->get_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}