xref: /unit/src/nxt_mp.c (revision 128:cfa3a5ce7f2f)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of the specified size, aligned
 * to page_alignment.  A cluster is divided into pages of the specified size.
 * The page size must be a power of 2.  A page can be used entirely or can
 * be divided into chunks of equal size.  The chunk size must be a power
 * of 2.  Non-freeable memory is also allocated from pages.  A cluster can
 * contain a mix of pages with different chunk sizes and non-freeable pages.
 * The cluster size must be a multiple of the page size and need not be
 * a power of 2.  Allocations larger than a page are made outside clusters.
 * The start addresses and sizes of the clusters and large allocations are
 * stored in an rbtree of blocks so that they can be found on free
 * operations.  The rbtree nodes are sorted by start address.  The rbtree
 * is also used to destroy the memory pool.
 */

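/*
 * Illustrative sketch (hypothetical sizes, not taken from this file): with
 * a pool created as nxt_mp_create(4096, 128, 1024, 32), each 4096-byte
 * cluster holds four 1024-byte pages.  A 48-byte request is then served
 * from a page divided into 64-byte chunks, a 700-byte request takes an
 * entire page, and a 2000-byte request bypasses the clusters and is made
 * as a large allocation tracked by its own rbtree block.
 */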

typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of the taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of the page's chunks, or of the whole page, shifted right by
     * pool->chunk_size_shift.  Zero means the page is free; 0xFF means
     * a page with non-freeable allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of allocation failures due to insufficient free space
     * in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in the page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;


/*
 * Some malloc implementations (e.g. jemalloc) allocate large enough
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor were allocated together with the block, it would take an
 * excessive 4K of memory.  It is therefore better to allocate the block
 * descriptor separately.
 */

typedef enum {
    /* Block of a cluster.  The block is allocated apart from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of a large allocation.
     * The block is allocated apart from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of a large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;

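/*
 * Worked example of the reasoning above (illustrative figures): if a
 * 4096-byte block and its descriptor were carved from one allocation of
 * 4096 + sizeof(nxt_mp_block_t) bytes, a malloc that 4K-aligns such sizes
 * would round the request up to 8192 bytes, wasting almost 4K.  Keeping
 * the descriptor separate leaves the block request at exactly 4096 bytes;
 * this is why nxt_mp_alloc_large() below uses NXT_MP_DISCRETE_BLOCK for
 * power-of-2 sizes and embeds the descriptor only for other sizes.
 */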

typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

#if (NXT_DEBUG)
    nxt_pid_t            pid;
    nxt_tid_t            tid;
#endif

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[];
};


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                     \
    map |= (1 << chunk)


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)

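/*
 * Illustration of the bitmap macros above: a set bit marks a free chunk.
 * A freshly chunked page is initialized to 0xFFFFFFFE, i.e. chunk 0 is
 * already busy.  nxt_mp_chunk_get_free(0xFFFFFFFE) evaluates to
 * __builtin_ffs(0xFFFFFFFE) - 1 == 1, the lowest free chunk; then
 * nxt_mp_chunk_set_busy(map, 1) clears that bit, giving 0xFFFFFFFC.  When
 * the map returns to 0xFFFFFFFF, every chunk of the page is free again.
 */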

#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


#if (NXT_HAVE_BUILTIN_CLZ)

#define nxt_lg2(value)                                                        \
    (31 - __builtin_clz(value))

#else

static const int nxt_lg2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5
};

static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;

static int
nxt_lg2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
}

#endif

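/*
 * Both variants compute floor(log2(value)) for a non-zero argument, e.g.
 * nxt_lg2(1024) == 10 and nxt_lg2(48) == 5.  The fallback smears the
 * highest set bit downwards, isolates it with (v - (v >> 1)), and uses a
 * de Bruijn-style multiplication to index the 64-entry lookup table.
 */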

#if (NXT_DEBUG)

nxt_inline void
nxt_mp_thread_assert(nxt_mp_t *mp)
{
    nxt_tid_t     tid;
    nxt_thread_t  *thread;

    thread = nxt_thread();
    tid = nxt_thread_tid(thread);

    if (nxt_fast_path(mp->tid == tid)) {
        return;
    }

    if (nxt_slow_path(nxt_pid != mp->pid)) {
        mp->pid = nxt_pid;
        mp->tid = tid;

        return;
    }

    nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
    nxt_abort();
}

#else

#define nxt_mp_thread_assert(mp)

#endif


void
nxt_mp_thread_adopt(nxt_mp_t *mp)
{
#if (NXT_DEBUG)
    mp->pid = nxt_pid;
    mp->tid = nxt_thread_tid(NULL);
#endif
}


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    uint32_t     pages, chunk_size_shift, page_size_shift;
    nxt_queue_t  *chunk_pages;

    chunk_size_shift = nxt_lg2(min_chunk_size);
    page_size_shift = nxt_lg2(page_size);

    pages = page_size_shift - chunk_size_shift;

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->chunk_size_shift = chunk_size_shift;
        mp->page_size_shift = page_size_shift;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        while (pages != 0) {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            pages--;
        }

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
    }

    return mp;
}

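/*
 * Usage sketch (hypothetical sizes; callers can validate them with
 * nxt_mp_test_sizes() beforehand):
 *
 *     nxt_mp_t  *mp;
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 32);
 *
 *     if (mp != NULL) {
 *         void  *p = nxt_mp_alloc(mp, 48);
 *         ...
 *         nxt_mp_free(mp, p);
 *         nxt_mp_destroy(mp);
 *     }
 *
 * With these sizes the pool keeps 10 - 5 == 5 chunk page lists, serving
 * chunk sizes of 32, 64, 128, 256, and 512 bytes.
 */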

void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp destroy");

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* The alignment and sizes must be powers of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}

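/*
 * Worked example (hypothetical sizes): nxt_mp_test_sizes(4096, 128, 1024,
 * 32) passes because all three size arguments are powers of 2, 1024 >= 64,
 * 1024 >= 128, 1024 >= 32, 32 * 32 >= 1024 (a page's chunk bitmap has only
 * 32 bits), 4096 >= 1024, 4096 / 1024 == 4 <= 256 (a page number is only
 * 8 bits), and 4096 % 1024 == 0.  Changing min_chunk_size to 16 would fail
 * the bitmap constraint: 16 * 32 == 512 < 1024.
 */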

nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp alloc: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_alloc_small(mp, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);

    /* The alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        if (size <= mp->page_size && alignment <= mp->page_alignment) {
            size = nxt_max(size, alignment);

            if (size <= mp->page_size) {
                return nxt_mp_alloc_small(mp, size);
            }
        }

#endif

        return nxt_mp_alloc_large(mp, alignment, size);
    }

    return NULL;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}

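/*
 * The index selects the smallest chunk size that fits the request:
 * nxt_lg2(size - 1) + 1 is ceil(log2(size)).  For example, with
 * chunk_size_shift == 5 (32-byte minimum chunks), a 48-byte request gives
 * nxt_lg2(47) + 1 - 5 == 5 + 1 - 5 == 1, i.e. chunk_pages[1], the list of
 * pages divided into 64-byte chunks.  Requests of 32 bytes or less map to
 * index 0.
 */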

#if !(NXT_DEBUG_MEMORY)

nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}

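/*
 * Illustrative identity for the pointer arithmetic above: a page
 * descriptor stores no pointer to its memory, but for any cluster block
 * "b" and page index "i" (hypothetical names),
 *
 *     nxt_mp_page_addr(mp, &b->pages[i])
 *         == b->start + (i << mp->page_size_shift)
 *
 * because stepping back over i descriptors plus the block header,
 * (u_char *) &b->pages[i] - page_offset, recovers (u_char *) b itself.
 */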

static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    nxt_mp_thread_assert(mp);

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove the full page from the pool's list of
                 * chunked pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    /* Log only on success: page is NULL if the page allocation failed. */
    if (nxt_fast_path(p != NULL)) {
        nxt_debug_alloc("mp chunk:%uz alloc: %p",
                        page->size << mp->chunk_size_shift, p);
    }

    return p;
}

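/*
 * Worked trace (hypothetical pool with 1024-byte pages and 32-byte minimum
 * chunks): a 48-byte request maps to index 1 (64-byte chunks).  A fresh
 * page is fetched, page->size is set to 1 << 1 == 2 (64 >> 5), and the
 * page holds 1 << (10 - (1 + 5)) == 16 chunks, of which 15 remain free
 * after the first allocation.  A 700-byte request exceeds page_size / 2,
 * so it takes an entire page: page->size == 1024 >> 5 == 32.
 */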

static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    nxt_mp_thread_assert(mp);

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    nxt_debug_alloc("mp get: %p", p);

    return p;
}


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* The NXT_MP_CLUSTER_BLOCK type is zero, so nxt_zalloc() has set it. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    /* An allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}

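/*
 * Worked example of the block placement choice above: an 8192-byte request
 * is a power of 2, so embedding the descriptor would grow the underlying
 * allocation past 8K (see the note on 4K-aligning mallocs before
 * nxt_mp_block_type_t); the descriptor is therefore allocated separately
 * (NXT_MP_DISCRETE_BLOCK).  A 5000-byte request is not a power of 2:
 * nxt_align_size(5000, 8) == 5000 on a 64-bit platform, and the descriptor
 * is embedded at p + 5000 within a single allocation of
 * 5000 + sizeof(nxt_mp_block_t) bytes (NXT_MP_EMBEDDED_BLOCK).
 */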

static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    nxt_debug_alloc("mp free %p", p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}

static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of the pool's list of
                 * chunked pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from the pool's
             * list of chunked pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test whether all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* All pages are free: free the whole cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        mp->retain++;
        nxt_debug_alloc("mp retain: %uD", mp->retain);
    }

    return p;
}


void
nxt_mp_release(nxt_mp_t *mp, void *p)
{
    nxt_mp_free(mp, p);

    mp->retain--;

    nxt_debug_alloc("mp release: %uD", mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);
    }
}

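/*
 * Sketch of the reference counting above (hypothetical usage): a pool
 * starts with retain == 1 on behalf of its creator, each nxt_mp_retain()
 * pairs with one nxt_mp_release(), and the pool is destroyed when the
 * count drops to 0:
 *
 *     p1 = nxt_mp_retain(mp, 48);     retain == 2
 *     p2 = nxt_mp_retain(mp, 16);     retain == 3
 *     nxt_mp_release(mp, p2);         retain == 2
 *     nxt_mp_release(mp, p1);         retain == 1
 *     nxt_mp_destroy(mp);             the creator's own reference
 *
 * nxt_mp_release() both frees its pointer argument and decrements the
 * count, so it must be given a pointer obtained from nxt_mp_retain().
 */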
931 
932 void *
933 nxt_mp_nget(nxt_mp_t *mp, size_t size)
934 {
935     nxt_debug_alloc("mp nget: %uz", size);
936 
937 #if !(NXT_DEBUG_MEMORY)
938 
939     if (size <= mp->page_size) {
940         return nxt_mp_get_small(mp, &mp->nget_pages, size);
941     }
942 
943 #endif
944 
945     return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
946 }
947 
948 
949 void *
950 nxt_mp_get(nxt_mp_t *mp, size_t size)
951 {
952     nxt_debug_alloc("mp get: %uz", size);
953 
954 #if !(NXT_DEBUG_MEMORY)
955 
956     if (size <= mp->page_size) {
957         size = nxt_max(size, NXT_MAX_ALIGNMENT);
958         return nxt_mp_get_small(mp, &mp->get_pages, size);
959     }
960 
961 #endif
962 
963     return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
964 }
965 
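/*
 * Both functions return non-freeable memory carved from 0xFF pages; such
 * pointers must not be passed to nxt_mp_free().  The difference is
 * alignment: nxt_mp_get() rounds the size up to NXT_MAX_ALIGNMENT so that
 * successive allocations stay aligned, while nxt_mp_nget() packs requests
 * byte-exact.  For example, two nxt_mp_nget(mp, 3) calls may return
 * adjacent addresses, whereas nxt_mp_get(mp, 3) consumes at least
 * NXT_MAX_ALIGNMENT bytes of the page.
 */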

void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}
