xref: /unit/src/nxt_mp.c (revision 93:8c66fdbc526b)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of a specified size, aligned
 * to page_alignment.  A cluster is divided into pages of a specified size.
 * Page size must be a power of 2.  A page can be used entirely or divided
 * into chunks of equal size.  Chunk size must be a power of 2.  Non-freeable
 * memory is also allocated from pages.  A cluster can contain a mix of pages
 * with different chunk sizes and non-freeable pages.  Cluster size must be
 * a multiple of page size and need not be a power of 2.  Allocations larger
 * than a page are made outside clusters.  The start addresses and sizes of
 * clusters and large allocations are stored in an rbtree of blocks so that
 * they can be found on free operations.  The rbtree nodes are sorted by
 * start address.  The rbtree is also used to destroy the memory pool.
 */
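
/*
 * A minimal usage sketch (the sizes below are illustrative assumptions
 * that satisfy nxt_mp_test_sizes(), not required values):
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 512, 16);
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 64);
 *         if (p != NULL) {
 *             nxt_mp_free(mp, p);
 *         }
 *         nxt_mp_destroy(mp);
 *     }
 */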


typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of chunks or page shifted by pool->chunk_size_shift.  Zero means
     * that the page is free; 0xFF means a page with non-freeable
     * allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of failed allocations due to insufficient free space
     * in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;
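
/*
 * A worked bitmap example (illustrative, assuming a 4096-byte page divided
 * into 128-byte chunks): the page holds 4096 / 128 = 32 chunks, one bit
 * each in u.map, and a set bit means the chunk is free.  A freshly chunked
 * page gets u.map = 0xFFFFFFFE because chunk 0 is handed out immediately,
 * so its bit is already clear.
 */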


/*
 * Some malloc implementations (e.g. jemalloc) allocate large enough
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor were allocated together with the block itself, it would
 * waste an excessive 4K of memory.  It is therefore better to allocate
 * the block descriptor separately.
 */

typedef enum {
    /* Block of cluster.  The block is allocated apart from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of large allocation.
     * The block is allocated apart from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;
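
/*
 * nxt_mp_alloc_large() below picks between the two large-block layouts:
 * power-of-2 sizes get a discrete descriptor, since appending an embedded
 * one would push the allocation into a second aligned block, while all
 * other sizes get the descriptor embedded right after the aligned end of
 * the allocation.
 */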


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[];
};
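
/*
 * chunk_pages[] is a flexible array with one queue per chunk size class:
 * chunk_pages[i] links pages whose chunks are (min_chunk_size << i) bytes,
 * for sizes up to page_size / 2 (see nxt_mp_chunk_pages_index() below).
 */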


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                     \
    map |= (1 << chunk)


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)
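
/*
 * A worked example of the bitmap macros (illustrative): with
 * map == 0xFFFFFFF4, chunks 0, 1 and 3 are busy; __builtin_ffs() returns
 * the 1-based index of the lowest set bit, so nxt_mp_chunk_get_free()
 * yields 3 - 1 = 2, the lowest free chunk.
 */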


#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


#if (NXT_HAVE_BUILTIN_CLZ)

#define nxt_lg2(value)                                                        \
    (31 - __builtin_clz(value))

#else

static const int nxt_lg2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5
};

static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;

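/*
 * A fallback integer log2 via a de Bruijn-style multiplication: the shifts
 * smear the highest set bit into all lower positions, v - (v >> 1) then
 * isolates that bit, and the multiplication by nxt_lg2_magic maps each of
 * the 64 possible powers of 2 to a distinct value in the top 6 bits, which
 * index nxt_lg2_tab64.
 */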
static int
nxt_lg2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
}

#endif


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    uint32_t     pages, chunk_size_shift, page_size_shift;
    nxt_queue_t  *chunk_pages;

    chunk_size_shift = nxt_lg2(min_chunk_size);
    page_size_shift = nxt_lg2(page_size);

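    /*
     * The number of chunk size classes: one queue for each power of 2
     * from min_chunk_size up to page_size / 2.  For example (sizes are
     * illustrative), a 4096-byte page with a 16-byte minimum chunk gives
     * 12 - 4 = 8 queues.
     */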
    pages = page_size_shift - chunk_size_shift;

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->chunk_size_shift = chunk_size_shift;
        mp->page_size_shift = page_size_shift;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        while (pages != 0) {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            pages--;
        }

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
    }

    return mp;
}


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp destroy");

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be a power of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}
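
/*
 * The constraints mirror the descriptor layout: min_chunk_size * 32 >=
 * page_size keeps at most 32 chunks per page (the u.map bitmap is 32 bits
 * wide), and cluster_size / page_size <= 256 keeps page numbers within
 * the uint8_t number field.
 */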


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp alloc: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_alloc_small(mp, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        if (size <= mp->page_size && alignment <= mp->page_alignment) {
            size = nxt_max(size, alignment);

            if (size <= mp->page_size) {
                return nxt_mp_alloc_small(mp, size);
            }
        }

#endif

        return nxt_mp_alloc_large(mp, alignment, size);
    }

    return NULL;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}
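
/*
 * A worked example (values are illustrative): with a 16-byte minimum chunk
 * (chunk_size_shift == 4), a 48-byte request gives nxt_lg2(47) + 1 - 4 ==
 * 5 + 1 - 4 == 2, i.e. the queue of pages with 16 << 2 == 64-byte chunks,
 * the smallest chunk size that fits the request.
 */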


#if !(NXT_DEBUG_MEMORY)

nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}
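
/*
 * Page descriptors live in the cluster block's pages[] array, so stepping
 * back over page->number descriptors plus the offset of pages[] recovers
 * the enclosing nxt_mp_block_t, and the page's data address is
 * block->start plus page->number pages.
 */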


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove the full page from the pool's list
                 * of chunk pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    /* Log only on success: page is NULL if page allocation failed. */

    if (nxt_fast_path(p != NULL)) {
        nxt_debug_alloc("mp chunk:%uz alloc: %p",
                        page->size << mp->chunk_size_shift, p);
    }

    return p;
}


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    nxt_debug_alloc("mp get: %p", p);

    return p;
}
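
/*
 * Pages that are full, or that have failed to satisfy a request more than
 * 100 times, are unlinked from the search list to bound the scan cost;
 * their memory stays allocated until the pool is destroyed, since
 * non-freeable pages are never reclaimed individually.
 */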


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

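    /*
     * Link all pages into the pool's free pages list in ascending order:
     * the last page goes to the head of the list, then each preceding
     * page is inserted before its successor, so the cluster's pages end
     * up in order at the head.
     */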
    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}
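
/*
 * Embedded block layout (illustrative): a single aligned allocation holds
 * the user data followed by its descriptor,
 *
 *     |<------- aligned_size ------->|<- sizeof(nxt_mp_block_t) ->|
 *     p                              block
 *
 * so freeing block->start releases both at once in nxt_mp_free() and
 * nxt_mp_destroy().
 */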


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_debug_alloc("mp free %p", p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of the pool's list
                 * of chunk pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free; remove the page from the pool's
             * list of chunk pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test whether all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* Free the cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        mp->retain++;
        nxt_debug_alloc("mp retain: %uD", mp->retain);
    }

    return p;
}


void
nxt_mp_release(nxt_mp_t *mp, void *p)
{
    nxt_mp_free(mp, p);

    mp->retain--;

    nxt_debug_alloc("mp release: %uD", mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);
    }
}
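
/*
 * A minimal retain/release sketch (illustrative): the pool is created with
 * retain == 1 held by its owner, and each retained allocation keeps the
 * pool alive until it is released.
 *
 *     p = nxt_mp_retain(mp, size);    // retain becomes 2
 *     ...
 *     nxt_mp_release(mp, p);          // retain drops back to 1
 *
 * When the count reaches 0, the pool destroys itself.
 */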


void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp nget: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_get_small(mp, &mp->nget_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp get: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        return nxt_mp_get_small(mp, &mp->get_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}
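
/*
 * nxt_mp_get() rounds the request up to NXT_MAX_ALIGNMENT so successive
 * allocations stay aligned, while nxt_mp_nget() packs them back to back
 * with no padding; hence the separate get_pages and nget_pages lists.
 */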


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}