xref: /unit/src/nxt_mp.c (revision 69:7286fc2bc8f7)
1 
2 /*
3  * Copyright (C) Igor Sysoev
4  * Copyright (C) NGINX, Inc.
5  */
6 
7 #include <nxt_main.h>
8 
9 
10 /*
11  * A memory pool allocates memory in clusters of a specified size, aligned
12  * to page_alignment.  A cluster is divided into pages of a specified size.
13  * Page size must be a power of 2.  A page can be used entirely or divided
14  * into chunks of equal size.  Chunk size must be a power of 2.  Non-freeable
15  * memory is also allocated from pages.  A cluster can contain a mix of pages
16  * with different chunk sizes and non-freeable pages.  Cluster size must be
17  * a multiple of page size and need not be a power of 2.  Allocations larger
18  * than a page are made outside clusters.  The start addresses and sizes of
19  * clusters and large allocations are stored in rbtree blocks so that they
20  * can be found on free operations.  The rbtree nodes are sorted by start
21  * address.  The rbtree is also used to destroy the memory pool.
22  */
23 
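/*
 * Illustrative usage sketch (not part of the implementation; the sizes
 * below are example values that satisfy nxt_mp_test_sizes(), not values
 * mandated elsewhere):
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 512, 16);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 100);
 *         ...
 *         nxt_mp_free(mp, p);
 *         nxt_mp_destroy(mp);
 *     }
 */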
24 
25 typedef struct {
26     /*
27      * Used to link
28      *  *) pages with free chunks in pool chunk pages lists,
29      *  *) pages with free space for non-freeable allocations,
30      *  *) free pages in clusters.
31      */
32     nxt_queue_link_t     link;
33 
34     union {
35         /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
36         uint32_t         map;
37 
38         /* Size of taken non-freeable space. */
39         uint32_t         taken;
40     } u;
41 
42     /*
43      * Size of chunks or page, shifted right by pool->chunk_size_shift.
44      * Zero means the page is free; 0xFF marks a non-freeable page.
45      */
46     uint8_t              size;
47 
48     /* Number of free chunks of a chunked page. */
49     uint8_t              chunks;
50 
51     /*
52      * Number of failed allocation attempts due to insufficient free space
53      * in a non-freeable page.
54      */
55     uint8_t              fails;
56 
57     /*
58      * Page number in page cluster.
59      * There can be no more than 256 pages in a cluster.
60      */
61     uint8_t              number;
62 } nxt_mp_page_t;
63 
64 
65 /*
66  * Some malloc implementations (e.g. jemalloc) allocate sufficiently large
67  * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
68  * descriptor were allocated together with the block, it would consume an
69  * extra 4K of memory.  It is therefore better to allocate the block
70  * descriptor separately.
71  */
72 
73 typedef enum {
74     /* Cluster block.  The block is allocated separately from the cluster. */
75     NXT_MP_CLUSTER_BLOCK = 0,
76     /*
77      * Block of large allocation.
78      * The block is allocated separately from the allocation.
79      */
80     NXT_MP_DISCRETE_BLOCK,
81     /*
82      * Block of large allocation.
83      * The block is allocated immediately after the allocation.
84      */
85     NXT_MP_EMBEDDED_BLOCK,
86 } nxt_mp_block_type_t;
87 
88 
89 typedef struct {
90     NXT_RBTREE_NODE      (node);
91     nxt_mp_block_type_t  type:8;
92 
93     /* Block size must be less than 4G. */
94     uint32_t             size;
95 
96     u_char               *start;
97     nxt_mp_page_t        pages[];
98 } nxt_mp_block_t;
99 
100 
101 struct nxt_mp_s {
102     /* rbtree of nxt_mp_block_t. */
103     nxt_rbtree_t         blocks;
104 
105     uint8_t              chunk_size_shift;
106     uint8_t              page_size_shift;
107     uint32_t             page_size;
108     uint32_t             page_alignment;
109     uint32_t             cluster_size;
110     uint32_t             retain;
111 
112     /* Lists of nxt_mp_page_t. */
113     nxt_queue_t          free_pages;
114     nxt_queue_t          nget_pages;
115     nxt_queue_t          get_pages;
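    /*
     * Trailing array of lists of chunked pages that still have free
     * chunks, one list per chunk size.  Its length, fixed in
     * nxt_mp_create(), is page_size_shift - chunk_size_shift.
     */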
116     nxt_queue_t          chunk_pages[0];
117 };
118 
119 
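/*
 * In the chunk bitmap a set bit means that the corresponding chunk is
 * free and a cleared bit means that it is busy.  For example, the map
 * 0xFFFFFFFE describes a page whose chunk 0 is allocated and whose
 * remaining chunks are free.
 */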
120 #define nxt_mp_chunk_get_free(map)                                            \
121     (__builtin_ffs(map) - 1)
122 
123 
124 #define nxt_mp_chunk_is_free(map, chunk)                                      \
125     ((map & (1 << chunk)) != 0)
126 
127 
128 #define nxt_mp_chunk_set_busy(map, chunk)                                     \
129     map &= ~(1 << chunk)
130 
131 
132 #define nxt_mp_chunk_set_free(map, chunk)                                     \
133     map |= (1 << chunk)
134 
135 
136 #define nxt_mp_free_junk(p, size)                                             \
137     memset((p), 0x5A, size)
138 
139 
140 #if !(NXT_DEBUG_MEMORY)
141 static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
142 static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
143 static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
144 static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
145 #endif
146 static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
147 static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
148     nxt_rbtree_node_t *node2);
149 static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
150 static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
151     u_char *p);
152 
153 
154 nxt_mp_t *
155 nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
156     size_t min_chunk_size)
157 {
158     nxt_mp_t     *mp;
159     uint32_t     pages, chunk_size_shift, page_size_shift;
160     nxt_queue_t  *chunk_pages;
161 
162     chunk_size_shift = nxt_lg2(min_chunk_size);
163     page_size_shift = nxt_lg2(page_size);
164 
165     pages = page_size_shift - chunk_size_shift;
166 
167     mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));
168 
169     if (nxt_fast_path(mp != NULL)) {
170         mp->retain = 1;
171         mp->chunk_size_shift = chunk_size_shift;
172         mp->page_size_shift = page_size_shift;
173         mp->page_size = page_size;
174         mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
175         mp->cluster_size = cluster_size;
176 
177         chunk_pages = mp->chunk_pages;
178 
179         while (pages != 0) {
180             nxt_queue_init(chunk_pages);
181             chunk_pages++;
182             pages--;
183         }
184 
185         nxt_queue_init(&mp->free_pages);
186         nxt_queue_init(&mp->nget_pages);
187         nxt_queue_init(&mp->get_pages);
188 
189         nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
190     }
191 
192     return mp;
193 }
194 
195 
196 void
197 nxt_mp_destroy(nxt_mp_t *mp)
198 {
199     void               *p;
200     nxt_mp_block_t     *block;
201     nxt_rbtree_node_t  *node, *next;
202 
203     nxt_debug_alloc("mp destroy");
204 
205     next = nxt_rbtree_root(&mp->blocks);
206 
207     while (next != nxt_rbtree_sentinel(&mp->blocks)) {
208 
209         node = nxt_rbtree_destroy_next(&mp->blocks, &next);
210         block = (nxt_mp_block_t *) node;
211 
212         p = block->start;
213 
214         if (block->type != NXT_MP_EMBEDDED_BLOCK) {
215             nxt_free(block);
216         }
217 
218         nxt_free(p);
219     }
220 
221     nxt_free(mp);
222 }
223 
224 
225 nxt_bool_t
226 nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
227     size_t min_chunk_size)
228 {
229     nxt_bool_t  valid;
230 
231     /* Alignment and sizes must be a power of 2. */
232 
233     valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
234                            && nxt_is_power_of_two(page_size)
235                            && nxt_is_power_of_two(min_chunk_size)));
236     if (!valid) {
237         return 0;
238     }
239 
240     page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
241 
242     valid = nxt_expect(1, (page_size >= 64
243                            && page_size >= page_alignment
244                            && page_size >= min_chunk_size
245                            && min_chunk_size * 32 >= page_size
246                            && cluster_size >= page_size
247                            && cluster_size / page_size <= 256
248                            && cluster_size % page_size == 0));
249     if (!valid) {
250         return 0;
251     }
252 
253     return 1;
254 }
255 
256 
257 nxt_bool_t
258 nxt_mp_is_empty(nxt_mp_t *mp)
259 {
260     return (nxt_rbtree_is_empty(&mp->blocks)
261             && nxt_queue_is_empty(&mp->free_pages));
262 }
263 
264 
265 void *
266 nxt_mp_alloc(nxt_mp_t *mp, size_t size)
267 {
268     nxt_debug_alloc("mp alloc: %uz", size);
269 
270 #if !(NXT_DEBUG_MEMORY)
271 
272     if (size <= mp->page_size) {
273         return nxt_mp_alloc_small(mp, size);
274     }
275 
276 #endif
277 
278     return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
279 }
280 
281 
282 void *
283 nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
284 {
285     void  *p;
286 
287     p = nxt_mp_alloc(mp, size);
288 
289     if (nxt_fast_path(p != NULL)) {
290         memset(p, 0, size);
291     }
292 
293     return p;
294 }
295 
296 
297 void *
298 nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
299 {
300     nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);
301 
302     /* Alignment must be a power of 2. */
303 
304     if (nxt_fast_path(nxt_is_power_of_two(alignment))) {
305 
306 #if !(NXT_DEBUG_MEMORY)
307 
308         if (size <= mp->page_size && alignment <= mp->page_alignment) {
309             size = nxt_max(size, alignment);
310 
311             if (size <= mp->page_size) {
312                 return nxt_mp_alloc_small(mp, size);
313             }
314         }
315 
316 #endif
317 
318         return nxt_mp_alloc_large(mp, alignment, size);
319     }
320 
321     return NULL;
322 }
323 
324 
325 void *
326 nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
327 {
328     void  *p;
329 
330     p = nxt_mp_align(mp, alignment, size);
331 
332     if (nxt_fast_path(p != NULL)) {
333         memset(p, 0, size);
334     }
335 
336     return p;
337 }
338 
339 
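/*
 * Map an allocation size to an index into mp->chunk_pages[].  The index
 * is the number of powers of two by which the chunk size rounded up from
 * "size" exceeds the minimum chunk size.  For example, assuming a minimum
 * chunk size of 16 bytes (chunk_size_shift of 4), sizes 1-16 map to index
 * 0, sizes 17-32 to index 1, sizes 33-64 to index 2, and so on.
 */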
340 nxt_inline nxt_uint_t
341 nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
342 {
343     nxt_int_t  n, index;
344 
345     index = 0;
346 
347     if (size > 1) {
348         n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;
349 
350         if (n > 0) {
351             index = n;
352         }
353     }
354 
355     return index;
356 }
357 
358 
359 #if !(NXT_DEBUG_MEMORY)
360 
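/*
 * Compute the start address of a page within its cluster.  The page
 * descriptor is an element of the cluster block's pages[] array, so
 * stepping back by the descriptor's offset within the block recovers
 * the block itself.
 */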
361 nxt_inline u_char *
362 nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
363 {
364     size_t          page_offset;
365     nxt_mp_block_t  *block;
366 
367     page_offset = page->number * sizeof(nxt_mp_page_t)
368                   + offsetof(nxt_mp_block_t, pages);
369 
370     block = (nxt_mp_block_t *) ((u_char *) page - page_offset);
371 
372     return block->start + (page->number << mp->page_size_shift);
373 }
374 
375 
376 static void *
377 nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
378 {
379     u_char            *p;
380     nxt_uint_t        n, index;
381     nxt_queue_t       *chunk_pages;
382     nxt_mp_page_t     *page;
383     nxt_queue_link_t  *link;
384 
385     p = NULL;
386 
387     if (size <= mp->page_size / 2) {
388 
389         index = nxt_mp_chunk_pages_index(mp, size);
390         chunk_pages = &mp->chunk_pages[index];
391 
392         if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {
393 
394             link = nxt_queue_first(chunk_pages);
395             page = nxt_queue_link_data(link, nxt_mp_page_t, link);
396 
397             p = nxt_mp_page_addr(mp, page);
398 
399             n = nxt_mp_chunk_get_free(page->u.map);
400             nxt_mp_chunk_set_busy(page->u.map, n);
401 
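            /*
             * The chunk size is (1 << index) << chunk_size_shift bytes,
             * so chunk n starts n chunk sizes into the page.
             */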
402             p += ((n << index) << mp->chunk_size_shift);
403 
404             page->chunks--;
405 
406             if (page->chunks == 0) {
407                 /*
408                  * Remove full page from the pool chunk pages list
409                  * of pages with free chunks.
410                  */
411                 nxt_queue_remove(&page->link);
412             }
413 
414         } else {
415             page = nxt_mp_alloc_page(mp);
416 
417             if (nxt_fast_path(page != NULL)) {
418                 page->size = (1 << index);
419 
420                 n = mp->page_size_shift - (index + mp->chunk_size_shift);
421                 page->chunks = (1 << n) - 1;
422 
423                 nxt_queue_insert_head(chunk_pages, &page->link);
424 
425                 /* Mark the first chunk as busy. */
426                 page->u.map = 0xFFFFFFFE;
427 
428                 p = nxt_mp_page_addr(mp, page);
429             }
430         }
431 
432     } else {
433         page = nxt_mp_alloc_page(mp);
434 
435         if (nxt_fast_path(page != NULL)) {
436             page->size = mp->page_size >> mp->chunk_size_shift;
437 
438             p = nxt_mp_page_addr(mp, page);
439         }
440     }
441 
442     nxt_debug_alloc("mp chunk:%uz alloc: %p",
443                     page->size << mp->chunk_size_shift, p);
444 
445     return p;
446 }
447 
448 
449 static void *
450 nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
451 {
452     u_char            *p;
453     uint32_t          available;
454     nxt_mp_page_t     *page;
455     nxt_queue_link_t  *link, *next;
456 
457     for (link = nxt_queue_first(pages);
458          link != nxt_queue_tail(pages);
459          link = next)
460     {
461         next = nxt_queue_next(link);
462         page = nxt_queue_link_data(link, nxt_mp_page_t, link);
463 
464         available = mp->page_size - page->u.taken;
465 
466         if (size <= available) {
467             goto found;
468         }
469 
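        /*
         * Pages that are full or have repeatedly failed to satisfy a
         * request are removed from the list so they are not rescanned.
         */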
470         if (available == 0 || page->fails++ > 100) {
471             nxt_queue_remove(link);
472         }
473     }
474 
475     page = nxt_mp_alloc_page(mp);
476 
477     if (nxt_slow_path(page == NULL)) {
478         return page;
479     }
480 
481     nxt_queue_insert_head(pages, &page->link);
482 
483     page->size = 0xFF;
484 
485 found:
486 
487     p = nxt_mp_page_addr(mp, page);
488 
489     p += page->u.taken;
490     page->u.taken += size;
491 
492     nxt_debug_alloc("mp get: %p", p);
493 
494     return p;
495 }
496 
497 
498 static nxt_mp_page_t *
499 nxt_mp_alloc_page(nxt_mp_t *mp)
500 {
501     nxt_mp_page_t     *page;
502     nxt_mp_block_t    *cluster;
503     nxt_queue_link_t  *link;
504 
505     if (nxt_queue_is_empty(&mp->free_pages)) {
506         cluster = nxt_mp_alloc_cluster(mp);
507         if (nxt_slow_path(cluster == NULL)) {
508             return NULL;
509         }
510     }
511 
512     link = nxt_queue_first(&mp->free_pages);
513     nxt_queue_remove(link);
514 
515     page = nxt_queue_link_data(link, nxt_mp_page_t, link);
516 
517     return page;
518 }
519 
520 
521 static nxt_mp_block_t *
522 nxt_mp_alloc_cluster(nxt_mp_t *mp)
523 {
524     nxt_uint_t      n;
525     nxt_mp_block_t  *cluster;
526 
527     n = mp->cluster_size >> mp->page_size_shift;
528 
529     cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));
530 
531     if (nxt_slow_path(cluster == NULL)) {
532         return NULL;
533     }
534 
535     /* NXT_MP_CLUSTER_BLOCK type is zero. */
536 
537     cluster->size = mp->cluster_size;
538 
539     cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
540     if (nxt_slow_path(cluster->start == NULL)) {
541         nxt_free(cluster);
542         return NULL;
543     }
544 
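    /*
     * Put all of the cluster's pages on the free pages list in ascending
     * order: the last page goes to the head of the list and every earlier
     * page is inserted before its successor.
     */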
545     n--;
546     cluster->pages[n].number = n;
547     nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);
548 
549     while (n != 0) {
550         n--;
551         cluster->pages[n].number = n;
552         nxt_queue_insert_before(&cluster->pages[n + 1].link,
553                                 &cluster->pages[n].link);
554     }
555 
556     nxt_rbtree_insert(&mp->blocks, &cluster->node);
557 
558     return cluster;
559 }
560 
561 #endif
562 
563 
564 static void *
565 nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
566 {
567     u_char          *p;
568     size_t          aligned_size;
569     uint8_t         type;
570     nxt_mp_block_t  *block;
571 
572     /* Allocation must be less than 4G. */
573     if (nxt_slow_path(size >= 0xFFFFFFFF)) {
574         return NULL;
575     }
576 
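    /*
     * For power-of-two sizes the descriptor is allocated separately so
     * that the allocation itself stays a single aligned block (see the
     * note above nxt_mp_block_type_t); for other sizes the descriptor is
     * embedded just after the aligned end of the allocation.
     */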
577     if (nxt_is_power_of_two(size)) {
578         block = nxt_malloc(sizeof(nxt_mp_block_t));
579         if (nxt_slow_path(block == NULL)) {
580             return NULL;
581         }
582 
583         p = nxt_memalign(alignment, size);
584         if (nxt_slow_path(p == NULL)) {
585             nxt_free(block);
586             return NULL;
587         }
588 
589         type = NXT_MP_DISCRETE_BLOCK;
590 
591     } else {
592         aligned_size = nxt_align_size(size, sizeof(uintptr_t));
593 
594         p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
595         if (nxt_slow_path(p == NULL)) {
596             return NULL;
597         }
598 
599         block = (nxt_mp_block_t *) (p + aligned_size);
600         type = NXT_MP_EMBEDDED_BLOCK;
601     }
602 
603     block->type = type;
604     block->size = size;
605     block->start = p;
606 
607     nxt_rbtree_insert(&mp->blocks, &block->node);
608 
609     return p;
610 }
611 
612 
613 static intptr_t
614 nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
615 {
616     nxt_mp_block_t  *block1, *block2;
617 
618     block1 = (nxt_mp_block_t *) node1;
619     block2 = (nxt_mp_block_t *) node2;
620 
621     return (uintptr_t) block1->start - (uintptr_t) block2->start;
622 }
623 
624 
625 void
626 nxt_mp_free(nxt_mp_t *mp, void *p)
627 {
628     const char      *err;
629     nxt_thread_t    *thread;
630     nxt_mp_block_t  *block;
631 
632     nxt_debug_alloc("mp free %p", p);
633 
634     block = nxt_mp_find_block(&mp->blocks, p);
635 
636     if (nxt_fast_path(block != NULL)) {
637 
638         if (block->type == NXT_MP_CLUSTER_BLOCK) {
639             err = nxt_mp_chunk_free(mp, block, p);
640 
641             if (nxt_fast_path(err == NULL)) {
642                 return;
643             }
644 
645         } else if (nxt_fast_path(p == block->start)) {
646             nxt_rbtree_delete(&mp->blocks, &block->node);
647 
648             if (block->type == NXT_MP_DISCRETE_BLOCK) {
649                 nxt_free(block);
650             }
651 
652             nxt_free(p);
653 
654             return;
655 
656         } else {
657             err = "freed pointer points to middle of block: %p";
658         }
659 
660     } else {
661         err = "freed pointer is out of pool: %p";
662     }
663 
664     thread = nxt_thread();
665 
666     nxt_log(thread->task, NXT_LOG_CRIT, err, p);
667 }
668 
669 
670 static nxt_mp_block_t *
671 nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
672 {
673     nxt_mp_block_t     *block;
674     nxt_rbtree_node_t  *node, *sentinel;
675 
676     node = nxt_rbtree_root(tree);
677     sentinel = nxt_rbtree_sentinel(tree);
678 
679     while (node != sentinel) {
680 
681         block = (nxt_mp_block_t *) node;
682 
683         if (p < block->start) {
684             node = node->left;
685 
686         } else if (p >= block->start + block->size) {
687             node = node->right;
688 
689         } else {
690             return block;
691         }
692     }
693 
694     return NULL;
695 }
696 
697 
698 static const char *
699 nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
700 {
701     u_char         *start;
702     uintptr_t      offset;
703     nxt_uint_t     n, size, chunk;
704     nxt_queue_t    *chunk_pages;
705     nxt_mp_page_t  *page;
706 
707     n = (p - cluster->start) >> mp->page_size_shift;
708     start = cluster->start + (n << mp->page_size_shift);
709 
710     page = &cluster->pages[n];
711 
712     if (nxt_slow_path(page->size == 0)) {
713         return "freed pointer points to already free page: %p";
714     }
715 
716     if (nxt_slow_path(page->size == 0xFF)) {
717         return "freed pointer points to non-freeble page: %p";
718     }
719 
720     size = page->size << mp->chunk_size_shift;
721 
722     if (size != mp->page_size) {
723 
724         offset = (uintptr_t) (p - start) & (mp->page_size - 1);
725         chunk = offset / size;
726 
727         if (nxt_slow_path(offset != chunk * size)) {
728             return "freed pointer points to wrong chunk: %p";
729         }
730 
731         if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
732             return "freed pointer points to already free chunk: %p";
733         }
734 
735         nxt_mp_chunk_set_free(page->u.map, chunk);
736 
737         if (page->u.map != 0xFFFFFFFF) {
738             page->chunks++;
739 
740             if (page->chunks == 1) {
741                 /*
742                  * Add the page to the head of pool chunk pages list
743                  * of pages with free chunks.
744                  */
745                 n = nxt_mp_chunk_pages_index(mp, size);
746                 chunk_pages = &mp->chunk_pages[n];
747 
748                 nxt_queue_insert_head(chunk_pages, &page->link);
749             }
750 
751             nxt_mp_free_junk(p, size);
752 
753             return NULL;
754 
755         } else {
756             /*
757              * All chunks are free, remove the page from pool
758              * chunk pages list of pages with free chunks.
759              */
760             nxt_queue_remove(&page->link);
761         }
762 
763     } else if (nxt_slow_path(p != start)) {
764         return "invalid pointer to chunk: %p";
765     }
766 
767     /* Add the free page to the pool's list of free pages. */
768 
769     page->size = 0;
770     nxt_queue_insert_head(&mp->free_pages, &page->link);
771 
772     nxt_mp_free_junk(p, size);
773 
774     /* Test if all pages in the cluster are free. */
775 
776     n = mp->cluster_size >> mp->page_size_shift;
777     page = cluster->pages;
778 
779     do {
780         if (page->size != 0) {
781             return NULL;
782         }
783 
784         page++;
785         n--;
786     } while (n != 0);
787 
788     /* Free cluster. */
789 
790     n = mp->cluster_size >> mp->page_size_shift;
791     page = cluster->pages;
792 
793     do {
794         nxt_queue_remove(&page->link);
795         page++;
796         n--;
797     } while (n != 0);
798 
799     nxt_rbtree_delete(&mp->blocks, &cluster->node);
800 
801     p = cluster->start;
802 
803     nxt_free(cluster);
804     nxt_free(p);
805 
806     return NULL;
807 }
808 
809 
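/*
 * nxt_mp_retain() and nxt_mp_release() tie an allocation to the pool's
 * reference count, which nxt_mp_create() initializes to 1: each retained
 * allocation holds a reference, and the pool is destroyed once the
 * creator's reference and all retained allocations have been released.
 */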
810 void *
811 nxt_mp_retain(nxt_mp_t *mp, size_t size)
812 {
813     void  *p;
814 
815     p = nxt_mp_alloc(mp, size);
816 
817     if (nxt_fast_path(p != NULL)) {
818         mp->retain++;
819         nxt_debug_alloc("mp retain: %uD", mp->retain);
820     }
821 
822     return p;
823 }
824 
825 
826 void
827 nxt_mp_release(nxt_mp_t *mp, void *p)
828 {
829     nxt_mp_free(mp, p);
830 
831     mp->retain--;
832 
833     nxt_debug_alloc("mp release: %uD", mp->retain);
834 
835     if (mp->retain == 0) {
836         nxt_mp_destroy(mp);
837     }
838 }
839 
840 
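/*
 * nxt_mp_nget() and nxt_mp_get() return non-freeable memory.  Small
 * requests are packed into shared pages and are reclaimed only when the
 * pool itself is destroyed.  nxt_mp_get() rounds the size up to
 * NXT_MAX_ALIGNMENT so that successive allocations stay aligned, while
 * nxt_mp_nget() packs them without padding.
 */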
841 void *
842 nxt_mp_nget(nxt_mp_t *mp, size_t size)
843 {
844     nxt_debug_alloc("mp nget: %uz", size);
845 
846 #if !(NXT_DEBUG_MEMORY)
847 
848     if (size <= mp->page_size) {
849         return nxt_mp_get_small(mp, &mp->nget_pages, size);
850     }
851 
852 #endif
853 
854     return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
855 }
856 
857 
858 void *
859 nxt_mp_get(nxt_mp_t *mp, size_t size)
860 {
861     nxt_debug_alloc("mp get: %uz", size);
862 
863 #if !(NXT_DEBUG_MEMORY)
864 
865     if (size <= mp->page_size) {
866         size = nxt_max(size, NXT_MAX_ALIGNMENT);
867         return nxt_mp_get_small(mp, &mp->get_pages, size);
868     }
869 
870 #endif
871 
872     return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
873 }
874 
875 
876 void *
877 nxt_mp_zget(nxt_mp_t *mp, size_t size)
878 {
879     void  *p;
880 
881     p = nxt_mp_get(mp, size);
882 
883     if (nxt_fast_path(p != NULL)) {
884         memset(p, 0, size);
885     }
886 
887     return p;
888 }
889