
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of a specified size, aligned
 * to page_alignment.  A cluster is divided into pages of a specified size.
 * Page size must be a power of 2.  A page can be used entirely or can be
 * divided into chunks of equal size.  Chunk size must be a power of 2.
 * Non-freeable memory is also allocated from pages.  A cluster can contain
 * a mix of pages with different chunk sizes and non-freeable pages.
 * Cluster size must be a multiple of page size and need not be a power
 * of 2.  Allocations greater than the page size are made outside clusters.
 * Start addresses and sizes of the clusters and large allocations are
 * stored in blocks organized in an rbtree, so that they can be found on
 * free operations.  The rbtree nodes are sorted by start addresses.  The
 * rbtree is also used to destroy the memory pool.
 */
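

/*
 * A minimal usage sketch (the sizes are illustrative values that pass
 * nxt_mp_test_sizes() below, not mandated ones):
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 64);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 200);   -- small, served from a chunked page
 *         nxt_mp_free(mp, p);          -- returns the chunk to its page
 *         nxt_mp_destroy(mp);          -- releases all clusters and blocks
 *     }
 */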


typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of chunks or page, shifted right by pool->chunk_size_shift.
     * Zero means the page is free; 0xFF means a page with non-freeable
     * allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of failed allocation attempts due to insufficient
     * free space in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;


/*
 * Some malloc implementations (e.g. jemalloc) allocate large enough
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor is allocated together with the block, it takes an
 * excessive 4K of memory.  It is therefore better to allocate the
 * block descriptor separately.
 */

typedef enum {
    /* Block of cluster.  The block is allocated apart from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of large allocation.
     * The block is allocated apart from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[0];
};


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                     \
    map |= (1 << chunk)


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)
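

/*
 * A worked example of the chunk bitmap macros above (values illustrative):
 * a freshly allocated chunked page sets u.map to 0xFFFFFFFE, that is,
 * chunk 0 busy and chunks 1..31 free.  Then:
 *
 *     nxt_mp_chunk_get_free(0xFFFFFFFE) == 1, since __builtin_ffs()
 *     returns the 1-based position of the lowest set bit;
 *
 *     nxt_mp_chunk_set_busy(map, 1) clears bit 1, map == 0xFFFFFFFC;
 *     nxt_mp_chunk_is_free(map, 1) == 0;
 *     nxt_mp_chunk_set_free(map, 1) restores map == 0xFFFFFFFE.
 */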


#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    nxt_uint_t   pages, chunk_size;
    nxt_queue_t  *chunk_pages;

    pages = 0;
    chunk_size = page_size;

    do {
        pages++;
        chunk_size /= 2;
    } while (chunk_size > min_chunk_size);

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        do {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            chunk_size *= 2;
        } while (chunk_size < page_size);

        mp->chunk_size_shift = nxt_lg2(min_chunk_size);
        mp->page_size_shift = nxt_lg2(page_size);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);
    }

    return mp;
}
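

/*
 * A sizing walk-through of nxt_mp_create() (illustrative numbers, assuming
 * page_size 1024 and min_chunk_size 64): the first loop halves chunk_size
 * 1024 -> 512 -> 256 -> 128 -> 64, giving pages == 4, so four chunk_pages
 * queues are allocated; the second loop then initializes exactly those
 * four queues, one per chunk size 64, 128, 256 and 512 bytes.
 */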


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp destroy");

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be powers of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}
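

/*
 * For example (illustrative values): page_size 512 with min_chunk_size 8
 * is rejected, because 8 * 32 == 256 < 512 and a page would need more
 * chunks than the 32-bit bitmap can track; min_chunk_size 16 passes,
 * since 16 * 32 == 512.
 */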


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp alloc: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_alloc_small(mp, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        if (size <= mp->page_size && alignment <= mp->page_alignment) {
            size = nxt_max(size, alignment);

            if (size <= mp->page_size) {
                return nxt_mp_alloc_small(mp, size);
            }
        }

#endif

        return nxt_mp_alloc_large(mp, alignment, size);
    }

    return NULL;
}
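

/*
 * For example (illustrative, assuming page_size 1024 and page_alignment
 * 128): nxt_mp_align(mp, 64, 200) takes the small path, allocating
 * nxt_max(200, 64) == 200 bytes from a chunked page, whereas
 * nxt_mp_align(mp, 256, 200) exceeds page_alignment and falls through
 * to nxt_mp_alloc_large().
 */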


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


#if !(NXT_DEBUG_MEMORY)

nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}


nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}
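

/*
 * A worked example (assuming min_chunk_size 64, so chunk_size_shift == 6):
 * for sizes 65..128, n == nxt_lg2(size - 1) + 1 - 6 == 1, so index 1
 * selects the list of pages divided into 128-byte chunks; any size up
 * to 64 yields index 0 and 64-byte chunks.
 */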


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove the full page from the pool chunk pages list
                 * of pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    /* "page" is NULL if page allocation failed, so do not dereference it. */
    if (nxt_fast_path(p != NULL)) {
        nxt_debug_alloc("mp chunk:%uz alloc: %p",
                        page->size << mp->chunk_size_shift, p);
    }

    return p;
}


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}
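

/*
 * A layout sketch (illustrative, assuming cluster_size 4096 and page_size
 * 1024): the cluster carries n == 4 page descriptors, pages[0..3].  The
 * loop above queues them in mp->free_pages in ascending number order, so
 * nxt_mp_alloc_page() hands out pages from the start of the cluster first.
 */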

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}
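

/*
 * An example of the two strategies above (illustrative sizes): a
 * power-of-two request such as 8192 bytes gets a separate descriptor
 * (NXT_MP_DISCRETE_BLOCK), since appending one would spill into another
 * aligned block; an odd size such as 5000 bytes is padded to a uintptr_t
 * boundary and the descriptor is embedded at p + aligned_size
 * (NXT_MP_EMBEDDED_BLOCK), costing one allocation instead of two.
 */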


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_debug_alloc("mp free %p", p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of the pool chunk pages list
                 * of pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from the pool
             * chunk pages list of pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test if all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* Free the cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        mp->retain++;
        nxt_debug_alloc("mp retain: %uD", mp->retain);
    }

    return p;
}


void
nxt_mp_release(nxt_mp_t *mp, void *p)
{
    nxt_mp_free(mp, p);

    mp->retain--;

    nxt_debug_alloc("mp release: %uD", mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);
    }
}
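

/*
 * A reference-counting sketch (illustrative): nxt_mp_create() starts the
 * pool with retain == 1, which is the creator's reference.
 *
 *     p = nxt_mp_retain(mp, size);    -- allocates, retain becomes 2
 *     ...
 *     nxt_mp_release(mp, p);          -- frees p, retain drops to 1
 *
 * When the last reference is released and retain reaches 0, the pool
 * itself is destroyed, so mp must not be used afterwards.
 */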


void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp nget: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_get_small(mp, &mp->nget_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp get: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        return nxt_mp_get_small(mp, &mp->get_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    nxt_debug_alloc("mp get: %p", p);

    return p;
}
893