xref: /unit/src/nxt_mp.c (revision 63:b79fe37d9f24)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of a specified size, aligned
 * to page_alignment.  A cluster is divided into pages of a specified size.
 * Page size must be a power of 2.  A page can be used entirely or divided
 * into chunks of equal size.  Chunk size must be a power of 2.  Non-freeable
 * memory is also allocated from pages.  A cluster can contain a mix of pages
 * with different chunk sizes and non-freeable pages.  Cluster size must be
 * a multiple of page size and need not be a power of 2.  Allocations larger
 * than a page are made outside clusters.  Start addresses and sizes of the
 * clusters and large allocations are stored in an rbtree of blocks so that
 * they can be found on free operations.  The rbtree nodes are sorted by
 * start address.  The rbtree is also used to destroy the memory pool.
 */
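
/*
 * Illustrative sketch of the pool lifecycle using the public API defined
 * below.  The function name and the size arguments are examples only; the
 * sizes merely have to satisfy nxt_mp_test_sizes().  Kept under #if 0 so
 * it does not affect compilation.
 */

#if 0

static void
nxt_mp_usage_sketch(void)
{
    void      *p;
    nxt_mp_t  *mp;

    /*
     * 4096-byte clusters aligned to 128 bytes, 1024-byte pages,
     * 32-byte minimum chunks.
     */
    mp = nxt_mp_create(4096, 128, 1024, 32);

    if (mp != NULL) {
        /* 100 bytes fit a 128-byte chunk of a chunked page. */
        p = nxt_mp_alloc(mp, 100);

        if (p != NULL) {
            nxt_mp_free(mp, p);
        }

        nxt_mp_destroy(mp);
    }
}

#endif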


typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in the pool's chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Chunk or page size shifted right by pool->chunk_size_shift.  Zero
     * means the page is free; 0xFF means a page with non-freeable
     * allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of failed allocations due to insufficient free space
     * in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in the page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;


/*
 * Some malloc implementations (e.g. jemalloc) allocate sufficiently large
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor were allocated together with the block itself, it would waste
 * an excessive 4K of memory.  It is therefore better to allocate the block
 * descriptor separately.
 */

typedef enum {
    /* Block of a cluster.  The block is allocated apart from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of a large allocation.
     * The block is allocated apart from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of a large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[0];
};


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                     \
    map |= (1 << chunk)


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)

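/*
 * Worked example of the chunk bitmap macros above: a set bit marks a free
 * chunk.  A fresh page with chunk 0 taken has map 0xFFFFFFFE;
 * nxt_mp_chunk_get_free() then yields __builtin_ffs(0xFFFFFFFE) - 1 = 1,
 * i.e. chunk 1 is the next free chunk, and nxt_mp_chunk_set_busy(map, 1)
 * clears bit 1, giving 0xFFFFFFFC.  When the map returns to 0xFFFFFFFF,
 * every chunk of the page is free again.
 */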

#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    nxt_uint_t   pages, chunk_size;
    nxt_queue_t  *chunk_pages;

    pages = 0;
    chunk_size = page_size;

    do {
        pages++;
        chunk_size /= 2;
    } while (chunk_size > min_chunk_size);

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        do {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            chunk_size *= 2;
        } while (chunk_size < page_size);

        mp->chunk_size_shift = nxt_lg2(min_chunk_size);
        mp->page_size_shift = nxt_lg2(page_size);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);

        nxt_queue_init(&mp->free_pages);
    }

    return mp;
}
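
/*
 * For illustration: with page_size 1024 and min_chunk_size 32, the first
 * loop above counts five halvings (512, 256, 128, 64, 32), so the pool
 * gets five chunk_pages lists serving chunk sizes 32, 64, 128, 256 and
 * 512 bytes; chunk_size_shift becomes 5 and page_size_shift becomes 10.
 */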


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp destroy");

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be a power of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}
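
/*
 * For illustration: cluster_size 4096, page_alignment 128, page_size 1024
 * and min_chunk_size 32 pass all of the checks above, while the same
 * combination with min_chunk_size 16 fails, because 16 * 32 = 512 is less
 * than the 1024-byte page, so the 32-bit chunk map could not cover it.
 */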


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp alloc: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_alloc_small(mp, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    nxt_debug_alloc("mp align: @%uz:%uz", alignment, size);

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        if (size <= mp->page_size && alignment <= mp->page_alignment) {
            size = nxt_max(size, alignment);

            if (size <= mp->page_size) {
                return nxt_mp_alloc_small(mp, size);
            }
        }

#endif

        return nxt_mp_alloc_large(mp, alignment, size);
    }

    return NULL;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


#if !(NXT_DEBUG_MEMORY)

nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}


nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}
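
/*
 * For illustration: with chunk_size_shift 5 (32-byte minimum chunks),
 * nxt_mp_chunk_pages_index() maps a request to the smallest chunk size
 * that can hold it: size 100 gives nxt_lg2(99) + 1 - 5 = 2, selecting the
 * 128-byte chunk list, while any size up to 32 gives index 0.
 */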


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove a full page from the pool chunk pages list
                 * of pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    /* Do not dereference a NULL page if the page allocation failed. */
    if (nxt_fast_path(page != NULL)) {
        nxt_debug_alloc("mp chunk:%uz alloc: %p",
                        page->size << mp->chunk_size_shift, p);
    }

    return p;
}
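
/*
 * For illustration: with chunk_size_shift 5, a request mapped to chunk
 * list index 2 is served in 128-byte chunks ((1 << 2) << 5); taking chunk
 * n = 3 advances p by (3 << 2) << 5 = 384 bytes, i.e. 3 * 128, so chunks
 * are addressed by plain multiplication of chunk number and chunk size.
 */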


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}
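
/*
 * For illustration: a 4096-byte cluster with 1024-byte pages yields four
 * page descriptors.  The loop above inserts the last page at the list
 * head and then each lower-numbered page before its successor, so the
 * free_pages list receives the cluster's pages in ascending order
 * 0, 1, 2, 3.
 */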

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}
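
/*
 * For illustration of the two layouts above: a power-of-two request such
 * as 4096 bytes gets a separately malloc()ed descriptor (discrete block),
 * following the rationale in the comment before nxt_mp_block_type_t; a
 * 5000-byte request is padded to a uintptr_t boundary and the descriptor
 * is embedded at p + aligned_size, saving one allocation.
 */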


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_debug_alloc("mp free %p", p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of pool chunk pages list
                 * of pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from the pool
             * chunk pages list of pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test if all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* Free the cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        mp->retain++;
        nxt_debug_alloc("mp retain: %uD", mp->retain);
    }

    return p;
}


void
nxt_mp_release(nxt_mp_t *mp, void *p)
{
    nxt_mp_free(mp, p);

    mp->retain--;

    nxt_debug_alloc("mp release: %uD", mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);
    }
}
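
/*
 * Illustrative sketch of reference-counted pool usage; the function name
 * is hypothetical.  A pool starts with retain = 1 in nxt_mp_create(), so
 * it lives until its creator calls nxt_mp_destroy() or the last
 * nxt_mp_release() drops the counter to zero.  Kept under #if 0 so it
 * does not affect compilation.
 */

#if 0

static void
nxt_mp_retain_sketch(nxt_mp_t *mp)
{
    void  *p;

    p = nxt_mp_retain(mp, 64);     /* On success retain goes from 1 to 2. */

    if (p != NULL) {
        nxt_mp_release(mp, p);     /* Frees p; retain drops back to 1.    */
    }
}

#endif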


void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp nget: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        return nxt_mp_get_small(mp, &mp->nget_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    nxt_debug_alloc("mp get: %uz", size);

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        return nxt_mp_get_small(mp, &mp->get_pages, size);
    }

#endif

    return nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
}


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;

    /*
     * Reset the taken counter: a page recycled from chunked use still
     * carries its old chunk bitmap in the union.
     */
    page->u.taken = 0;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    nxt_debug_alloc("mp get: %p", p);

    return p;
}
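
/*
 * For illustration of nxt_mp_get_small() above: a fresh non-freeable page
 * of 1024 bytes starts with taken = 0; a 100-byte nxt_mp_nget() bumps
 * taken to 100, and a later request larger than the 924 remaining bytes
 * only increments the page's fail counter.  After roughly 100 such
 * failures, or once the page is exhausted, it is dropped from the search
 * list, although its memory is reclaimed only when the pool is destroyed.
 */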