xref: /unit/src/nxt_mp.c (revision 164:7449e4954471)
1 
2 /*
3  * Copyright (C) Igor Sysoev
4  * Copyright (C) NGINX, Inc.
5  */
6 
7 #include <nxt_main.h>
8 
9 
10 /*
11  * A memory pool allocates memory in clusters of a specified size, aligned
12  * to page_alignment.  A cluster is divided into pages of a specified size.
13  * Page size must be a power of 2.  A page can be used entirely or divided
14  * into chunks of equal size.  Chunk size must be a power of 2.  Non-freeable
15  * memory is also allocated from pages.  A cluster can contain a mix of pages
16  * with different chunk sizes and non-freeable pages.  Cluster size must be
17  * a multiple of page size and need not be a power of 2.  Allocations larger
18  * than a page are made outside clusters.  The start addresses and sizes of
19  * the clusters and large allocations are stored in an rbtree of blocks so
20  * that they can be found on free operations.  The rbtree nodes are sorted
21  * by start address.  The rbtree is also used to destroy the memory pool.
22  */
23 
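/*
 * A minimal usage sketch (the parameter values below are illustrative only;
 * any values accepted by nxt_mp_test_sizes() will do):
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 512, 16);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 64);
 *
 *         if (p != NULL) {
 *             ...
 *             nxt_mp_free(mp, p);
 *         }
 *
 *         nxt_mp_destroy(mp);
 *     }
 */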
24 
25 typedef struct {
26     /*
27      * Used to link
28      *  *) pages with free chunks in the pool chunk_pages lists,
29      *  *) pages with free space for non-freeable allocations,
30      *  *) free pages in clusters.
31      */
32     nxt_queue_link_t     link;
33 
34     union {
35         /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
36         uint32_t         map;
37 
38         /* Size of taken non-freeable space. */
39         uint32_t         taken;
40     } u;
41 
42     /*
43      * Chunk size (or page size) shifted right by pool->chunk_size_shift.
44      * Zero means a free page; 0xFF marks a page with non-freeable allocations.
45      */
46     uint8_t              size;
47 
48     /* Number of free chunks of a chunked page. */
49     uint8_t              chunks;
50 
51     /*
52      * Number of failed allocations due to insufficient free space
53      * in a non-freeable page.
54      */
55     uint8_t              fails;
56 
57     /*
58      * Page number in page cluster.
59      * There can be no more than 256 pages in a cluster.
60      */
61     uint8_t              number;
62 } nxt_mp_page_t;
63 
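/*
 * Illustration of the chunk bitmap: each bit of u.map corresponds to one
 * chunk and a set bit means the chunk is free.  A freshly chunked page is
 * initialized to 0xFFFFFFFE, i.e. chunk 0 is handed out immediately and all
 * other chunks are free.  For pages with fewer than 32 chunks the unused
 * high bits simply stay set; exhaustion is tracked by the "chunks" counter.
 * See nxt_mp_alloc_small() and nxt_mp_chunk_free().
 */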
64 
65 /*
66  * Some malloc implementations (e.g. jemalloc) allocate sufficiently large
67  * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
68  * descriptor were allocated together with the block, it would waste an
69  * extra 4K of memory.  It is therefore better to allocate the block
70  * descriptor separately.
71  */
72 
73 typedef enum {
74     /* Cluster block.  The block is allocated separately from the cluster. */
75     NXT_MP_CLUSTER_BLOCK = 0,
76     /*
77      * Block of a large allocation.
78      * The block is allocated separately from the allocation.
79      */
80     NXT_MP_DISCRETE_BLOCK,
81     /*
82      * Block of a large allocation.
83      * The block is allocated just after the allocation itself.
84      */
85     NXT_MP_EMBEDDED_BLOCK,
86 } nxt_mp_block_type_t;
87 
88 
89 typedef struct {
90     NXT_RBTREE_NODE      (node);
91     nxt_mp_block_type_t  type:8;
92 
93     /* Block size must be less than 4G. */
94     uint32_t             size;
95 
96     u_char               *start;
97     nxt_mp_page_t        pages[];
98 } nxt_mp_block_t;
99 
100 
101 struct nxt_mp_s {
102     /* rbtree of nxt_mp_block_t. */
103     nxt_rbtree_t         blocks;
104 
105     uint8_t              chunk_size_shift;
106     uint8_t              page_size_shift;
107     uint32_t             page_size;
108     uint32_t             page_alignment;
109     uint32_t             cluster_size;
110     uint32_t             retain;
111 
112 #if (NXT_DEBUG)
113     nxt_pid_t            pid;
114     nxt_tid_t            tid;
115 #endif
116 
117     nxt_work_t           *cleanup;
118 
119     /* Lists of nxt_mp_page_t. */
120     nxt_queue_t          free_pages;
121     nxt_queue_t          nget_pages;
122     nxt_queue_t          get_pages;
123     nxt_queue_t          chunk_pages[];
124 };
125 
126 
127 #define nxt_mp_chunk_get_free(map)                                            \
128     (__builtin_ffs(map) - 1)
129 
130 
131 #define nxt_mp_chunk_is_free(map, chunk)                                      \
132     ((map & (1 << chunk)) != 0)
133 
134 
135 #define nxt_mp_chunk_set_busy(map, chunk)                                     \
136     map &= ~(1 << chunk)
137 
138 
139 #define nxt_mp_chunk_set_free(map, chunk)                                     \
140     map |= (1 << chunk)
141 
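/*
 * For example, with map == 0xFFFFFFFE (only chunk 0 busy):
 *
 *     nxt_mp_chunk_get_free(0xFFFFFFFE)   == 1   (lowest set bit)
 *     nxt_mp_chunk_is_free(0xFFFFFFFE, 0) == 0   (chunk 0 is busy)
 *     nxt_mp_chunk_is_free(0xFFFFFFFE, 1) != 0   (chunk 1 is free)
 */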
142 
143 #define nxt_mp_free_junk(p, size)                                             \
144     memset((p), 0x5A, size)
145 
146 
147 #if !(NXT_DEBUG_MEMORY)
148 static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
149 static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
150 static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
151 static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
152 #endif
153 static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
154 static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
155     nxt_rbtree_node_t *node2);
156 static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
157 static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
158     u_char *p);
159 
160 
161 #if (NXT_HAVE_BUILTIN_CLZ)
162 
163 #define nxt_lg2(value)                                                        \
164     (31 - __builtin_clz(value))
165 
166 #else
167 
168 static const int nxt_lg2_tab64[64] = {
169     63,  0, 58,  1, 59, 47, 53,  2,
170     60, 39, 48, 27, 54, 33, 42,  3,
171     61, 51, 37, 40, 49, 18, 28, 20,
172     55, 30, 34, 11, 43, 14, 22,  4,
173     62, 57, 46, 52, 38, 26, 32, 41,
174     50, 36, 17, 19, 29, 10, 13, 21,
175     56, 45, 25, 31, 35, 16,  9, 12,
176     44, 24, 15,  8, 23,  7,  6,  5
177 };
178 
179 static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;
180 
181 static int
182 nxt_lg2(uint64_t v)
183 {
184     v |= v >> 1;
185     v |= v >> 2;
186     v |= v >> 4;
187     v |= v >> 8;
188     v |= v >> 16;
189     v |= v >> 32;
190     return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
191 }
192 
193 #endif
194 
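/*
 * Both variants return the floor of the base-2 logarithm, e.g. nxt_lg2(1)
 * is 0, nxt_lg2(16) is 4 and nxt_lg2(4096) is 12.  The fallback first
 * rounds the value up to 2^k - 1 and then uses a de Bruijn-style
 * multiply-and-shift to index the lookup table.
 */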
195 
196 #if (NXT_DEBUG)
197 
198 nxt_inline void
199 nxt_mp_thread_assert(nxt_mp_t *mp)
200 {
201     nxt_tid_t     tid;
202     nxt_thread_t  *thread;
203 
204     thread = nxt_thread();
205     tid = nxt_thread_tid(thread);
206 
207     if (nxt_fast_path(mp->tid == tid)) {
208         return;
209     }
210 
211     if (nxt_slow_path(nxt_pid != mp->pid)) {
212         mp->pid = nxt_pid;
213         mp->tid = tid;
214 
215         return;
216     }
217 
218     nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
219     nxt_abort();
220 }
221 
222 #else
223 
224 #define nxt_mp_thread_assert(mp)
225 
226 #endif
227 
228 
229 void
230 nxt_mp_thread_adopt(nxt_mp_t *mp)
231 {
232 #if (NXT_DEBUG)
233     mp->pid = nxt_pid;
234     mp->tid = nxt_thread_tid(NULL);
235 #endif
236 }
237 
238 
239 nxt_mp_t *
240 nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
241     size_t min_chunk_size)
242 {
243     nxt_mp_t     *mp;
244     uint32_t     pages, chunk_size_shift, page_size_shift;
245     nxt_queue_t  *chunk_pages;
246 
247     chunk_size_shift = nxt_lg2(min_chunk_size);
248     page_size_shift = nxt_lg2(page_size);
249 
250     pages = page_size_shift - chunk_size_shift;
251 
252     mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));
253 
254     if (nxt_fast_path(mp != NULL)) {
255         mp->retain = 1;
256         mp->chunk_size_shift = chunk_size_shift;
257         mp->page_size_shift = page_size_shift;
258         mp->page_size = page_size;
259         mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
260         mp->cluster_size = cluster_size;
261 
262         chunk_pages = mp->chunk_pages;
263 
264         while (pages != 0) {
265             nxt_queue_init(chunk_pages);
266             chunk_pages++;
267             pages--;
268         }
269 
270         nxt_queue_init(&mp->free_pages);
271         nxt_queue_init(&mp->nget_pages);
272         nxt_queue_init(&mp->get_pages);
273 
274         nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
275     }
276 
277     nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size,
278                     page_alignment, page_size, min_chunk_size);
279 
280     return mp;
281 }
282 
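/*
 * For example (values illustrative), nxt_mp_create(4096, 128, 512, 16)
 * yields chunk_size_shift == 4 and page_size_shift == 9, so the pool gets
 * 9 - 4 == 5 chunk_pages lists, one for each chunk size of 16, 32, 64, 128
 * and 256 bytes.  Requests larger than half a page take a whole 512-byte
 * page instead (see nxt_mp_alloc_small()).
 */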
283 
284 void
285 nxt_mp_destroy(nxt_mp_t *mp)
286 {
287     void               *p;
288     nxt_work_t         *work, *next_work;
289     nxt_mp_block_t     *block;
290     nxt_rbtree_node_t  *node, *next;
291 
292     nxt_debug_alloc("mp %p destroy", mp);
293 
294     nxt_mp_thread_assert(mp);
295 
296     while (mp->cleanup != NULL) {
297         work = mp->cleanup;
298         next_work = work->next;
299 
300         work->handler(work->task, work->obj, work->data);
301 
302         mp->cleanup = next_work;
303     }
304 
305     next = nxt_rbtree_root(&mp->blocks);
306 
307     while (next != nxt_rbtree_sentinel(&mp->blocks)) {
308 
309         node = nxt_rbtree_destroy_next(&mp->blocks, &next);
310         block = (nxt_mp_block_t *) node;
311 
312         p = block->start;
313 
314         if (block->type != NXT_MP_EMBEDDED_BLOCK) {
315             nxt_free(block);
316         }
317 
318         nxt_free(p);
319     }
320 
321     nxt_free(mp);
322 }
323 
324 
325 nxt_bool_t
326 nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
327     size_t min_chunk_size)
328 {
329     nxt_bool_t  valid;
330 
331     /* Alignment and sizes must be powers of 2. */
332 
333     valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
334                            && nxt_is_power_of_two(page_size)
335                            && nxt_is_power_of_two(min_chunk_size)));
336     if (!valid) {
337         return 0;
338     }
339 
340     page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
341 
342     valid = nxt_expect(1, (page_size >= 64
343                            && page_size >= page_alignment
344                            && page_size >= min_chunk_size
345                            && min_chunk_size * 32 >= page_size
346                            && cluster_size >= page_size
347                            && cluster_size / page_size <= 256
348                            && cluster_size % page_size == 0));
349     if (!valid) {
350         return 0;
351     }
352 
353     return 1;
354 }
355 
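/*
 * For example (values illustrative), (4096, 128, 512, 16) satisfies all of
 * the checks above, while (4096, 128, 1024, 16) does not: 32 chunks of the
 * minimum size cover only 512 bytes, less than the 1024-byte page, so the
 * 32-bit chunk bitmap could not describe a whole page.
 */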
356 
357 nxt_bool_t
358 nxt_mp_is_empty(nxt_mp_t *mp)
359 {
360     return (nxt_rbtree_is_empty(&mp->blocks)
361             && nxt_queue_is_empty(&mp->free_pages));
362 }
363 
364 
365 void *
366 nxt_mp_alloc(nxt_mp_t *mp, size_t size)
367 {
368     void  *p;
369 
370 #if !(NXT_DEBUG_MEMORY)
371 
372     if (size <= mp->page_size) {
373         p = nxt_mp_alloc_small(mp, size);
374 
375     } else {
376         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
377     }
378 
379 #else
380 
381     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
382 
383 #endif
384 
385     nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p);
386 
387     return p;
388 }
389 
390 
391 void *
392 nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
393 {
394     void  *p;
395 
396     p = nxt_mp_alloc(mp, size);
397 
398     if (nxt_fast_path(p != NULL)) {
399         memset(p, 0, size);
400     }
401 
402     return p;
403 }
404 
405 
406 void *
407 nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
408 {
409     void    *p;
410 
411     /* Alignment must be a power of 2. */
412 
413     if (nxt_fast_path(nxt_is_power_of_two(alignment))) {
414 
415 #if !(NXT_DEBUG_MEMORY)
416 
417         size_t  aligned_size;
418 
419         aligned_size = nxt_max(size, alignment);
420 
421         if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) {
422             p = nxt_mp_alloc_small(mp, aligned_size);
423 
424         } else {
425             p = nxt_mp_alloc_large(mp, alignment, size);
426         }
427 
428 #else
429 
430         p = nxt_mp_alloc_large(mp, alignment, size);
431 
432 #endif
433 
434     } else {
435         p = NULL;
436     }
437 
438     nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p);
439 
440     return p;
441 }
442 
443 
444 void *
445 nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
446 {
447     void  *p;
448 
449     p = nxt_mp_align(mp, alignment, size);
450 
451     if (nxt_fast_path(p != NULL)) {
452         memset(p, 0, size);
453     }
454 
455     return p;
456 }
457 
458 
459 nxt_inline nxt_uint_t
460 nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
461 {
462     nxt_int_t  n, index;
463 
464     index = 0;
465 
466     if (size > 1) {
467         n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;
468 
469         if (n > 0) {
470             index = n;
471         }
472     }
473 
474     return index;
475 }
476 
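/*
 * For example, with chunk_size_shift == 4 (16-byte minimum chunks), sizes
 * 1..16 map to index 0, 17..32 to index 1, 33..64 to index 2 and so on:
 * each request is effectively rounded up to the next power-of-2 chunk size.
 */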
477 
478 #if !(NXT_DEBUG_MEMORY)
479 
480 nxt_inline u_char *
481 nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
482 {
483     size_t          page_offset;
484     nxt_mp_block_t  *block;
485 
486     page_offset = page->number * sizeof(nxt_mp_page_t)
487                   + offsetof(nxt_mp_block_t, pages);
488 
489     block = (nxt_mp_block_t *) ((u_char *) page - page_offset);
490 
491     return block->start + (page->number << mp->page_size_shift);
492 }
493 
494 
495 static void *
496 nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
497 {
498     u_char            *p;
499     nxt_uint_t        n, index;
500     nxt_queue_t       *chunk_pages;
501     nxt_mp_page_t     *page;
502     nxt_queue_link_t  *link;
503 
504     nxt_mp_thread_assert(mp);
505 
506     p = NULL;
507 
508     if (size <= mp->page_size / 2) {
509 
510         index = nxt_mp_chunk_pages_index(mp, size);
511         chunk_pages = &mp->chunk_pages[index];
512 
513         if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {
514 
515             link = nxt_queue_first(chunk_pages);
516             page = nxt_queue_link_data(link, nxt_mp_page_t, link);
517 
518             p = nxt_mp_page_addr(mp, page);
519 
520             n = nxt_mp_chunk_get_free(page->u.map);
521             nxt_mp_chunk_set_busy(page->u.map, n);
522 
523             p += ((n << index) << mp->chunk_size_shift);
524 
525             page->chunks--;
526 
527             if (page->chunks == 0) {
528                 /*
529                  * Remove full page from the pool chunk pages list
530                  * of pages with free chunks.
531                  */
532                 nxt_queue_remove(&page->link);
533             }
534 
535         } else {
536             page = nxt_mp_alloc_page(mp);
537 
538             if (nxt_fast_path(page != NULL)) {
539                 page->size = (1 << index);
540 
541                 n = mp->page_size_shift - (index + mp->chunk_size_shift);
542                 page->chunks = (1 << n) - 1;
543 
544                 nxt_queue_insert_head(chunk_pages, &page->link);
545 
546                 /* Mark the first chunk as busy. */
547                 page->u.map = 0xFFFFFFFE;
548 
549                 p = nxt_mp_page_addr(mp, page);
550             }
551         }
552 
553     } else {
554         page = nxt_mp_alloc_page(mp);
555 
556         if (nxt_fast_path(page != NULL)) {
557             page->size = mp->page_size >> mp->chunk_size_shift;
558 
559             p = nxt_mp_page_addr(mp, page);
560         }
561     }
562 
563     nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp,
564                     page != NULL ? page->size << mp->chunk_size_shift : 0, p);
565 
566     return p;
567 }
568 
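/*
 * A note on the chunk address arithmetic in nxt_mp_alloc_small():
 * (n << index) << chunk_size_shift equals n * chunk_size, because the chunk
 * size for list index i is 1 << (i + chunk_size_shift).  E.g. with 16-byte
 * minimum chunks, index 2 (64-byte chunks) and n == 3 the offset into the
 * page is 192.
 */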
569 
570 static void *
571 nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
572 {
573     u_char            *p;
574     uint32_t          available;
575     nxt_mp_page_t     *page;
576     nxt_queue_link_t  *link, *next;
577 
578     nxt_mp_thread_assert(mp);
579 
580     for (link = nxt_queue_first(pages);
581          link != nxt_queue_tail(pages);
582          link = next)
583     {
584         next = nxt_queue_next(link);
585         page = nxt_queue_link_data(link, nxt_mp_page_t, link);
586 
587         available = mp->page_size - page->u.taken;
588 
589         if (size <= available) {
590             goto found;
591         }
592 
593         if (available == 0 || page->fails++ > 100) {
594             nxt_queue_remove(link);
595         }
596     }
597 
598     page = nxt_mp_alloc_page(mp);
599 
600     if (nxt_slow_path(page == NULL)) {
601         return page;
602     }
603 
604     nxt_queue_insert_head(pages, &page->link);
605 
606     page->size = 0xFF;
607     page->u.taken = 0;
608 
609 found:
610 
611     p = nxt_mp_page_addr(mp, page);
612 
613     p += page->u.taken;
614     page->u.taken += size;
615 
616     return p;
617 }
618 
619 
620 static nxt_mp_page_t *
621 nxt_mp_alloc_page(nxt_mp_t *mp)
622 {
623     nxt_mp_page_t     *page;
624     nxt_mp_block_t    *cluster;
625     nxt_queue_link_t  *link;
626 
627     if (nxt_queue_is_empty(&mp->free_pages)) {
628         cluster = nxt_mp_alloc_cluster(mp);
629         if (nxt_slow_path(cluster == NULL)) {
630             return NULL;
631         }
632     }
633 
634     link = nxt_queue_first(&mp->free_pages);
635     nxt_queue_remove(link);
636 
637     page = nxt_queue_link_data(link, nxt_mp_page_t, link);
638 
639     return page;
640 }
641 
642 
643 static nxt_mp_block_t *
644 nxt_mp_alloc_cluster(nxt_mp_t *mp)
645 {
646     nxt_uint_t      n;
647     nxt_mp_block_t  *cluster;
648 
649     n = mp->cluster_size >> mp->page_size_shift;
650 
651     cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));
652 
653     if (nxt_slow_path(cluster == NULL)) {
654         return NULL;
655     }
656 
657     /* NXT_MP_CLUSTER_BLOCK type is zero. */
658 
659     cluster->size = mp->cluster_size;
660 
661     cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
662     if (nxt_slow_path(cluster->start == NULL)) {
663         nxt_free(cluster);
664         return NULL;
665     }
666 
667     n--;
668     cluster->pages[n].number = n;
669     nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);
670 
671     while (n != 0) {
672         n--;
673         cluster->pages[n].number = n;
674         nxt_queue_insert_before(&cluster->pages[n + 1].link,
675                                 &cluster->pages[n].link);
676     }
677 
678     nxt_rbtree_insert(&mp->blocks, &cluster->node);
679 
680     return cluster;
681 }
682 
683 #endif
684 
685 
686 static void *
687 nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
688 {
689     u_char          *p;
690     size_t          aligned_size;
691     uint8_t         type;
692     nxt_mp_block_t  *block;
693 
694     nxt_mp_thread_assert(mp);
695 
696     /* Allocation must be less than 4G. */
697     if (nxt_slow_path(size >= 0xFFFFFFFF)) {
698         return NULL;
699     }
700 
701     if (nxt_is_power_of_two(size)) {
702         block = nxt_malloc(sizeof(nxt_mp_block_t));
703         if (nxt_slow_path(block == NULL)) {
704             return NULL;
705         }
706 
707         p = nxt_memalign(alignment, size);
708         if (nxt_slow_path(p == NULL)) {
709             nxt_free(block);
710             return NULL;
711         }
712 
713         type = NXT_MP_DISCRETE_BLOCK;
714 
715     } else {
716         aligned_size = nxt_align_size(size, sizeof(uintptr_t));
717 
718         p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
719         if (nxt_slow_path(p == NULL)) {
720             return NULL;
721         }
722 
723         block = (nxt_mp_block_t *) (p + aligned_size);
724         type = NXT_MP_EMBEDDED_BLOCK;
725     }
726 
727     block->type = type;
728     block->size = size;
729     block->start = p;
730 
731     nxt_rbtree_insert(&mp->blocks, &block->node);
732 
733     return p;
734 }
735 
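/*
 * For example (sizes illustrative): an 8192-byte request gets a separately
 * malloc()ed NXT_MP_DISCRETE_BLOCK descriptor, so the power-of-2 allocation
 * itself is not enlarged; a 5000-byte request is padded to a uintptr_t
 * boundary and its NXT_MP_EMBEDDED_BLOCK descriptor is placed right after
 * the data.
 */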
736 
737 static intptr_t
738 nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
739 {
740     nxt_mp_block_t  *block1, *block2;
741 
742     block1 = (nxt_mp_block_t *) node1;
743     block2 = (nxt_mp_block_t *) node2;
744 
745     return (uintptr_t) block1->start - (uintptr_t) block2->start;
746 }
747 
748 
749 void
750 nxt_mp_free(nxt_mp_t *mp, void *p)
751 {
752     const char      *err;
753     nxt_thread_t    *thread;
754     nxt_mp_block_t  *block;
755 
756     nxt_mp_thread_assert(mp);
757 
758     nxt_debug_alloc("mp %p free(%p)", mp, p);
759 
760     block = nxt_mp_find_block(&mp->blocks, p);
761 
762     if (nxt_fast_path(block != NULL)) {
763 
764         if (block->type == NXT_MP_CLUSTER_BLOCK) {
765             err = nxt_mp_chunk_free(mp, block, p);
766 
767             if (nxt_fast_path(err == NULL)) {
768                 return;
769             }
770 
771         } else if (nxt_fast_path(p == block->start)) {
772             nxt_rbtree_delete(&mp->blocks, &block->node);
773 
774             if (block->type == NXT_MP_DISCRETE_BLOCK) {
775                 nxt_free(block);
776             }
777 
778             nxt_free(p);
779 
780             return;
781 
782         } else {
783             err = "freed pointer points to middle of block: %p";
784         }
785 
786     } else {
787         err = "freed pointer is out of pool: %p";
788     }
789 
790     thread = nxt_thread();
791 
792     nxt_log(thread->task, NXT_LOG_CRIT, err, p);
793 }
794 
795 
796 static nxt_mp_block_t *
797 nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
798 {
799     nxt_mp_block_t     *block;
800     nxt_rbtree_node_t  *node, *sentinel;
801 
802     node = nxt_rbtree_root(tree);
803     sentinel = nxt_rbtree_sentinel(tree);
804 
805     while (node != sentinel) {
806 
807         block = (nxt_mp_block_t *) node;
808 
809         if (p < block->start) {
810             node = node->left;
811 
812         } else if (p >= block->start + block->size) {
813             node = node->right;
814 
815         } else {
816             return block;
817         }
818     }
819 
820     return NULL;
821 }
822 
823 
824 static const char *
825 nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
826 {
827     u_char         *start;
828     uintptr_t      offset;
829     nxt_uint_t     n, size, chunk;
830     nxt_queue_t    *chunk_pages;
831     nxt_mp_page_t  *page;
832 
833     n = (p - cluster->start) >> mp->page_size_shift;
834     start = cluster->start + (n << mp->page_size_shift);
835 
836     page = &cluster->pages[n];
837 
838     if (nxt_slow_path(page->size == 0)) {
839         return "freed pointer points to already free page: %p";
840     }
841 
842     if (nxt_slow_path(page->size == 0xFF)) {
843         return "freed pointer points to non-freeble page: %p";
844     }
845 
846     size = page->size << mp->chunk_size_shift;
847 
848     if (size != mp->page_size) {
849 
850         offset = (uintptr_t) (p - start) & (mp->page_size - 1);
851         chunk = offset / size;
852 
853         if (nxt_slow_path(offset != chunk * size)) {
854             return "freed pointer points to wrong chunk: %p";
855         }
856 
857         if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
858             return "freed pointer points to already free chunk: %p";
859         }
860 
861         nxt_mp_chunk_set_free(page->u.map, chunk);
862 
863         if (page->u.map != 0xFFFFFFFF) {
864             page->chunks++;
865 
866             if (page->chunks == 1) {
867                 /*
868                  * Add the page to the head of pool chunk pages list
869                  * of pages with free chunks.
870                  */
871                 n = nxt_mp_chunk_pages_index(mp, size);
872                 chunk_pages = &mp->chunk_pages[n];
873 
874                 nxt_queue_insert_head(chunk_pages, &page->link);
875             }
876 
877             nxt_mp_free_junk(p, size);
878 
879             return NULL;
880 
881         } else {
882             /*
883              * All chunks are free, remove the page from pool
884              * chunk pages list of pages with free chunks.
885              */
886             nxt_queue_remove(&page->link);
887         }
888 
889     } else if (nxt_slow_path(p != start)) {
890         return "invalid pointer to chunk: %p";
891     }
892 
893     /* Add the free page to the pool's free pages list. */
894 
895     page->size = 0;
896     nxt_queue_insert_head(&mp->free_pages, &page->link);
897 
898     nxt_mp_free_junk(p, size);
899 
900     /* Test if all pages in the cluster are free. */
901 
902     n = mp->cluster_size >> mp->page_size_shift;
903     page = cluster->pages;
904 
905     do {
906          if (page->size != 0) {
907              return NULL;
908          }
909 
910          page++;
911          n--;
912     } while (n != 0);
913 
914     /* Free cluster. */
915 
916     n = mp->cluster_size >> mp->page_size_shift;
917     page = cluster->pages;
918 
919     do {
920          nxt_queue_remove(&page->link);
921          page++;
922          n--;
923     } while (n != 0);
924 
925     nxt_rbtree_delete(&mp->blocks, &cluster->node);
926 
927     p = cluster->start;
928 
929     nxt_free(cluster);
930     nxt_free(p);
931 
932     return NULL;
933 }
934 
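/*
 * For example, freeing a pointer at byte offset 192 of a page carved into
 * 64-byte chunks yields chunk == 3, whereas an offset such as 200, which is
 * not a multiple of the chunk size, is reported as a wrong chunk above.
 */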
935 
936 void *
937 nxt_mp_retain(nxt_mp_t *mp, size_t size)
938 {
939     void  *p;
940 
941     p = nxt_mp_alloc(mp, size);
942 
943     if (nxt_fast_path(p != NULL)) {
944         mp->retain++;
945         nxt_debug_alloc("mp %p retain: %uD", mp, mp->retain);
946     }
947 
948     return p;
949 }
950 
951 
952 uint32_t
953 nxt_mp_release(nxt_mp_t *mp, void *p)
954 {
955     nxt_mp_free(mp, p);
956 
957     mp->retain--;
958 
959     nxt_debug_alloc("mp %p release: %uD", mp, mp->retain);
960 
961     if (mp->retain == 0) {
962         nxt_mp_destroy(mp);
963 
964         return 0;
965     }
966 
967     return mp->retain;
968 }
969 
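/*
 * Reference counting sketch: nxt_mp_create() sets retain to 1,
 * nxt_mp_retain() allocates memory and adds a reference, and
 * nxt_mp_release() frees that memory, drops a reference and destroys the
 * pool once the count reaches zero.
 */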
970 
971 void *
972 nxt_mp_nget(nxt_mp_t *mp, size_t size)
973 {
974     void  *p;
975 
976 #if !(NXT_DEBUG_MEMORY)
977 
978     if (size <= mp->page_size) {
979         p = nxt_mp_get_small(mp, &mp->nget_pages, size);
980 
981     } else {
982         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
983     }
984 
985 #else
986 
987     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
988 
989 #endif
990 
991     nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p);
992 
993     return p;
994 }
995 
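/*
 * Both nxt_mp_nget() and nxt_mp_get() below carve small requests from
 * non-freeable pages, so that memory is reclaimed only when the pool is
 * destroyed.  The difference is that nxt_mp_get() never hands out less than
 * NXT_MAX_ALIGNMENT bytes, while nxt_mp_nget() packs allocations back to
 * back with no minimum size.
 */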
996 
997 void *
998 nxt_mp_get(nxt_mp_t *mp, size_t size)
999 {
1000     void  *p;
1001 
1002 #if !(NXT_DEBUG_MEMORY)
1003 
1004     if (size <= mp->page_size) {
1005         size = nxt_max(size, NXT_MAX_ALIGNMENT);
1006         p = nxt_mp_get_small(mp, &mp->get_pages, size);
1007 
1008     } else {
1009         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
1010     }
1011 
1012 #else
1013 
1014     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
1015 
1016 #endif
1017 
1018     nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p);
1019 
1020     return p;
1021 }
1022 
1023 
1024 void *
1025 nxt_mp_zget(nxt_mp_t *mp, size_t size)
1026 {
1027     void  *p;
1028 
1029     p = nxt_mp_get(mp, size);
1030 
1031     if (nxt_fast_path(p != NULL)) {
1032         memset(p, 0, size);
1033     }
1034 
1035     return p;
1036 }
1037 
1038 
1039 nxt_int_t
1040 nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler,
1041     nxt_task_t *task, void *obj, void *data)
1042 {
1043     nxt_work_t  *work;
1044 
1045     work = nxt_mp_get(mp, sizeof(nxt_work_t));
1046 
1047     if (nxt_slow_path(work == NULL)) {
1048         return NXT_ERROR;
1049     }
1050 
1051     work->next = mp->cleanup;
1052     work->handler = handler;
1053     work->task = task;
1054     work->obj = obj;
1055     work->data = data;
1056 
1057     mp->cleanup = work;
1058 
1059     return NXT_OK;
1060 }
1061
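/*
 * Cleanup sketch (the handler name is hypothetical): handlers registered
 * with nxt_mp_cleanup() are run by nxt_mp_destroy() in reverse registration
 * order, before any pool memory is released:
 *
 *     static void
 *     my_cleanup(nxt_task_t *task, void *obj, void *data)
 *     {
 *         ...
 *     }
 *
 *     (void) nxt_mp_cleanup(mp, my_cleanup, task, obj, data);
 */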