xref: /unit/src/nxt_mp.c (revision 564:762f8c976ead)
1 
2 /*
3  * Copyright (C) Igor Sysoev
4  * Copyright (C) NGINX, Inc.
5  */
6 
7 #include <nxt_main.h>
8 
9 
10 /*
11  * A memory pool allocates memory in clusters of a specified size, aligned
12  * to page_alignment.  A cluster is divided into pages of a specified size.
13  * Page size must be a power of 2.  A page can be used entirely or divided
14  * into chunks of equal size.  Chunk size must be a power of 2.  Non-freeable
15  * memory is also allocated from pages.  A cluster can contain a mix of pages
16  * with different chunk sizes and non-freeable pages.  Cluster size must be
17  * a multiple of page size and need not be a power of 2.  Allocations larger
18  * than a page are allocated outside clusters.  Start addresses and sizes of
19  * the clusters and large allocations are stored in an rbtree of blocks so
20  * that they can be found on free operations.  The rbtree nodes are sorted
21  * by start address.  The rbtree is also used to destroy the memory pool.
22  */
23 
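/*
 * Illustrative usage sketch.  The parameter values below are hypothetical,
 * chosen only to make the layout concrete: with a 4096-byte cluster,
 * 1024-byte pages and a 64-byte minimum chunk size, a cluster holds four
 * pages, and a page can be split into 16 chunks of 64 bytes, 8 chunks of
 * 128 bytes, 4 chunks of 256 bytes or 2 chunks of 512 bytes.  Requests
 * larger than half a page take a whole page; requests larger than a page
 * become separate large blocks outside any cluster.
 *
 *     nxt_mp_t  *mp;
 *     void      *p, *q;
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 64);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 100);      <- 128-byte chunk of a page
 *         q = nxt_mp_alloc(mp, 5000);     <- large block outside clusters
 *
 *         nxt_mp_free(mp, p);
 *         nxt_mp_free(mp, q);
 *
 *         nxt_mp_destroy(mp);
 *     }
 */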
24 
25 typedef struct {
26     /*
27      * Used to link
28      *  *) pages with free chunks in the pool's chunk pages lists,
29      *  *) pages with free space for non-freeable allocations,
30      *  *) free pages in clusters.
31      */
32     nxt_queue_link_t     link;
33 
34     union {
35         /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
36         uint32_t         map;
37 
38         /* Size of taken non-freeable space. */
39         uint32_t         taken;
40     } u;
41 
42     /*
43      * Chunk or page size shifted right by pool->chunk_size_shift.  Zero
44      * means the page is free, 0xFF marks a page with non-freeable allocations.
45      */
46     uint8_t              size;
47 
48     /* Number of free chunks of a chunked page. */
49     uint8_t              chunks;
50 
51     /*
52      * Number of allocation failures due to insufficient free space
53      * in a non-freeable page.
54      */
55     uint8_t              fails;
56 
57     /*
58      * Page number in page cluster.
59      * There can be no more than 256 pages in a cluster.
60      */
61     uint8_t              number;
62 } nxt_mp_page_t;
63 
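/*
 * For illustration, assuming a hypothetical 64-byte minimum chunk size
 * (chunk_size_shift == 6) and 1024-byte pages: a page split into 128-byte
 * chunks has size == 2 (128 >> 6), a page taken entirely by one allocation
 * has size == 16 (1024 >> 6), a free page has size == 0, and a page used
 * for non-freeable allocations has size == 0xFF.
 */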
64 
65 /*
66  * Some malloc implementations (e.g. jemalloc) allocate sufficiently large
67  * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
68  * descriptor were allocated together with the block, it would waste an
69  * excessive 4K of memory.  It is therefore better to allocate the block
70  * descriptor separately.
71  */
72 
73 typedef enum {
74     /* Block of a cluster.  The block is allocated apart from the cluster. */
75     NXT_MP_CLUSTER_BLOCK = 0,
76     /*
77      * Block of large allocation.
78      * The block is allocated apart from the allocation.
79      */
80     NXT_MP_DISCRETE_BLOCK,
81     /*
82      * Block of large allocation.
83      * The block is allocated just after the allocation.
84      */
85     NXT_MP_EMBEDDED_BLOCK,
86 } nxt_mp_block_type_t;
87 
88 
89 typedef struct {
90     NXT_RBTREE_NODE      (node);
91     nxt_mp_block_type_t  type:8;
92 
93     /* Block size must be less than 4G. */
94     uint32_t             size;
95 
96     u_char               *start;
97     nxt_mp_page_t        pages[];
98 } nxt_mp_block_t;
99 
100 
101 struct nxt_mp_s {
102     /* rbtree of nxt_mp_block_t. */
103     nxt_rbtree_t         blocks;
104 
105     uint8_t              chunk_size_shift;
106     uint8_t              page_size_shift;
107     uint32_t             page_size;
108     uint32_t             page_alignment;
109     uint32_t             cluster_size;
110     uint32_t             retain;
111 
112 #if (NXT_DEBUG)
113     nxt_pid_t            pid;
114     nxt_tid_t            tid;
115 #endif
116 
117     nxt_work_t           *cleanup;
118 
119     /* Lists of nxt_mp_page_t. */
120     nxt_queue_t          free_pages;
121     nxt_queue_t          nget_pages;
122     nxt_queue_t          get_pages;
123     nxt_queue_t          chunk_pages[];
124 };
125 
126 
127 #define nxt_mp_chunk_get_free(map)                                            \
128     (__builtin_ffs(map) - 1)
129 
130 
131 #define nxt_mp_chunk_is_free(map, chunk)                                      \
132     ((map & (1 << chunk)) != 0)
133 
134 
135 #define nxt_mp_chunk_set_busy(map, chunk)                                     \
136     map &= ~(1 << chunk)
137 
138 
139 #define nxt_mp_chunk_set_free(map, chunk)                                     \
140     map |= (1 << chunk)
141 
142 
143 #define nxt_mp_free_junk(p, size)                                             \
144     memset((p), 0x5A, size)
145 
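/*
 * A quick sketch of the bitmap convention behind the macros above (values
 * are illustrative, not taken from this file): a set bit marks a free
 * chunk, so __builtin_ffs() applied to the map yields the lowest free
 * chunk index.
 *
 *     uint32_t  map = 0xFFFFFFFE;          chunk 0 busy, chunks 1..31 free
 *
 *     n = nxt_mp_chunk_get_free(map);      n == 1
 *     nxt_mp_chunk_set_busy(map, n);       map == 0xFFFFFFFC
 *     nxt_mp_chunk_set_free(map, n);       map == 0xFFFFFFFE again
 */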
146 
147 #if !(NXT_DEBUG_MEMORY)
148 static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
149 static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
150 static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
151 static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
152 #endif
153 static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
154 static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
155     nxt_rbtree_node_t *node2);
156 static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
157 static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
158     u_char *p);
159 
160 
161 #if (NXT_HAVE_BUILTIN_CLZ)
162 
163 #define nxt_lg2(value)                                                        \
164     (31 - __builtin_clz(value))
165 
166 #else
167 
168 static const int nxt_lg2_tab64[64] = {
169     63,  0, 58,  1, 59, 47, 53,  2,
170     60, 39, 48, 27, 54, 33, 42,  3,
171     61, 51, 37, 40, 49, 18, 28, 20,
172     55, 30, 34, 11, 43, 14, 22,  4,
173     62, 57, 46, 52, 38, 26, 32, 41,
174     50, 36, 17, 19, 29, 10, 13, 21,
175     56, 45, 25, 31, 35, 16,  9, 12,
176     44, 24, 15,  8, 23,  7,  6,  5
177 };
178 
179 static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;
180 
181 static int
182 nxt_lg2(uint64_t v)
183 {
184     v |= v >> 1;
185     v |= v >> 2;
186     v |= v >> 4;
187     v |= v >> 8;
188     v |= v >> 16;
189     v |= v >> 32;
190     return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
191 }
192 
193 #endif
194 
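/*
 * Both nxt_lg2() variants compute floor(log2(value)) for value >= 1.  The
 * fallback smears the highest set bit downward, isolates it with
 * v - (v >> 1), and maps it to its bit position with a de Bruijn-style
 * multiply-and-shift table lookup.  A sanity check (illustrative only):
 *
 *     nxt_lg2(1) == 0, and for any power-of-2 page_size,
 *     nxt_lg2(page_size) equals the corresponding page_size_shift.
 */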
195 
196 #if (NXT_DEBUG)
197 
198 nxt_inline void
199 nxt_mp_thread_assert(nxt_mp_t *mp)
200 {
201     nxt_tid_t     tid;
202     nxt_thread_t  *thread;
203 
204     thread = nxt_thread();
205     tid = nxt_thread_tid(thread);
206 
207     if (nxt_fast_path(mp->tid == tid)) {
208         return;
209     }
210 
211     if (nxt_slow_path(nxt_pid != mp->pid)) {
212         mp->pid = nxt_pid;
213         mp->tid = tid;
214 
215         return;
216     }
217 
218     nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
219     nxt_abort();
220 }
221 
222 #else
223 
224 #define nxt_mp_thread_assert(mp)
225 
226 #endif
227 
228 
229 void
230 nxt_mp_thread_adopt(nxt_mp_t *mp)
231 {
232 #if (NXT_DEBUG)
233     mp->pid = nxt_pid;
234     mp->tid = nxt_thread_tid(NULL);
235 #endif
236 }
237 
238 
239 nxt_mp_t *
240 nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
241     size_t min_chunk_size)
242 {
243     nxt_mp_t     *mp;
244     uint32_t     pages, chunk_size_shift, page_size_shift;
245     nxt_queue_t  *chunk_pages;
246 
247     chunk_size_shift = nxt_lg2(min_chunk_size);
248     page_size_shift = nxt_lg2(page_size);
249 
250     pages = page_size_shift - chunk_size_shift;
251 
252     mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));
253 
254     if (nxt_fast_path(mp != NULL)) {
255         mp->retain = 1;
256         mp->chunk_size_shift = chunk_size_shift;
257         mp->page_size_shift = page_size_shift;
258         mp->page_size = page_size;
259         mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
260         mp->cluster_size = cluster_size;
261 
262         chunk_pages = mp->chunk_pages;
263 
264         while (pages != 0) {
265             nxt_queue_init(chunk_pages);
266             chunk_pages++;
267             pages--;
268         }
269 
270         nxt_queue_init(&mp->free_pages);
271         nxt_queue_init(&mp->nget_pages);
272         nxt_queue_init(&mp->get_pages);
273 
274         nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
275     }
276 
277     nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size,
278                     page_alignment, page_size, min_chunk_size);
279 
280     return mp;
281 }
282 
283 
284 void
285 nxt_mp_retain(nxt_mp_t *mp)
286 {
287     mp->retain++;
288 
289     nxt_thread_log_debug("mp %p retain: %uD", mp, mp->retain);
290 }
291 
292 
293 void
294 nxt_mp_release(nxt_mp_t *mp)
295 {
296     mp->retain--;
297 
298     nxt_thread_log_debug("mp %p release: %uD", mp, mp->retain);
299 
300     if (mp->retain == 0) {
301         nxt_mp_destroy(mp);
302     }
303 }
304 
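/*
 * The pool is reference counted: nxt_mp_create() returns it with retain == 1,
 * and nxt_mp_release() destroys it when the counter drops to zero.  A brief
 * illustrative sequence (parameters are hypothetical):
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 64);     retain == 1
 *     nxt_mp_retain(mp);                           retain == 2
 *     nxt_mp_release(mp);                          retain == 1
 *     nxt_mp_release(mp);                          retain == 0, pool destroyed
 */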
305 
306 void
307 nxt_mp_destroy(nxt_mp_t *mp)
308 {
309     void               *p;
310     nxt_work_t         *work, *next_work;
311     nxt_mp_block_t     *block;
312     nxt_rbtree_node_t  *node, *next;
313 
314     nxt_debug_alloc("mp %p destroy", mp);
315 
316     nxt_mp_thread_assert(mp);
317 
318     while (mp->cleanup != NULL) {
319         work = mp->cleanup;
320         next_work = work->next;
321 
322         work->handler(work->task, work->obj, work->data);
323 
324         mp->cleanup = next_work;
325     }
326 
327     next = nxt_rbtree_root(&mp->blocks);
328 
329     while (next != nxt_rbtree_sentinel(&mp->blocks)) {
330 
331         node = nxt_rbtree_destroy_next(&mp->blocks, &next);
332         block = (nxt_mp_block_t *) node;
333 
334         p = block->start;
335 
336         if (block->type != NXT_MP_EMBEDDED_BLOCK) {
337             nxt_free(block);
338         }
339 
340         nxt_free(p);
341     }
342 
343     nxt_free(mp);
344 }
345 
346 
347 nxt_bool_t
348 nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
349     size_t min_chunk_size)
350 {
351     nxt_bool_t  valid;
352 
353     /* The alignment and sizes must be powers of 2. */
354 
355     valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
356                            && nxt_is_power_of_two(page_size)
357                            && nxt_is_power_of_two(min_chunk_size)));
358     if (!valid) {
359         return 0;
360     }
361 
362     page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
363 
364     valid = nxt_expect(1, (page_size >= 64
365                            && page_size >= page_alignment
366                            && page_size >= min_chunk_size
367                            && min_chunk_size * 32 >= page_size
368                            && cluster_size >= page_size
369                            && cluster_size / page_size <= 256
370                            && cluster_size % page_size == 0));
371     if (!valid) {
372         return 0;
373     }
374 
375     return 1;
376 }
377 
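/*
 * For example (hypothetical values), nxt_mp_test_sizes(4096, 128, 1024, 64)
 * passes all of the checks above, while nxt_mp_test_sizes(4096, 128, 1024, 16)
 * fails because 16 * 32 < 1024: a 1024-byte page of 16-byte chunks would need
 * more than the 32 chunks the page bitmap can track.
 */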
378 
379 nxt_bool_t
380 nxt_mp_is_empty(nxt_mp_t *mp)
381 {
382     return (nxt_rbtree_is_empty(&mp->blocks)
383             && nxt_queue_is_empty(&mp->free_pages));
384 }
385 
386 
387 void *
388 nxt_mp_alloc(nxt_mp_t *mp, size_t size)
389 {
390     void  *p;
391 
392 #if !(NXT_DEBUG_MEMORY)
393 
394     if (size <= mp->page_size) {
395         p = nxt_mp_alloc_small(mp, size);
396 
397     } else {
398         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
399     }
400 
401 #else
402 
403     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
404 
405 #endif
406 
407     nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p);
408 
409     return p;
410 }
411 
412 
413 void *
414 nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
415 {
416     void  *p;
417 
418     p = nxt_mp_alloc(mp, size);
419 
420     if (nxt_fast_path(p != NULL)) {
421         memset(p, 0, size);
422     }
423 
424     return p;
425 }
426 
427 
428 void *
429 nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
430 {
431     void    *p;
432 
433     /* Alignment must be a power of 2. */
434 
435     if (nxt_fast_path(nxt_is_power_of_two(alignment))) {
436 
437 #if !(NXT_DEBUG_MEMORY)
438 
439         size_t  aligned_size;
440 
441         aligned_size = nxt_max(size, alignment);
442 
443         if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) {
444             p = nxt_mp_alloc_small(mp, aligned_size);
445 
446         } else {
447             p = nxt_mp_alloc_large(mp, alignment, size);
448         }
449 
450 #else
451 
452         p = nxt_mp_alloc_large(mp, alignment, size);
453 
454 #endif
455 
456     } else {
457         p = NULL;
458     }
459 
460     nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p);
461 
462     return p;
463 }
464 
465 
466 void *
467 nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
468 {
469     void  *p;
470 
471     p = nxt_mp_align(mp, alignment, size);
472 
473     if (nxt_fast_path(p != NULL)) {
474         memset(p, 0, size);
475     }
476 
477     return p;
478 }
479 
480 
481 nxt_inline nxt_uint_t
482 nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
483 {
484     nxt_int_t  n, index;
485 
486     index = 0;
487 
488     if (size > 1) {
489         n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;
490 
491         if (n > 0) {
492             index = n;
493         }
494     }
495 
496     return index;
497 }
498 
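/*
 * Worked example (hypothetical 64-byte minimum chunk size, i.e.
 * chunk_size_shift == 6): requests of 1..64 bytes map to index 0 (64-byte
 * chunks), 65..128 bytes to index 1 (128-byte chunks), 100 bytes therefore
 * to index 1, and 512 bytes to index 3 (512-byte chunks).
 */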
499 
500 #if !(NXT_DEBUG_MEMORY)
501 
502 nxt_inline u_char *
503 nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
504 {
505     size_t          page_offset;
506     nxt_mp_block_t  *block;
507 
508     page_offset = page->number * sizeof(nxt_mp_page_t)
509                   + offsetof(nxt_mp_block_t, pages);
510 
511     block = (nxt_mp_block_t *) ((u_char *) page - page_offset);
512 
513     return block->start + (page->number << mp->page_size_shift);
514 }
515 
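/*
 * nxt_mp_page_addr() is container-of style arithmetic: a page descriptor is
 * element pages[page->number] of its cluster block, so stepping back by
 * page->number descriptors plus offsetof(nxt_mp_block_t, pages) recovers the
 * block, and the page data lives at block->start + page->number * page_size.
 */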
516 
517 static void *
518 nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
519 {
520     u_char            *p;
521     nxt_uint_t        n, index;
522     nxt_queue_t       *chunk_pages;
523     nxt_mp_page_t     *page;
524     nxt_queue_link_t  *link;
525 
526     nxt_mp_thread_assert(mp);
527 
528     p = NULL;
529 
530     if (size <= mp->page_size / 2) {
531 
532         index = nxt_mp_chunk_pages_index(mp, size);
533         chunk_pages = &mp->chunk_pages[index];
534 
535         if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {
536 
537             link = nxt_queue_first(chunk_pages);
538             page = nxt_queue_link_data(link, nxt_mp_page_t, link);
539 
540             p = nxt_mp_page_addr(mp, page);
541 
542             n = nxt_mp_chunk_get_free(page->u.map);
543             nxt_mp_chunk_set_busy(page->u.map, n);
544 
545             p += ((n << index) << mp->chunk_size_shift);
546 
547             page->chunks--;
548 
549             if (page->chunks == 0) {
550                 /*
551                  * The page has no free chunks left; remove it from
552                  * the pool's list of pages with free chunks.
553                  */
554                 nxt_queue_remove(&page->link);
555             }
556 
557         } else {
558             page = nxt_mp_alloc_page(mp);
559 
560             if (nxt_fast_path(page != NULL)) {
561                 page->size = (1 << index);
562 
563                 n = mp->page_size_shift - (index + mp->chunk_size_shift);
564                 page->chunks = (1 << n) - 1;
565 
566                 nxt_queue_insert_head(chunk_pages, &page->link);
567 
568                 /* Mark the first chunk as busy. */
569                 page->u.map = 0xFFFFFFFE;
570 
571                 p = nxt_mp_page_addr(mp, page);
572             }
573         }
574 
575     } else {
576         page = nxt_mp_alloc_page(mp);
577 
578         if (nxt_fast_path(page != NULL)) {
579             page->size = mp->page_size >> mp->chunk_size_shift;
580 
581             p = nxt_mp_page_addr(mp, page);
582         }
583     }
584 
585     nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp, (page != NULL)
586                     ? (size_t) page->size << mp->chunk_size_shift : 0, p);
587 
588     return p;
589 }
590 
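/*
 * Worked example for nxt_mp_alloc_small(), again with hypothetical 1024-byte
 * pages and 64-byte minimum chunks: a 100-byte request selects the 128-byte
 * chunk queue (index 1).  If that queue is empty, a fresh page is set up with
 * size == 2 (128 >> 6), chunks == 7 (eight 128-byte chunks per page, one of
 * which is handed out immediately) and map == 0xFFFFFFFE, and the first chunk
 * is returned.
 */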
591 
592 static void *
593 nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
594 {
595     u_char            *p;
596     uint32_t          available;
597     nxt_mp_page_t     *page;
598     nxt_queue_link_t  *link, *next;
599 
600     nxt_mp_thread_assert(mp);
601 
602     for (link = nxt_queue_first(pages);
603          link != nxt_queue_tail(pages);
604          link = next)
605     {
606         next = nxt_queue_next(link);
607         page = nxt_queue_link_data(link, nxt_mp_page_t, link);
608 
609         available = mp->page_size - page->u.taken;
610 
611         if (size <= available) {
612             goto found;
613         }
614 
615         if (available == 0 || page->fails++ > 100) {
616             nxt_queue_remove(link);
617         }
618     }
619 
620     page = nxt_mp_alloc_page(mp);
621 
622     if (nxt_slow_path(page == NULL)) {
623         return page;
624     }
625 
626     nxt_queue_insert_head(pages, &page->link);
627 
628     page->size = 0xFF;
629     page->u.taken = 0;
630 
631 found:
632 
633     p = nxt_mp_page_addr(mp, page);
634 
635     p += page->u.taken;
636     page->u.taken += size;
637 
638     return p;
639 }
640 
641 
642 static nxt_mp_page_t *
643 nxt_mp_alloc_page(nxt_mp_t *mp)
644 {
645     nxt_mp_page_t     *page;
646     nxt_mp_block_t    *cluster;
647     nxt_queue_link_t  *link;
648 
649     if (nxt_queue_is_empty(&mp->free_pages)) {
650         cluster = nxt_mp_alloc_cluster(mp);
651         if (nxt_slow_path(cluster == NULL)) {
652             return NULL;
653         }
654     }
655 
656     link = nxt_queue_first(&mp->free_pages);
657     nxt_queue_remove(link);
658 
659     page = nxt_queue_link_data(link, nxt_mp_page_t, link);
660 
661     return page;
662 }
663 
664 
665 static nxt_mp_block_t *
666 nxt_mp_alloc_cluster(nxt_mp_t *mp)
667 {
668     nxt_uint_t      n;
669     nxt_mp_block_t  *cluster;
670 
671     n = mp->cluster_size >> mp->page_size_shift;
672 
673     cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));
674 
675     if (nxt_slow_path(cluster == NULL)) {
676         return NULL;
677     }
678 
679     /* NXT_MP_CLUSTER_BLOCK type is zero. */
680 
681     cluster->size = mp->cluster_size;
682 
683     cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
684     if (nxt_slow_path(cluster->start == NULL)) {
685         nxt_free(cluster);
686         return NULL;
687     }
688 
689     n--;
690     cluster->pages[n].number = n;
691     nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);
692 
693     while (n != 0) {
694         n--;
695         cluster->pages[n].number = n;
696         nxt_queue_insert_before(&cluster->pages[n + 1].link,
697                                 &cluster->pages[n].link);
698     }
699 
700     nxt_rbtree_insert(&mp->blocks, &cluster->node);
701 
702     return cluster;
703 }
704 
705 #endif
706 
707 
708 static void *
709 nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
710 {
711     u_char          *p;
712     size_t          aligned_size;
713     uint8_t         type;
714     nxt_mp_block_t  *block;
715 
716     nxt_mp_thread_assert(mp);
717 
718     /* Allocation must be less than 4G. */
719     if (nxt_slow_path(size >= 0xFFFFFFFF)) {
720         return NULL;
721     }
722 
723     if (nxt_is_power_of_two(size)) {
724         block = nxt_malloc(sizeof(nxt_mp_block_t));
725         if (nxt_slow_path(block == NULL)) {
726             return NULL;
727         }
728 
729         p = nxt_memalign(alignment, size);
730         if (nxt_slow_path(p == NULL)) {
731             nxt_free(block);
732             return NULL;
733         }
734 
735         type = NXT_MP_DISCRETE_BLOCK;
736 
737     } else {
738         aligned_size = nxt_align_size(size, sizeof(uintptr_t));
739 
740         p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
741         if (nxt_slow_path(p == NULL)) {
742             return NULL;
743         }
744 
745         block = (nxt_mp_block_t *) (p + aligned_size);
746         type = NXT_MP_EMBEDDED_BLOCK;
747     }
748 
749     block->type = type;
750     block->size = size;
751     block->start = p;
752 
753     nxt_rbtree_insert(&mp->blocks, &block->node);
754 
755     return p;
756 }
757 
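/*
 * Two illustrative cases for nxt_mp_alloc_large() (sizes are hypothetical):
 * a 4096-byte request has a power-of-2 size, so its descriptor is allocated
 * separately (NXT_MP_DISCRETE_BLOCK) to avoid the extra 4K an embedded
 * descriptor could cost with 4K-aligned malloc implementations; a 5000-byte
 * request is padded to a uintptr_t boundary (5000 is already a multiple of 8
 * on 64-bit) and its descriptor is embedded right after the data
 * (NXT_MP_EMBEDDED_BLOCK).
 */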
758 
759 static intptr_t
760 nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
761 {
762     nxt_mp_block_t  *block1, *block2;
763 
764     block1 = (nxt_mp_block_t *) node1;
765     block2 = (nxt_mp_block_t *) node2;
766 
767     return (uintptr_t) block1->start - (uintptr_t) block2->start;
768 }
769 
770 
771 void
772 nxt_mp_free(nxt_mp_t *mp, void *p)
773 {
774     const char      *err;
775     nxt_mp_block_t  *block;
776 
777     nxt_mp_thread_assert(mp);
778 
779     nxt_debug_alloc("mp %p free(%p)", mp, p);
780 
781     block = nxt_mp_find_block(&mp->blocks, p);
782 
783     if (nxt_fast_path(block != NULL)) {
784 
785         if (block->type == NXT_MP_CLUSTER_BLOCK) {
786             err = nxt_mp_chunk_free(mp, block, p);
787 
788             if (nxt_fast_path(err == NULL)) {
789                 return;
790             }
791 
792         } else if (nxt_fast_path(p == block->start)) {
793             nxt_rbtree_delete(&mp->blocks, &block->node);
794 
795             if (block->type == NXT_MP_DISCRETE_BLOCK) {
796                 nxt_free(block);
797             }
798 
799             nxt_free(p);
800 
801             return;
802 
803         } else {
804             err = "freed pointer points to middle of block: %p";
805         }
806 
807     } else {
808         err = "freed pointer is out of pool: %p";
809     }
810 
811     nxt_thread_log_alert(err, p);
812 }
813 
814 
815 static nxt_mp_block_t *
816 nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
817 {
818     nxt_mp_block_t     *block;
819     nxt_rbtree_node_t  *node, *sentinel;
820 
821     node = nxt_rbtree_root(tree);
822     sentinel = nxt_rbtree_sentinel(tree);
823 
824     while (node != sentinel) {
825 
826         block = (nxt_mp_block_t *) node;
827 
828         if (p < block->start) {
829             node = node->left;
830 
831         } else if (p >= block->start + block->size) {
832             node = node->right;
833 
834         } else {
835             return block;
836         }
837     }
838 
839     return NULL;
840 }
841 
842 
843 static const char *
844 nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
845 {
846     u_char         *start;
847     uintptr_t      offset;
848     nxt_uint_t     n, size, chunk;
849     nxt_queue_t    *chunk_pages;
850     nxt_mp_page_t  *page;
851 
852     n = (p - cluster->start) >> mp->page_size_shift;
853     start = cluster->start + (n << mp->page_size_shift);
854 
855     page = &cluster->pages[n];
856 
857     if (nxt_slow_path(page->size == 0)) {
858         return "freed pointer points to already free page: %p";
859     }
860 
861     if (nxt_slow_path(page->size == 0xFF)) {
862         return "freed pointer points to non-freeble page: %p";
863     }
864 
865     size = page->size << mp->chunk_size_shift;
866 
867     if (size != mp->page_size) {
868 
869         offset = (uintptr_t) (p - start) & (mp->page_size - 1);
870         chunk = offset / size;
871 
872         if (nxt_slow_path(offset != chunk * size)) {
873             return "freed pointer points to wrong chunk: %p";
874         }
875 
876         if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
877             return "freed pointer points to already free chunk: %p";
878         }
879 
880         nxt_mp_chunk_set_free(page->u.map, chunk);
881 
882         if (page->u.map != 0xFFFFFFFF) {
883             page->chunks++;
884 
885             if (page->chunks == 1) {
886                 /*
887                  * Add the page to the head of the pool's list of
888                  * pages with free chunks.
889                  */
890                 n = nxt_mp_chunk_pages_index(mp, size);
891                 chunk_pages = &mp->chunk_pages[n];
892 
893                 nxt_queue_insert_head(chunk_pages, &page->link);
894             }
895 
896             nxt_mp_free_junk(p, size);
897 
898             return NULL;
899 
900         } else {
901             /*
902              * All chunks are free; remove the page from the
903              * pool's list of pages with free chunks.
904              */
905             nxt_queue_remove(&page->link);
906         }
907 
908     } else if (nxt_slow_path(p != start)) {
909         return "invalid pointer to chunk: %p";
910     }
911 
912     /* Add the free page to the pool's free pages list. */
913 
914     page->size = 0;
915     nxt_queue_insert_head(&mp->free_pages, &page->link);
916 
917     nxt_mp_free_junk(p, size);
918 
919     /* Test if all pages in the cluster are free. */
920 
921     n = mp->cluster_size >> mp->page_size_shift;
922     page = cluster->pages;
923 
924     do {
925         if (page->size != 0) {
926             return NULL;
927         }
928
929         page++;
930         n--;
931     } while (n != 0);
932 
933     /* Free cluster. */
934 
935     n = mp->cluster_size >> mp->page_size_shift;
936     page = cluster->pages;
937 
938     do {
939         nxt_queue_remove(&page->link);
940         page++;
941         n--;
942     } while (n != 0);
943 
944     nxt_rbtree_delete(&mp->blocks, &cluster->node);
945 
946     p = cluster->start;
947 
948     nxt_free(cluster);
949     nxt_free(p);
950 
951     return NULL;
952 }
953 
954 
955 void *
956 nxt_mp_nget(nxt_mp_t *mp, size_t size)
957 {
958     void  *p;
959 
960 #if !(NXT_DEBUG_MEMORY)
961 
962     if (size <= mp->page_size) {
963         p = nxt_mp_get_small(mp, &mp->nget_pages, size);
964 
965     } else {
966         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
967     }
968 
969 #else
970 
971     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
972 
973 #endif
974 
975     nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p);
976 
977     return p;
978 }
979 
980 
981 void *
982 nxt_mp_get(nxt_mp_t *mp, size_t size)
983 {
984     void  *p;
985 
986 #if !(NXT_DEBUG_MEMORY)
987 
988     if (size <= mp->page_size) {
989         size = nxt_max(size, NXT_MAX_ALIGNMENT);
990         p = nxt_mp_get_small(mp, &mp->get_pages, size);
991 
992     } else {
993         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
994     }
995 
996 #else
997 
998     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
999 
1000 #endif
1001 
1002     nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p);
1003 
1004     return p;
1005 }
1006 
1007 
1008 void *
1009 nxt_mp_zget(nxt_mp_t *mp, size_t size)
1010 {
1011     void  *p;
1012 
1013     p = nxt_mp_get(mp, size);
1014 
1015     if (nxt_fast_path(p != NULL)) {
1016         memset(p, 0, size);
1017     }
1018 
1019     return p;
1020 }
1021 
1022 
1023 nxt_int_t
1024 nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler,
1025     nxt_task_t *task, void *obj, void *data)
1026 {
1027     nxt_work_t  *work;
1028 
1029     work = nxt_mp_get(mp, sizeof(nxt_work_t));
1030 
1031     if (nxt_slow_path(work == NULL)) {
1032         return NXT_ERROR;
1033     }
1034 
1035     work->next = mp->cleanup;
1036     work->handler = handler;
1037     work->task = task;
1038     work->obj = obj;
1039     work->data = data;
1040 
1041     mp->cleanup = work;
1042 
1043     return NXT_OK;
1044 }
1045
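/*
 * Cleanup handlers registered with nxt_mp_cleanup() are pushed onto a list
 * and run by nxt_mp_destroy() in reverse registration order, before any of
 * the pool memory is released.  A minimal sketch, assuming the handler
 * signature used by nxt_mp_destroy(); my_cleanup() is a hypothetical handler:
 *
 *     static void
 *     my_cleanup(nxt_task_t *task, void *obj, void *data)
 *     {
 *         ... release whatever "obj" and "data" refer to ...
 *     }
 *
 *     if (nxt_mp_cleanup(mp, my_cleanup, task, obj, data) != NXT_OK) {
 *         ... handle allocation failure ...
 *     }
 */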