xref: /unit/src/nxt_mp.c (revision 430:3a24c399394f)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of a specified size, aligned
 * to page_alignment.  A cluster is divided into pages of a specified size.
 * Page size must be a power of 2.  A page can be used entirely or can be
 * divided into chunks of equal size.  Chunk size must be a power of 2.
 * Non-freeable memory is also allocated from pages.  A cluster can contain
 * a mix of pages with different chunk sizes and non-freeable pages.  Cluster
 * size must be a multiple of page size and need not be a power of 2.
 * Allocations greater than a page are made outside clusters.  The start
 * addresses and sizes of the clusters and large allocations are stored in
 * an rbtree of blocks so they can be found on free operations.  The rbtree
 * nodes are sorted by start address.  The rbtree is also used to destroy
 * the memory pool.
 */

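
/*
 * A minimal usage sketch (the sizes below are illustrative assumptions,
 * not mandated by this file; they must satisfy nxt_mp_test_sizes()):
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 64);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 200);
 *
 *         if (p != NULL) {
 *             ...
 *             nxt_mp_free(mp, p);
 *         }
 *
 *         nxt_mp_destroy(mp);
 *     }
 */
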

typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of chunks or page shifted by pool->chunk_size_shift.  Zero means
     * the page is free; 0xFF means a page with non-freeable allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of failed allocations due to insufficient free space
     * in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;


/*
 * Some malloc implementations (e.g. jemalloc) allocate large enough
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor is allocated together with the block, it takes an excessive
 * 4K of memory.  It is therefore better to allocate the block descriptor
 * separately.
 */

typedef enum {
    /* Block of cluster.  The block is allocated apart from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of large allocation.
     * The block is allocated apart from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

#if (NXT_DEBUG)
    nxt_pid_t            pid;
    nxt_tid_t            tid;
#endif

    nxt_work_t           *cleanup;

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[];
};


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                    \
    map |= (1 << chunk)


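/*
 * Bitmap sketch (illustrative values, not from the original source): a set
 * bit in "map" marks a free chunk.  With map == 0xFFFFFFFE chunk 0 is busy,
 * so nxt_mp_chunk_is_free(map, 0) is 0 and nxt_mp_chunk_get_free(map)
 * returns 1, the first free chunk.
 */

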
#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)


#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


#if (NXT_HAVE_BUILTIN_CLZ)

#define nxt_lg2(value)                                                        \
    (31 - __builtin_clz(value))

#else

static const int nxt_lg2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5
};

static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;

static int
nxt_lg2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
}
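
/*
 * The fallback rounds v up to 2^n - 1, isolates the highest set bit with
 * v - (v >> 1), and uses a de Bruijn multiplication so that the top 6 bits
 * of the product index the lookup table.  For example, nxt_lg2(1024) yields
 * 10 (a worked example, not part of the original source).
 */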

#endif


#if (NXT_DEBUG)

nxt_inline void
nxt_mp_thread_assert(nxt_mp_t *mp)
{
    nxt_tid_t     tid;
    nxt_thread_t  *thread;

    thread = nxt_thread();
    tid = nxt_thread_tid(thread);

    if (nxt_fast_path(mp->tid == tid)) {
        return;
    }

    if (nxt_slow_path(nxt_pid != mp->pid)) {
        mp->pid = nxt_pid;
        mp->tid = tid;

        return;
    }

    nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
    nxt_abort();
}

#else

#define nxt_mp_thread_assert(mp)

#endif


void
nxt_mp_thread_adopt(nxt_mp_t *mp)
{
#if (NXT_DEBUG)
    mp->pid = nxt_pid;
    mp->tid = nxt_thread_tid(NULL);
#endif
}


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    uint32_t     pages, chunk_size_shift, page_size_shift;
    nxt_queue_t  *chunk_pages;

    chunk_size_shift = nxt_lg2(min_chunk_size);
    page_size_shift = nxt_lg2(page_size);

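    /*
     * One chunk_pages queue exists per chunk size from min_chunk_size up
     * to page_size / 2.  For example, with a 1024-byte page and a 64-byte
     * minimum chunk there are 10 - 6 = 4 queues, for chunk sizes 64, 128,
     * 256 and 512 (illustrative values, not from the original source).
     */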
    pages = page_size_shift - chunk_size_shift;

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->chunk_size_shift = chunk_size_shift;
        mp->page_size_shift = page_size_shift;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        while (pages != 0) {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            pages--;
        }

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
    }

    nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size,
                    page_alignment, page_size, min_chunk_size);

    return mp;
}


void
nxt_mp_retain(nxt_mp_t *mp)
{
    mp->retain++;

    nxt_thread_log_debug("mp %p retain: %uD", mp, mp->retain);
}


void
nxt_mp_release(nxt_mp_t *mp)
{
    mp->retain--;

    nxt_thread_log_debug("mp %p release: %uD", mp, mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);
    }
}


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_work_t         *work, *next_work;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp %p destroy", mp);

    nxt_mp_thread_assert(mp);

    while (mp->cleanup != NULL) {
        work = mp->cleanup;
        next_work = work->next;

        work->handler(work->task, work->obj, work->data);

        mp->cleanup = next_work;
    }

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be powers of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        p = nxt_mp_alloc_small(mp, size);

    } else {
        p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
    }

#else

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);

#endif

    nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p);

    return p;
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void    *p;

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        size_t  aligned_size;

        aligned_size = nxt_max(size, alignment);

        if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) {
            p = nxt_mp_alloc_small(mp, aligned_size);

        } else {
            p = nxt_mp_alloc_large(mp, alignment, size);
        }

#else

        p = nxt_mp_alloc_large(mp, alignment, size);

#endif

    } else {
        p = NULL;
    }

    nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p);

    return p;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


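/*
 * Map an allocation size to its chunk_pages queue index: sizes up to the
 * minimum chunk size map to index 0, and each doubling of the rounded-up
 * size advances the index by one.  For example, with a 64-byte minimum
 * chunk a 65..128-byte request maps to index 1 (illustrative values, not
 * from the original source).
 */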
nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}


#if !(NXT_DEBUG_MEMORY)

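/*
 * Recover a page's data address: step back from the nxt_mp_page_t element
 * to its enclosing cluster block, then offset into the cluster by the page
 * number.
 */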
nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    nxt_mp_thread_assert(mp);

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

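            /* The chunk size is (1 << index) << chunk_size_shift bytes. */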
            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove full page from the pool chunk pages list
                 * of pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp,
                    page->size << mp->chunk_size_shift, p);

    return p;
}


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    nxt_mp_thread_assert(mp);

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

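        /*
         * Drop a page from the search list once it is full or has failed
         * too many requests, so the list stays short.
         */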
        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;
    page->u.taken = 0;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    return p;
}


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

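    /*
     * Link all cluster pages into the free pages list in ascending address
     * order: the last page goes to the list head first, then each preceding
     * page is inserted before its successor.
     */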
    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

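    /*
     * For a power-of-two size an embedded descriptor would push the
     * allocation past that power of two, so the descriptor is allocated
     * separately; otherwise it is embedded just after the aligned data.
     */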
    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    nxt_debug_alloc("mp %p free(%p)", mp, p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

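        /* The pointer must fall exactly on a chunk boundary. */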
        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of pool chunk pages list
                 * of pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from pool
             * chunk pages list of pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test if all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* Free cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        p = nxt_mp_get_small(mp, &mp->nget_pages, size);

    } else {
        p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
    }

#else

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);

#endif

    nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p);

    return p;
}


void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        p = nxt_mp_get_small(mp, &mp->get_pages, size);

    } else {
        p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);
    }

#else

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);

#endif

    nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p);

    return p;
}


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


nxt_int_t
nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler,
    nxt_task_t *task, void *obj, void *data)
{
    nxt_work_t  *work;

    work = nxt_mp_get(mp, sizeof(nxt_work_t));

    if (nxt_slow_path(work == NULL)) {
        return NXT_ERROR;
    }

    work->next = mp->cleanup;
    work->handler = handler;
    work->task = task;
    work->obj = obj;
    work->data = data;

    mp->cleanup = work;

    return NXT_OK;
}
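

/*
 * Usage sketch (hypothetical handler name, not part of this file):
 *
 *     nxt_mp_cleanup(mp, my_cleanup_handler, task, obj, data);
 *
 * Registered handlers run in LIFO order from nxt_mp_destroy(), before the
 * pool memory is released.
 */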
1048