xref: /unit/src/nxt_mp.c (revision 145:3578a7b7ace4)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of specified size, aligned
 * to page_alignment.  A cluster is divided into pages of specified size.
 * Page size must be a power of 2.  A page can be used entirely or can be
 * divided into chunks of equal size.  Chunk size must be a power of 2.
 * Non-freeable memory is also allocated from pages.  A cluster can contain
 * a mix of pages with different chunk sizes and non-freeable pages.
 * Cluster size must be a multiple of page size and may not be a power
 * of 2.  Allocations greater than a page are allocated outside clusters.
 * Start addresses and sizes of the clusters and large allocations are
 * stored in an rbtree of blocks so they can be found on free operations.
 * The rbtree nodes are sorted by start address.  The rbtree is also used
 * to destroy a memory pool.
 */
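

/*
 * Typical pool lifecycle (an illustrative sketch, not part of this
 * file; the size arguments are hypothetical but pass the checks in
 * nxt_mp_test_sizes()).  nxt_mp_alloc() serves the 200-byte request
 * from a chunked page, nxt_mp_free() works for freeable allocations
 * only, and nxt_mp_destroy() releases all clusters at once:
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 64);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 200);
 *
 *         if (p != NULL) {
 *             nxt_mp_free(mp, p);
 *         }
 *
 *         nxt_mp_destroy(mp);
 *     }
 */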


typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of chunks or page shifted right by pool->chunk_size_shift.
     * Zero means that the page is free, 0xFF means a page with
     * non-freeable allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of failed allocations due to insufficient free space
     * in a non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;
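

/*
 * Worked example of the "size" encoding (values are hypothetical):
 * with pool->chunk_size_shift == 6 (64-byte minimum chunks),
 *
 *     size == 1     chunked page with 64-byte chunks (1 << 6)
 *     size == 4     chunked page with 256-byte chunks (4 << 6)
 *     size == 16    a 1024-byte page used entirely (16 << 6)
 *     size == 0     free page
 *     size == 0xFF  page with non-freeable allocations
 */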


/*
 * Some malloc implementations (e.g. jemalloc) allocate large enough
 * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
 * descriptor were allocated together with the block, it would take
 * an excessive 4K of memory.  It is therefore better to allocate
 * the block descriptor separately.
 */

typedef enum {
    /* Block of a cluster.  The block is allocated apart from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of a large allocation.
     * The block is allocated apart from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of a large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;
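

/*
 * Memory layout sketch of the two large-allocation block types
 * (alignment padding not shown):
 *
 *     NXT_MP_DISCRETE_BLOCK:   [ allocation ]    [ nxt_mp_block_t ]
 *                              two separately allocated areas
 *
 *     NXT_MP_EMBEDDED_BLOCK:   [ allocation | nxt_mp_block_t ]
 *                              one area; the descriptor sits at
 *                              start + aligned_size
 */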


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

#if (NXT_DEBUG)
    nxt_pid_t            pid;
    nxt_tid_t            tid;
#endif

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[];
};


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    (((map) & (1u << (chunk))) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    (map) &= ~(1u << (chunk))


#define nxt_mp_chunk_set_free(map, chunk)                                     \
    (map) |= (1u << (chunk))


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, (size))
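

/*
 * Chunk bitmap worked example: a set bit means the chunk is free.
 * A freshly chunked page sets u.map = 0xFFFFFFFE, marking chunk 0 busy:
 *
 *     nxt_mp_chunk_get_free(0xFFFFFFFE) == 1    the lowest set bit
 *     nxt_mp_chunk_is_free(0xFFFFFFFE, 0) == 0  chunk 0 is busy
 *     nxt_mp_chunk_set_busy(map, 1)             map becomes 0xFFFFFFFC
 */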


#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


#if (NXT_HAVE_BUILTIN_CLZ)

#define nxt_lg2(value)                                                        \
    (31 - __builtin_clz(value))

#else

static const int nxt_lg2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5
};

static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;

static int
nxt_lg2(uint64_t v)
{
    /* Smear the highest set bit into all lower positions. */
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;

    /*
     * "v - (v >> 1)" now isolates the highest set bit; multiplying by
     * a de Bruijn-style constant maps each power of 2 to a unique
     * 6-bit index into the lookup table.
     */
    return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
}

#endif
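

/*
 * Example (hypothetical values): nxt_lg2(1024) == 10, and a
 * non-power-of-2 argument yields floor(log2), e.g. nxt_lg2(1500) == 10,
 * since smearing turns 1500 into 2047 and 2047 - 1023 == 1 << 10.
 */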


#if (NXT_DEBUG)

nxt_inline void
nxt_mp_thread_assert(nxt_mp_t *mp)
{
    nxt_tid_t     tid;
    nxt_thread_t  *thread;

    thread = nxt_thread();
    tid = nxt_thread_tid(thread);

    if (nxt_fast_path(mp->tid == tid)) {
        return;
    }

    /* The pool belongs to another process, e.g. after fork(): adopt it. */
    if (nxt_slow_path(nxt_pid != mp->pid)) {
        mp->pid = nxt_pid;
        mp->tid = tid;

        return;
    }

    nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
    nxt_abort();
}

#else

#define nxt_mp_thread_assert(mp)

#endif


void
nxt_mp_thread_adopt(nxt_mp_t *mp)
{
#if (NXT_DEBUG)
    mp->pid = nxt_pid;
    mp->tid = nxt_thread_tid(NULL);
#endif
}


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    uint32_t     pages, chunk_size_shift, page_size_shift;
    nxt_queue_t  *chunk_pages;

    chunk_size_shift = nxt_lg2(min_chunk_size);
    page_size_shift = nxt_lg2(page_size);

    pages = page_size_shift - chunk_size_shift;

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->chunk_size_shift = chunk_size_shift;
        mp->page_size_shift = page_size_shift;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        while (pages != 0) {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            pages--;
        }

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
    }

    nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size,
                    page_alignment, page_size, min_chunk_size);

    return mp;
}
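

/*
 * Sizing worked example (hypothetical values): with page_size == 1024
 * (shift 10) and min_chunk_size == 64 (shift 6), "pages" is 4, so the
 * pool gets four chunk_pages queues for pages chunked into 64-, 128-,
 * 256- and 512-byte chunks; requests larger than 512 bytes take a
 * whole page.
 */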


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp %p destroy", mp);

    nxt_mp_thread_assert(mp);

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        /* An embedded block descriptor is freed together with "p" below. */
        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be a power of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    /*
     * A page may hold at most 32 chunks (the bitmap is 32 bits wide)
     * and a cluster at most 256 pages (page numbers are 8 bits wide).
     */
    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        p = nxt_mp_alloc_small(mp, size);

    } else {

#endif

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);

#if !(NXT_DEBUG_MEMORY)

    }

#endif

    nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p);

    return p;
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void    *p;
    size_t  aligned_size;

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        aligned_size = nxt_max(size, alignment);

        if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) {

            p = nxt_mp_alloc_small(mp, aligned_size);

        } else {

#endif

        p = nxt_mp_alloc_large(mp, alignment, size);

#if !(NXT_DEBUG_MEMORY)

        }

#endif

    } else {
        p = NULL;
    }

    nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p);

    return p;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


/*
 * Return the index of the chunk_pages queue whose chunk size is the
 * smallest power of 2 not less than "size".
 */
nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}
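

/*
 * Worked example (hypothetical values): with chunk_size_shift == 6,
 * sizes 1..64 map to index 0 (64-byte chunks), 65..128 to index 1
 * (128-byte chunks), and 129..256 to index 2, since
 * nxt_lg2(size - 1) + 1 is the exponent of the next power of 2.
 */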


#if !(NXT_DEBUG_MEMORY)

/*
 * Compute the page start address from its descriptor: the descriptor's
 * position in the cluster block's pages[] array gives the page number,
 * which is an offset into the cluster's memory.
 */
nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    nxt_mp_thread_assert(mp);

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove full page from the pool chunk pages list
                 * of pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    /* "page" may be NULL on allocation failure, so log only on success. */
    if (nxt_fast_path(p != NULL)) {
        nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp,
                        page->size << mp->chunk_size_shift, p);
    }

    return p;
}
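

/*
 * Chunk address arithmetic worked example (hypothetical values): with
 * chunk_size_shift == 6 and index == 2 (256-byte chunks), free chunk
 * n == 3 yields the offset (3 << 2) << 6 == 768, i.e. the fourth
 * 256-byte chunk of the page.
 */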


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    nxt_mp_thread_assert(mp);

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

        /*
         * Stop considering a page that is full or that has failed
         * to satisfy too many requests.
         */
        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;
    page->u.taken = 0;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    return p;
}


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

    /*
     * Link the cluster pages into the pool free pages list in ascending
     * order, so lower-address pages are handed out first.
     */
    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

    if (nxt_is_power_of_two(size)) {
        /*
         * Allocate the descriptor separately to avoid inflating a
         * power of 2 sized block (see the comment above
         * nxt_mp_block_type_t).
         */
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        /* The descriptor is embedded just after the aligned allocation. */
        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    return (uintptr_t) block1->start - (uintptr_t) block2->start;
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_thread_t    *thread;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    nxt_debug_alloc("mp %p free(%p)", mp, p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {
            nxt_rbtree_delete(&mp->blocks, &block->node);

            if (block->type == NXT_MP_DISCRETE_BLOCK) {
                nxt_free(block);
            }

            nxt_free(p);

            return;

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    thread = nxt_thread();

    nxt_log(thread->task, NXT_LOG_CRIT, err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of pool chunk pages list
                 * of pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from pool
             * chunk pages list of pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test if all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* All pages are free: unlink them and free the whole cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_retain(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        mp->retain++;
        nxt_debug_alloc("mp %p retain: %uD", mp, mp->retain);
    }

    return p;
}


uint32_t
nxt_mp_release(nxt_mp_t *mp, void *p)
{
    nxt_mp_free(mp, p);

    mp->retain--;

    nxt_debug_alloc("mp %p release: %uD", mp, mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);

        return 0;
    }

    return mp->retain;
}
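

/*
 * Reference-counting sketch (illustrative): nxt_mp_create() sets
 * retain to 1, each successful nxt_mp_retain() increments it, and
 * each nxt_mp_release() frees its pointer, decrements the count and
 * destroys the pool once it drops to zero:
 *
 *     p = nxt_mp_retain(mp, 100);      retain: 1 -> 2
 *     ...
 *     nxt_mp_release(mp, p);           retain: 2 -> 1; at zero the
 *                                      pool would be destroyed
 */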


/* Allocate non-freeable memory without any alignment guarantee. */

void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        p = nxt_mp_get_small(mp, &mp->nget_pages, size);

    } else {

#endif

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);

#if !(NXT_DEBUG_MEMORY)

    }

#endif

    nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p);

    return p;
}


/*
 * Like nxt_mp_nget(), but a small request is widened to at least
 * NXT_MAX_ALIGNMENT bytes.
 */

void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        p = nxt_mp_get_small(mp, &mp->get_pages, size);

    } else {

#endif

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size);

#if !(NXT_DEBUG_MEMORY)

    }

#endif

    nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p);

    return p;
}
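

/*
 * A sketch of the difference between the nget and get families
 * (sizes hypothetical, NXT_MAX_ALIGNMENT assumed to be 16): after
 * nxt_mp_nget(mp, 5) the next allocation from the same non-freeable
 * page starts just 5 bytes in, whereas nxt_mp_get(mp, 5) consumes
 * 16 bytes, so consecutive get allocations of multiple-of-16 sizes
 * stay aligned.
 */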


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}
1033