xref: /unit/src/nxt_mp.c (revision 2139:99d792169ffb)
1 
2 /*
3  * Copyright (C) Igor Sysoev
4  * Copyright (C) NGINX, Inc.
5  */
6 
7 #include <nxt_main.h>
8 
9 
10 /*
11  * A memory pool allocates memory in clusters of a specified size, aligned
12  * to page_alignment.  A cluster is divided into pages of a specified size.
13  * Page size must be a power of 2.  A page can be used entirely or can be
14  * divided into chunks of equal size.  Chunk size must be a power of 2.
15  * Non-freeable memory is also allocated from pages.  A cluster can contain
16  * a mix of pages with different chunk sizes and non-freeable pages.  Cluster
17  * size must be a multiple of page size and need not be a power of 2.
18  * Allocations larger than a page are made outside clusters.  Start addresses
19  * and sizes of the clusters and large allocations are stored in an rbtree so
20  * that they can be found on free operations.  The rbtree nodes are sorted by
21  * start address.  The rbtree is also used to destroy the memory pool.
22  */
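
/*
 * Illustrative usage of the pool API (not part of the implementation).
 * The sizes below are just one combination that passes nxt_mp_test_sizes():
 * a 4096-byte cluster, 128-byte page alignment, 1024-byte pages and 64-byte
 * minimum chunks.  A 200-byte request is then served from a 256-byte chunk.
 *
 *     nxt_mp_t  *mp;
 *     void      *p;
 *
 *     mp = nxt_mp_create(4096, 128, 1024, 64);
 *
 *     if (mp != NULL) {
 *         p = nxt_mp_alloc(mp, 200);
 *
 *         if (p != NULL) {
 *             ...
 *             nxt_mp_free(mp, p);
 *         }
 *
 *         nxt_mp_destroy(mp);
 *     }
 */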
23 
24 
25 typedef struct {
26     /*
27      * Used to link
28      *  *) pages with free chunks in pool chunk pages lists,
29      *  *) pages with free space for non-freeable allocations,
30      *  *) free pages in clusters.
31      */
32     nxt_queue_link_t     link;
33 
34     union {
35         /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
36         uint32_t         map;
37 
38         /* Size of taken non-freeable space. */
39         uint32_t         taken;
40     } u;
41 
42     /*
43      * Chunk or page size shifted by pool->chunk_size_shift.  Zero means the
44      * page is free; 0xFF marks a page with non-freeable allocations.
45      */
46     uint8_t              size;
47 
48     /* Number of free chunks of a chunked page. */
49     uint8_t              chunks;
50 
51     /*
52      * Number of allocation failures due to insufficient free space
53      * in a non-freeable page.
54      */
55     uint8_t              fails;
56 
57     /*
58      * Page number in page cluster.
59      * There can be no more than 256 pages in a cluster.
60      */
61     uint8_t              number;
62 } nxt_mp_page_t;
63 
64 
65 /*
66  * Some malloc implementations (e.g. jemalloc) allocate large enough
67  * blocks (e.g. greater than 4K) with 4K alignment.  So if a block
68  * descriptor were allocated together with the block, it would take an
69  * excessive extra 4K of memory.  So it is better to allocate the block
70  * descriptor apart.
71  */
72 
73 typedef enum {
74     /* Block describing a cluster.  It is allocated apart from the cluster. */
75     NXT_MP_CLUSTER_BLOCK = 0,
76     /*
77      * Block describing a large allocation.
78      * It is allocated apart from the allocation.
79      */
80     NXT_MP_DISCRETE_BLOCK,
81     /*
82      * Block describing a large allocation.
83      * It is allocated immediately after the allocation.
84      */
85     NXT_MP_EMBEDDED_BLOCK,
86 } nxt_mp_block_type_t;
87 
88 
89 typedef struct {
90     NXT_RBTREE_NODE      (node);
91     nxt_mp_block_type_t  type:8;
92     uint8_t              freeable;
93 
94     /* Block size must be less than 4G. */
95     uint32_t             size;
96 
97     u_char               *start;
98     nxt_mp_page_t        pages[];
99 } nxt_mp_block_t;
100 
101 
102 struct nxt_mp_s {
103     /* rbtree of nxt_mp_block_t. */
104     nxt_rbtree_t         blocks;
105 
106     uint8_t              chunk_size_shift;
107     uint8_t              page_size_shift;
108     uint32_t             page_size;
109     uint32_t             page_alignment;
110     uint32_t             cluster_size;
111     uint32_t             retain;
112 
113 #if (NXT_DEBUG)
114     nxt_pid_t            pid;
115     nxt_tid_t            tid;
116 #endif
117 
118     nxt_work_t           *cleanup;
119 
120     /* Lists of nxt_mp_page_t. */
121     nxt_queue_t          free_pages;
122     nxt_queue_t          nget_pages;
123     nxt_queue_t          get_pages;
124     nxt_queue_t          chunk_pages[];
125 };
126 
127 
128 #define nxt_mp_chunk_get_free(map)                                            \
129     (__builtin_ffs(map) - 1)
130 
131 
132 #define nxt_mp_chunk_is_free(map, chunk)                                      \
133     ((map & (1 << chunk)) != 0)
134 
135 
136 #define nxt_mp_chunk_set_busy(map, chunk)                                     \
137     map &= ~(1 << chunk)
138 
139 
140 #define nxt_mp_chunk_set_free(map, chunk)                                     \
141     map |= (1 << chunk)
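
/*
 * Example of the chunk bitmap in action (a set bit means a free chunk).
 * nxt_mp_alloc_small() initializes a fresh chunked page with 0xFFFFFFFE,
 * i.e. chunk 0 already busy:
 *
 *     map = 0xFFFFFFFE;
 *     n = nxt_mp_chunk_get_free(map);     n == 1, the lowest free chunk
 *     nxt_mp_chunk_set_busy(map, n);      map == 0xFFFFFFFC
 *     nxt_mp_chunk_set_free(map, n);      map == 0xFFFFFFFE again
 *
 * When the map returns to 0xFFFFFFFF, all chunks of the page are free and
 * nxt_mp_chunk_free() moves the page back to the pool's free pages list.
 */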
142 
143 
144 #define nxt_mp_free_junk(p, size)                                             \
145     memset((p), 0x5A, size)
146 
147 
148 #if !(NXT_DEBUG_MEMORY)
149 static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
150 static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
151 static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
152 static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
153 #endif
154 static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size,
155     nxt_bool_t freeable);
156 static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
157     nxt_rbtree_node_t *node2);
158 static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, const u_char *p);
159 static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
160     u_char *p);
161 
162 
163 #if (NXT_HAVE_BUILTIN_CLZ)
164 
165 #define nxt_lg2(value)                                                        \
166     (31 - __builtin_clz(value))
167 
168 #else
169 
170 static const int nxt_lg2_tab64[64] = {
171     63,  0, 58,  1, 59, 47, 53,  2,
172     60, 39, 48, 27, 54, 33, 42,  3,
173     61, 51, 37, 40, 49, 18, 28, 20,
174     55, 30, 34, 11, 43, 14, 22,  4,
175     62, 57, 46, 52, 38, 26, 32, 41,
176     50, 36, 17, 19, 29, 10, 13, 21,
177     56, 45, 25, 31, 35, 16,  9, 12,
178     44, 24, 15,  8, 23,  7,  6,  5
179 };
180 
181 static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;
182 
183 static int
184 nxt_lg2(uint64_t v)
185 {
186     v |= v >> 1;
187     v |= v >> 2;
188     v |= v >> 4;
189     v |= v >> 8;
190     v |= v >> 16;
191     v |= v >> 32;
192     return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
193 }
194 
195 #endif
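
/*
 * Either variant of nxt_lg2() yields the index of the highest set bit of
 * its argument, i.e. floor(log2(value)) for value > 0 (the result for 0 is
 * undefined), for example:
 *
 *     nxt_lg2(64) == 6
 *     nxt_lg2(4096) == 12
 *     nxt_lg2(4097) == 12
 *
 * nxt_mp_create() uses it to derive chunk_size_shift and page_size_shift
 * from the power-of-2 minimum chunk and page sizes.
 */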
196 
197 
198 #if (NXT_DEBUG)
199 
200 nxt_inline void
201 nxt_mp_thread_assert(nxt_mp_t *mp)
202 {
203     nxt_tid_t     tid;
204     nxt_thread_t  *thread;
205 
206     thread = nxt_thread();
207     tid = nxt_thread_tid(thread);
208 
209     if (nxt_fast_path(mp->tid == tid)) {
210         return;
211     }
212 
213     if (nxt_slow_path(nxt_pid != mp->pid)) {
214         mp->pid = nxt_pid;
215         mp->tid = tid;
216 
217         return;
218     }
219 
220     nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
221     nxt_abort();
222 }
223 
224 #else
225 
226 #define nxt_mp_thread_assert(mp)
227 
228 #endif
229 
230 
231 void
232 nxt_mp_thread_adopt(nxt_mp_t *mp)
233 {
234 #if (NXT_DEBUG)
235     mp->pid = nxt_pid;
236     mp->tid = nxt_thread_tid(nxt_thread());
237 #endif
238 }
239 
240 
241 nxt_mp_t *
242 nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
243     size_t min_chunk_size)
244 {
245     nxt_mp_t     *mp;
246     uint32_t     pages, chunk_size_shift, page_size_shift;
247     nxt_queue_t  *chunk_pages;
248 
249     chunk_size_shift = nxt_lg2(min_chunk_size);
250     page_size_shift = nxt_lg2(page_size);
251 
252     pages = page_size_shift - chunk_size_shift;
253 
254     mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));
255 
256     if (nxt_fast_path(mp != NULL)) {
257         mp->retain = 1;
258         mp->chunk_size_shift = chunk_size_shift;
259         mp->page_size_shift = page_size_shift;
260         mp->page_size = page_size;
261         mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
262         mp->cluster_size = cluster_size;
263 
264         chunk_pages = mp->chunk_pages;
265 
266         while (pages != 0) {
267             nxt_queue_init(chunk_pages);
268             chunk_pages++;
269             pages--;
270         }
271 
272         nxt_queue_init(&mp->free_pages);
273         nxt_queue_init(&mp->nget_pages);
274         nxt_queue_init(&mp->get_pages);
275 
276         nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
277     }
278 
279     nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size,
280                     page_alignment, page_size, min_chunk_size);
281 
282     return mp;
283 }
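
/*
 * A note on the chunk_pages[] sizing above: the pool keeps one queue per
 * chunk size from min_chunk_size up to page_size / 2.  For example, with
 * page_size 1024 (shift 10) and min_chunk_size 64 (shift 6) there are
 * 10 - 6 = 4 queues, for pages split into 64-, 128-, 256- and 512-byte
 * chunks; small allocations larger than page_size / 2 take a whole page.
 */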
284 
285 
286 void
287 nxt_mp_retain(nxt_mp_t *mp)
288 {
289     mp->retain++;
290 
291     nxt_thread_log_debug("mp %p retain: %uD", mp, mp->retain);
292 }
293 
294 
295 void
296 nxt_mp_release(nxt_mp_t *mp)
297 {
298     mp->retain--;
299 
300     nxt_thread_log_debug("mp %p release: %uD", mp, mp->retain);
301 
302     if (mp->retain == 0) {
303         nxt_mp_destroy(mp);
304     }
305 }
306 
307 
308 void
309 nxt_mp_destroy(nxt_mp_t *mp)
310 {
311     void               *p;
312     nxt_work_t         *work, *next_work;
313     nxt_mp_block_t     *block;
314     nxt_rbtree_node_t  *node, *next;
315 
316     nxt_debug_alloc("mp %p destroy", mp);
317 
318     nxt_mp_thread_assert(mp);
319 
320     while (mp->cleanup != NULL) {
321         work = mp->cleanup;
322         next_work = work->next;
323 
324         work->handler(work->task, work->obj, work->data);
325 
326         mp->cleanup = next_work;
327     }
328 
329     next = nxt_rbtree_root(&mp->blocks);
330 
331     while (next != nxt_rbtree_sentinel(&mp->blocks)) {
332 
333         node = nxt_rbtree_destroy_next(&mp->blocks, &next);
334         block = (nxt_mp_block_t *) node;
335 
336         p = block->start;
337 
338         if (block->type != NXT_MP_EMBEDDED_BLOCK) {
339             nxt_free(block);
340         }
341 
342         nxt_free(p);
343     }
344 
345     nxt_free(mp);
346 }
347 
348 
349 nxt_bool_t
350 nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
351     size_t min_chunk_size)
352 {
353     nxt_bool_t  valid;
354 
355     /* Alignment and sizes must be powers of 2. */
356 
357     valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
358                            && nxt_is_power_of_two(page_size)
359                            && nxt_is_power_of_two(min_chunk_size)));
360     if (!valid) {
361         return 0;
362     }
363 
364     page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
365 
366     valid = nxt_expect(1, (page_size >= 64
367                            && page_size >= page_alignment
368                            && page_size >= min_chunk_size
369                            && min_chunk_size * 32 >= page_size
370                            && cluster_size >= page_size
371                            && cluster_size / page_size <= 256
372                            && cluster_size % page_size == 0));
373     if (!valid) {
374         return 0;
375     }
376 
377     return 1;
378 }
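
/*
 * For example, cluster_size 4096, page_alignment 128, page_size 1024 and
 * min_chunk_size 64 satisfy all of the checks above.  The check
 * "min_chunk_size * 32 >= page_size" guarantees at most 32 chunks per page,
 * so a chunk map fits into the 32-bit u.map field; the check
 * "cluster_size / page_size <= 256" keeps the page number within uint8_t.
 */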
379 
380 
381 nxt_bool_t
382 nxt_mp_is_empty(nxt_mp_t *mp)
383 {
384     return (nxt_rbtree_is_empty(&mp->blocks)
385             && nxt_queue_is_empty(&mp->free_pages));
386 }
387 
388 
389 void *
390 nxt_mp_alloc(nxt_mp_t *mp, size_t size)
391 {
392     void  *p;
393 
394 #if !(NXT_DEBUG_MEMORY)
395 
396     if (size <= mp->page_size) {
397         p = nxt_mp_alloc_small(mp, size);
398 
399     } else {
400         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 1);
401     }
402 
403 #else
404 
405     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 1);
406 
407 #endif
408 
409     nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p);
410 
411     return p;
412 }
413 
414 
415 void *
416 nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
417 {
418     void  *p;
419 
420     p = nxt_mp_alloc(mp, size);
421 
422     if (nxt_fast_path(p != NULL)) {
423         memset(p, 0, size);
424     }
425 
426     return p;
427 }
428 
429 
430 void *
431 nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
432 {
433     void    *p;
434 
435     /* Alignment must be a power of 2. */
436 
437     if (nxt_fast_path(nxt_is_power_of_two(alignment))) {
438 
439 #if !(NXT_DEBUG_MEMORY)
440 
441         size_t  aligned_size;
442 
443         aligned_size = nxt_max(size, alignment);
444 
445         if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) {
446             p = nxt_mp_alloc_small(mp, aligned_size);
447 
448         } else {
449             p = nxt_mp_alloc_large(mp, alignment, size, 1);
450         }
451 
452 #else
453 
454         p = nxt_mp_alloc_large(mp, alignment, size, 1);
455 
456 #endif
457 
458     } else {
459         p = NULL;
460     }
461 
462     nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p);
463 
464     return p;
465 }
466 
467 
468 void *
469 nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
470 {
471     void  *p;
472 
473     p = nxt_mp_align(mp, alignment, size);
474 
475     if (nxt_fast_path(p != NULL)) {
476         memset(p, 0, size);
477     }
478 
479     return p;
480 }
481 
482 
483 nxt_inline nxt_uint_t
484 nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
485 {
486     nxt_int_t  n, index;
487 
488     index = 0;
489 
490     if (size > 1) {
491         n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;
492 
493         if (n > 0) {
494             index = n;
495         }
496     }
497 
498     return index;
499 }
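
/*
 * The index selects the smallest chunk size that fits the request: index i
 * corresponds to chunks of 1 << (i + mp->chunk_size_shift) bytes.  With
 * chunk_size_shift 6 (64-byte minimum chunks):
 *
 *     size 1 .. 64     ->  index 0  (64-byte chunks)
 *     size 65 .. 128   ->  index 1  (128-byte chunks)
 *     size 129 .. 256  ->  index 2  (256-byte chunks)
 */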
500 
501 
502 #if !(NXT_DEBUG_MEMORY)
503 
504 nxt_inline u_char *
505 nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
506 {
507     size_t          page_offset;
508     nxt_mp_block_t  *block;
509 
510     page_offset = page->number * sizeof(nxt_mp_page_t)
511                   + offsetof(nxt_mp_block_t, pages);
512 
513     block = (nxt_mp_block_t *) ((u_char *) page - page_offset);
514 
515     return block->start + (page->number << mp->page_size_shift);
516 }
517 
518 
519 static void *
520 nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
521 {
522     u_char            *p;
523     nxt_uint_t        n, index;
524     nxt_queue_t       *chunk_pages;
525     nxt_mp_page_t     *page;
526     nxt_queue_link_t  *link;
527 
528     nxt_mp_thread_assert(mp);
529 
530     p = NULL;
531 
532     if (size <= mp->page_size / 2) {
533 
534         index = nxt_mp_chunk_pages_index(mp, size);
535         chunk_pages = &mp->chunk_pages[index];
536 
537         if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {
538 
539             link = nxt_queue_first(chunk_pages);
540             page = nxt_queue_link_data(link, nxt_mp_page_t, link);
541 
542             p = nxt_mp_page_addr(mp, page);
543 
544             n = nxt_mp_chunk_get_free(page->u.map);
545             nxt_mp_chunk_set_busy(page->u.map, n);
546 
547             p += ((n << index) << mp->chunk_size_shift);
548 
549             page->chunks--;
550 
551             if (page->chunks == 0) {
552                 /*
553                  * Remove full page from the pool chunk pages list
554                  * of pages with free chunks.
555                  */
556                 nxt_queue_remove(&page->link);
557             }
558 
559         } else {
560             page = nxt_mp_alloc_page(mp);
561 
562             if (nxt_fast_path(page != NULL)) {
563                 page->size = (1 << index);
564 
565                 n = mp->page_size_shift - (index + mp->chunk_size_shift);
566                 page->chunks = (1 << n) - 1;
567 
568                 nxt_queue_insert_head(chunk_pages, &page->link);
569 
570                 /* Mark the first chunk as busy. */
571                 page->u.map = 0xFFFFFFFE;
572 
573                 p = nxt_mp_page_addr(mp, page);
574             }
575         }
576 
577     } else {
578         page = nxt_mp_alloc_page(mp);
579 
580         if (nxt_fast_path(page != NULL)) {
581             page->size = mp->page_size >> mp->chunk_size_shift;
582 
583             p = nxt_mp_page_addr(mp, page);
584         }
585     }
586 
587     nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp,
588                     page->size << mp->chunk_size_shift, p);
589 
590     return p;
591 }
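
/*
 * The chunk address arithmetic above, "p += (n << index) << chunk_size_shift",
 * places chunk n at offset n * chunk_size within the page, because the chunk
 * size of a page with the given index is 1 << (index + chunk_size_shift).
 * For example, with chunk_size_shift 6 and index 1 (128-byte chunks),
 * chunk 2 starts (2 << 1) << 6 = 256 bytes from the page start.
 */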
592 
593 
594 static void *
595 nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
596 {
597     u_char            *p;
598     uint32_t          available;
599     nxt_mp_page_t     *page;
600     nxt_queue_link_t  *link, *next;
601 
602     nxt_mp_thread_assert(mp);
603 
604     for (link = nxt_queue_first(pages);
605          link != nxt_queue_tail(pages);
606          link = next)
607     {
608         next = nxt_queue_next(link);
609         page = nxt_queue_link_data(link, nxt_mp_page_t, link);
610 
611         available = mp->page_size - page->u.taken;
612 
613         if (size <= available) {
614             goto found;
615         }
616 
617         if (available == 0 || page->fails++ > 100) {
618             nxt_queue_remove(link);
619         }
620     }
621 
622     page = nxt_mp_alloc_page(mp);
623 
624     if (nxt_slow_path(page == NULL)) {
625         return page;
626     }
627 
628     nxt_queue_insert_head(pages, &page->link);
629 
630     page->size = 0xFF;
631     page->u.taken = 0;
632 
633 found:
634 
635     p = nxt_mp_page_addr(mp, page);
636 
637     p += page->u.taken;
638     page->u.taken += size;
639 
640     return p;
641 }
642 
643 
644 static nxt_mp_page_t *
645 nxt_mp_alloc_page(nxt_mp_t *mp)
646 {
647     nxt_mp_page_t     *page;
648     nxt_mp_block_t    *cluster;
649     nxt_queue_link_t  *link;
650 
651     if (nxt_queue_is_empty(&mp->free_pages)) {
652         cluster = nxt_mp_alloc_cluster(mp);
653         if (nxt_slow_path(cluster == NULL)) {
654             return NULL;
655         }
656     }
657 
658     link = nxt_queue_first(&mp->free_pages);
659     nxt_queue_remove(link);
660 
661     page = nxt_queue_link_data(link, nxt_mp_page_t, link);
662 
663     return page;
664 }
665 
666 
667 static nxt_mp_block_t *
668 nxt_mp_alloc_cluster(nxt_mp_t *mp)
669 {
670     nxt_uint_t      n;
671     nxt_mp_block_t  *cluster;
672 
673     n = mp->cluster_size >> mp->page_size_shift;
674 
675     cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));
676 
677     if (nxt_slow_path(cluster == NULL)) {
678         return NULL;
679     }
680 
681     /* NXT_MP_CLUSTER_BLOCK type is zero. */
682 
683     cluster->size = mp->cluster_size;
684 
685     cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
686     if (nxt_slow_path(cluster->start == NULL)) {
687         nxt_free(cluster);
688         return NULL;
689     }
690 
691     n--;
692     cluster->pages[n].number = n;
693     nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);
694 
695     while (n != 0) {
696         n--;
697         cluster->pages[n].number = n;
698         nxt_queue_insert_before(&cluster->pages[n + 1].link,
699                                 &cluster->pages[n].link);
700     }
701 
702     nxt_rbtree_insert(&mp->blocks, &cluster->node);
703 
704     return cluster;
705 }
706 
707 #endif
708 
709 
710 static void *
711 nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size,
712     nxt_bool_t freeable)
713 {
714     u_char          *p;
715     size_t          aligned_size;
716     uint8_t         type;
717     nxt_mp_block_t  *block;
718 
719     nxt_mp_thread_assert(mp);
720 
721     /* Allocation must be less than 4G. */
722     if (nxt_slow_path(size >= 0xFFFFFFFF)) {
723         return NULL;
724     }
725 
726     if (nxt_is_power_of_two(size)) {
727         block = nxt_malloc(sizeof(nxt_mp_block_t));
728         if (nxt_slow_path(block == NULL)) {
729             return NULL;
730         }
731 
732         p = nxt_memalign(alignment, size);
733         if (nxt_slow_path(p == NULL)) {
734             nxt_free(block);
735             return NULL;
736         }
737 
738         type = NXT_MP_DISCRETE_BLOCK;
739 
740     } else {
741         aligned_size = nxt_align_size(size, sizeof(uintptr_t));
742 
743         p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
744         if (nxt_slow_path(p == NULL)) {
745             return NULL;
746         }
747 
748         block = (nxt_mp_block_t *) (p + aligned_size);
749         type = NXT_MP_EMBEDDED_BLOCK;
750     }
751 
752     block->type = type;
753     block->freeable = freeable;
754     block->size = size;
755     block->start = p;
756 
757     nxt_rbtree_insert(&mp->blocks, &block->node);
758 
759     return p;
760 }
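
/*
 * Resulting layouts (see the comment above nxt_mp_block_type_t).  For a
 * power-of-2 size, e.g. 4096 bytes, the descriptor is a separately
 * malloc()ed nxt_mp_block_t (NXT_MP_DISCRETE_BLOCK), so the aligned
 * allocation itself stays exactly one power of 2.  For other sizes,
 * e.g. 3000 bytes, the size is rounded up to a uintptr_t boundary and the
 * descriptor is embedded right after the data (NXT_MP_EMBEDDED_BLOCK):
 *
 *     p                         p + aligned_size
 *     |<------ user data ------>|<-- nxt_mp_block_t -->|
 */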
761 
762 
763 static intptr_t
764 nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
765 {
766     nxt_mp_block_t  *block1, *block2;
767 
768     block1 = (nxt_mp_block_t *) node1;
769     block2 = (nxt_mp_block_t *) node2;
770 
771     /*
772      * Shifting is necessary to prevent overflow of intptr_t when block1->start
773      * is much greater than block2->start or vice versa.
774      *
775      * It is safe to drop one bit since there cannot be adjacent addresses
776      * because of alignments and allocation sizes.  Effectively this reduces
777      * the absolute values to fit into the magnitude of intptr_t.
778      */
779     return ((uintptr_t) block1->start >> 1) - ((uintptr_t) block2->start >> 1);
780 }
781 
782 
783 void
784 nxt_mp_free(nxt_mp_t *mp, void *p)
785 {
786     const char      *err;
787     nxt_mp_block_t  *block;
788 
789     nxt_mp_thread_assert(mp);
790 
791     nxt_debug_alloc("mp %p free(%p)", mp, p);
792 
793     block = nxt_mp_find_block(&mp->blocks, p);
794 
795     if (nxt_fast_path(block != NULL)) {
796 
797         if (block->type == NXT_MP_CLUSTER_BLOCK) {
798             err = nxt_mp_chunk_free(mp, block, p);
799 
800             if (nxt_fast_path(err == NULL)) {
801                 return;
802             }
803 
804         } else if (nxt_fast_path(p == block->start)) {
805 
806             if (block->freeable) {
807                 nxt_rbtree_delete(&mp->blocks, &block->node);
808 
809                 if (block->type == NXT_MP_DISCRETE_BLOCK) {
810                     nxt_free(block);
811                 }
812 
813                 nxt_free(p);
814 
815                 return;
816             }
817 
818             err = "freed pointer points to non-freeable block: %p";
819 
820         } else {
821             err = "freed pointer points to middle of block: %p";
822         }
823 
824     } else {
825         err = "freed pointer is out of pool: %p";
826     }
827 
828     nxt_thread_log_alert(err, p);
829 }
830 
831 
832 static nxt_mp_block_t *
833 nxt_mp_find_block(nxt_rbtree_t *tree, const u_char *p)
834 {
835     nxt_mp_block_t     *block;
836     nxt_rbtree_node_t  *node, *sentinel;
837 
838     node = nxt_rbtree_root(tree);
839     sentinel = nxt_rbtree_sentinel(tree);
840 
841     while (node != sentinel) {
842 
843         block = (nxt_mp_block_t *) node;
844 
845         if (p < block->start) {
846             node = node->left;
847 
848         } else if (p >= block->start + block->size) {
849             node = node->right;
850 
851         } else {
852             return block;
853         }
854     }
855 
856     return NULL;
857 }
858 
859 
860 static const char *
861 nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
862 {
863     u_char         *start;
864     uintptr_t      offset;
865     nxt_uint_t     n, size, chunk;
866     nxt_queue_t    *chunk_pages;
867     nxt_mp_page_t  *page;
868 
869     n = (p - cluster->start) >> mp->page_size_shift;
870     start = cluster->start + (n << mp->page_size_shift);
871 
872     page = &cluster->pages[n];
873 
874     if (nxt_slow_path(page->size == 0)) {
875         return "freed pointer points to already free page: %p";
876     }
877 
878     if (nxt_slow_path(page->size == 0xFF)) {
879         return "freed pointer points to non-freeable page: %p";
880     }
881 
882     size = page->size << mp->chunk_size_shift;
883 
884     if (size != mp->page_size) {
885 
886         offset = (uintptr_t) (p - start) & (mp->page_size - 1);
887         chunk = offset / size;
888 
889         if (nxt_slow_path(offset != chunk * size)) {
890             return "freed pointer points to wrong chunk: %p";
891         }
892 
893         if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
894             return "freed pointer points to already free chunk: %p";
895         }
896 
897         nxt_mp_chunk_set_free(page->u.map, chunk);
898 
899         if (page->u.map != 0xFFFFFFFF) {
900             page->chunks++;
901 
902             if (page->chunks == 1) {
903                 /*
904                  * Add the page to the head of pool chunk pages list
905                  * of pages with free chunks.
906                  */
907                 n = nxt_mp_chunk_pages_index(mp, size);
908                 chunk_pages = &mp->chunk_pages[n];
909 
910                 nxt_queue_insert_head(chunk_pages, &page->link);
911             }
912 
913             nxt_mp_free_junk(p, size);
914 
915             return NULL;
916 
917         } else {
918             /*
919              * All chunks are free, remove the page from pool
920              * chunk pages list of pages with free chunks.
921              */
922             nxt_queue_remove(&page->link);
923         }
924 
925     } else if (nxt_slow_path(p != start)) {
926         return "invalid pointer to chunk: %p";
927     }
928 
929     /* Add the free page to the pool's free pages list. */
930 
931     page->size = 0;
932     nxt_queue_insert_head(&mp->free_pages, &page->link);
933 
934     nxt_mp_free_junk(p, size);
935 
936     /* Test if all pages in the cluster are free. */
937 
938     n = mp->cluster_size >> mp->page_size_shift;
939     page = cluster->pages;
940 
941     do {
942         if (page->size != 0) {
943             return NULL;
944         }
945 
946         page++;
947         n--;
948     } while (n != 0);
949 
950     /* Free cluster. */
951 
952     n = mp->cluster_size >> mp->page_size_shift;
953     page = cluster->pages;
954 
955     do {
956         nxt_queue_remove(&page->link);
957         page++;
958         n--;
959     } while (n != 0);
960 
961     nxt_rbtree_delete(&mp->blocks, &cluster->node);
962 
963     p = cluster->start;
964 
965     nxt_free(cluster);
966     nxt_free(p);
967 
968     return NULL;
969 }
970 
971 
972 void *
973 nxt_mp_nget(nxt_mp_t *mp, size_t size)
974 {
975     void  *p;
976 
977 #if !(NXT_DEBUG_MEMORY)
978 
979     if (size <= mp->page_size) {
980         p = nxt_mp_get_small(mp, &mp->nget_pages, size);
981 
982     } else {
983         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);
984     }
985 
986 #else
987 
988     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);
989 
990 #endif
991 
992     nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p);
993 
994     return p;
995 }
996 
997 
998 void *
999 nxt_mp_get(nxt_mp_t *mp, size_t size)
1000 {
1001     void  *p;
1002 
1003 #if !(NXT_DEBUG_MEMORY)
1004 
1005     if (size <= mp->page_size) {
1006         size = nxt_max(size, NXT_MAX_ALIGNMENT);
1007         p = nxt_mp_get_small(mp, &mp->get_pages, size);
1008 
1009     } else {
1010         p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);
1011     }
1012 
1013 #else
1014 
1015     p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);
1016 
1017 #endif
1018 
1019     nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p);
1020 
1021     return p;
1022 }
1023 
1024 
1025 void *
1026 nxt_mp_zget(nxt_mp_t *mp, size_t size)
1027 {
1028     void  *p;
1029 
1030     p = nxt_mp_get(mp, size);
1031 
1032     if (nxt_fast_path(p != NULL)) {
1033         memset(p, 0, size);
1034     }
1035 
1036     return p;
1037 }
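
/*
 * Unlike nxt_mp_alloc(), memory obtained with nxt_mp_get(), nxt_mp_nget()
 * and nxt_mp_zget() is not individually freeable: it is packed into
 * non-freeable pages (or into large blocks with freeable set to 0) and is
 * released only when the whole pool is destroyed.  nxt_mp_nget() packs
 * bytes tightly, while nxt_mp_get() never hands out less than
 * NXT_MAX_ALIGNMENT bytes.  A sketch with hypothetical variables:
 *
 *     name = nxt_mp_nget(mp, len);              tightly packed bytes
 *     obj = nxt_mp_zget(mp, sizeof(*obj));      zeroed allocation
 */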
1038 
1039 
1040 nxt_int_t
1041 nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler,
1042     nxt_task_t *task, void *obj, void *data)
1043 {
1044     nxt_work_t  *work;
1045 
1046     work = nxt_mp_get(mp, sizeof(nxt_work_t));
1047 
1048     if (nxt_slow_path(work == NULL)) {
1049         return NXT_ERROR;
1050     }
1051 
1052     work->next = mp->cleanup;
1053     work->handler = handler;
1054     work->task = task;
1055     work->obj = obj;
1056     work->data = data;
1057 
1058     mp->cleanup = work;
1059 
1060     return NXT_OK;
1061 }
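
/*
 * Usage sketch for nxt_mp_cleanup(); the handler below is hypothetical.
 * Handlers are pushed onto the head of the cleanup list, so nxt_mp_destroy()
 * runs them in reverse registration order, before any pool memory is freed:
 *
 *     static void
 *     my_cleanup(nxt_task_t *task, void *obj, void *data)
 *     {
 *         ...release the resources referenced by obj and data...
 *     }
 *
 *     if (nxt_mp_cleanup(mp, my_cleanup, task, obj, data) != NXT_OK) {
 *         ...handle the allocation failure...
 *     }
 */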
1062 
1063 
1064 void *
1065 nxt_mp_lvlhsh_alloc(void *pool, size_t size)
1066 {
1067     return nxt_mp_align(pool, size, size);
1068 }
1069 
1070 
1071 void
1072 nxt_mp_lvlhsh_free(void *pool, void *p)
1073 {
1074     nxt_mp_free(pool, p);
1075 }
1076