Lines Matching refs:mp (nxt_mp memory pool allocator)

149 static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
150 static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
151 static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
152 static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
154 static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size,
159 static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
201 nxt_mp_thread_assert(nxt_mp_t *mp) in nxt_mp_thread_assert() argument
209 if (nxt_fast_path(mp->tid == tid)) { in nxt_mp_thread_assert()
213 if (nxt_slow_path(nxt_pid != mp->pid)) { in nxt_mp_thread_assert()
214 mp->pid = nxt_pid; in nxt_mp_thread_assert()
215 mp->tid = tid; in nxt_mp_thread_assert()
220 nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid); in nxt_mp_thread_assert()
226 #define nxt_mp_thread_assert(mp) argument
232 nxt_mp_thread_adopt(nxt_mp_t *mp) in nxt_mp_thread_adopt() argument
235 mp->pid = nxt_pid; in nxt_mp_thread_adopt()
236 mp->tid = nxt_thread_tid(nxt_thread()); in nxt_mp_thread_adopt()
245 nxt_mp_t *mp; in nxt_mp_create() local
254 mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t)); in nxt_mp_create()
256 if (nxt_fast_path(mp != NULL)) { in nxt_mp_create()
257 mp->retain = 1; in nxt_mp_create()
258 mp->chunk_size_shift = chunk_size_shift; in nxt_mp_create()
259 mp->page_size_shift = page_size_shift; in nxt_mp_create()
260 mp->page_size = page_size; in nxt_mp_create()
261 mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT); in nxt_mp_create()
262 mp->cluster_size = cluster_size; in nxt_mp_create()
264 chunk_pages = mp->chunk_pages; in nxt_mp_create()
272 nxt_queue_init(&mp->free_pages); in nxt_mp_create()
273 nxt_queue_init(&mp->nget_pages); in nxt_mp_create()
274 nxt_queue_init(&mp->get_pages); in nxt_mp_create()
276 nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare); in nxt_mp_create()
279 nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size, in nxt_mp_create()
282 return mp; in nxt_mp_create()
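
The constructor zero-allocates the pool header together with an array of chunk-page queues (line 254), records the pool geometry (lines 257-262) and initialises the page queues and the block rbtree (lines 272-276). A minimal usage sketch follows; the argument order (cluster size, page alignment, page size, minimum chunk size) is inferred from the debug line 279, and the concrete values are only illustrative:

    nxt_mp_t  *mp;

    /* assumed argument order: cluster_size, page_alignment, page_size,
     * min_chunk_size; 4096/128/1024/64 are illustrative values */
    mp = nxt_mp_create(4096, 128, 1024, 64);
    if (mp == NULL) {
        /* pool header allocation failed */
    }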
287 nxt_mp_retain(nxt_mp_t *mp) in nxt_mp_retain() argument
289 mp->retain++; in nxt_mp_retain()
291 nxt_thread_log_debug("mp %p retain: %uD", mp, mp->retain); in nxt_mp_retain()
296 nxt_mp_release(nxt_mp_t *mp) in nxt_mp_release() argument
298 mp->retain--; in nxt_mp_release()
300 nxt_thread_log_debug("mp %p release: %uD", mp, mp->retain); in nxt_mp_release()
302 if (mp->retain == 0) { in nxt_mp_release()
303 nxt_mp_destroy(mp); in nxt_mp_release()
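
Pools are reference-counted: nxt_mp_create() starts with retain == 1 (line 257), nxt_mp_retain() increments the counter and nxt_mp_release() decrements it, destroying the pool once it drops to zero (lines 302-303). A minimal sketch of sharing a pool with a second owner:

    nxt_mp_retain(mp);       /* a second owner now references the pool     */

    /* ... later, each owner drops its reference ... */

    nxt_mp_release(mp);      /* retain: 2 -> 1                             */
    nxt_mp_release(mp);      /* retain: 1 -> 0, nxt_mp_destroy() is called */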
309 nxt_mp_destroy(nxt_mp_t *mp) in nxt_mp_destroy() argument
316 nxt_debug_alloc("mp %p destroy", mp); in nxt_mp_destroy()
318 nxt_mp_thread_assert(mp); in nxt_mp_destroy()
320 while (mp->cleanup != NULL) { in nxt_mp_destroy()
321 work = mp->cleanup; in nxt_mp_destroy()
326 mp->cleanup = next_work; in nxt_mp_destroy()
329 next = nxt_rbtree_root(&mp->blocks); in nxt_mp_destroy()
331 while (next != nxt_rbtree_sentinel(&mp->blocks)) { in nxt_mp_destroy()
333 node = nxt_rbtree_destroy_next(&mp->blocks, &next); in nxt_mp_destroy()
345 nxt_free(mp); in nxt_mp_destroy()
382 nxt_mp_is_empty(nxt_mp_t *mp) in nxt_mp_is_empty() argument
384 return (nxt_rbtree_is_empty(&mp->blocks) in nxt_mp_is_empty()
385 && nxt_queue_is_empty(&mp->free_pages)); in nxt_mp_is_empty()
390 nxt_mp_alloc(nxt_mp_t *mp, size_t size) in nxt_mp_alloc() argument
396 if (size <= mp->page_size) { in nxt_mp_alloc()
397 p = nxt_mp_alloc_small(mp, size); in nxt_mp_alloc()
400 p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 1); in nxt_mp_alloc()
405 p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 1); in nxt_mp_alloc()
409 nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p); in nxt_mp_alloc()
416 nxt_mp_zalloc(nxt_mp_t *mp, size_t size) in nxt_mp_zalloc() argument
420 p = nxt_mp_alloc(mp, size); in nxt_mp_zalloc()
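
nxt_mp_alloc() dispatches on size: requests up to mp->page_size are served by nxt_mp_alloc_small(), larger ones become separate large blocks (the trailing 1 passed to nxt_mp_alloc_large() appears to mark them individually freeable), and nxt_mp_zalloc() additionally zeroes the memory on success. A hedged usage sketch:

    void  *obj, *buf;

    obj = nxt_mp_zalloc(mp, 128);          /* <= page_size: small chunk  */
    buf = nxt_mp_alloc(mp, 32 * 1024);     /* >  page_size: large block  */

    if (obj != NULL && buf != NULL) {
        /* ... use the memory ... */

        /* alloc-family memory can be returned to the pool individually */
        nxt_mp_free(mp, buf);
        nxt_mp_free(mp, obj);
    }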
431 nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size) in nxt_mp_align() argument
445 if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) { in nxt_mp_align()
446 p = nxt_mp_alloc_small(mp, aligned_size); in nxt_mp_align()
449 p = nxt_mp_alloc_large(mp, alignment, size, 1); in nxt_mp_align()
454 p = nxt_mp_alloc_large(mp, alignment, size, 1); in nxt_mp_align()
462 nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p); in nxt_mp_align()
469 nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size) in nxt_mp_zalign() argument
473 p = nxt_mp_align(mp, alignment, size); in nxt_mp_zalign()
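
nxt_mp_align() adds an explicit alignment: when the aligned size still fits a page and the alignment does not exceed mp->page_alignment (line 445) the request stays on the small path, otherwise it becomes a large block; nxt_mp_zalign() zeroes the result. A short sketch (the alignment is assumed to be a power of two):

    void  *p;

    /* 64-byte aligned request; with the geometry assumed above this
     * stays on the small-allocation path */
    p = nxt_mp_align(mp, 64, 200);
    if (p == NULL) {
        /* handle allocation failure */
    }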
484 nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size) in nxt_mp_chunk_pages_index() argument
491 n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift; in nxt_mp_chunk_pages_index()
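
The index computation at line 491 rounds the request up to the next power of two and expresses it as a multiple of the minimum chunk size. A standalone sketch of the arithmetic, assuming chunk_size_shift = 6 (64-byte minimum chunks) and taking requests no larger than one chunk to map to index 0:

    #include <stdio.h>
    #include <stddef.h>

    /* floor(log2(v)) for v > 0, standing in for nxt_lg2() */
    static unsigned
    lg2(size_t v)
    {
        unsigned  n;

        for (n = 0; v >>= 1; n++) { }
        return n;
    }

    int
    main(void)
    {
        size_t    size = 200;
        unsigned  chunk_size_shift = 6;    /* assumed: 64-byte minimum chunk */
        int       n, index;

        index = 0;

        if (size > 1) {
            n = (int) lg2(size - 1) + 1 - (int) chunk_size_shift;
            if (n > 0) {
                index = n;
            }
        }

        /* 200 bytes -> index 2 -> 256-byte chunks */
        printf("index %d, chunk size %zu\n",
               index, (size_t) 1 << (index + chunk_size_shift));
        return 0;
    }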
505 nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page) in nxt_mp_page_addr() argument
515 return block->start + (page->number << mp->page_size_shift); in nxt_mp_page_addr()
520 nxt_mp_alloc_small(nxt_mp_t *mp, size_t size) in nxt_mp_alloc_small() argument
528 nxt_mp_thread_assert(mp); in nxt_mp_alloc_small()
532 if (size <= mp->page_size / 2) { in nxt_mp_alloc_small()
534 index = nxt_mp_chunk_pages_index(mp, size); in nxt_mp_alloc_small()
535 chunk_pages = &mp->chunk_pages[index]; in nxt_mp_alloc_small()
542 p = nxt_mp_page_addr(mp, page); in nxt_mp_alloc_small()
547 p += ((n << index) << mp->chunk_size_shift); in nxt_mp_alloc_small()
560 page = nxt_mp_alloc_page(mp); in nxt_mp_alloc_small()
565 n = mp->page_size_shift - (index + mp->chunk_size_shift); in nxt_mp_alloc_small()
573 p = nxt_mp_page_addr(mp, page); in nxt_mp_alloc_small()
578 page = nxt_mp_alloc_page(mp); in nxt_mp_alloc_small()
581 page->size = mp->page_size >> mp->chunk_size_shift; in nxt_mp_alloc_small()
583 p = nxt_mp_page_addr(mp, page); in nxt_mp_alloc_small()
587 nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp, in nxt_mp_alloc_small()
588 page->size << mp->chunk_size_shift, p); in nxt_mp_alloc_small()
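
nxt_mp_alloc_small() serves requests up to half a page from pages split into equal power-of-two chunks: line 547 places the n-th chunk at offset (n << index) << chunk_size_shift inside its page, line 565 derives how many chunks a page holds, and anything larger than half a page simply takes a whole page (line 581). A standalone sketch of the chunk arithmetic with assumed example shifts:

    #include <stdio.h>

    int
    main(void)
    {
        /* assumed example geometry: 1 KiB pages (page_size_shift = 10),
         * 64-byte minimum chunks (chunk_size_shift = 6), index = 2,
         * i.e. 256-byte chunks */
        unsigned  page_size_shift = 10, chunk_size_shift = 6, index = 2;
        unsigned  chunks_per_page, n, offset;

        /* cf. line 565: chunks per page */
        chunks_per_page = 1u << (page_size_shift - (index + chunk_size_shift));

        /* cf. line 547: byte offset of chunk n inside its page */
        n = 3;
        offset = (n << index) << chunk_size_shift;

        printf("%u chunks per page, chunk %u at offset %u\n",
               chunks_per_page, n, offset);    /* 4 chunks, offset 768 */
        return 0;
    }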
595 nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size) in nxt_mp_get_small() argument
602 nxt_mp_thread_assert(mp); in nxt_mp_get_small()
611 available = mp->page_size - page->u.taken; in nxt_mp_get_small()
622 page = nxt_mp_alloc_page(mp); in nxt_mp_get_small()
635 p = nxt_mp_page_addr(mp, page); in nxt_mp_get_small()
645 nxt_mp_alloc_page(nxt_mp_t *mp) in nxt_mp_alloc_page() argument
651 if (nxt_queue_is_empty(&mp->free_pages)) { in nxt_mp_alloc_page()
652 cluster = nxt_mp_alloc_cluster(mp); in nxt_mp_alloc_page()
658 link = nxt_queue_first(&mp->free_pages); in nxt_mp_alloc_page()
668 nxt_mp_alloc_cluster(nxt_mp_t *mp) in nxt_mp_alloc_cluster() argument
673 n = mp->cluster_size >> mp->page_size_shift; in nxt_mp_alloc_cluster()
683 cluster->size = mp->cluster_size; in nxt_mp_alloc_cluster()
685 cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size); in nxt_mp_alloc_cluster()
693 nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link); in nxt_mp_alloc_cluster()
702 nxt_rbtree_insert(&mp->blocks, &cluster->node); in nxt_mp_alloc_cluster()
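
A cluster is a single nxt_memalign()ed slab of cluster_size bytes aligned to page_alignment (line 685); it yields cluster_size >> page_size_shift pages (line 673), whose descriptors are queued on mp->free_pages (line 693), and the block itself is inserted into the mp->blocks rbtree (line 702) so that freed pointers can later be mapped back to their cluster. A standalone sizing sketch, with posix_memalign() standing in for nxt_memalign():

    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        /* assumed example geometry: 4 KiB clusters, 1 KiB pages,
         * 128-byte cluster alignment */
        size_t    cluster_size = 4096, page_alignment = 128;
        unsigned  page_size_shift = 10;
        void      *start;

        printf("pages per cluster: %zu\n",
               cluster_size >> page_size_shift);        /* 4 */

        if (posix_memalign(&start, page_alignment, cluster_size) != 0) {
            return 1;
        }

        free(start);
        return 0;
    }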
711 nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size, in nxt_mp_alloc_large() argument
719 nxt_mp_thread_assert(mp); in nxt_mp_alloc_large()
757 nxt_rbtree_insert(&mp->blocks, &block->node); in nxt_mp_alloc_large()
784 nxt_mp_free(nxt_mp_t *mp, void *p) in nxt_mp_free() argument
789 nxt_mp_thread_assert(mp); in nxt_mp_free()
791 nxt_debug_alloc("mp %p free(%p)", mp, p); in nxt_mp_free()
793 block = nxt_mp_find_block(&mp->blocks, p); in nxt_mp_free()
798 err = nxt_mp_chunk_free(mp, block, p); in nxt_mp_free()
807 nxt_rbtree_delete(&mp->blocks, &block->node); in nxt_mp_free()
861 nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p) in nxt_mp_chunk_free() argument
869 n = (p - cluster->start) >> mp->page_size_shift; in nxt_mp_chunk_free()
870 start = cluster->start + (n << mp->page_size_shift); in nxt_mp_chunk_free()
882 size = page->size << mp->chunk_size_shift; in nxt_mp_chunk_free()
884 if (size != mp->page_size) { in nxt_mp_chunk_free()
886 offset = (uintptr_t) (p - start) & (mp->page_size - 1); in nxt_mp_chunk_free()
907 n = nxt_mp_chunk_pages_index(mp, size); in nxt_mp_chunk_free()
908 chunk_pages = &mp->chunk_pages[n]; in nxt_mp_chunk_free()
932 nxt_queue_insert_head(&mp->free_pages, &page->link); in nxt_mp_chunk_free()
938 n = mp->cluster_size >> mp->page_size_shift; in nxt_mp_chunk_free()
952 n = mp->cluster_size >> mp->page_size_shift; in nxt_mp_chunk_free()
961 nxt_rbtree_delete(&mp->blocks, &cluster->node); in nxt_mp_chunk_free()
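
nxt_mp_chunk_free() maps a pointer back to its page by pure arithmetic: the page number is the distance from the cluster start shifted right by page_size_shift (line 869), the page base is rebuilt from that number (line 870), and for chunked pages the in-page offset is obtained by masking with page_size - 1 (line 886). A fully freed page returns to mp->free_pages (line 932), and a completely free cluster is removed from the rbtree (line 961) so the whole block can be released. A standalone sketch of the pointer arithmetic with assumed values:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* assumed example geometry: 1 KiB pages (page_size_shift = 10) */
        unsigned   page_size_shift = 10;
        uintptr_t  page_size = 1024;
        uintptr_t  cluster_start = 0x10000;
        uintptr_t  p = cluster_start + 2 * 1024 + 512;    /* inside page 2 */
        uintptr_t  n, page_start, offset;

        n = (p - cluster_start) >> page_size_shift;           /* page number: 2 */
        page_start = cluster_start + (n << page_size_shift);  /* page base      */
        offset = (p - page_start) & (page_size - 1);          /* offset: 512    */

        printf("page %lu, offset %lu\n",
               (unsigned long) n, (unsigned long) offset);
        return 0;
    }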
973 nxt_mp_nget(nxt_mp_t *mp, size_t size) in nxt_mp_nget() argument
979 if (size <= mp->page_size) { in nxt_mp_nget()
980 p = nxt_mp_get_small(mp, &mp->nget_pages, size); in nxt_mp_nget()
983 p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0); in nxt_mp_nget()
988 p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0); in nxt_mp_nget()
992 nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p); in nxt_mp_nget()
999 nxt_mp_get(nxt_mp_t *mp, size_t size) in nxt_mp_get() argument
1005 if (size <= mp->page_size) { in nxt_mp_get()
1007 p = nxt_mp_get_small(mp, &mp->get_pages, size); in nxt_mp_get()
1010 p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0); in nxt_mp_get()
1015 p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0); in nxt_mp_get()
1019 nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p); in nxt_mp_get()
1026 nxt_mp_zget(nxt_mp_t *mp, size_t size) in nxt_mp_zget() argument
1030 p = nxt_mp_get(mp, size); in nxt_mp_zget()
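
The get family (nxt_mp_nget(), nxt_mp_get(), nxt_mp_zget()) mirrors nxt_mp_alloc() but serves small requests by merely advancing a per-page taken counter (line 611) on the dedicated nget_pages and get_pages queues, and passes 0 as the last argument to nxt_mp_alloc_large(); such memory apparently has no individual free and is reclaimed only when the pool is destroyed. A hedged sketch:

    void  *hdr, *bytes;

    hdr = nxt_mp_zget(mp, 64);         /* zeroed, from the get_pages queue  */
    bytes = nxt_mp_nget(mp, 200);      /* from the nget_pages queue         */

    if (hdr == NULL || bytes == NULL) {
        /* handle allocation failure */
    }

    /* no per-allocation free: both live until the pool is destroyed */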
1041 nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler, in nxt_mp_cleanup() argument
1046 work = nxt_mp_get(mp, sizeof(nxt_work_t)); in nxt_mp_cleanup()
1052 work->next = mp->cleanup; in nxt_mp_cleanup()
1058 mp->cleanup = work; in nxt_mp_cleanup()
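
nxt_mp_cleanup() allocates an nxt_work_t from the pool itself (line 1046) and pushes it onto the mp->cleanup list (lines 1052, 1058), which nxt_mp_destroy() drains (lines 320-326), so registered handlers run exactly once, when the pool goes away. The declaration above is truncated; the task/obj/data arguments and the boolean-style return in the sketch below are assumptions based on the usual nxt_work_handler_t convention, and the handler and locals are hypothetical:

    /* hypothetical handler; the (task, obj, data) signature is assumed */
    static void
    my_file_cleanup(nxt_task_t *task, void *obj, void *data)
    {
        /* e.g. close a descriptor carried in obj */
    }

    /* at registration time (task and file are hypothetical locals): */
    if (!nxt_mp_cleanup(mp, my_file_cleanup, task, file, NULL)) {
        /* the nxt_work_t allocation at line 1046 failed */
    }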