
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * The level hash consists of hierarchical levels of arrays of pointers.
 * The pointers may point to another level, a bucket, or NULL.
 * The levels and buckets must be allocated in a manner alike posix_memalign()
 * to bookkeep additional information in pointer low bits.
 *
 * A level is an array of pointers.  Its size is a power of 2.  Levels
 * may be different sizes, but on the same level the sizes are the same.
 * Level sizes are specified by number of bits per level in lvlhsh->shift
 * array.  A hash may have up to 7 levels.  There are two predefined
 * shift arrays given by the first two shift array values:
 *
 * 1) [0, 0]:  [4, 4, 4, 4, 4, 4, 4] on a 64-bit platform or
 *             [5, 5, 5, 5, 5, 5, 0] on a 32-bit platform,
 *    so default size of levels is 128 bytes.
 *
 * 2) [0, 10]: [10, 4, 4, 4, 4, 4, 0] on a 64-bit platform or
 *             [10, 5, 5, 5, 5, 0, 0] on a 32-bit platform,
 *    so default size of levels is 128 bytes on all levels except
 *    the first level.  The first level is 8K or 4K on 64-bit or 32-bit
 *    platforms respectively.
 *
 * All buckets in a hash are the same size which is a power of 2.
 * A bucket contains several entries stored and tested sequentially.
 * The bucket size should be one or two CPU cache line size, a minimum
 * allowed size is 32 bytes.  A default 128-byte bucket contains 10 64-bit
 * entries or 15 32-bit entries.  Each entry consists of a pointer to value
 * data and a 32-bit key.  If an entry value pointer is NULL, the entry is
 * free.  On a 64-bit platform entry value pointers are not aligned,
 * therefore they are accessed as two 32-bit integers.  The rest trailing
 * space in a bucket is used as a pointer to the next bucket and this
 * pointer is always aligned.  Although the level hash allows storing
 * a lot of values in a bucket chain, this is a non-optimal way.  A large
 * data set should be stored using several levels.
 */

/*
 * Bookkeeping data is kept in the low bits of level and bucket pointers
 * (possible because levels/buckets are allocated with sufficient
 * alignment): the lowest bit distinguishes a bucket (1) from a level (0),
 * and the bits above it hold an entry counter maintained in steps of 2.
 */

#define                                                                       \
nxt_lvlhsh_is_bucket(p)                                                       \
    ((uintptr_t) (p) & 1)


/* Increment the entry counter stored in the low bits of a tagged pointer. */
#define                                                                       \
nxt_lvlhsh_count_inc(n)                                                       \
    n = (void *) ((uintptr_t) (n) + 2)


/* Decrement the entry counter stored in the low bits of a tagged pointer. */
#define                                                                       \
nxt_lvlhsh_count_dec(n)                                                       \
    n = (void *) ((uintptr_t) (n) - 2)


/* Number of slots in a level: 2^shift for the given level number. */
#define                                                                       \
nxt_lvlhsh_level_size(proto, nlvl)                                            \
    ((uintptr_t) 1 << proto->shift[nlvl])


/* Strip bookkeeping bits from a level pointer to get the slot array. */
#define                                                                       \
nxt_lvlhsh_level(lvl, mask)                                                   \
    (void **) ((uintptr_t) lvl & (~mask << 2))


/* Entry counter encoded in a level pointer's low bits. */
#define                                                                       \
nxt_lvlhsh_level_entries(lvl, mask)                                           \
    ((uintptr_t) lvl & (mask << 1))


/* Store a bucket pointer tagged with the bucket flag and a count of one. */
#define                                                                       \
nxt_lvlhsh_store_bucket(slot, bkt)                                            \
    slot = (void **) ((uintptr_t) bkt | 2 | 1)


#define                                                                       \
nxt_lvlhsh_bucket_size(proto)                                                 \
    proto->bucket_size


/* Strip bookkeeping bits from a bucket pointer to get the entry array. */
#define                                                                       \
nxt_lvlhsh_bucket(proto, bkt)                                                 \
    (uint32_t *) ((uintptr_t) bkt & ~(uintptr_t) proto->bucket_mask)


/* Entry counter encoded in a bucket pointer's low bits. */
#define                                                                       \
nxt_lvlhsh_bucket_entries(proto, bkt)                                         \
    (((uintptr_t) bkt & (uintptr_t) proto->bucket_mask) >> 1)


/* One past the last entry slot of a bucket. */
#define                                                                       \
nxt_lvlhsh_bucket_end(proto, bkt)                                             \
    &bkt[proto->bucket_end]


#define                                                                       \
nxt_lvlhsh_free_entry(e)                                                      \
    (!(nxt_lvlhsh_valid_entry(e)))


/* The chain link to the next bucket lives in the bucket's trailing space. */
#define                                                                       \
nxt_lvlhsh_next_bucket(proto, bkt)                                            \
    ((void **) &bkt[proto->bucket_end])

#if (NXT_64BIT)

/*
 * On 64-bit platforms an entry is three 32-bit words: the value pointer
 * split into two unaligned 32-bit halves, followed by the 32-bit key.
 */

#define                                                                       \
nxt_lvlhsh_valid_entry(e)                                                     \
    (((e)[0] | (e)[1]) != 0)


#define                                                                       \
nxt_lvlhsh_entry_value(e)                                                     \
    (void *) (((uintptr_t) (e)[1] << 32) + (e)[0])


#define                                                                       \
nxt_lvlhsh_set_entry_value(e, n)                                              \
    (e)[0] = (uint32_t) (uintptr_t) n;                                        \
    (e)[1] = (uint32_t) ((uintptr_t) n >> 32)


#define                                                                       \
nxt_lvlhsh_entry_key(e)                                                       \
    (e)[2]


#define                                                                       \
nxt_lvlhsh_set_entry_key(e, n)                                                \
    (e)[2] = n

#else

/*
 * On 32-bit platforms an entry is two 32-bit words: the value pointer
 * followed by the 32-bit key.
 */

#define                                                                       \
nxt_lvlhsh_valid_entry(e)                                                     \
    ((e)[0] != 0)


#define                                                                       \
nxt_lvlhsh_entry_value(e)                                                     \
    (void *) (e)[0]


#define                                                                       \
nxt_lvlhsh_set_entry_value(e, n)                                              \
    (e)[0] = (uint32_t) n


#define                                                                       \
nxt_lvlhsh_entry_key(e)                                                       \
    (e)[1]


#define                                                                       \
nxt_lvlhsh_set_entry_key(e, n)                                                \
    (e)[1] = n

#endif


/* Iteration sentinel: the bucket chain has been fully traversed. */
#define NXT_LVLHSH_BUCKET_DONE  ((void *) -1)


static nxt_int_t nxt_lvlhsh_level_find(nxt_lvlhsh_query_t *lhq, void **lvl,
    uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_find(nxt_lvlhsh_query_t *lhq, void **bkt);
static nxt_int_t nxt_lvlhsh_new_bucket(nxt_lvlhsh_query_t *lhq, void **slot);
static nxt_int_t nxt_lvlhsh_level_insert(nxt_lvlhsh_query_t *lhq,
    void **slot, uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_insert(nxt_lvlhsh_query_t *lhq,
    void **slot, uint32_t key, nxt_int_t nlvl);
static nxt_int_t nxt_lvlhsh_convert_bucket_to_level(nxt_lvlhsh_query_t *lhq,
    void **slot, nxt_uint_t nlvl, uint32_t *bucket);
static nxt_int_t nxt_lvlhsh_level_convertion_insert(nxt_lvlhsh_query_t *lhq,
    void **parent, uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_convertion_insert(nxt_lvlhsh_query_t *lhq,
    void **slot, uint32_t key, nxt_int_t nlvl);
static nxt_int_t nxt_lvlhsh_free_level(nxt_lvlhsh_query_t *lhq, void **level,
    nxt_uint_t size);
static nxt_int_t nxt_lvlhsh_level_delete(nxt_lvlhsh_query_t *lhq, void **slot,
    uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_delete(nxt_lvlhsh_query_t *lhq, void **bkt);
static void *nxt_lvlhsh_level_each(nxt_lvlhsh_each_t *lhe, void **level,
    nxt_uint_t nlvl, nxt_uint_t shift);
static void *nxt_lvlhsh_bucket_each(nxt_lvlhsh_each_t *lhe);
static void *nxt_lvlhsh_level_peek(const nxt_lvlhsh_proto_t *proto,
    void **level, nxt_uint_t nlvl);
static void *nxt_lvlhsh_bucket_peek(const nxt_lvlhsh_proto_t *proto,
    void **bkt);


/*
 * Looks up lhq->key_hash (confirmed by lhq->proto->test) in the hash.
 * On success stores the found value in lhq->value and returns NXT_OK;
 * returns NXT_DECLINED if no matching entry exists.
 */

nxt_int_t
nxt_lvlhsh_find(nxt_lvlhsh_t *lh, nxt_lvlhsh_query_t *lhq)
{
    void  *slot;

    slot = lh->slot;

    if (nxt_fast_path(slot != NULL)) {

        if (nxt_lvlhsh_is_bucket(slot)) {
            return nxt_lvlhsh_bucket_find(lhq, slot);
        }

        return nxt_lvlhsh_level_find(lhq, slot, lhq->key_hash, 0);
    }

    return NXT_DECLINED;
}


/*
 * Recursively descends levels using the low shift[nlvl] bits of the key
 * as the slot index, consuming those bits at each step, until a bucket
 * or an empty slot is reached.
 */

static nxt_int_t
nxt_lvlhsh_level_find(nxt_lvlhsh_query_t *lhq, void **lvl, uint32_t key,
    nxt_uint_t nlvl)
{
    void        **slot;
    uintptr_t   mask;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(lvl, mask);
    slot = lvl[key & mask];

    if (slot != NULL) {

        if (nxt_lvlhsh_is_bucket(slot)) {
            return nxt_lvlhsh_bucket_find(lhq, slot);
        }

        return nxt_lvlhsh_level_find(lhq, slot, key >> shift, nlvl + 1);
    }

    return NXT_DECLINED;
}


/*
 * Scans a bucket chain for an entry whose key equals lhq->key_hash and
 * whose value passes proto->test().  The per-bucket entry counter lets
 * the scan stop after the last valid entry of each bucket.
 */

static nxt_int_t
nxt_lvlhsh_bucket_find(nxt_lvlhsh_query_t *lhq, void **bkt)
{
    void        *value;
    uint32_t    *bucket, *e;
    nxt_uint_t  n;

    do {
        bucket = nxt_lvlhsh_bucket(lhq->proto, bkt);
        n = nxt_lvlhsh_bucket_entries(lhq->proto, bkt);
        e = bucket;

        do {
            if (nxt_lvlhsh_valid_entry(e)) {
                n--;

                if (nxt_lvlhsh_entry_key(e) == lhq->key_hash) {

                    value = nxt_lvlhsh_entry_value(e);

                    if (lhq->proto->test(lhq, value) == NXT_OK) {
                        lhq->value = value;

                        return NXT_OK;
                    }
                }
            }

            e += NXT_LVLHSH_ENTRY_SIZE;

        } while (n != 0);

        bkt = *nxt_lvlhsh_next_bucket(lhq->proto, bucket);

    } while (bkt != NULL);

    return NXT_DECLINED;
}


/*
 * Inserts lhq->value under lhq->key_hash.  Returns NXT_OK on success
 * (including replacement when lhq->replace is set), NXT_DECLINED if a
 * duplicate exists and lhq->replace is not set (the existing value is
 * then returned in lhq->value), or NXT_ERROR on allocation failure.
 */

nxt_int_t
nxt_lvlhsh_insert(nxt_lvlhsh_t *lh, nxt_lvlhsh_query_t *lhq)
{
    uint32_t  key;

    if (nxt_fast_path(lh->slot != NULL)) {

        key = lhq->key_hash;

        if (nxt_lvlhsh_is_bucket(lh->slot)) {
            return nxt_lvlhsh_bucket_insert(lhq, &lh->slot, key, -1);
        }

        return nxt_lvlhsh_level_insert(lhq, &lh->slot, key, 0);
    }

    return nxt_lvlhsh_new_bucket(lhq, &lh->slot);
}


/*
 * Allocates a fresh bucket holding a single entry (lhq->value keyed by
 * lhq->key_hash) and stores the tagged bucket pointer into *slot.
 * Returns NXT_OK or NXT_ERROR on allocation failure.
 */

static nxt_int_t
nxt_lvlhsh_new_bucket(nxt_lvlhsh_query_t *lhq, void **slot)
{
    uint32_t  *bucket;

    bucket = lhq->proto->alloc(lhq->pool, nxt_lvlhsh_bucket_size(lhq->proto));

    if (nxt_fast_path(bucket != NULL)) {

        nxt_lvlhsh_set_entry_value(bucket, lhq->value);
        nxt_lvlhsh_set_entry_key(bucket, lhq->key_hash);

        *nxt_lvlhsh_next_bucket(lhq->proto, bucket) = NULL;

        nxt_lvlhsh_store_bucket(*slot, bucket);

        return NXT_OK;
    }

    return NXT_ERROR;
}


/*
 * Descends into the level at *parent and inserts into the selected slot:
 * recursing into a sub-level, inserting into an existing bucket, or
 * creating a new bucket in an empty slot (bumping the parent's counter).
 */

static nxt_int_t
nxt_lvlhsh_level_insert(nxt_lvlhsh_query_t *lhq, void **parent, uint32_t key,
    nxt_uint_t nlvl)
{
    void        **slot, **lvl;
    nxt_int_t   ret;
    uintptr_t   mask;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(*parent, mask);
    slot = &lvl[key & mask];

    if (*slot != NULL) {
        key >>= shift;

        if (nxt_lvlhsh_is_bucket(*slot)) {
            return nxt_lvlhsh_bucket_insert(lhq, slot, key, nlvl);
        }

        return nxt_lvlhsh_level_insert(lhq, slot, key, nlvl + 1);
    }

    ret = nxt_lvlhsh_new_bucket(lhq, slot);

    if (nxt_fast_path(ret == NXT_OK)) {
        nxt_lvlhsh_count_inc(*parent);
    }

    return ret;
}


/*
 * Inserts into a bucket chain.  First scans the whole chain for a
 * duplicate (replacing or declining as requested), remembering the first
 * vacant position seen.  If no duplicate is found, fills the vacancy;
 * if all buckets are full, converts the last bucket into a new level
 * (or, on the last allowed level, chains one more bucket).
 */

static nxt_int_t
nxt_lvlhsh_bucket_insert(nxt_lvlhsh_query_t *lhq, void **slot, uint32_t key,
    nxt_int_t nlvl)
{
    void                      **bkt, **vacant_bucket, *value;
    uint32_t                  *bucket, *e, *vacant_entry;
    nxt_int_t                 ret;
    uintptr_t                 n;
    const void                *new_value;
    const nxt_lvlhsh_proto_t  *proto;

    bkt = slot;
    vacant_entry = NULL;
    vacant_bucket = NULL;
    proto = lhq->proto;

    /* Search for a duplicate entry in the bucket chain. */

    do {
        bucket = nxt_lvlhsh_bucket(proto, *bkt);
        n = nxt_lvlhsh_bucket_entries(proto, *bkt);
        e = bucket;

        do {
            if (nxt_lvlhsh_valid_entry(e)) {

                if (nxt_lvlhsh_entry_key(e) == lhq->key_hash) {

                    value = nxt_lvlhsh_entry_value(e);

                    if (proto->test(lhq, value) == NXT_OK) {

                        new_value = lhq->value;
                        lhq->value = value;

                        if (lhq->replace) {
                            nxt_lvlhsh_set_entry_value(e, new_value);

                            return NXT_OK;
                        }

                        return NXT_DECLINED;
                    }
                }

                n--;

            } else {
                /*
                 * Save a vacant hole position in the bucket
                 * and continue to search for a duplicate entry.
                 */
                if (vacant_entry == NULL) {
                    vacant_entry = e;
                    vacant_bucket = bkt;
                }
            }

            e += NXT_LVLHSH_ENTRY_SIZE;

        } while (n != 0);

        if (e < nxt_lvlhsh_bucket_end(proto, bucket)) {
            /*
             * Save a vacant position at an incomplete bucket's end
             * and continue to search for a duplicate entry.
             */
            if (vacant_entry == NULL) {
                vacant_entry = e;
                vacant_bucket = bkt;
            }
        }

        bkt = nxt_lvlhsh_next_bucket(proto, bucket);

    } while (*bkt != NULL);

    if (vacant_entry != NULL) {
        nxt_lvlhsh_set_entry_value(vacant_entry, lhq->value);
        nxt_lvlhsh_set_entry_key(vacant_entry, lhq->key_hash);
        nxt_lvlhsh_count_inc(*vacant_bucket);

        return NXT_OK;
    }

    /* All buckets are full. */

    nlvl++;

    if (nxt_fast_path(proto->shift[nlvl] != 0)) {

        ret = nxt_lvlhsh_convert_bucket_to_level(lhq, slot, nlvl, bucket);

        if (nxt_fast_path(ret == NXT_OK)) {
            return nxt_lvlhsh_level_insert(lhq, slot, key, nlvl);
        }

        return ret;
    }

    /* The last allowed level, only buckets may be allocated here. */

    return nxt_lvlhsh_new_bucket(lhq, bkt);
}


/*
 * Replaces a full bucket with a newly allocated level and redistributes
 * the bucket's entries into it, re-deriving each entry's residual key
 * bits from its stored 32-bit key.  On failure the partially built level
 * is freed and NXT_ERROR is returned; the original bucket stays intact.
 */

static nxt_int_t
nxt_lvlhsh_convert_bucket_to_level(nxt_lvlhsh_query_t *lhq, void **slot,
    nxt_uint_t nlvl, uint32_t *bucket)
{
    void                      *lvl, **level;
    uint32_t                  *e, *end, key;
    nxt_int_t                 ret;
    nxt_uint_t                i, shift, size;
    nxt_lvlhsh_query_t        q;
    const nxt_lvlhsh_proto_t  *proto;

    proto = lhq->proto;
    size = nxt_lvlhsh_level_size(proto, nlvl);

    lvl = proto->alloc(lhq->pool, size * (sizeof(void *)));

    if (nxt_slow_path(lvl == NULL)) {
        return NXT_ERROR;
    }

    nxt_memzero(lvl, size * (sizeof(void *)));

    level = lvl;
    shift = 0;

    for (i = 0; i < nlvl; i++) {
        /*
         * Using SIMD operations in this trivial loop with maximum
         * 8 iterations may increase code size by 170 bytes.
         */
        nxt_pragma_loop_disable_vectorization;

        shift += proto->shift[i];
    }

    end = nxt_lvlhsh_bucket_end(proto, bucket);

    for (e = bucket; e < end; e += NXT_LVLHSH_ENTRY_SIZE) {

        q.proto = proto;
        q.pool = lhq->pool;
        q.value = nxt_lvlhsh_entry_value(e);
        key = nxt_lvlhsh_entry_key(e);
        q.key_hash = key;

        ret = nxt_lvlhsh_level_convertion_insert(&q, &lvl, key >> shift, nlvl);

        if (nxt_slow_path(ret != NXT_OK)) {
            return nxt_lvlhsh_free_level(lhq, level, size);
        }
    }

    *slot = lvl;

    proto->free(lhq->pool, bucket);

    return NXT_OK;
}


/*
 * Level insertion used only during bucket-to-level conversion: slots of
 * the freshly built level may hold only buckets (never sub-levels), so
 * duplicate testing is unnecessary.
 */

static nxt_int_t
nxt_lvlhsh_level_convertion_insert(nxt_lvlhsh_query_t *lhq, void **parent,
    uint32_t key, nxt_uint_t nlvl)
{
    void        **slot, **lvl;
    nxt_int_t   ret;
    uintptr_t   mask;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(*parent, mask);
    slot = &lvl[key & mask];

    if (*slot == NULL) {
        ret = nxt_lvlhsh_new_bucket(lhq, slot);

        if (nxt_fast_path(ret == NXT_OK)) {
            nxt_lvlhsh_count_inc(*parent);
        }

        return ret;
    }

    /* Only buckets can be here. */

    return nxt_lvlhsh_bucket_convertion_insert(lhq, slot, key >> shift, nlvl);
}


/*
 * The special bucket insertion procedure is required because during
 * conversion lhq->key contains garbage values and the test function
 * cannot be called.  Besides, the procedure can be simpler because
 * a new entry is inserted just after the occupied entries.
 */

static nxt_int_t
nxt_lvlhsh_bucket_convertion_insert(nxt_lvlhsh_query_t *lhq, void **slot,
    uint32_t key, nxt_int_t nlvl)
{
    void                      **bkt;
    uint32_t                  *bucket, *e;
    nxt_int_t                 ret;
    uintptr_t                 n;
    const nxt_lvlhsh_proto_t  *proto;

    bkt = slot;
    proto = lhq->proto;

    do {
        bucket = nxt_lvlhsh_bucket(proto, *bkt);
        n = nxt_lvlhsh_bucket_entries(proto, *bkt);
        e = bucket + n * NXT_LVLHSH_ENTRY_SIZE;

        if (nxt_fast_path(e < nxt_lvlhsh_bucket_end(proto, bucket))) {

            nxt_lvlhsh_set_entry_value(e, lhq->value);
            nxt_lvlhsh_set_entry_key(e, lhq->key_hash);
            nxt_lvlhsh_count_inc(*bkt);

            return NXT_OK;
        }

        bkt = nxt_lvlhsh_next_bucket(proto, bucket);

    } while (*bkt != NULL);

    /* All buckets are full. */

    nlvl++;

    if (nxt_fast_path(proto->shift[nlvl] != 0)) {

        ret = nxt_lvlhsh_convert_bucket_to_level(lhq, slot, nlvl, bucket);

        if (nxt_fast_path(ret == NXT_OK)) {
            return nxt_lvlhsh_level_insert(lhq, slot, key, nlvl);
        }

        return ret;
    }

    /* The last allowed level, only buckets may be allocated here. */

    return nxt_lvlhsh_new_bucket(lhq, bkt);
}


/*
 * Frees a partially built conversion level and the buckets hanging off
 * it, then returns NXT_ERROR so callers can propagate the failure.
 */

static nxt_int_t
nxt_lvlhsh_free_level(nxt_lvlhsh_query_t *lhq, void **level, nxt_uint_t size)
{
    nxt_uint_t                i;
    const nxt_lvlhsh_proto_t  *proto;

    proto = lhq->proto;

    for (i = 0; i < size; i++) {

        if (level[i] != NULL) {
            /*
             * Chained buckets are not possible here, since even
             * in the worst case one bucket cannot be converted
             * in two chained buckets but remains the same bucket.
             */
            proto->free(lhq->pool, nxt_lvlhsh_bucket(proto, level[i]));
        }
    }

    proto->free(lhq->pool, level);

    return NXT_ERROR;
}


/*
 * Deletes the entry matching lhq->key_hash (confirmed by proto->test).
 * The removed value is returned in lhq->value; emptied buckets and
 * levels are freed on the way back up.  Returns NXT_OK or NXT_DECLINED.
 */

nxt_int_t
nxt_lvlhsh_delete(nxt_lvlhsh_t *lh, nxt_lvlhsh_query_t *lhq)
{
    if (nxt_fast_path(lh->slot != NULL)) {

        if (nxt_lvlhsh_is_bucket(lh->slot)) {
            return nxt_lvlhsh_bucket_delete(lhq, &lh->slot);
        }

        return nxt_lvlhsh_level_delete(lhq, &lh->slot, lhq->key_hash, 0);
    }

    return NXT_DECLINED;
}


/*
 * Recursive deletion through a level: after deleting below, if the slot
 * became empty the parent's counter is decremented, and a level whose
 * counter drops to zero is freed and its parent slot cleared.
 */

static nxt_int_t
nxt_lvlhsh_level_delete(nxt_lvlhsh_query_t *lhq, void **parent, uint32_t key,
    nxt_uint_t nlvl)
{
    void        **slot, **lvl;
    uintptr_t   mask;
    nxt_int_t   ret;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(*parent, mask);
    slot = &lvl[key & mask];

    if (*slot != NULL) {

        if (nxt_lvlhsh_is_bucket(*slot)) {
            ret = nxt_lvlhsh_bucket_delete(lhq, slot);

        } else {
            key >>= shift;
            ret = nxt_lvlhsh_level_delete(lhq, slot, key, nlvl + 1);
        }

        if (*slot == NULL) {
            nxt_lvlhsh_count_dec(*parent);

            if (nxt_lvlhsh_level_entries(*parent, mask) == 0) {
                *parent = NULL;
                lhq->proto->free(lhq->pool, lvl);
            }
        }

        return ret;
    }

    return NXT_DECLINED;
}


/*
 * Deletes a matching entry from a bucket chain.  A bucket left with a
 * single entry is unlinked from the chain and freed; otherwise the entry
 * is just invalidated and the bucket counter decremented.
 */

static nxt_int_t
nxt_lvlhsh_bucket_delete(nxt_lvlhsh_query_t *lhq, void **bkt)
{
    void                      *value;
    uint32_t                  *bucket, *e;
    uintptr_t                 n;
    const nxt_lvlhsh_proto_t  *proto;

    proto = lhq->proto;

    do {
        bucket = nxt_lvlhsh_bucket(proto, *bkt);
        n = nxt_lvlhsh_bucket_entries(proto, *bkt);
        e = bucket;

        do {
            if (nxt_lvlhsh_valid_entry(e)) {

                if (nxt_lvlhsh_entry_key(e) == lhq->key_hash) {

                    value = nxt_lvlhsh_entry_value(e);

                    if (proto->test(lhq, value) == NXT_OK) {

                        if (nxt_lvlhsh_bucket_entries(proto, *bkt) == 1) {
                            *bkt = *nxt_lvlhsh_next_bucket(proto, bucket);
                            proto->free(lhq->pool, bucket);

                        } else {
                            nxt_lvlhsh_count_dec(*bkt);
                            nxt_lvlhsh_set_entry_value(e, NULL);
                        }

                        lhq->value = value;

                        return NXT_OK;
                    }
                }

                n--;
            }

            e += NXT_LVLHSH_ENTRY_SIZE;

        } while (n != 0);

        bkt = nxt_lvlhsh_next_bucket(proto, bucket);

    } while (*bkt != NULL);

    return NXT_DECLINED;
}


/*
 * Iterator step: returns the next stored value, or NULL when iteration
 * is complete.  Iteration state (current bucket, entry index, and level
 * position) lives in *lhe; a NULL lhe->bucket denotes the first call.
 */

void *
nxt_lvlhsh_each(nxt_lvlhsh_t *lh, nxt_lvlhsh_each_t *lhe)
{
    void  **slot;

    if (lhe->bucket == NXT_LVLHSH_BUCKET_DONE) {
        slot = lh->slot;

        if (nxt_lvlhsh_is_bucket(slot)) {
            return NULL;
        }

    } else {
        if (nxt_slow_path(lhe->bucket == NULL)) {

            /* The first iteration only. */

            slot = lh->slot;

            if (slot == NULL) {
                return NULL;
            }

            if (!nxt_lvlhsh_is_bucket(slot)) {
                goto level;
            }

            lhe->bucket = nxt_lvlhsh_bucket(lhe->proto, slot);
            lhe->entries = nxt_lvlhsh_bucket_entries(lhe->proto, slot);
        }

        return nxt_lvlhsh_bucket_each(lhe);
    }

level:

    return nxt_lvlhsh_level_each(lhe, slot, 0, 0);
}


/*
 * Walks a level's slots starting at the position recorded in
 * lhe->current, descending into sub-levels and buckets; lhe->current
 * accumulates the per-level slot indices so iteration can resume.
 */

static void *
nxt_lvlhsh_level_each(nxt_lvlhsh_each_t *lhe, void **level, nxt_uint_t nlvl,
    nxt_uint_t shift)
{
    void        **slot, *value;
    uintptr_t   mask;
    nxt_uint_t  n, level_shift;

    level_shift = lhe->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << level_shift) - 1;

    level = nxt_lvlhsh_level(level, mask);

    do {
        n = (lhe->current >> shift) & mask;
        slot = level[n];

        if (slot != NULL) {
            if (nxt_lvlhsh_is_bucket(slot)) {

                if (lhe->bucket != NXT_LVLHSH_BUCKET_DONE) {

                    lhe->bucket = nxt_lvlhsh_bucket(lhe->proto, slot);
                    lhe->entries = nxt_lvlhsh_bucket_entries(lhe->proto, slot);
                    lhe->entry = 0;

                    return nxt_lvlhsh_bucket_each(lhe);
                }

                lhe->bucket = NULL;

            } else {
                value = nxt_lvlhsh_level_each(lhe, slot, nlvl + 1,
                                              shift + level_shift);
                if (value != NULL) {
                    return value;
                }
            }
        }

        lhe->current &= ~(mask << shift);
        n = ((n + 1) & mask) << shift;
        lhe->current |= n;

    } while (n != 0);

    return NULL;
}


/*
 * Returns the next valid entry's value from the iterator's current
 * bucket, advancing to the chained bucket (or marking the chain done)
 * when the current bucket's entries are exhausted.
 */

static nxt_noinline void *
nxt_lvlhsh_bucket_each(nxt_lvlhsh_each_t *lhe)
{
    void      *value, **next;
    uint32_t  *bucket;

    /* At least one valid entry must be present here. */

    do {
        bucket = &lhe->bucket[lhe->entry];
        lhe->entry += NXT_LVLHSH_ENTRY_SIZE;

    } while (nxt_lvlhsh_free_entry(bucket));

    value = nxt_lvlhsh_entry_value(bucket);

    lhe->entries--;

    if (lhe->entries == 0) {
        next = *nxt_lvlhsh_next_bucket(lhe->proto, lhe->bucket);

        lhe->bucket = (next == NULL) ? NXT_LVLHSH_BUCKET_DONE
                                     : nxt_lvlhsh_bucket(lhe->proto, next);

        lhe->entries = nxt_lvlhsh_bucket_entries(lhe->proto, next);
        lhe->entry = 0;
    }

    return value;
}


/*
 * Returns some stored value from a non-empty hash without removing it,
 * or NULL if the hash is empty.
 */

void *
nxt_lvlhsh_peek(nxt_lvlhsh_t *lh, const nxt_lvlhsh_proto_t *proto)
{
    void  **slot;

    slot = lh->slot;

    if (slot != NULL) {

        if (nxt_lvlhsh_is_bucket(slot)) {
            return nxt_lvlhsh_bucket_peek(proto, slot);
        }

        return nxt_lvlhsh_level_peek(proto, slot, 0);
    }

    return NULL;
}


/*
 * Finds the first occupied slot of a level and descends into it; a
 * non-NULL level is guaranteed non-empty by the delete path, which
 * frees levels whose entry counter reaches zero.
 */

static void *
nxt_lvlhsh_level_peek(const nxt_lvlhsh_proto_t *proto, void **level,
    nxt_uint_t nlvl)
{
    void        **slot;
    uintptr_t   mask;
    nxt_uint_t  n, shift;

    shift = proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    level = nxt_lvlhsh_level(level, mask);

    n = 0;

    /* At least one valid level slot must be present here. */

    for ( ;; ) {
        slot = level[n];

        if (slot != NULL) {

            if (nxt_lvlhsh_is_bucket(slot)) {
                return nxt_lvlhsh_bucket_peek(proto, slot);
            }

            return nxt_lvlhsh_level_peek(proto, slot, nlvl + 1);
        }

        n++;
    }
}


/*
 * Returns the first valid entry's value in a bucket; buckets are never
 * kept empty (the delete path frees single-entry buckets), so the scan
 * always terminates on a valid entry.
 */

static void *
nxt_lvlhsh_bucket_peek(const nxt_lvlhsh_proto_t *proto, void **bkt)
{
    void      *value;
    uint32_t  *entry;

    /* At least one valid entry must be present here. */

    for (entry = nxt_lvlhsh_bucket(proto, bkt);
         nxt_lvlhsh_free_entry(entry);
         entry += NXT_LVLHSH_ENTRY_SIZE)
    {
        /* void */
    }

    value = nxt_lvlhsh_entry_value(entry);
    return value;
}


/*
 * Default allocator: size-aligned allocation, so the low pointer bits
 * are guaranteed free for the bookkeeping described above.
 */

void *
nxt_lvlhsh_alloc(void *data, size_t size)
{
    return nxt_memalign(size, size);
}


/* Default deallocator matching nxt_lvlhsh_alloc(). */

void
nxt_lvlhsh_free(void *data, void *p)
{
    nxt_free(p);
}