200static void *nxt_unit_lvlhsh_alloc(void *data, size_t size); 201static void nxt_unit_lvlhsh_free(void *data, void *p); 202static int nxt_unit_memcasecmp(const void *p1, const void *p2, size_t length); 203 204 205struct nxt_unit_mmap_buf_s { 206 nxt_unit_buf_t buf; 207 208 nxt_unit_mmap_buf_t *next; 209 nxt_unit_mmap_buf_t **prev; 210 211 nxt_port_mmap_header_t *hdr; 212 nxt_unit_request_info_t *req; 213 nxt_unit_ctx_impl_t *ctx_impl; 214 char *free_ptr; 215 char *plain_ptr; 216}; 217 218 219struct nxt_unit_recv_msg_s { 220 uint32_t stream; 221 nxt_pid_t pid; 222 nxt_port_id_t reply_port; 223 224 uint8_t last; /* 1 bit */ 225 uint8_t mmap; /* 1 bit */ 226 227 void *start; 228 uint32_t size; 229 230 int fd[2]; 231 232 nxt_unit_mmap_buf_t *incoming_buf; 233}; 234 235 236typedef enum { 237 NXT_UNIT_RS_START = 0, 238 NXT_UNIT_RS_RESPONSE_INIT, 239 NXT_UNIT_RS_RESPONSE_HAS_CONTENT, 240 NXT_UNIT_RS_RESPONSE_SENT, 241 NXT_UNIT_RS_RELEASED, 242} nxt_unit_req_state_t; 243 244 245struct nxt_unit_request_info_impl_s { 246 nxt_unit_request_info_t req; 247 248 uint32_t stream; 249 250 nxt_unit_mmap_buf_t *outgoing_buf; 251 nxt_unit_mmap_buf_t *incoming_buf; 252 253 nxt_unit_req_state_t state; 254 uint8_t websocket; 255 uint8_t in_hash; 256 257 /* for nxt_unit_ctx_impl_t.free_req or active_req */ 258 nxt_queue_link_t link; 259 /* for nxt_unit_port_impl_t.awaiting_req */ 260 nxt_queue_link_t port_wait_link; 261 262 char extra_data[]; 263}; 264 265 266struct nxt_unit_websocket_frame_impl_s { 267 nxt_unit_websocket_frame_t ws; 268 269 nxt_unit_mmap_buf_t *buf; 270 271 nxt_queue_link_t link; 272 273 nxt_unit_ctx_impl_t *ctx_impl; 274}; 275 276 277struct nxt_unit_read_buf_s { 278 nxt_queue_link_t link; 279 nxt_unit_ctx_impl_t *ctx_impl; 280 ssize_t size; 281 nxt_recv_oob_t oob; 282 char buf[16384]; 283}; 284 285 286struct nxt_unit_ctx_impl_s { 287 nxt_unit_ctx_t ctx; 288 289 nxt_atomic_t use_count; 290 nxt_atomic_t wait_items; 291 292 pthread_mutex_t mutex; 293 294 nxt_unit_port_t 
*read_port; 295 296 nxt_queue_link_t link; 297 298 nxt_unit_mmap_buf_t *free_buf; 299 300 /* of nxt_unit_request_info_impl_t */ 301 nxt_queue_t free_req; 302 303 /* of nxt_unit_websocket_frame_impl_t */ 304 nxt_queue_t free_ws; 305 306 /* of nxt_unit_request_info_impl_t */ 307 nxt_queue_t active_req; 308 309 /* of nxt_unit_request_info_impl_t */ 310 nxt_lvlhsh_t requests; 311 312 /* of nxt_unit_request_info_impl_t */ 313 nxt_queue_t ready_req; 314 315 /* of nxt_unit_read_buf_t */ 316 nxt_queue_t pending_rbuf; 317 318 /* of nxt_unit_read_buf_t */ 319 nxt_queue_t free_rbuf; 320 321 uint8_t online; /* 1 bit */ 322 uint8_t ready; /* 1 bit */ 323 uint8_t quit_param; 324 325 nxt_unit_mmap_buf_t ctx_buf[2]; 326 nxt_unit_read_buf_t ctx_read_buf; 327 328 nxt_unit_request_info_impl_t req; 329}; 330 331 332struct nxt_unit_mmap_s { 333 nxt_port_mmap_header_t *hdr; 334 pthread_t src_thread; 335 336 /* of nxt_unit_read_buf_t */ 337 nxt_queue_t awaiting_rbuf; 338}; 339 340 341struct nxt_unit_mmaps_s { 342 pthread_mutex_t mutex; 343 uint32_t size; 344 uint32_t cap; 345 nxt_atomic_t allocated_chunks; 346 nxt_unit_mmap_t *elts; 347}; 348 349 350struct nxt_unit_impl_s { 351 nxt_unit_t unit; 352 nxt_unit_callbacks_t callbacks; 353 354 nxt_atomic_t use_count; 355 nxt_atomic_t request_count; 356 357 uint32_t request_data_size; 358 uint32_t shm_mmap_limit; 359 uint32_t request_limit; 360 361 pthread_mutex_t mutex; 362 363 nxt_lvlhsh_t processes; /* of nxt_unit_process_t */ 364 nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */ 365 366 nxt_unit_port_t *router_port; 367 nxt_unit_port_t *shared_port; 368 369 nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ 370 371 nxt_unit_mmaps_t incoming; 372 nxt_unit_mmaps_t outgoing; 373 374 pid_t pid; 375 int log_fd; 376 377 nxt_unit_ctx_impl_t main_ctx; 378}; 379 380 381struct nxt_unit_port_impl_s { 382 nxt_unit_port_t port; 383 384 nxt_atomic_t use_count; 385 386 /* for nxt_unit_process_t.ports */ 387 nxt_queue_link_t link; 388 nxt_unit_process_t 
*process; 389 390 /* of nxt_unit_request_info_impl_t */ 391 nxt_queue_t awaiting_req; 392 393 int ready; 394 395 void *queue; 396 397 int from_socket; 398 nxt_unit_read_buf_t *socket_rbuf; 399}; 400 401 402struct nxt_unit_process_s { 403 pid_t pid; 404 405 nxt_queue_t ports; /* of nxt_unit_port_impl_t */ 406 407 nxt_unit_impl_t *lib; 408 409 nxt_atomic_t use_count; 410 411 uint32_t next_port_id; 412}; 413 414 415/* Explicitly using 32 bit types to avoid possible alignment. */ 416typedef struct { 417 int32_t pid; 418 uint32_t id; 419} nxt_unit_port_hash_id_t; 420 421 422static pid_t nxt_unit_pid; 423 424 425nxt_unit_ctx_t * 426nxt_unit_init(nxt_unit_init_t *init) 427{ 428 int rc, queue_fd, shared_queue_fd; 429 void *mem; 430 uint32_t ready_stream, shm_limit, request_limit; 431 nxt_unit_ctx_t *ctx; 432 nxt_unit_impl_t *lib; 433 nxt_unit_port_t ready_port, router_port, read_port, shared_port; 434 435 nxt_unit_pid = getpid(); 436 437 lib = nxt_unit_create(init); 438 if (nxt_slow_path(lib == NULL)) { 439 return NULL; 440 } 441 442 queue_fd = -1; 443 mem = MAP_FAILED; 444 shared_port.out_fd = -1; 445 shared_port.data = NULL; 446 447 if (init->ready_port.id.pid != 0 448 && init->ready_stream != 0 449 && init->read_port.id.pid != 0) 450 { 451 ready_port = init->ready_port; 452 ready_stream = init->ready_stream; 453 router_port = init->router_port; 454 read_port = init->read_port; 455 lib->log_fd = init->log_fd; 456 457 nxt_unit_port_id_init(&ready_port.id, ready_port.id.pid, 458 ready_port.id.id); 459 nxt_unit_port_id_init(&router_port.id, router_port.id.pid, 460 router_port.id.id); 461 nxt_unit_port_id_init(&read_port.id, read_port.id.pid, 462 read_port.id.id); 463 464 shared_port.in_fd = init->shared_port_fd; 465 shared_queue_fd = init->shared_queue_fd; 466 467 } else { 468 rc = nxt_unit_read_env(&ready_port, &router_port, &read_port, 469 &shared_port.in_fd, &shared_queue_fd, 470 &lib->log_fd, &ready_stream, &shm_limit, 471 &request_limit); 472 if (nxt_slow_path(rc != 
NXT_UNIT_OK)) { 473 goto fail; 474 } 475 476 lib->shm_mmap_limit = (shm_limit + PORT_MMAP_DATA_SIZE - 1) 477 / PORT_MMAP_DATA_SIZE; 478 lib->request_limit = request_limit; 479 } 480 481 if (nxt_slow_path(lib->shm_mmap_limit < 1)) { 482 lib->shm_mmap_limit = 1; 483 } 484 485 lib->pid = read_port.id.pid; 486 nxt_unit_pid = lib->pid; 487 488 ctx = &lib->main_ctx.ctx; 489 490 rc = nxt_unit_fd_blocking(router_port.out_fd); 491 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 492 goto fail; 493 } 494 495 lib->router_port = nxt_unit_add_port(ctx, &router_port, NULL); 496 if (nxt_slow_path(lib->router_port == NULL)) { 497 nxt_unit_alert(NULL, "failed to add router_port"); 498 499 goto fail; 500 } 501 502 queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t)); 503 if (nxt_slow_path(queue_fd == -1)) { 504 goto fail; 505 } 506 507 mem = mmap(NULL, sizeof(nxt_port_queue_t), 508 PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0); 509 if (nxt_slow_path(mem == MAP_FAILED)) { 510 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd, 511 strerror(errno), errno); 512 513 goto fail; 514 } 515 516 nxt_port_queue_init(mem); 517 518 rc = nxt_unit_fd_blocking(read_port.in_fd); 519 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 520 goto fail; 521 } 522 523 lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port, mem); 524 if (nxt_slow_path(lib->main_ctx.read_port == NULL)) { 525 nxt_unit_alert(NULL, "failed to add read_port"); 526 527 goto fail; 528 } 529 530 rc = nxt_unit_fd_blocking(ready_port.out_fd); 531 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 532 goto fail; 533 } 534 535 nxt_unit_port_id_init(&shared_port.id, read_port.id.pid, 536 NXT_UNIT_SHARED_PORT_ID); 537 538 mem = mmap(NULL, sizeof(nxt_app_queue_t), PROT_READ | PROT_WRITE, 539 MAP_SHARED, shared_queue_fd, 0); 540 if (nxt_slow_path(mem == MAP_FAILED)) { 541 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", shared_queue_fd, 542 strerror(errno), errno); 543 544 goto fail; 545 } 546 547 nxt_unit_close(shared_queue_fd); 548 549 
lib->shared_port = nxt_unit_add_port(ctx, &shared_port, mem); 550 if (nxt_slow_path(lib->shared_port == NULL)) { 551 nxt_unit_alert(NULL, "failed to add shared_port"); 552 553 goto fail; 554 } 555 556 rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream, queue_fd); 557 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 558 nxt_unit_alert(NULL, "failed to send READY message"); 559 560 goto fail; 561 } 562 563 nxt_unit_close(ready_port.out_fd); 564 nxt_unit_close(queue_fd); 565 566 return ctx; 567 568fail: 569 570 if (mem != MAP_FAILED) { 571 munmap(mem, sizeof(nxt_port_queue_t)); 572 } 573 574 if (queue_fd != -1) { 575 nxt_unit_close(queue_fd); 576 } 577 578 nxt_unit_ctx_release(&lib->main_ctx.ctx); 579 580 return NULL; 581} 582 583 584static nxt_unit_impl_t * 585nxt_unit_create(nxt_unit_init_t *init) 586{ 587 int rc; 588 nxt_unit_impl_t *lib; 589 nxt_unit_callbacks_t *cb; 590 591 lib = nxt_unit_malloc(NULL, 592 sizeof(nxt_unit_impl_t) + init->request_data_size); 593 if (nxt_slow_path(lib == NULL)) { 594 nxt_unit_alert(NULL, "failed to allocate unit struct"); 595 596 return NULL; 597 } 598 599 rc = pthread_mutex_init(&lib->mutex, NULL); 600 if (nxt_slow_path(rc != 0)) { 601 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc); 602 603 goto fail; 604 } 605 606 lib->unit.data = init->data; 607 lib->callbacks = init->callbacks; 608 609 lib->request_data_size = init->request_data_size; 610 lib->shm_mmap_limit = (init->shm_limit + PORT_MMAP_DATA_SIZE - 1) 611 / PORT_MMAP_DATA_SIZE; 612 lib->request_limit = init->request_limit; 613 614 lib->processes.slot = NULL; 615 lib->ports.slot = NULL; 616 617 lib->log_fd = STDERR_FILENO; 618 619 nxt_queue_init(&lib->contexts); 620 621 lib->use_count = 0; 622 lib->request_count = 0; 623 lib->router_port = NULL; 624 lib->shared_port = NULL; 625 626 rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data); 627 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 628 pthread_mutex_destroy(&lib->mutex); 629 goto fail; 630 } 631 632 cb = 
&lib->callbacks; 633 634 if (cb->request_handler == NULL) { 635 nxt_unit_alert(NULL, "request_handler is NULL"); 636 637 pthread_mutex_destroy(&lib->mutex); 638 goto fail; 639 } 640 641 nxt_unit_mmaps_init(&lib->incoming); 642 nxt_unit_mmaps_init(&lib->outgoing); 643 644 return lib; 645 646fail: 647 648 nxt_unit_free(NULL, lib); 649 650 return NULL; 651} 652 653 654static int 655nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, 656 void *data) 657{ 658 int rc; 659 660 ctx_impl->ctx.data = data; 661 ctx_impl->ctx.unit = &lib->unit; 662 663 rc = pthread_mutex_init(&ctx_impl->mutex, NULL); 664 if (nxt_slow_path(rc != 0)) { 665 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc); 666 667 return NXT_UNIT_ERROR; 668 } 669 670 nxt_unit_lib_use(lib); 671 672 pthread_mutex_lock(&lib->mutex); 673 674 nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link); 675 676 pthread_mutex_unlock(&lib->mutex); 677 678 ctx_impl->use_count = 1; 679 ctx_impl->wait_items = 0; 680 ctx_impl->online = 1; 681 ctx_impl->ready = 0; 682 ctx_impl->quit_param = NXT_QUIT_GRACEFUL; 683 684 nxt_queue_init(&ctx_impl->free_req); 685 nxt_queue_init(&ctx_impl->free_ws); 686 nxt_queue_init(&ctx_impl->active_req); 687 nxt_queue_init(&ctx_impl->ready_req); 688 nxt_queue_init(&ctx_impl->pending_rbuf); 689 nxt_queue_init(&ctx_impl->free_rbuf); 690 691 ctx_impl->free_buf = NULL; 692 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]); 693 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[0]); 694 695 nxt_queue_insert_tail(&ctx_impl->free_req, &ctx_impl->req.link); 696 nxt_queue_insert_tail(&ctx_impl->free_rbuf, &ctx_impl->ctx_read_buf.link); 697 698 ctx_impl->ctx_read_buf.ctx_impl = ctx_impl; 699 700 ctx_impl->req.req.ctx = &ctx_impl->ctx; 701 ctx_impl->req.req.unit = &lib->unit; 702 703 ctx_impl->read_port = NULL; 704 ctx_impl->requests.slot = 0; 705 706 return NXT_UNIT_OK; 707} 708 709 710nxt_inline void 711nxt_unit_ctx_use(nxt_unit_ctx_t *ctx) 712{ 
713 nxt_unit_ctx_impl_t *ctx_impl; 714 715 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 716 717 nxt_atomic_fetch_add(&ctx_impl->use_count, 1); 718} 719 720 721nxt_inline void 722nxt_unit_ctx_release(nxt_unit_ctx_t *ctx) 723{ 724 long c; 725 nxt_unit_ctx_impl_t *ctx_impl; 726 727 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 728 729 c = nxt_atomic_fetch_add(&ctx_impl->use_count, -1); 730 731 if (c == 1) { 732 nxt_unit_ctx_free(ctx_impl); 733 } 734} 735 736 737nxt_inline void 738nxt_unit_lib_use(nxt_unit_impl_t *lib) 739{ 740 nxt_atomic_fetch_add(&lib->use_count, 1); 741} 742 743 744nxt_inline void 745nxt_unit_lib_release(nxt_unit_impl_t *lib) 746{ 747 long c; 748 nxt_unit_process_t *process; 749 750 c = nxt_atomic_fetch_add(&lib->use_count, -1); 751 752 if (c == 1) { 753 for ( ;; ) { 754 pthread_mutex_lock(&lib->mutex); 755 756 process = nxt_unit_process_pop_first(lib); 757 if (process == NULL) { 758 pthread_mutex_unlock(&lib->mutex); 759 760 break; 761 } 762 763 nxt_unit_remove_process(lib, process); 764 } 765 766 pthread_mutex_destroy(&lib->mutex); 767 768 if (nxt_fast_path(lib->router_port != NULL)) { 769 nxt_unit_port_release(lib->router_port); 770 } 771 772 if (nxt_fast_path(lib->shared_port != NULL)) { 773 nxt_unit_port_release(lib->shared_port); 774 } 775 776 nxt_unit_mmaps_destroy(&lib->incoming); 777 nxt_unit_mmaps_destroy(&lib->outgoing); 778 779 nxt_unit_free(NULL, lib); 780 } 781} 782 783 784nxt_inline void 785nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head, 786 nxt_unit_mmap_buf_t *mmap_buf) 787{ 788 mmap_buf->next = *head; 789 790 if (mmap_buf->next != NULL) { 791 mmap_buf->next->prev = &mmap_buf->next; 792 } 793 794 *head = mmap_buf; 795 mmap_buf->prev = head; 796} 797 798 799nxt_inline void 800nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev, 801 nxt_unit_mmap_buf_t *mmap_buf) 802{ 803 while (*prev != NULL) { 804 prev = &(*prev)->next; 805 } 806 807 nxt_unit_mmap_buf_insert(prev, mmap_buf); 808} 809 810 
811nxt_inline void 812nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf) 813{ 814 nxt_unit_mmap_buf_t **prev; 815 816 prev = mmap_buf->prev; 817 818 if (mmap_buf->next != NULL) { 819 mmap_buf->next->prev = prev; 820 } 821 822 if (prev != NULL) { 823 *prev = mmap_buf->next; 824 } 825} 826 827 828static int 829nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port, 830 nxt_unit_port_t *read_port, int *shared_port_fd, int *shared_queue_fd, 831 int *log_fd, uint32_t *stream, 832 uint32_t *shm_limit, uint32_t *request_limit) 833{ 834 int rc; 835 int ready_fd, router_fd, read_in_fd, read_out_fd; 836 char *unit_init, *version_end, *vars; 837 size_t version_length; 838 int64_t ready_pid, router_pid, read_pid; 839 uint32_t ready_stream, router_id, ready_id, read_id; 840 841 unit_init = getenv(NXT_UNIT_INIT_ENV); 842 if (nxt_slow_path(unit_init == NULL)) { 843 nxt_unit_alert(NULL, "%s is not in the current environment", 844 NXT_UNIT_INIT_ENV); 845 846 return NXT_UNIT_ERROR; 847 } 848 849 version_end = strchr(unit_init, ';'); 850 if (nxt_slow_path(version_end == NULL)) { 851 nxt_unit_alert(NULL, "Unit version not found in %s=\"%s\"", 852 NXT_UNIT_INIT_ENV, unit_init); 853 854 return NXT_UNIT_ERROR; 855 } 856 857 version_length = version_end - unit_init; 858 859 rc = version_length != nxt_length(NXT_VERSION) 860 || memcmp(unit_init, NXT_VERSION, nxt_length(NXT_VERSION)); 861 862 if (nxt_slow_path(rc != 0)) { 863 nxt_unit_alert(NULL, "versions mismatch: the Unit daemon has version " 864 "%.*s, while the app was compiled with libunit %s", 865 (int) version_length, unit_init, NXT_VERSION); 866 867 return NXT_UNIT_ERROR; 868 } 869 870 vars = version_end + 1; 871 872 rc = sscanf(vars, 873 "%"PRIu32";" 874 "%"PRId64",%"PRIu32",%d;" 875 "%"PRId64",%"PRIu32",%d;" 876 "%"PRId64",%"PRIu32",%d,%d;" 877 "%d,%d;" 878 "%d,%"PRIu32",%"PRIu32, 879 &ready_stream, 880 &ready_pid, &ready_id, &ready_fd, 881 &router_pid, &router_id, &router_fd, 882 &read_pid, &read_id, 
&read_in_fd, &read_out_fd, 883 shared_port_fd, shared_queue_fd, 884 log_fd, shm_limit, request_limit); 885 886 if (nxt_slow_path(rc == EOF)) { 887 nxt_unit_alert(NULL, "sscanf(%s) failed: %s (%d) for %s env", 888 vars, strerror(errno), errno, NXT_UNIT_INIT_ENV); 889 890 return NXT_UNIT_ERROR; 891 } 892 893 if (nxt_slow_path(rc != 16)) { 894 nxt_unit_alert(NULL, "invalid number of variables in %s env: " 895 "found %d of %d in %s", NXT_UNIT_INIT_ENV, rc, 16, vars); 896 897 return NXT_UNIT_ERROR; 898 } 899 900 nxt_unit_debug(NULL, "%s='%s'", NXT_UNIT_INIT_ENV, unit_init); 901 902 nxt_unit_port_id_init(&ready_port->id, (pid_t) ready_pid, ready_id); 903 904 ready_port->in_fd = -1; 905 ready_port->out_fd = ready_fd; 906 ready_port->data = NULL; 907 908 nxt_unit_port_id_init(&router_port->id, (pid_t) router_pid, router_id); 909 910 router_port->in_fd = -1; 911 router_port->out_fd = router_fd; 912 router_port->data = NULL; 913 914 nxt_unit_port_id_init(&read_port->id, (pid_t) read_pid, read_id); 915 916 read_port->in_fd = read_in_fd; 917 read_port->out_fd = read_out_fd; 918 read_port->data = NULL; 919 920 *stream = ready_stream; 921 922 return NXT_UNIT_OK; 923} 924 925 926static int 927nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream, int queue_fd) 928{ 929 ssize_t res; 930 nxt_send_oob_t oob; 931 nxt_port_msg_t msg; 932 nxt_unit_impl_t *lib; 933 int fds[2] = {queue_fd, -1}; 934 935 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 936 937 msg.stream = stream; 938 msg.pid = lib->pid; 939 msg.reply_port = 0; 940 msg.type = _NXT_PORT_MSG_PROCESS_READY; 941 msg.last = 1; 942 msg.mmap = 0; 943 msg.nf = 0; 944 msg.mf = 0; 945 946 nxt_socket_msg_oob_init(&oob, fds); 947 948 res = nxt_unit_sendmsg(ctx, ready_fd, &msg, sizeof(msg), &oob); 949 if (res != sizeof(msg)) { 950 return NXT_UNIT_ERROR; 951 } 952 953 return NXT_UNIT_OK; 954} 955 956 957static int 958nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf, 959 nxt_unit_request_info_t 
**preq) 960{ 961 int rc; 962 pid_t pid; 963 uint8_t quit_param; 964 nxt_port_msg_t *port_msg; 965 nxt_unit_impl_t *lib; 966 nxt_unit_recv_msg_t recv_msg; 967 968 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 969 970 recv_msg.incoming_buf = NULL; 971 recv_msg.fd[0] = -1; 972 recv_msg.fd[1] = -1; 973 974 rc = nxt_socket_msg_oob_get_fds(&rbuf->oob, recv_msg.fd); 975 if (nxt_slow_path(rc != NXT_OK)) { 976 nxt_unit_alert(ctx, "failed to receive file descriptor over cmsg"); 977 rc = NXT_UNIT_ERROR; 978 goto done; 979 } 980 981 if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) { 982 if (nxt_slow_path(rbuf->size == 0)) { 983 nxt_unit_debug(ctx, "read port closed"); 984 985 nxt_unit_quit(ctx, NXT_QUIT_GRACEFUL); 986 rc = NXT_UNIT_OK; 987 goto done; 988 } 989 990 nxt_unit_alert(ctx, "message too small (%d bytes)", (int) rbuf->size); 991 992 rc = NXT_UNIT_ERROR; 993 goto done; 994 } 995 996 port_msg = (nxt_port_msg_t *) rbuf->buf; 997 998 nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd[0] %d fd[1] %d", 999 port_msg->stream, (int) port_msg->type, 1000 recv_msg.fd[0], recv_msg.fd[1]); 1001 1002 recv_msg.stream = port_msg->stream; 1003 recv_msg.pid = port_msg->pid; 1004 recv_msg.reply_port = port_msg->reply_port; 1005 recv_msg.last = port_msg->last; 1006 recv_msg.mmap = port_msg->mmap; 1007 1008 recv_msg.start = port_msg + 1; 1009 recv_msg.size = rbuf->size - sizeof(nxt_port_msg_t); 1010 1011 if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) { 1012 nxt_unit_alert(ctx, "#%"PRIu32": unknown message type (%d)", 1013 port_msg->stream, (int) port_msg->type); 1014 rc = NXT_UNIT_ERROR; 1015 goto done; 1016 } 1017 1018 /* Fragmentation is unsupported. 
*/ 1019 if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) { 1020 nxt_unit_alert(ctx, "#%"PRIu32": fragmented message type (%d)", 1021 port_msg->stream, (int) port_msg->type); 1022 rc = NXT_UNIT_ERROR; 1023 goto done; 1024 } 1025 1026 if (port_msg->mmap) { 1027 rc = nxt_unit_mmap_read(ctx, &recv_msg, rbuf); 1028 1029 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 1030 if (rc == NXT_UNIT_AGAIN) { 1031 recv_msg.fd[0] = -1; 1032 recv_msg.fd[1] = -1; 1033 } 1034 1035 goto done; 1036 } 1037 } 1038 1039 switch (port_msg->type) { 1040 1041 case _NXT_PORT_MSG_RPC_READY: 1042 rc = NXT_UNIT_OK; 1043 break; 1044 1045 case _NXT_PORT_MSG_QUIT: 1046 if (recv_msg.size == sizeof(quit_param)) { 1047 memcpy(&quit_param, recv_msg.start, sizeof(quit_param)); 1048 1049 } else { 1050 quit_param = NXT_QUIT_NORMAL; 1051 } 1052 1053 nxt_unit_debug(ctx, "#%"PRIu32": %squit", port_msg->stream, 1054 (quit_param == NXT_QUIT_GRACEFUL ? "graceful " : "")); 1055 1056 nxt_unit_quit(ctx, quit_param); 1057 1058 rc = NXT_UNIT_OK; 1059 break; 1060 1061 case _NXT_PORT_MSG_NEW_PORT: 1062 rc = nxt_unit_process_new_port(ctx, &recv_msg); 1063 break; 1064 1065 case _NXT_PORT_MSG_PORT_ACK: 1066 rc = nxt_unit_ctx_ready(ctx); 1067 break; 1068 1069 case _NXT_PORT_MSG_CHANGE_FILE: 1070 nxt_unit_debug(ctx, "#%"PRIu32": change_file: fd %d", 1071 port_msg->stream, recv_msg.fd[0]); 1072 1073 if (dup2(recv_msg.fd[0], lib->log_fd) == -1) { 1074 nxt_unit_alert(ctx, "#%"PRIu32": dup2(%d, %d) failed: %s (%d)", 1075 port_msg->stream, recv_msg.fd[0], lib->log_fd, 1076 strerror(errno), errno); 1077 1078 rc = NXT_UNIT_ERROR; 1079 goto done; 1080 } 1081 1082 rc = NXT_UNIT_OK; 1083 break; 1084 1085 case _NXT_PORT_MSG_MMAP: 1086 if (nxt_slow_path(recv_msg.fd[0] < 0)) { 1087 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap", 1088 port_msg->stream, recv_msg.fd[0]); 1089 1090 rc = NXT_UNIT_ERROR; 1091 goto done; 1092 } 1093 1094 rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd[0]); 1095 break; 1096 1097 case 
_NXT_PORT_MSG_REQ_HEADERS: 1098 rc = nxt_unit_process_req_headers(ctx, &recv_msg, preq); 1099 break; 1100 1101 case _NXT_PORT_MSG_REQ_BODY: 1102 rc = nxt_unit_process_req_body(ctx, &recv_msg); 1103 break; 1104 1105 case _NXT_PORT_MSG_WEBSOCKET: 1106 rc = nxt_unit_process_websocket(ctx, &recv_msg); 1107 break; 1108 1109 case _NXT_PORT_MSG_REMOVE_PID: 1110 if (nxt_slow_path(recv_msg.size != sizeof(pid))) { 1111 nxt_unit_alert(ctx, "#%"PRIu32": remove_pid: invalid message size " 1112 "(%d != %d)", port_msg->stream, (int) recv_msg.size, 1113 (int) sizeof(pid)); 1114 1115 rc = NXT_UNIT_ERROR; 1116 goto done; 1117 } 1118 1119 memcpy(&pid, recv_msg.start, sizeof(pid)); 1120 1121 nxt_unit_debug(ctx, "#%"PRIu32": remove_pid: %d", 1122 port_msg->stream, (int) pid); 1123 1124 nxt_unit_remove_pid(lib, pid); 1125 1126 rc = NXT_UNIT_OK; 1127 break; 1128 1129 case _NXT_PORT_MSG_SHM_ACK: 1130 rc = nxt_unit_process_shm_ack(ctx); 1131 break; 1132 1133 default: 1134 nxt_unit_alert(ctx, "#%"PRIu32": ignore message type: %d", 1135 port_msg->stream, (int) port_msg->type); 1136 1137 rc = NXT_UNIT_ERROR; 1138 goto done; 1139 } 1140 1141done: 1142 1143 if (recv_msg.fd[0] != -1) { 1144 nxt_unit_close(recv_msg.fd[0]); 1145 } 1146 1147 if (recv_msg.fd[1] != -1) { 1148 nxt_unit_close(recv_msg.fd[1]); 1149 } 1150 1151 while (recv_msg.incoming_buf != NULL) { 1152 nxt_unit_mmap_buf_free(recv_msg.incoming_buf); 1153 } 1154 1155 if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) { 1156#if (NXT_DEBUG) 1157 memset(rbuf->buf, 0xAC, rbuf->size); 1158#endif 1159 nxt_unit_read_buf_release(ctx, rbuf); 1160 } 1161 1162 return rc; 1163} 1164 1165 1166static int 1167nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) 1168{ 1169 void *mem; 1170 nxt_unit_port_t new_port, *port; 1171 nxt_port_msg_new_port_t *new_port_msg; 1172 1173 if (nxt_slow_path(recv_msg->size != sizeof(nxt_port_msg_new_port_t))) { 1174 nxt_unit_warn(ctx, "#%"PRIu32": new_port: " 1175 "invalid message size (%d)", 1176 
recv_msg->stream, (int) recv_msg->size); 1177 1178 return NXT_UNIT_ERROR; 1179 } 1180 1181 if (nxt_slow_path(recv_msg->fd[0] < 0)) { 1182 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for new port", 1183 recv_msg->stream, recv_msg->fd[0]); 1184 1185 return NXT_UNIT_ERROR; 1186 } 1187 1188 new_port_msg = recv_msg->start; 1189 1190 nxt_unit_debug(ctx, "#%"PRIu32": new_port: port{%d,%d} fd[0] %d fd[1] %d", 1191 recv_msg->stream, (int) new_port_msg->pid, 1192 (int) new_port_msg->id, recv_msg->fd[0], recv_msg->fd[1]); 1193 1194 if (nxt_slow_path(nxt_unit_fd_blocking(recv_msg->fd[0]) != NXT_UNIT_OK)) { 1195 return NXT_UNIT_ERROR; 1196 } 1197 1198 nxt_unit_port_id_init(&new_port.id, new_port_msg->pid, new_port_msg->id); 1199 1200 new_port.in_fd = -1; 1201 new_port.out_fd = recv_msg->fd[0]; 1202 1203 mem = mmap(NULL, sizeof(nxt_port_queue_t), PROT_READ | PROT_WRITE, 1204 MAP_SHARED, recv_msg->fd[1], 0); 1205 1206 if (nxt_slow_path(mem == MAP_FAILED)) { 1207 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", recv_msg->fd[1], 1208 strerror(errno), errno); 1209 1210 return NXT_UNIT_ERROR; 1211 } 1212 1213 new_port.data = NULL; 1214 1215 recv_msg->fd[0] = -1; 1216 1217 port = nxt_unit_add_port(ctx, &new_port, mem); 1218 if (nxt_slow_path(port == NULL)) { 1219 return NXT_UNIT_ERROR; 1220 } 1221 1222 nxt_unit_port_release(port); 1223 1224 return NXT_UNIT_OK; 1225} 1226 1227 1228static int 1229nxt_unit_ctx_ready(nxt_unit_ctx_t *ctx) 1230{ 1231 nxt_unit_impl_t *lib; 1232 nxt_unit_ctx_impl_t *ctx_impl; 1233 1234 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1235 1236 if (nxt_slow_path(ctx_impl->ready)) { 1237 return NXT_UNIT_OK; 1238 } 1239 1240 ctx_impl->ready = 1; 1241 1242 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1243 1244 /* Call ready_handler() only for main context. 
*/ 1245 if (&lib->main_ctx == ctx_impl && lib->callbacks.ready_handler != NULL) { 1246 return lib->callbacks.ready_handler(ctx); 1247 } 1248 1249 if (&lib->main_ctx != ctx_impl) { 1250 /* Check if the main context is already stopped or quit. */ 1251 if (nxt_slow_path(!lib->main_ctx.ready)) { 1252 ctx_impl->ready = 0; 1253 1254 nxt_unit_quit(ctx, lib->main_ctx.quit_param); 1255 1256 return NXT_UNIT_OK; 1257 } 1258 1259 if (lib->callbacks.add_port != NULL) { 1260 lib->callbacks.add_port(ctx, lib->shared_port); 1261 } 1262 } 1263 1264 return NXT_UNIT_OK; 1265} 1266 1267 1268static int 1269nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, 1270 nxt_unit_request_info_t **preq) 1271{ 1272 int res; 1273 nxt_unit_impl_t *lib; 1274 nxt_unit_port_id_t port_id; 1275 nxt_unit_request_t *r; 1276 nxt_unit_mmap_buf_t *b; 1277 nxt_unit_request_info_t *req; 1278 nxt_unit_request_info_impl_t *req_impl; 1279 1280 if (nxt_slow_path(recv_msg->mmap == 0)) { 1281 nxt_unit_warn(ctx, "#%"PRIu32": data is not in shared memory", 1282 recv_msg->stream); 1283 1284 return NXT_UNIT_ERROR; 1285 } 1286 1287 if (nxt_slow_path(recv_msg->size < sizeof(nxt_unit_request_t))) { 1288 nxt_unit_warn(ctx, "#%"PRIu32": data too short: %d while at least " 1289 "%d expected", recv_msg->stream, (int) recv_msg->size, 1290 (int) sizeof(nxt_unit_request_t)); 1291 1292 return NXT_UNIT_ERROR; 1293 } 1294 1295 req_impl = nxt_unit_request_info_get(ctx); 1296 if (nxt_slow_path(req_impl == NULL)) { 1297 nxt_unit_warn(ctx, "#%"PRIu32": request info allocation failed", 1298 recv_msg->stream); 1299 1300 return NXT_UNIT_ERROR; 1301 } 1302 1303 req = &req_impl->req; 1304 1305 req->request = recv_msg->start; 1306 1307 b = recv_msg->incoming_buf; 1308 1309 req->request_buf = &b->buf; 1310 req->response = NULL; 1311 req->response_buf = NULL; 1312 1313 r = req->request; 1314 1315 req->content_length = r->content_length; 1316 1317 req->content_buf = req->request_buf; 1318 req->content_buf->free = 
nxt_unit_sptr_get(&r->preread_content); 1319 1320 req_impl->stream = recv_msg->stream; 1321 1322 req_impl->outgoing_buf = NULL; 1323 1324 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) { 1325 b->req = req; 1326 } 1327 1328 /* "Move" incoming buffer list to req_impl. */ 1329 req_impl->incoming_buf = recv_msg->incoming_buf; 1330 req_impl->incoming_buf->prev = &req_impl->incoming_buf; 1331 recv_msg->incoming_buf = NULL; 1332 1333 req->content_fd = recv_msg->fd[0]; 1334 recv_msg->fd[0] = -1; 1335 1336 req->response_max_fields = 0; 1337 req_impl->state = NXT_UNIT_RS_START; 1338 req_impl->websocket = 0; 1339 req_impl->in_hash = 0; 1340 1341 nxt_unit_debug(ctx, "#%"PRIu32": %.*s %.*s (%d)", recv_msg->stream, 1342 (int) r->method_length, 1343 (char *) nxt_unit_sptr_get(&r->method), 1344 (int) r->target_length, 1345 (char *) nxt_unit_sptr_get(&r->target), 1346 (int) r->content_length); 1347 1348 nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port); 1349 1350 res = nxt_unit_request_check_response_port(req, &port_id); 1351 if (nxt_slow_path(res == NXT_UNIT_ERROR)) { 1352 return NXT_UNIT_ERROR; 1353 } 1354 1355 if (nxt_fast_path(res == NXT_UNIT_OK)) { 1356 res = nxt_unit_send_req_headers_ack(req); 1357 if (nxt_slow_path(res == NXT_UNIT_ERROR)) { 1358 nxt_unit_request_done(req, NXT_UNIT_ERROR); 1359 1360 return NXT_UNIT_ERROR; 1361 } 1362 1363 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1364 1365 if (req->content_length 1366 > (uint64_t) (req->content_buf->end - req->content_buf->free)) 1367 { 1368 res = nxt_unit_request_hash_add(ctx, req); 1369 if (nxt_slow_path(res != NXT_UNIT_OK)) { 1370 nxt_unit_req_warn(req, "failed to add request to hash"); 1371 1372 nxt_unit_request_done(req, NXT_UNIT_ERROR); 1373 1374 return NXT_UNIT_ERROR; 1375 } 1376 1377 /* 1378 * If application have separate data handler, we may start 1379 * request processing and process data when it is arrived. 
1380 */ 1381 if (lib->callbacks.data_handler == NULL) { 1382 return NXT_UNIT_OK; 1383 } 1384 } 1385 1386 if (preq == NULL) { 1387 lib->callbacks.request_handler(req); 1388 1389 } else { 1390 *preq = req; 1391 } 1392 } 1393 1394 return NXT_UNIT_OK; 1395} 1396 1397 1398static int 1399nxt_unit_process_req_body(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) 1400{ 1401 uint64_t l; 1402 nxt_unit_impl_t *lib; 1403 nxt_unit_mmap_buf_t *b; 1404 nxt_unit_request_info_t *req; 1405 1406 req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last); 1407 if (req == NULL) { 1408 return NXT_UNIT_OK; 1409 } 1410 1411 l = req->content_buf->end - req->content_buf->free; 1412 1413 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) { 1414 b->req = req; 1415 l += b->buf.end - b->buf.free; 1416 } 1417 1418 if (recv_msg->incoming_buf != NULL) { 1419 b = nxt_container_of(req->content_buf, nxt_unit_mmap_buf_t, buf); 1420 1421 while (b->next != NULL) { 1422 b = b->next; 1423 } 1424 1425 /* "Move" incoming buffer list to req_impl. 
*/
        b->next = recv_msg->incoming_buf;
        b->next->prev = &b->next;

        recv_msg->incoming_buf = NULL;
    }

    /*
     * Ownership of the body fd (if any) moves from the received message
     * to the request; clearing recv_msg->fd[0] prevents a double close.
     */
    req->content_fd = recv_msg->fd[0];
    recv_msg->fd[0] = -1;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    if (lib->callbacks.data_handler != NULL) {
        lib->callbacks.data_handler(req);

        return NXT_UNIT_OK;
    }

    /* Without a data_handler, invoke request_handler only for a complete
       body: either it arrived as an fd, or all bytes are already in. */
    if (req->content_fd != -1 || l == req->content_length) {
        lib->callbacks.request_handler(req);
    }

    return NXT_UNIT_OK;
}


/*
 * Resolve the port the response for 'req' should be sent to.
 *
 * Returns NXT_UNIT_OK when a ready port is found (req->response_port set),
 * NXT_UNIT_AGAIN when the port exists but is not ready yet (or had to be
 * created and requested from the router) -- in that case the request is
 * parked on the port's awaiting_req queue and ctx wait_items is bumped --
 * or NXT_UNIT_ERROR on allocation/lookup failure.
 *
 * All port hash / process list manipulation happens under lib->mutex.
 */
static int
nxt_unit_request_check_response_port(nxt_unit_request_info_t *req,
    nxt_unit_port_id_t *port_id)
{
    int                           res;
    nxt_unit_ctx_t                *ctx;
    nxt_unit_impl_t               *lib;
    nxt_unit_port_t               *port;
    nxt_unit_process_t            *process;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_port_impl_t          *port_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx = req->ctx;
    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&lib->mutex);

    port = nxt_unit_port_hash_find(&lib->ports, port_id, 0);
    /* Valid only if port != NULL; 'port' is the first member of the impl,
       so the container_of result is checked via 'port' below. */
    port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);

    if (nxt_fast_path(port != NULL)) {
        req->response_port = port;

        if (nxt_fast_path(port_impl->ready)) {
            pthread_mutex_unlock(&lib->mutex);

            nxt_unit_debug(ctx, "check_response_port: found port{%d,%d}",
                           (int) port->id.pid, (int) port->id.id);

            return NXT_UNIT_OK;
        }

        nxt_unit_debug(ctx, "check_response_port: "
                       "port{%d,%d} already requested",
                       (int) port->id.pid, (int) port->id.id);

        req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

        /* Park the request until the port becomes ready. */
        nxt_queue_insert_tail(&port_impl->awaiting_req,
                              &req_impl->port_wait_link);

        pthread_mutex_unlock(&lib->mutex);

        nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);

        return NXT_UNIT_AGAIN;
    }

    /* Port is unknown: create a placeholder entry and ask for the fd. */
    port_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
    if (nxt_slow_path(port_impl == NULL)) {
        nxt_unit_alert(ctx, "check_response_port: malloc(%d) failed",
                       (int) sizeof(nxt_unit_port_impl_t));

        pthread_mutex_unlock(&lib->mutex);

        return NXT_UNIT_ERROR;
    }

    port = &port_impl->port;

    port->id = *port_id;
    port->in_fd = -1;
    port->out_fd = -1;
    port->data = NULL;

    res = nxt_unit_port_hash_add(&lib->ports, port);
    if (nxt_slow_path(res != NXT_UNIT_OK)) {
        nxt_unit_alert(ctx, "check_response_port: %d,%d hash_add failed",
                       port->id.pid, port->id.id);

        pthread_mutex_unlock(&lib->mutex);

        /* 'port' is the first member of port_impl; this frees port_impl. */
        nxt_unit_free(ctx, port);

        return NXT_UNIT_ERROR;
    }

    process = nxt_unit_process_find(lib, port_id->pid, 0);
    if (nxt_slow_path(process == NULL)) {
        nxt_unit_alert(ctx, "check_response_port: process %d not found",
                       port->id.pid);

        /* Remove the just-added hash entry (find with remove flag). */
        nxt_unit_port_hash_find(&lib->ports, port_id, 1);

        pthread_mutex_unlock(&lib->mutex);

        nxt_unit_free(ctx, port);

        return NXT_UNIT_ERROR;
    }

    nxt_queue_insert_tail(&process->ports, &port_impl->link);

    port_impl->process = process;
    port_impl->queue = NULL;
    port_impl->from_socket = 0;
    port_impl->socket_rbuf = NULL;

    nxt_queue_init(&port_impl->awaiting_req);

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    nxt_queue_insert_tail(&port_impl->awaiting_req, &req_impl->port_wait_link);

    /* One reference for the hash, one for this request. */
    port_impl->use_count = 2;
    port_impl->ready = 0;

    req->response_port = port;

    pthread_mutex_unlock(&lib->mutex);

    /* Ask the router for the port fd; completion will flush awaiting_req. */
    res = nxt_unit_get_port(ctx, port_id);
    if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
        return NXT_UNIT_ERROR;
    }

    nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);

    return NXT_UNIT_AGAIN;
}

1575static int 1576nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req) 1577{ 1578 ssize_t res; 1579 nxt_port_msg_t msg; 1580 nxt_unit_impl_t *lib; 1581 nxt_unit_ctx_impl_t *ctx_impl; 1582 nxt_unit_request_info_impl_t *req_impl; 1583 1584 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit); 1585 ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx); 1586 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1587 1588 memset(&msg, 0, sizeof(nxt_port_msg_t)); 1589 1590 msg.stream = req_impl->stream; 1591 msg.pid = lib->pid; 1592 msg.reply_port = ctx_impl->read_port->id.id; 1593 msg.type = _NXT_PORT_MSG_REQ_HEADERS_ACK; 1594 1595 res = nxt_unit_port_send(req->ctx, req->response_port, 1596 &msg, sizeof(msg), NULL); 1597 if (nxt_slow_path(res != sizeof(msg))) { 1598 return NXT_UNIT_ERROR; 1599 } 1600 1601 return NXT_UNIT_OK; 1602} 1603 1604 1605static int 1606nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) 1607{ 1608 size_t hsize; 1609 nxt_unit_impl_t *lib; 1610 nxt_unit_mmap_buf_t *b; 1611 nxt_unit_callbacks_t *cb; 1612 nxt_unit_request_info_t *req; 1613 nxt_unit_request_info_impl_t *req_impl; 1614 nxt_unit_websocket_frame_impl_t *ws_impl; 1615 1616 req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last); 1617 if (nxt_slow_path(req == NULL)) { 1618 return NXT_UNIT_OK; 1619 } 1620 1621 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1622 1623 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1624 cb = &lib->callbacks; 1625 1626 if (cb->websocket_handler && recv_msg->size >= 2) { 1627 ws_impl = nxt_unit_websocket_frame_get(ctx); 1628 if (nxt_slow_path(ws_impl == NULL)) { 1629 nxt_unit_warn(ctx, "#%"PRIu32": websocket frame allocation failed", 1630 req_impl->stream); 1631 1632 return NXT_UNIT_ERROR; 1633 } 1634 1635 ws_impl->ws.req = req; 1636 1637 ws_impl->buf = NULL; 1638 1639 if (recv_msg->mmap) { 1640 for (b = recv_msg->incoming_buf; b != NULL; b = 
b->next) { 1641 b->req = req; 1642 } 1643 1644 /* "Move" incoming buffer list to ws_impl. */ 1645 ws_impl->buf = recv_msg->incoming_buf; 1646 ws_impl->buf->prev = &ws_impl->buf; 1647 recv_msg->incoming_buf = NULL; 1648 1649 b = ws_impl->buf; 1650 1651 } else { 1652 b = nxt_unit_mmap_buf_get(ctx); 1653 if (nxt_slow_path(b == NULL)) { 1654 nxt_unit_alert(ctx, "#%"PRIu32": failed to allocate buf", 1655 req_impl->stream); 1656 1657 nxt_unit_websocket_frame_release(&ws_impl->ws); 1658 1659 return NXT_UNIT_ERROR; 1660 } 1661 1662 b->req = req; 1663 b->buf.start = recv_msg->start; 1664 b->buf.free = b->buf.start; 1665 b->buf.end = b->buf.start + recv_msg->size; 1666 1667 nxt_unit_mmap_buf_insert(&ws_impl->buf, b); 1668 } 1669 1670 ws_impl->ws.header = (void *) b->buf.start; 1671 ws_impl->ws.payload_len = nxt_websocket_frame_payload_len( 1672 ws_impl->ws.header); 1673 1674 hsize = nxt_websocket_frame_header_size(ws_impl->ws.header); 1675 1676 if (ws_impl->ws.header->mask) { 1677 ws_impl->ws.mask = (uint8_t *) b->buf.start + hsize - 4; 1678 1679 } else { 1680 ws_impl->ws.mask = NULL; 1681 } 1682 1683 b->buf.free += hsize; 1684 1685 ws_impl->ws.content_buf = &b->buf; 1686 ws_impl->ws.content_length = ws_impl->ws.payload_len; 1687 1688 nxt_unit_req_debug(req, "websocket_handler: opcode=%d, " 1689 "payload_len=%"PRIu64, 1690 ws_impl->ws.header->opcode, 1691 ws_impl->ws.payload_len); 1692 1693 cb->websocket_handler(&ws_impl->ws); 1694 } 1695 1696 if (recv_msg->last) { 1697 if (cb->close_handler) { 1698 nxt_unit_req_debug(req, "close_handler"); 1699 1700 cb->close_handler(req); 1701 1702 } else { 1703 nxt_unit_request_done(req, NXT_UNIT_ERROR); 1704 } 1705 } 1706 1707 return NXT_UNIT_OK; 1708} 1709 1710 1711static int 1712nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx) 1713{ 1714 nxt_unit_impl_t *lib; 1715 nxt_unit_callbacks_t *cb; 1716 1717 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1718 cb = &lib->callbacks; 1719 1720 if (cb->shm_ack_handler != NULL) { 1721 
cb->shm_ack_handler(ctx); 1722 } 1723 1724 return NXT_UNIT_OK; 1725} 1726 1727 1728static nxt_unit_request_info_impl_t * 1729nxt_unit_request_info_get(nxt_unit_ctx_t *ctx) 1730{ 1731 nxt_unit_impl_t *lib; 1732 nxt_queue_link_t *lnk; 1733 nxt_unit_ctx_impl_t *ctx_impl; 1734 nxt_unit_request_info_impl_t *req_impl; 1735 1736 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1737 1738 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1739 1740 pthread_mutex_lock(&ctx_impl->mutex); 1741 1742 if (nxt_queue_is_empty(&ctx_impl->free_req)) { 1743 pthread_mutex_unlock(&ctx_impl->mutex); 1744 1745 req_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_request_info_impl_t) 1746 + lib->request_data_size); 1747 if (nxt_slow_path(req_impl == NULL)) { 1748 return NULL; 1749 } 1750 1751 req_impl->req.unit = ctx->unit; 1752 req_impl->req.ctx = ctx; 1753 1754 pthread_mutex_lock(&ctx_impl->mutex); 1755 1756 } else { 1757 lnk = nxt_queue_first(&ctx_impl->free_req); 1758 nxt_queue_remove(lnk); 1759 1760 req_impl = nxt_container_of(lnk, nxt_unit_request_info_impl_t, link); 1761 } 1762 1763 nxt_queue_insert_tail(&ctx_impl->active_req, &req_impl->link); 1764 1765 pthread_mutex_unlock(&ctx_impl->mutex); 1766 1767 req_impl->req.data = lib->request_data_size ? 
req_impl->extra_data : NULL; 1768 1769 return req_impl; 1770} 1771 1772 1773static void 1774nxt_unit_request_info_release(nxt_unit_request_info_t *req) 1775{ 1776 nxt_unit_ctx_t *ctx; 1777 nxt_unit_ctx_impl_t *ctx_impl; 1778 nxt_unit_request_info_impl_t *req_impl; 1779 1780 ctx = req->ctx; 1781 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1782 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1783 1784 req->response = NULL; 1785 req->response_buf = NULL; 1786 1787 if (req_impl->in_hash) { 1788 nxt_unit_request_hash_find(req->ctx, req_impl->stream, 1); 1789 } 1790 1791 while (req_impl->outgoing_buf != NULL) { 1792 nxt_unit_mmap_buf_free(req_impl->outgoing_buf); 1793 } 1794 1795 while (req_impl->incoming_buf != NULL) { 1796 nxt_unit_mmap_buf_free(req_impl->incoming_buf); 1797 } 1798 1799 if (req->content_fd != -1) { 1800 nxt_unit_close(req->content_fd); 1801 1802 req->content_fd = -1; 1803 } 1804 1805 if (req->response_port != NULL) { 1806 nxt_unit_port_release(req->response_port); 1807 1808 req->response_port = NULL; 1809 } 1810 1811 req_impl->state = NXT_UNIT_RS_RELEASED; 1812 1813 pthread_mutex_lock(&ctx_impl->mutex); 1814 1815 nxt_queue_remove(&req_impl->link); 1816 1817 nxt_queue_insert_tail(&ctx_impl->free_req, &req_impl->link); 1818 1819 pthread_mutex_unlock(&ctx_impl->mutex); 1820 1821 if (nxt_slow_path(!nxt_unit_chk_ready(ctx))) { 1822 nxt_unit_quit(ctx, NXT_QUIT_GRACEFUL); 1823 } 1824} 1825 1826 1827static void 1828nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req_impl) 1829{ 1830 nxt_unit_ctx_impl_t *ctx_impl; 1831 1832 ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t, ctx); 1833 1834 nxt_queue_remove(&req_impl->link); 1835 1836 if (req_impl != &ctx_impl->req) { 1837 nxt_unit_free(&ctx_impl->ctx, req_impl); 1838 } 1839} 1840 1841 1842static nxt_unit_websocket_frame_impl_t * 1843nxt_unit_websocket_frame_get(nxt_unit_ctx_t *ctx) 1844{ 1845 nxt_queue_link_t *lnk; 1846 nxt_unit_ctx_impl_t 
*ctx_impl; 1847 nxt_unit_websocket_frame_impl_t *ws_impl; 1848 1849 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1850 1851 pthread_mutex_lock(&ctx_impl->mutex); 1852 1853 if (nxt_queue_is_empty(&ctx_impl->free_ws)) { 1854 pthread_mutex_unlock(&ctx_impl->mutex); 1855 1856 ws_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_websocket_frame_impl_t)); 1857 if (nxt_slow_path(ws_impl == NULL)) { 1858 return NULL; 1859 } 1860 1861 } else { 1862 lnk = nxt_queue_first(&ctx_impl->free_ws); 1863 nxt_queue_remove(lnk); 1864 1865 pthread_mutex_unlock(&ctx_impl->mutex); 1866 1867 ws_impl = nxt_container_of(lnk, nxt_unit_websocket_frame_impl_t, link); 1868 } 1869 1870 ws_impl->ctx_impl = ctx_impl; 1871 1872 return ws_impl; 1873} 1874 1875 1876static void 1877nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws) 1878{ 1879 nxt_unit_websocket_frame_impl_t *ws_impl; 1880 1881 ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws); 1882 1883 while (ws_impl->buf != NULL) { 1884 nxt_unit_mmap_buf_free(ws_impl->buf); 1885 } 1886 1887 ws->req = NULL; 1888 1889 pthread_mutex_lock(&ws_impl->ctx_impl->mutex); 1890 1891 nxt_queue_insert_tail(&ws_impl->ctx_impl->free_ws, &ws_impl->link); 1892 1893 pthread_mutex_unlock(&ws_impl->ctx_impl->mutex); 1894} 1895 1896 1897static void 1898nxt_unit_websocket_frame_free(nxt_unit_ctx_t *ctx, 1899 nxt_unit_websocket_frame_impl_t *ws_impl) 1900{ 1901 nxt_queue_remove(&ws_impl->link); 1902 1903 nxt_unit_free(ctx, ws_impl); 1904} 1905 1906 1907uint16_t 1908nxt_unit_field_hash(const char *name, size_t name_length) 1909{ 1910 u_char ch; 1911 uint32_t hash; 1912 const char *p, *end; 1913 1914 hash = 159406; /* Magic value copied from nxt_http_parse.c */ 1915 end = name + name_length; 1916 1917 for (p = name; p < end; p++) { 1918 ch = *p; 1919 hash = (hash << 4) + hash + nxt_lowcase(ch); 1920 } 1921 1922 hash = (hash >> 16) ^ hash; 1923 1924 return hash; 1925} 1926 1927 1928void 
1929nxt_unit_request_group_dup_fields(nxt_unit_request_info_t *req) 1930{ 1931 char *name; 1932 uint32_t i, j; 1933 nxt_unit_field_t *fields, f; 1934 nxt_unit_request_t *r; 1935 1936 static nxt_str_t content_length = nxt_string("content-length"); 1937 static nxt_str_t content_type = nxt_string("content-type"); 1938 static nxt_str_t cookie = nxt_string("cookie"); 1939 1940 nxt_unit_req_debug(req, "group_dup_fields"); 1941 1942 r = req->request; 1943 fields = r->fields; 1944 1945 for (i = 0; i < r->fields_count; i++) { 1946 name = nxt_unit_sptr_get(&fields[i].name); 1947 1948 switch (fields[i].hash) { 1949 case NXT_UNIT_HASH_CONTENT_LENGTH: 1950 if (fields[i].name_length == content_length.length 1951 && nxt_unit_memcasecmp(name, content_length.start, 1952 content_length.length) == 0) 1953 { 1954 r->content_length_field = i; 1955 } 1956 1957 break; 1958 1959 case NXT_UNIT_HASH_CONTENT_TYPE: 1960 if (fields[i].name_length == content_type.length 1961 && nxt_unit_memcasecmp(name, content_type.start, 1962 content_type.length) == 0) 1963 { 1964 r->content_type_field = i; 1965 } 1966 1967 break; 1968 1969 case NXT_UNIT_HASH_COOKIE: 1970 if (fields[i].name_length == cookie.length 1971 && nxt_unit_memcasecmp(name, cookie.start, 1972 cookie.length) == 0) 1973 { 1974 r->cookie_field = i; 1975 } 1976 1977 break; 1978 } 1979 1980 for (j = i + 1; j < r->fields_count; j++) { 1981 if (fields[i].hash != fields[j].hash 1982 || fields[i].name_length != fields[j].name_length 1983 || nxt_unit_memcasecmp(name, 1984 nxt_unit_sptr_get(&fields[j].name), 1985 fields[j].name_length) != 0) 1986 { 1987 continue; 1988 } 1989 1990 f = fields[j]; 1991 f.value.offset += (j - (i + 1)) * sizeof(f); 1992 1993 while (j > i + 1) { 1994 fields[j] = fields[j - 1]; 1995 fields[j].name.offset -= sizeof(f); 1996 fields[j].value.offset -= sizeof(f); 1997 j--; 1998 } 1999 2000 fields[j] = f; 2001 2002 /* Assign the same name pointer for further grouping simplicity. 
*/ 2003 nxt_unit_sptr_set(&fields[j].name, name); 2004 2005 i++; 2006 } 2007 } 2008} 2009 2010 2011int 2012nxt_unit_response_init(nxt_unit_request_info_t *req, 2013 uint16_t status, uint32_t max_fields_count, uint32_t max_fields_size) 2014{ 2015 uint32_t buf_size; 2016 nxt_unit_buf_t *buf; 2017 nxt_unit_request_info_impl_t *req_impl; 2018 2019 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2020 2021 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2022 nxt_unit_req_warn(req, "init: response already sent"); 2023 2024 return NXT_UNIT_ERROR; 2025 } 2026 2027 nxt_unit_req_debug(req, "init: %d, max fields %d/%d", (int) status, 2028 (int) max_fields_count, (int) max_fields_size); 2029 2030 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT)) { 2031 nxt_unit_req_debug(req, "duplicate response init"); 2032 } 2033 2034 /* 2035 * Each field name and value 0-terminated by libunit, 2036 * this is the reason of '+ 2' below. 2037 */ 2038 buf_size = sizeof(nxt_unit_response_t) 2039 + max_fields_count * (sizeof(nxt_unit_field_t) + 2) 2040 + max_fields_size; 2041 2042 if (nxt_slow_path(req->response_buf != NULL)) { 2043 buf = req->response_buf; 2044 2045 if (nxt_fast_path(buf_size <= (uint32_t) (buf->end - buf->start))) { 2046 goto init_response; 2047 } 2048 2049 nxt_unit_buf_free(buf); 2050 2051 req->response_buf = NULL; 2052 req->response = NULL; 2053 req->response_max_fields = 0; 2054 2055 req_impl->state = NXT_UNIT_RS_START; 2056 } 2057 2058 buf = nxt_unit_response_buf_alloc(req, buf_size); 2059 if (nxt_slow_path(buf == NULL)) { 2060 return NXT_UNIT_ERROR; 2061 } 2062 2063init_response: 2064 2065 memset(buf->start, 0, sizeof(nxt_unit_response_t)); 2066 2067 req->response_buf = buf; 2068 2069 req->response = (nxt_unit_response_t *) buf->start; 2070 req->response->status = status; 2071 2072 buf->free = buf->start + sizeof(nxt_unit_response_t) 2073 + max_fields_count * sizeof(nxt_unit_field_t); 2074 2075 req->response_max_fields 
= max_fields_count; 2076 req_impl->state = NXT_UNIT_RS_RESPONSE_INIT; 2077 2078 return NXT_UNIT_OK; 2079} 2080 2081 2082int 2083nxt_unit_response_realloc(nxt_unit_request_info_t *req, 2084 uint32_t max_fields_count, uint32_t max_fields_size) 2085{ 2086 char *p; 2087 uint32_t i, buf_size; 2088 nxt_unit_buf_t *buf; 2089 nxt_unit_field_t *f, *src; 2090 nxt_unit_response_t *resp; 2091 nxt_unit_request_info_impl_t *req_impl; 2092 2093 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2094 2095 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 2096 nxt_unit_req_warn(req, "realloc: response not init"); 2097 2098 return NXT_UNIT_ERROR; 2099 } 2100 2101 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2102 nxt_unit_req_warn(req, "realloc: response already sent"); 2103 2104 return NXT_UNIT_ERROR; 2105 } 2106 2107 if (nxt_slow_path(max_fields_count < req->response->fields_count)) { 2108 nxt_unit_req_warn(req, "realloc: new max_fields_count is too small"); 2109 2110 return NXT_UNIT_ERROR; 2111 } 2112 2113 /* 2114 * Each field name and value 0-terminated by libunit, 2115 * this is the reason of '+ 2' below. 
2116 */ 2117 buf_size = sizeof(nxt_unit_response_t) 2118 + max_fields_count * (sizeof(nxt_unit_field_t) + 2) 2119 + max_fields_size; 2120 2121 nxt_unit_req_debug(req, "realloc %"PRIu32"", buf_size); 2122 2123 buf = nxt_unit_response_buf_alloc(req, buf_size); 2124 if (nxt_slow_path(buf == NULL)) { 2125 nxt_unit_req_warn(req, "realloc: new buf allocation failed"); 2126 return NXT_UNIT_ERROR; 2127 } 2128 2129 resp = (nxt_unit_response_t *) buf->start; 2130 2131 memset(resp, 0, sizeof(nxt_unit_response_t)); 2132 2133 resp->status = req->response->status; 2134 resp->content_length = req->response->content_length; 2135 2136 p = buf->start + max_fields_count * sizeof(nxt_unit_field_t); 2137 f = resp->fields; 2138 2139 for (i = 0; i < req->response->fields_count; i++) { 2140 src = req->response->fields + i; 2141 2142 if (nxt_slow_path(src->skip != 0)) { 2143 continue; 2144 } 2145 2146 if (nxt_slow_path(src->name_length + src->value_length + 2 2147 > (uint32_t) (buf->end - p))) 2148 { 2149 nxt_unit_req_warn(req, "realloc: not enough space for field" 2150 " #%"PRIu32" (%p), (%"PRIu32" + %"PRIu32") required", 2151 i, src, src->name_length, src->value_length); 2152 2153 goto fail; 2154 } 2155 2156 nxt_unit_sptr_set(&f->name, p); 2157 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->name), src->name_length); 2158 *p++ = '\0'; 2159 2160 nxt_unit_sptr_set(&f->value, p); 2161 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->value), src->value_length); 2162 *p++ = '\0'; 2163 2164 f->hash = src->hash; 2165 f->skip = 0; 2166 f->name_length = src->name_length; 2167 f->value_length = src->value_length; 2168 2169 resp->fields_count++; 2170 f++; 2171 } 2172 2173 if (req->response->piggyback_content_length > 0) { 2174 if (nxt_slow_path(req->response->piggyback_content_length 2175 > (uint32_t) (buf->end - p))) 2176 { 2177 nxt_unit_req_warn(req, "realloc: not enought space for content" 2178 " #%"PRIu32", %"PRIu32" required", 2179 i, req->response->piggyback_content_length); 2180 2181 goto fail; 2182 } 
2183 2184 resp->piggyback_content_length = 2185 req->response->piggyback_content_length; 2186 2187 nxt_unit_sptr_set(&resp->piggyback_content, p); 2188 p = nxt_cpymem(p, nxt_unit_sptr_get(&req->response->piggyback_content), 2189 req->response->piggyback_content_length); 2190 } 2191 2192 buf->free = p; 2193 2194 nxt_unit_buf_free(req->response_buf); 2195 2196 req->response = resp; 2197 req->response_buf = buf; 2198 req->response_max_fields = max_fields_count; 2199 2200 return NXT_UNIT_OK; 2201 2202fail: 2203 2204 nxt_unit_buf_free(buf); 2205 2206 return NXT_UNIT_ERROR; 2207} 2208 2209 2210int 2211nxt_unit_response_is_init(nxt_unit_request_info_t *req) 2212{ 2213 nxt_unit_request_info_impl_t *req_impl; 2214 2215 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2216 2217 return req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT; 2218} 2219 2220 2221int 2222nxt_unit_response_add_field(nxt_unit_request_info_t *req, 2223 const char *name, uint8_t name_length, 2224 const char *value, uint32_t value_length) 2225{ 2226 nxt_unit_buf_t *buf; 2227 nxt_unit_field_t *f; 2228 nxt_unit_response_t *resp; 2229 nxt_unit_request_info_impl_t *req_impl; 2230 2231 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2232 2233 if (nxt_slow_path(req_impl->state != NXT_UNIT_RS_RESPONSE_INIT)) { 2234 nxt_unit_req_warn(req, "add_field: response not initialized or " 2235 "already sent"); 2236 2237 return NXT_UNIT_ERROR; 2238 } 2239 2240 resp = req->response; 2241 2242 if (nxt_slow_path(resp->fields_count >= req->response_max_fields)) { 2243 nxt_unit_req_warn(req, "add_field: too many response fields (%d)", 2244 (int) resp->fields_count); 2245 2246 return NXT_UNIT_ERROR; 2247 } 2248 2249 buf = req->response_buf; 2250 2251 if (nxt_slow_path(name_length + value_length + 2 2252 > (uint32_t) (buf->end - buf->free))) 2253 { 2254 nxt_unit_req_warn(req, "add_field: response buffer overflow"); 2255 2256 return NXT_UNIT_ERROR; 2257 } 2258 2259 nxt_unit_req_debug(req, 
"add_field #%"PRIu32": %.*s: %.*s", 2260 resp->fields_count, 2261 (int) name_length, name, 2262 (int) value_length, value); 2263 2264 f = resp->fields + resp->fields_count; 2265 2266 nxt_unit_sptr_set(&f->name, buf->free); 2267 buf->free = nxt_cpymem(buf->free, name, name_length); 2268 *buf->free++ = '\0'; 2269 2270 nxt_unit_sptr_set(&f->value, buf->free); 2271 buf->free = nxt_cpymem(buf->free, value, value_length); 2272 *buf->free++ = '\0'; 2273 2274 f->hash = nxt_unit_field_hash(name, name_length); 2275 f->skip = 0; 2276 f->name_length = name_length; 2277 f->value_length = value_length; 2278 2279 resp->fields_count++; 2280 2281 return NXT_UNIT_OK; 2282} 2283 2284 2285int 2286nxt_unit_response_add_content(nxt_unit_request_info_t *req, 2287 const void* src, uint32_t size) 2288{ 2289 nxt_unit_buf_t *buf; 2290 nxt_unit_response_t *resp; 2291 nxt_unit_request_info_impl_t *req_impl; 2292 2293 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2294 2295 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 2296 nxt_unit_req_warn(req, "add_content: response not initialized yet"); 2297 2298 return NXT_UNIT_ERROR; 2299 } 2300 2301 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2302 nxt_unit_req_warn(req, "add_content: response already sent"); 2303 2304 return NXT_UNIT_ERROR; 2305 } 2306 2307 buf = req->response_buf; 2308 2309 if (nxt_slow_path(size > (uint32_t) (buf->end - buf->free))) { 2310 nxt_unit_req_warn(req, "add_content: buffer overflow"); 2311 2312 return NXT_UNIT_ERROR; 2313 } 2314 2315 resp = req->response; 2316 2317 if (resp->piggyback_content_length == 0) { 2318 nxt_unit_sptr_set(&resp->piggyback_content, buf->free); 2319 req_impl->state = NXT_UNIT_RS_RESPONSE_HAS_CONTENT; 2320 } 2321 2322 resp->piggyback_content_length += size; 2323 2324 buf->free = nxt_cpymem(buf->free, src, size); 2325 2326 return NXT_UNIT_OK; 2327} 2328 2329 2330int 2331nxt_unit_response_send(nxt_unit_request_info_t *req) 2332{ 2333 int rc; 
2334 nxt_unit_mmap_buf_t *mmap_buf; 2335 nxt_unit_request_info_impl_t *req_impl; 2336 2337 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2338 2339 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 2340 nxt_unit_req_warn(req, "send: response is not initialized yet"); 2341 2342 return NXT_UNIT_ERROR; 2343 } 2344 2345 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2346 nxt_unit_req_warn(req, "send: response already sent"); 2347 2348 return NXT_UNIT_ERROR; 2349 } 2350 2351 if (req->request->websocket_handshake && req->response->status == 101) { 2352 nxt_unit_response_upgrade(req); 2353 } 2354 2355 nxt_unit_req_debug(req, "send: %"PRIu32" fields, %d bytes", 2356 req->response->fields_count, 2357 (int) (req->response_buf->free 2358 - req->response_buf->start)); 2359 2360 mmap_buf = nxt_container_of(req->response_buf, nxt_unit_mmap_buf_t, buf); 2361 2362 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0); 2363 if (nxt_fast_path(rc == NXT_UNIT_OK)) { 2364 req->response = NULL; 2365 req->response_buf = NULL; 2366 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT; 2367 2368 nxt_unit_mmap_buf_free(mmap_buf); 2369 } 2370 2371 return rc; 2372} 2373 2374 2375int 2376nxt_unit_response_is_sent(nxt_unit_request_info_t *req) 2377{ 2378 nxt_unit_request_info_impl_t *req_impl; 2379 2380 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2381 2382 return req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT; 2383} 2384 2385 2386nxt_unit_buf_t * 2387nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size) 2388{ 2389 int rc; 2390 nxt_unit_mmap_buf_t *mmap_buf; 2391 nxt_unit_request_info_impl_t *req_impl; 2392 2393 if (nxt_slow_path(size > PORT_MMAP_DATA_SIZE)) { 2394 nxt_unit_req_warn(req, "response_buf_alloc: " 2395 "requested buffer (%"PRIu32") too big", size); 2396 2397 return NULL; 2398 } 2399 2400 nxt_unit_req_debug(req, "response_buf_alloc: %"PRIu32, size); 2401 2402 req_impl = nxt_container_of(req, 
nxt_unit_request_info_impl_t, req); 2403 2404 mmap_buf = nxt_unit_mmap_buf_get(req->ctx); 2405 if (nxt_slow_path(mmap_buf == NULL)) { 2406 nxt_unit_req_alert(req, "response_buf_alloc: failed to allocate buf"); 2407 2408 return NULL; 2409 } 2410 2411 mmap_buf->req = req; 2412 2413 nxt_unit_mmap_buf_insert_tail(&req_impl->outgoing_buf, mmap_buf); 2414 2415 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, 2416 size, size, mmap_buf, 2417 NULL); 2418 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 2419 nxt_unit_mmap_buf_release(mmap_buf); 2420 2421 nxt_unit_req_alert(req, "response_buf_alloc: failed to get out buf"); 2422 2423 return NULL; 2424 } 2425 2426 return &mmap_buf->buf; 2427} 2428 2429 2430static nxt_unit_mmap_buf_t * 2431nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx) 2432{ 2433 nxt_unit_mmap_buf_t *mmap_buf; 2434 nxt_unit_ctx_impl_t *ctx_impl; 2435 2436 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 2437 2438 pthread_mutex_lock(&ctx_impl->mutex); 2439 2440 if (ctx_impl->free_buf == NULL) { 2441 pthread_mutex_unlock(&ctx_impl->mutex); 2442 2443 mmap_buf = nxt_unit_malloc(ctx, sizeof(nxt_unit_mmap_buf_t)); 2444 if (nxt_slow_path(mmap_buf == NULL)) { 2445 return NULL; 2446 } 2447 2448 } else { 2449 mmap_buf = ctx_impl->free_buf; 2450 2451 nxt_unit_mmap_buf_unlink(mmap_buf); 2452 2453 pthread_mutex_unlock(&ctx_impl->mutex); 2454 } 2455 2456 mmap_buf->ctx_impl = ctx_impl; 2457 2458 mmap_buf->hdr = NULL; 2459 mmap_buf->free_ptr = NULL; 2460 2461 return mmap_buf; 2462} 2463 2464 2465static void 2466nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf) 2467{ 2468 nxt_unit_mmap_buf_unlink(mmap_buf); 2469 2470 pthread_mutex_lock(&mmap_buf->ctx_impl->mutex); 2471 2472 nxt_unit_mmap_buf_insert(&mmap_buf->ctx_impl->free_buf, mmap_buf); 2473 2474 pthread_mutex_unlock(&mmap_buf->ctx_impl->mutex); 2475} 2476 2477 2478int 2479nxt_unit_request_is_websocket_handshake(nxt_unit_request_info_t *req) 2480{ 2481 return req->request->websocket_handshake; 2482} 2483 

/*
 * Switch the request to websocket mode: register it in the stream hash
 * (so incoming frames can find it) and force status 101.  Idempotent.
 * Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
int
nxt_unit_response_upgrade(nxt_unit_request_info_t *req)
{
    int                           rc;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->websocket != 0)) {
        nxt_unit_req_debug(req, "upgrade: already upgraded");

        return NXT_UNIT_OK;
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "upgrade: response is not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "upgrade: response already sent");

        return NXT_UNIT_ERROR;
    }

    rc = nxt_unit_request_hash_add(req->ctx, req);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_req_warn(req, "upgrade: failed to add request to hash");

        return NXT_UNIT_ERROR;
    }

    req_impl->websocket = 1;

    req->response->status = 101;

    return NXT_UNIT_OK;
}


/* Return non-zero if the request has been upgraded to websocket. */
int
nxt_unit_response_is_websocket(nxt_unit_request_info_t *req)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    return req_impl->websocket;
}


/*
 * Map an application's per-request data pointer (the extra_data tail of
 * the impl struct) back to its request info object.
 */
nxt_unit_request_info_t *
nxt_unit_get_request_info_from_data(void *data)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(data, nxt_unit_request_info_impl_t, extra_data);

    return &req_impl->req;
}


/*
 * Send a response body buffer (non-final).  Requires headers to be sent
 * already; empty buffers are just freed.  The buffer is consumed in all
 * success paths.  Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
int
nxt_unit_buf_send(nxt_unit_buf_t *buf)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    nxt_unit_req_debug(req, "buf_send: %d bytes",
                       (int) (buf->free - buf->start));

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "buf_send: response not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "buf_send: headers not sent yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_fast_path(buf->free > buf->start)) {
        rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return rc;
        }
    }

    nxt_unit_mmap_buf_free(mmap_buf);

    return NXT_UNIT_OK;
}


/*
 * Send the final response buffer (last=1) and release the request on
 * success; on failure the request is finished with the error code.
 */
static void
nxt_unit_buf_send_done(nxt_unit_buf_t *buf)
{
    int                      rc;
    nxt_unit_mmap_buf_t      *mmap_buf;
    nxt_unit_request_info_t  *req;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;

    rc = nxt_unit_mmap_buf_send(req, mmap_buf, 1);
    if (nxt_slow_path(rc == NXT_UNIT_OK)) {
        nxt_unit_mmap_buf_free(mmap_buf);

        nxt_unit_request_info_release(req);

    } else {
        nxt_unit_request_done(req, rc);
    }
}


/*
 * Transmit the used part of 'mmap_buf' to the request's response port.
 * Two wire formats: a small (msg, mmap_msg) descriptor when the data
 * lives in shared memory, or the message header memcpy'd immediately
 * before the payload for a plain buffer.  After a shared-memory send the
 * buffer is rewound to the next free chunk (or detached when less than a
 * chunk remains) and the chunk accounting is adjusted.  In all cases the
 * outgoing storage is released before returning.
 */
static int
nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req,
    nxt_unit_mmap_buf_t *mmap_buf, int last)
{
    struct {
        nxt_port_msg_t       msg;
        nxt_port_mmap_msg_t  mmap_msg;
    } m;

    int                           rc;
    u_char                        *last_used, *first_free;
    ssize_t                       res;
    nxt_chunk_id_t                first_free_chunk;
    nxt_unit_buf_t                *buf;
    nxt_unit_impl_t               *lib;
    nxt_port_mmap_header_t        *hdr;
    nxt_unit_request_info_impl_t  *req_impl;

    lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    buf = &mmap_buf->buf;
    hdr = mmap_buf->hdr;

    m.mmap_msg.size = buf->free - buf->start;

    m.msg.stream = req_impl->stream;
    m.msg.pid = lib->pid;
    m.msg.reply_port = 0;
    m.msg.type = _NXT_PORT_MSG_DATA;
    m.msg.last = last != 0;
    /* Shared-memory path only when backed by an mmap and non-empty. */
    m.msg.mmap = hdr != NULL && m.mmap_msg.size > 0;
    m.msg.nf = 0;
    m.msg.mf = 0;

    rc = NXT_UNIT_ERROR;

    if (m.msg.mmap) {
        m.mmap_msg.mmap_id = hdr->id;
        m.mmap_msg.chunk_id = nxt_port_mmap_chunk_id(hdr,
                                                     (u_char *) buf->start);

        nxt_unit_debug(req->ctx, "#%"PRIu32": send mmap: (%d,%d,%d)",
                       req_impl->stream,
                       (int) m.mmap_msg.mmap_id,
                       (int) m.mmap_msg.chunk_id,
                       (int) m.mmap_msg.size);

        res = nxt_unit_port_send(req->ctx, req->response_port, &m, sizeof(m),
                                 NULL);
        if (nxt_slow_path(res != sizeof(m))) {
            goto free_buf;
        }

        last_used = (u_char *) buf->free - 1;
        first_free_chunk = nxt_port_mmap_chunk_id(hdr, last_used) + 1;

        if (buf->end - buf->free >= PORT_MMAP_CHUNK_SIZE) {
            /* Keep the remaining whole chunks for further writes. */
            first_free = nxt_port_mmap_chunk_start(hdr, first_free_chunk);

            buf->start = (char *) first_free;
            buf->free = buf->start;

            if (buf->end < buf->start) {
                buf->end = buf->start;
            }

        } else {
            /* Less than a chunk left: detach the buffer from the mmap. */
            buf->start = NULL;
            buf->free = NULL;
            buf->end = NULL;

            mmap_buf->hdr = NULL;
        }

        /*
         * chunk_id <= first_free_chunk, so this subtracts the chunks
         * just handed over to the receiver from the outgoing count.
         */
        nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks,
                             (int) m.mmap_msg.chunk_id - (int) first_free_chunk);

        nxt_unit_debug(req->ctx, "allocated_chunks %d",
                       (int) lib->outgoing.allocated_chunks);

    } else {
        /* Plain buffer: the port message header must fit in the space
           reserved before buf->start (plain_ptr). */
        if (nxt_slow_path(mmap_buf->plain_ptr == NULL
                          || mmap_buf->plain_ptr > buf->start - sizeof(m.msg)))
        {
            nxt_unit_alert(req->ctx,
                           "#%"PRIu32": failed to send plain memory buffer"
                           ": no space reserved for message header",
                           req_impl->stream);

            goto free_buf;
        }

        memcpy(buf->start - sizeof(m.msg), &m.msg, sizeof(m.msg));

        nxt_unit_debug(req->ctx, "#%"PRIu32": send plain: %d",
                       req_impl->stream,
                       (int) (sizeof(m.msg) + m.mmap_msg.size));

        res = nxt_unit_port_send(req->ctx, req->response_port,
                                 buf->start - sizeof(m.msg),
                                 m.mmap_msg.size + sizeof(m.msg), NULL);

        if (nxt_slow_path(res != (ssize_t) (m.mmap_msg.size + sizeof(m.msg)))) {
            goto free_buf;
        }
    }

    rc = NXT_UNIT_OK;

free_buf:

    nxt_unit_free_outgoing_buf(mmap_buf);

    return rc;
}


/* Public wrapper: free a buffer and its wrapper object. */
void
nxt_unit_buf_free(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_free(nxt_container_of(buf, nxt_unit_mmap_buf_t, buf));
}


/* Release a buffer's storage and return the wrapper to the free list. */
static void
nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_free_outgoing_buf(mmap_buf);

    nxt_unit_mmap_buf_release(mmap_buf);
}


/*
 * Release the storage behind an outgoing buffer: either return its
 * shared-memory chunks to the mmap, or free the malloc'ed plain area.
 */
static void
nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf)
{
    if (mmap_buf->hdr != NULL) {
        nxt_unit_mmap_release(&mmap_buf->ctx_impl->ctx,
                              mmap_buf->hdr, mmap_buf->buf.start,
                              mmap_buf->buf.end - mmap_buf->buf.start);

        mmap_buf->hdr = NULL;

        return;
    }

    if (mmap_buf->free_ptr != NULL) {
        nxt_unit_free(&mmap_buf->ctx_impl->ctx, mmap_buf->free_ptr);

        mmap_buf->free_ptr = NULL;
    }
}


/*
 * Get a read buffer for the context (free-list or fresh allocation) with
 * out-of-band data size reset.
 *
 * NOTE(review): nxt_unit_read_buf_get_impl() can return NULL on malloc
 * failure, and 'rbuf->oob.size = 0' below would then dereference NULL --
 * verify callers/allocation policy before relying on a non-NULL result.
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_ctx_impl_t  *ctx_impl;
    nxt_unit_read_buf_t  *rbuf;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    rbuf = nxt_unit_read_buf_get_impl(ctx_impl);

    pthread_mutex_unlock(&ctx_impl->mutex);

    rbuf->oob.size = 0;

    return rbuf;
}


/*
 * Lock-free part of read buffer acquisition: pop from free_rbuf or
 * malloc a new one.  Caller holds ctx_impl->mutex.  May return NULL.
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get_impl(nxt_unit_ctx_impl_t *ctx_impl)
{
    nxt_queue_link_t     *link;
    nxt_unit_read_buf_t  *rbuf;

    if (!nxt_queue_is_empty(&ctx_impl->free_rbuf)) {
        link = nxt_queue_first(&ctx_impl->free_rbuf);
        nxt_queue_remove(link);

        rbuf = nxt_container_of(link, nxt_unit_read_buf_t, link);

        return rbuf;
    }

    rbuf = nxt_unit_malloc(&ctx_impl->ctx, sizeof(nxt_unit_read_buf_t));

    if (nxt_fast_path(rbuf != NULL)) {
        rbuf->ctx_impl = ctx_impl;
    }

    return rbuf;
}


/* Return a read buffer to the context's free list. */
static void
nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
    nxt_unit_read_buf_t *rbuf)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    nxt_queue_insert_head(&ctx_impl->free_rbuf, &rbuf->link);

    pthread_mutex_unlock(&ctx_impl->mutex);
}


/* Return the next buffer in a chain, or NULL at the end. */
nxt_unit_buf_t *
nxt_unit_buf_next(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_t  *mmap_buf;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    if (mmap_buf->next == NULL) {
        return NULL;
    }

    return &mmap_buf->next->buf;
}


/* Largest single outgoing buffer: one shared-memory segment's data area. */
uint32_t
nxt_unit_buf_max(void)
{
    return PORT_MMAP_DATA_SIZE;
}


/* Smallest shared-memory allocation unit: one chunk. */
uint32_t
nxt_unit_buf_min(void)
{
    return PORT_MMAP_CHUNK_SIZE;
}


/*
 * Blocking write of 'size' response body bytes: delegates to the
 * non-blocking variant with min_size == size.
 */
int
nxt_unit_response_write(nxt_unit_request_info_t *req, const void *start,
    size_t size)
{
    ssize_t  res;

    res = nxt_unit_response_write_nb(req, start, size, size);

    return res < 0 ?
/*
 * Non-blocking response body write.  Sends up to "size" bytes from
 * "start", trying to transmit at least "min_size" bytes per shared-memory
 * allocation round.  Returns the number of bytes actually sent (which may
 * be less than "size" when no shared memory is available and min_size
 * permits a short write), or a negative NXT_UNIT_* error code.
 */
ssize_t
nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start,
    size_t size, size_t min_size)
{
    int                           rc;
    ssize_t                       sent;
    uint32_t                      part_size, min_part_size, buf_size;
    const char                    *part_start;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    nxt_unit_req_debug(req, "write: %d", (int) size);

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    part_start = start;
    sent = 0;

    /* Body data is only valid after nxt_unit_response_init(). */
    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_alert(req, "write: response not initialized yet");

        return -NXT_UNIT_ERROR;
    }

    /*
     * Headers not sent yet: piggyback as much body as fits into the
     * pending response buffer, then flush it.
     */
    if (nxt_slow_path(req->response_buf != NULL)) {
        part_size = req->response_buf->end - req->response_buf->free;
        part_size = nxt_min(size, part_size);

        rc = nxt_unit_response_add_content(req, part_start, part_size);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        rc = nxt_unit_response_send(req);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        /* min_size shrinks by what was sent, clamped at zero. */
        min_size -= nxt_min(min_size, part_size);
    }

    while (size > 0) {
        /* One shared-memory segment can carry at most PORT_MMAP_DATA_SIZE. */
        part_size = nxt_min(size, PORT_MMAP_DATA_SIZE);
        min_part_size = nxt_min(min_size, part_size);
        min_part_size = nxt_min(min_part_size, PORT_MMAP_CHUNK_SIZE);

        rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, part_size,
                                       min_part_size, &mmap_buf, local_buf);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        /*
         * An empty buffer (all pointers NULL) signals the non-blocking
         * out-of-shared-memory case: report the bytes sent so far.
         */
        buf_size = mmap_buf.buf.end - mmap_buf.buf.free;
        if (nxt_slow_path(buf_size == 0)) {
            return sent;
        }
        part_size = nxt_min(buf_size, part_size);

        mmap_buf.buf.free = nxt_cpymem(mmap_buf.buf.free,
                                       part_start, part_size);

        /* Ownership of mmap_buf's memory passes to the send. */
        rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        min_size -= nxt_min(min_size, part_size);
    }

    return sent;
}
*/ 2994 buf->free += n; 2995 req->response->piggyback_content_length += n; 2996 2997 if (read_info->eof) { 2998 break; 2999 } 3000 } 3001 3002 rc = nxt_unit_response_send(req); 3003 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3004 nxt_unit_req_error(req, "Failed to send headers with content"); 3005 3006 return rc; 3007 } 3008 3009 if (read_info->eof) { 3010 return NXT_UNIT_OK; 3011 } 3012 } 3013 3014 while (!read_info->eof) { 3015 nxt_unit_req_debug(req, "write_cb, alloc %"PRIu32"", 3016 read_info->buf_size); 3017 3018 buf_size = nxt_min(read_info->buf_size, PORT_MMAP_DATA_SIZE); 3019 3020 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, 3021 buf_size, buf_size, 3022 &mmap_buf, local_buf); 3023 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3024 return rc; 3025 } 3026 3027 buf = &mmap_buf.buf; 3028 3029 while (!read_info->eof && buf->end > buf->free) { 3030 n = read_info->read(read_info, buf->free, buf->end - buf->free); 3031 if (nxt_slow_path(n < 0)) { 3032 nxt_unit_req_error(req, "Read error"); 3033 3034 nxt_unit_free_outgoing_buf(&mmap_buf); 3035 3036 return NXT_UNIT_ERROR; 3037 } 3038 3039 buf->free += n; 3040 } 3041 3042 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0); 3043 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3044 nxt_unit_req_error(req, "Failed to send content"); 3045 3046 return rc; 3047 } 3048 } 3049 3050 return NXT_UNIT_OK; 3051} 3052 3053 3054ssize_t 3055nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size) 3056{ 3057 ssize_t buf_res, res; 3058 3059 buf_res = nxt_unit_buf_read(&req->content_buf, &req->content_length, 3060 dst, size); 3061 3062 if (buf_res < (ssize_t) size && req->content_fd != -1) { 3063 res = read(req->content_fd, dst, size); 3064 if (nxt_slow_path(res < 0)) { 3065 nxt_unit_req_alert(req, "failed to read content: %s (%d)", 3066 strerror(errno), errno); 3067 3068 return res; 3069 } 3070 3071 if (res < (ssize_t) size) { 3072 nxt_unit_close(req->content_fd); 3073 3074 req->content_fd = -1; 3075 } 3076 3077 
req->content_length -= res; 3078 size -= res; 3079 3080 dst = nxt_pointer_to(dst, res); 3081 3082 } else { 3083 res = 0; 3084 } 3085 3086 return buf_res + res; 3087} 3088 3089 3090ssize_t 3091nxt_unit_request_readline_size(nxt_unit_request_info_t *req, size_t max_size) 3092{ 3093 char *p; 3094 size_t l_size, b_size; 3095 nxt_unit_buf_t *b; 3096 nxt_unit_mmap_buf_t *mmap_buf, *preread_buf; 3097 3098 if (req->content_length == 0) { 3099 return 0; 3100 } 3101 3102 l_size = 0; 3103 3104 b = req->content_buf; 3105 3106 while (b != NULL) { 3107 b_size = b->end - b->free; 3108 p = memchr(b->free, '\n', b_size); 3109 3110 if (p != NULL) { 3111 p++; 3112 l_size += p - b->free; 3113 break; 3114 } 3115 3116 l_size += b_size; 3117 3118 if (max_size <= l_size) { 3119 break; 3120 } 3121 3122 mmap_buf = nxt_container_of(b, nxt_unit_mmap_buf_t, buf); 3123 if (mmap_buf->next == NULL 3124 && req->content_fd != -1 3125 && l_size < req->content_length) 3126 { 3127 preread_buf = nxt_unit_request_preread(req, 16384); 3128 if (nxt_slow_path(preread_buf == NULL)) { 3129 return -1; 3130 } 3131 3132 nxt_unit_mmap_buf_insert(&mmap_buf->next, preread_buf); 3133 } 3134 3135 b = nxt_unit_buf_next(b); 3136 } 3137 3138 return nxt_min(max_size, l_size); 3139} 3140 3141 3142static nxt_unit_mmap_buf_t * 3143nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size) 3144{ 3145 ssize_t res; 3146 nxt_unit_mmap_buf_t *mmap_buf; 3147 3148 if (req->content_fd == -1) { 3149 nxt_unit_req_alert(req, "preread: content_fd == -1"); 3150 return NULL; 3151 } 3152 3153 mmap_buf = nxt_unit_mmap_buf_get(req->ctx); 3154 if (nxt_slow_path(mmap_buf == NULL)) { 3155 nxt_unit_req_alert(req, "preread: failed to allocate buf"); 3156 return NULL; 3157 } 3158 3159 mmap_buf->free_ptr = nxt_unit_malloc(req->ctx, size); 3160 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) { 3161 nxt_unit_req_alert(req, "preread: failed to allocate buf memory"); 3162 nxt_unit_mmap_buf_release(mmap_buf); 3163 return NULL; 3164 } 3165 3166 
/*
 * Copies up to "size" bytes from the buffer chain "*b" into "dst",
 * consuming each buffer's [free, end) window as it goes.  "*len" (the
 * remaining content length) is decremented by the number of bytes copied.
 * On return "*b" points at the last buffer visited.  Returns the number
 * of bytes copied.
 */
static ssize_t
nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, size_t size)
{
    u_char          *p;
    size_t          rest, copy, read;
    nxt_unit_buf_t  *buf, *last_buf;

    p = dst;
    rest = size;

    buf = *b;
    last_buf = buf;

    while (buf != NULL) {
        /* Remember the buffer we are consuming; *b is set to it below. */
        last_buf = buf;

        copy = buf->end - buf->free;
        copy = nxt_min(rest, copy);

        p = nxt_cpymem(p, buf->free, copy);

        buf->free += copy;
        rest -= copy;

        if (rest == 0) {
            /*
             * NOTE(review): this advance is dead code — "buf" is not
             * read after the break, since *b is assigned last_buf, not
             * buf.  A fully-consumed buffer therefore stays at the head
             * of the chain and is re-skipped (copy == 0) on the next
             * call; confirm whether *b was meant to advance here.
             */
            if (buf->end == buf->free) {
                buf = nxt_unit_buf_next(buf);
            }

            break;
        }

        buf = nxt_unit_buf_next(buf);
    }

    *b = last_buf;

    read = size - rest;

    *len -= read;

    return read;
}
NXT_UNIT_RS_RESPONSE_INIT)) { 3259 3260 size = nxt_length("Content-Type") + nxt_length("text/plain"); 3261 3262 rc = nxt_unit_response_init(req, 200, 1, size); 3263 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3264 goto skip_response_send; 3265 } 3266 3267 rc = nxt_unit_response_add_field(req, "Content-Type", 3268 nxt_length("Content-Type"), 3269 "text/plain", nxt_length("text/plain")); 3270 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3271 goto skip_response_send; 3272 } 3273 } 3274 3275 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) { 3276 3277 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT; 3278 3279 nxt_unit_buf_send_done(req->response_buf); 3280 3281 return; 3282 } 3283 3284skip_response_send: 3285 3286 lib = nxt_container_of(req->unit, nxt_unit_impl_t, unit); 3287 3288 msg.stream = req_impl->stream; 3289 msg.pid = lib->pid; 3290 msg.reply_port = 0; 3291 msg.type = (rc == NXT_UNIT_OK) ? _NXT_PORT_MSG_DATA 3292 : _NXT_PORT_MSG_RPC_ERROR; 3293 msg.last = 1; 3294 msg.mmap = 0; 3295 msg.nf = 0; 3296 msg.mf = 0; 3297 3298 (void) nxt_unit_port_send(req->ctx, req->response_port, 3299 &msg, sizeof(msg), NULL); 3300 3301 nxt_unit_request_info_release(req); 3302} 3303 3304 3305int 3306nxt_unit_websocket_send(nxt_unit_request_info_t *req, uint8_t opcode, 3307 uint8_t last, const void *start, size_t size) 3308{ 3309 const struct iovec iov = { (void *) start, size }; 3310 3311 return nxt_unit_websocket_sendv(req, opcode, last, &iov, 1); 3312} 3313 3314 3315int 3316nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode, 3317 uint8_t last, const struct iovec *iov, int iovcnt) 3318{ 3319 int i, rc; 3320 size_t l, copy; 3321 uint32_t payload_len, buf_size, alloc_size; 3322 const uint8_t *b; 3323 nxt_unit_buf_t *buf; 3324 nxt_unit_mmap_buf_t mmap_buf; 3325 nxt_websocket_header_t *wh; 3326 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE]; 3327 3328 payload_len = 0; 3329 3330 for (i = 0; i < iovcnt; i++) { 3331 payload_len += iov[i].iov_len; 3332 } 3333 3334 buf_size = 
/*
 * Reads up to "size" bytes of websocket frame payload into "dst" and,
 * for client frames, unmasks them in place.  Returns the number of bytes
 * read (delegated to nxt_unit_buf_read()).
 */
ssize_t
nxt_unit_websocket_read(nxt_unit_websocket_frame_t *ws, void *dst,
    size_t size)
{
    ssize_t   res;
    uint8_t   *b;
    uint64_t  i, d;

    res = nxt_unit_buf_read(&ws->content_buf, &ws->content_length,
                            dst, size);

    /* Unmasked (server-to-client style) frame: nothing more to do. */
    if (ws->mask == NULL) {
        return res;
    }

    b = dst;

    /*
     * "d" is the offset of this chunk within the whole masked payload,
     * modulo the 4-byte mask: payload_len minus what is still unread
     * (content_length was already decremented by res in buf_read, so
     * the bytes just copied start at payload_len - content_length - res).
     */
    d = (ws->payload_len - ws->content_length - res) % 4;

    for (i = 0; i < (uint64_t) res; i++) {
        b[i] ^= ws->mask[ (i + d) % 4 ];
    }

    return res;
}
3425 3426int 3427nxt_unit_websocket_retain(nxt_unit_websocket_frame_t *ws) 3428{ 3429 char *b; 3430 size_t size, hsize; 3431 nxt_unit_websocket_frame_impl_t *ws_impl; 3432 3433 ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws); 3434 3435 if (ws_impl->buf->free_ptr != NULL || ws_impl->buf->hdr != NULL) { 3436 return NXT_UNIT_OK; 3437 } 3438 3439 size = ws_impl->buf->buf.end - ws_impl->buf->buf.start; 3440 3441 b = nxt_unit_malloc(ws->req->ctx, size); 3442 if (nxt_slow_path(b == NULL)) { 3443 return NXT_UNIT_ERROR; 3444 } 3445 3446 memcpy(b, ws_impl->buf->buf.start, size); 3447 3448 hsize = nxt_websocket_frame_header_size(b); 3449 3450 ws_impl->buf->buf.start = b; 3451 ws_impl->buf->buf.free = b + hsize; 3452 ws_impl->buf->buf.end = b + size; 3453 3454 ws_impl->buf->free_ptr = b; 3455 3456 ws_impl->ws.header = (nxt_websocket_header_t *) b; 3457 3458 if (ws_impl->ws.header->mask) { 3459 ws_impl->ws.mask = (uint8_t *) b + hsize - 4; 3460 3461 } else { 3462 ws_impl->ws.mask = NULL; 3463 } 3464 3465 return NXT_UNIT_OK; 3466} 3467 3468 3469void 3470nxt_unit_websocket_done(nxt_unit_websocket_frame_t *ws) 3471{ 3472 nxt_unit_websocket_frame_release(ws); 3473} 3474 3475 3476static nxt_port_mmap_header_t * 3477nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 3478 nxt_chunk_id_t *c, int *n, int min_n) 3479{ 3480 int res, nchunks, i; 3481 uint32_t outgoing_size; 3482 nxt_unit_mmap_t *mm, *mm_end; 3483 nxt_unit_impl_t *lib; 3484 nxt_port_mmap_header_t *hdr; 3485 3486 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3487 3488 pthread_mutex_lock(&lib->outgoing.mutex); 3489 3490retry: 3491 3492 outgoing_size = lib->outgoing.size; 3493 3494 mm_end = lib->outgoing.elts + outgoing_size; 3495 3496 for (mm = lib->outgoing.elts; mm < mm_end; mm++) { 3497 hdr = mm->hdr; 3498 3499 if (hdr->sent_over != 0xFFFFu 3500 && (hdr->sent_over != port->id.id 3501 || mm->src_thread != pthread_self())) 3502 { 3503 continue; 3504 } 3505 3506 *c = 0; 3507 3508 
while (nxt_port_mmap_get_free_chunk(hdr->free_map, c)) { 3509 nchunks = 1; 3510 3511 while (nchunks < *n) { 3512 res = nxt_port_mmap_chk_set_chunk_busy(hdr->free_map, 3513 *c + nchunks); 3514 3515 if (res == 0) { 3516 if (nchunks >= min_n) { 3517 *n = nchunks; 3518 3519 goto unlock; 3520 } 3521 3522 for (i = 0; i < nchunks; i++) { 3523 nxt_port_mmap_set_chunk_free(hdr->free_map, *c + i); 3524 } 3525 3526 *c += nchunks + 1; 3527 nchunks = 0; 3528 break; 3529 } 3530 3531 nchunks++; 3532 } 3533 3534 if (nchunks >= min_n) { 3535 *n = nchunks; 3536 3537 goto unlock; 3538 } 3539 } 3540 3541 hdr->oosm = 1; 3542 } 3543 3544 if (outgoing_size >= lib->shm_mmap_limit) { 3545 /* Cannot allocate more shared memory. */ 3546 pthread_mutex_unlock(&lib->outgoing.mutex); 3547 3548 if (min_n == 0) { 3549 *n = 0; 3550 } 3551 3552 if (nxt_slow_path(lib->outgoing.allocated_chunks + min_n 3553 >= lib->shm_mmap_limit * PORT_MMAP_CHUNK_COUNT)) 3554 { 3555 /* Memory allocated by application, but not send to router. */ 3556 return NULL; 3557 } 3558 3559 /* Notify router about OOSM condition. */ 3560 3561 res = nxt_unit_send_oosm(ctx, port); 3562 if (nxt_slow_path(res != NXT_UNIT_OK)) { 3563 return NULL; 3564 } 3565 3566 /* Return if caller can handle OOSM condition. Non-blocking mode. 
*/ 3567 3568 if (min_n == 0) { 3569 return NULL; 3570 } 3571 3572 nxt_unit_debug(ctx, "oosm: waiting for ACK"); 3573 3574 res = nxt_unit_wait_shm_ack(ctx); 3575 if (nxt_slow_path(res != NXT_UNIT_OK)) { 3576 return NULL; 3577 } 3578 3579 nxt_unit_debug(ctx, "oosm: retry"); 3580 3581 pthread_mutex_lock(&lib->outgoing.mutex); 3582 3583 goto retry; 3584 } 3585 3586 *c = 0; 3587 hdr = nxt_unit_new_mmap(ctx, port, *n); 3588 3589unlock: 3590 3591 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, *n); 3592 3593 nxt_unit_debug(ctx, "allocated_chunks %d", 3594 (int) lib->outgoing.allocated_chunks); 3595 3596 pthread_mutex_unlock(&lib->outgoing.mutex); 3597 3598 return hdr; 3599} 3600 3601 3602static int 3603nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) 3604{ 3605 ssize_t res; 3606 nxt_port_msg_t msg; 3607 nxt_unit_impl_t *lib; 3608 3609 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3610 3611 msg.stream = 0; 3612 msg.pid = lib->pid; 3613 msg.reply_port = 0; 3614 msg.type = _NXT_PORT_MSG_OOSM; 3615 msg.last = 0; 3616 msg.mmap = 0; 3617 msg.nf = 0; 3618 msg.mf = 0; 3619 3620 res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL); 3621 if (nxt_slow_path(res != sizeof(msg))) { 3622 return NXT_UNIT_ERROR; 3623 } 3624 3625 return NXT_UNIT_OK; 3626} 3627 3628 3629static int 3630nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx) 3631{ 3632 int res; 3633 nxt_unit_ctx_impl_t *ctx_impl; 3634 nxt_unit_read_buf_t *rbuf; 3635 3636 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 3637 3638 while (1) { 3639 rbuf = nxt_unit_read_buf_get(ctx); 3640 if (nxt_slow_path(rbuf == NULL)) { 3641 return NXT_UNIT_ERROR; 3642 } 3643 3644 do { 3645 res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf); 3646 } while (res == NXT_UNIT_AGAIN); 3647 3648 if (res == NXT_UNIT_ERROR) { 3649 nxt_unit_read_buf_release(ctx, rbuf); 3650 3651 return NXT_UNIT_ERROR; 3652 } 3653 3654 if (nxt_unit_is_shm_ack(rbuf)) { 3655 nxt_unit_read_buf_release(ctx, rbuf); 
3656 break; 3657 } 3658 3659 pthread_mutex_lock(&ctx_impl->mutex); 3660 3661 nxt_queue_insert_tail(&ctx_impl->pending_rbuf, &rbuf->link); 3662 3663 pthread_mutex_unlock(&ctx_impl->mutex); 3664 3665 if (nxt_unit_is_quit(rbuf)) { 3666 nxt_unit_debug(ctx, "oosm: quit received"); 3667 3668 return NXT_UNIT_ERROR; 3669 } 3670 } 3671 3672 return NXT_UNIT_OK; 3673} 3674 3675 3676static nxt_unit_mmap_t * 3677nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i) 3678{ 3679 uint32_t cap, n; 3680 nxt_unit_mmap_t *e; 3681 3682 if (nxt_fast_path(mmaps->size > i)) { 3683 return mmaps->elts + i; 3684 } 3685 3686 cap = mmaps->cap; 3687 3688 if (cap == 0) { 3689 cap = i + 1; 3690 } 3691 3692 while (i + 1 > cap) { 3693 3694 if (cap < 16) { 3695 cap = cap * 2; 3696 3697 } else { 3698 cap = cap + cap / 2; 3699 } 3700 } 3701 3702 if (cap != mmaps->cap) { 3703 3704 e = realloc(mmaps->elts, cap * sizeof(nxt_unit_mmap_t)); 3705 if (nxt_slow_path(e == NULL)) { 3706 return NULL; 3707 } 3708 3709 mmaps->elts = e; 3710 3711 for (n = mmaps->cap; n < cap; n++) { 3712 e = mmaps->elts + n; 3713 3714 e->hdr = NULL; 3715 nxt_queue_init(&e->awaiting_rbuf); 3716 } 3717 3718 mmaps->cap = cap; 3719 } 3720 3721 if (i + 1 > mmaps->size) { 3722 mmaps->size = i + 1; 3723 } 3724 3725 return mmaps->elts + i; 3726} 3727 3728 3729static nxt_port_mmap_header_t * 3730nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) 3731{ 3732 int i, fd, rc; 3733 void *mem; 3734 nxt_unit_mmap_t *mm; 3735 nxt_unit_impl_t *lib; 3736 nxt_port_mmap_header_t *hdr; 3737 3738 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3739 3740 mm = nxt_unit_mmap_at(&lib->outgoing, lib->outgoing.size); 3741 if (nxt_slow_path(mm == NULL)) { 3742 nxt_unit_alert(ctx, "failed to add mmap to outgoing array"); 3743 3744 return NULL; 3745 } 3746 3747 fd = nxt_unit_shm_open(ctx, PORT_MMAP_SIZE); 3748 if (nxt_slow_path(fd == -1)) { 3749 goto remove_fail; 3750 } 3751 3752 mem = mmap(NULL, PORT_MMAP_SIZE, PROT_READ | PROT_WRITE, 
/*
 * Creates an anonymous shared memory segment of "size" bytes and returns
 * its file descriptor, or -1 on failure.  The backing mechanism is picked
 * at build time: memfd_create(2), shm_open(SHM_ANON), or a named
 * shm_open() that is unlinked right after creation.
 */
static int
nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size)
{
    int              fd;
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

#if (NXT_HAVE_MEMFD_CREATE || NXT_HAVE_SHM_OPEN)
    char name[64];

    /*
     * Per-pid/per-thread name: informational for memfd, transient for
     * shm_open.  In SHM_OPEN_ANON-only builds neither name nor lib is
     * used — presumably tolerated by the supported compilers; verify.
     */
    snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p",
             lib->pid, (void *) (uintptr_t) pthread_self());
#endif

#if (NXT_HAVE_MEMFD_CREATE)

    /* Raw syscall is used instead of a libc wrapper. */
    fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name,
                       strerror(errno), errno);

        return -1;
    }

    nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd);

#elif (NXT_HAVE_SHM_OPEN_ANON)

    fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)",
                       strerror(errno), errno);

        return -1;
    }

#elif (NXT_HAVE_SHM_OPEN)

    /* Just in case. */
    shm_unlink(name);

    fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name,
                       strerror(errno), errno);

        return -1;
    }

    /* Unlink immediately; the open fd keeps the segment alive. */
    if (nxt_slow_path(shm_unlink(name) == -1)) {
        nxt_unit_alert(ctx, "shm_unlink(%s) failed: %s (%d)", name,
                       strerror(errno), errno);
    }

#else

#error No working shared memory implementation.

#endif

    /* Size the segment; a failure here leaks nothing (fd is closed). */
    if (nxt_slow_path(ftruncate(fd, size) == -1)) {
        nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd,
                       strerror(errno), errno);

        nxt_unit_close(fd);

        return -1;
    }

    return fd;
}
NXT_UNIT_OK; 3949 } 3950 3951 nchunks = (size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE; 3952 min_nchunks = (min_size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE; 3953 3954 hdr = nxt_unit_mmap_get(ctx, port, &c, &nchunks, min_nchunks); 3955 if (nxt_slow_path(hdr == NULL)) { 3956 if (nxt_fast_path(min_nchunks == 0 && nchunks == 0)) { 3957 mmap_buf->hdr = NULL; 3958 mmap_buf->buf.start = NULL; 3959 mmap_buf->buf.free = NULL; 3960 mmap_buf->buf.end = NULL; 3961 mmap_buf->free_ptr = NULL; 3962 3963 return NXT_UNIT_OK; 3964 } 3965 3966 return NXT_UNIT_ERROR; 3967 } 3968 3969 mmap_buf->hdr = hdr; 3970 mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c); 3971 mmap_buf->buf.free = mmap_buf->buf.start; 3972 mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE; 3973 mmap_buf->free_ptr = NULL; 3974 mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 3975 3976 nxt_unit_debug(ctx, "outgoing mmap allocation: (%d,%d,%d)", 3977 (int) hdr->id, (int) c, 3978 (int) (nchunks * PORT_MMAP_CHUNK_SIZE)); 3979 3980 return NXT_UNIT_OK; 3981} 3982 3983 3984static int 3985nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) 3986{ 3987 int rc; 3988 void *mem; 3989 nxt_queue_t awaiting_rbuf; 3990 struct stat mmap_stat; 3991 nxt_unit_mmap_t *mm; 3992 nxt_unit_impl_t *lib; 3993 nxt_unit_ctx_impl_t *ctx_impl; 3994 nxt_unit_read_buf_t *rbuf; 3995 nxt_port_mmap_header_t *hdr; 3996 3997 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3998 3999 nxt_unit_debug(ctx, "incoming_mmap: fd %d from process %d", fd, (int) pid); 4000 4001 if (fstat(fd, &mmap_stat) == -1) { 4002 nxt_unit_alert(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd, 4003 strerror(errno), errno); 4004 4005 return NXT_UNIT_ERROR; 4006 } 4007 4008 mem = mmap(NULL, mmap_stat.st_size, PROT_READ | PROT_WRITE, 4009 MAP_SHARED, fd, 0); 4010 if (nxt_slow_path(mem == MAP_FAILED)) { 4011 nxt_unit_alert(ctx, "incoming_mmap: mmap() failed: %s (%d)", 4012 
strerror(errno), errno); 4013 4014 return NXT_UNIT_ERROR; 4015 } 4016 4017 hdr = mem; 4018 4019 if (nxt_slow_path(hdr->src_pid != pid)) { 4020 4021 nxt_unit_alert(ctx, "incoming_mmap: unexpected pid in mmap header " 4022 "detected: %d != %d or %d != %d", (int) hdr->src_pid, 4023 (int) pid, (int) hdr->dst_pid, (int) lib->pid); 4024 4025 munmap(mem, PORT_MMAP_SIZE); 4026 4027 return NXT_UNIT_ERROR; 4028 } 4029 4030 nxt_queue_init(&awaiting_rbuf); 4031 4032 pthread_mutex_lock(&lib->incoming.mutex); 4033 4034 mm = nxt_unit_mmap_at(&lib->incoming, hdr->id); 4035 if (nxt_slow_path(mm == NULL)) { 4036 nxt_unit_alert(ctx, "incoming_mmap: failed to add to incoming array"); 4037 4038 munmap(mem, PORT_MMAP_SIZE); 4039 4040 rc = NXT_UNIT_ERROR; 4041 4042 } else { 4043 mm->hdr = hdr; 4044 4045 hdr->sent_over = 0xFFFFu; 4046 4047 nxt_queue_add(&awaiting_rbuf, &mm->awaiting_rbuf); 4048 nxt_queue_init(&mm->awaiting_rbuf); 4049 4050 rc = NXT_UNIT_OK; 4051 } 4052 4053 pthread_mutex_unlock(&lib->incoming.mutex); 4054 4055 nxt_queue_each(rbuf, &awaiting_rbuf, nxt_unit_read_buf_t, link) { 4056 4057 ctx_impl = rbuf->ctx_impl; 4058 4059 pthread_mutex_lock(&ctx_impl->mutex); 4060 4061 nxt_queue_insert_head(&ctx_impl->pending_rbuf, &rbuf->link); 4062 4063 pthread_mutex_unlock(&ctx_impl->mutex); 4064 4065 nxt_atomic_fetch_add(&ctx_impl->wait_items, -1); 4066 4067 nxt_unit_awake_ctx(ctx, ctx_impl); 4068 4069 } nxt_queue_loop; 4070 4071 return rc; 4072} 4073 4074 4075static void 4076nxt_unit_awake_ctx(nxt_unit_ctx_t *ctx, nxt_unit_ctx_impl_t *ctx_impl) 4077{ 4078 nxt_port_msg_t msg; 4079 4080 if (nxt_fast_path(ctx == &ctx_impl->ctx)) { 4081 return; 4082 } 4083 4084 if (nxt_slow_path(ctx_impl->read_port == NULL 4085 || ctx_impl->read_port->out_fd == -1)) 4086 { 4087 nxt_unit_alert(ctx, "target context read_port is NULL or not writable"); 4088 4089 return; 4090 } 4091 4092 memset(&msg, 0, sizeof(nxt_port_msg_t)); 4093 4094 msg.type = _NXT_PORT_MSG_RPC_READY; 4095 4096 (void) 
                       nxt_unit_port_send(ctx, ctx_impl->read_port,
                                          &msg, sizeof(msg), NULL);
}


/* Initialize an empty mmaps array and its guarding mutex. */
static void
nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps)
{
    pthread_mutex_init(&mmaps->mutex, NULL);

    mmaps->size = 0;
    mmaps->cap = 0;
    mmaps->elts = NULL;
    mmaps->allocated_chunks = 0;
}


/* Take a reference on a process structure. */
nxt_inline void
nxt_unit_process_use(nxt_unit_process_t *process)
{
    nxt_atomic_fetch_add(&process->use_count, 1);
}


/* Drop a reference; frees the structure when the last reference goes. */
nxt_inline void
nxt_unit_process_release(nxt_unit_process_t *process)
{
    long  c;

    c = nxt_atomic_fetch_add(&process->use_count, -1);

    if (c == 1) {
        nxt_unit_debug(NULL, "destroy process #%d", (int) process->pid);

        nxt_unit_free(NULL, process);
    }
}


/* Unmap every registered segment and free the array itself. */
static void
nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps)
{
    nxt_unit_mmap_t  *mm, *end;

    if (mmaps->elts != NULL) {
        end = mmaps->elts + mmaps->size;

        for (mm = mmaps->elts; mm < end; mm++) {
            munmap(mm->hdr, PORT_MMAP_SIZE);
        }

        nxt_unit_free(NULL, mmaps->elts);
    }

    pthread_mutex_destroy(&mmaps->mutex);
}


/*
 * Look up (or reserve a slot for) incoming mmap segment 'id'.
 *
 * Locking contract: called with mmaps->mutex LOCKED.
 *   - NXT_UNIT_OK:    *hdr is set; the mutex stays LOCKED for the caller.
 *   - NXT_UNIT_ERROR: the mutex has been UNLOCKED here.
 *   - NXT_UNIT_AGAIN: segment not yet mapped; 'rbuf' has been parked on the
 *     slot's awaiting_rbuf queue, the mutex has been UNLOCKED, wait_items
 *     was incremented, and (for the first waiter) the segment fd was
 *     requested from the router.
 */
static int
nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps,
    pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr,
    nxt_unit_read_buf_t *rbuf)
{
    int                  res, need_rbuf;
    nxt_unit_mmap_t      *mm;
    nxt_unit_ctx_impl_t  *ctx_impl;

    mm = nxt_unit_mmap_at(mmaps, id);
    if (nxt_slow_path(mm == NULL)) {
        nxt_unit_alert(ctx, "failed to allocate mmap");

        pthread_mutex_unlock(&mmaps->mutex);

        *hdr = NULL;

        return NXT_UNIT_ERROR;
    }

    *hdr = mm->hdr;

    if (nxt_fast_path(*hdr != NULL)) {
        return NXT_UNIT_OK;
    }

    /* Only the first parked buffer triggers a GET_MMAP request. */
    need_rbuf = nxt_queue_is_empty(&mm->awaiting_rbuf);

    nxt_queue_insert_tail(&mm->awaiting_rbuf, &rbuf->link);

    pthread_mutex_unlock(&mmaps->mutex);

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);

    if (need_rbuf) {
        res = nxt_unit_get_mmap(ctx, pid, id);
        if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
            return NXT_UNIT_ERROR;
        }
    }

    return NXT_UNIT_AGAIN;
}


/*
 * Translate a received array of nxt_port_mmap_msg_t descriptors into a
 * chain of mmap buffers attached to recv_msg->incoming_buf.  On success,
 * recv_msg->start/size are rewritten to point into the first chunk.
 * Returns NXT_UNIT_OK, NXT_UNIT_AGAIN (waiting for a segment; see
 * nxt_unit_check_rbuf_mmap) or NXT_UNIT_ERROR.
 */
static int
nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg,
    nxt_unit_read_buf_t *rbuf)
{
    int                     res;
    void                    *start;
    uint32_t                size;
    nxt_unit_impl_t         *lib;
    nxt_unit_mmaps_t        *mmaps;
    nxt_unit_mmap_buf_t     *b, **incoming_tail;
    nxt_port_mmap_msg_t     *mmap_msg, *end;
    nxt_port_mmap_header_t  *hdr;

    if (nxt_slow_path(recv_msg->size < sizeof(nxt_port_mmap_msg_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: too small message (%d)",
                      recv_msg->stream, (int) recv_msg->size);

        return NXT_UNIT_ERROR;
    }

    mmap_msg = recv_msg->start;
    end = nxt_pointer_to(recv_msg->start, recv_msg->size);

    incoming_tail = &recv_msg->incoming_buf;

    /* Allocating buffer structures.
     */
    for (; mmap_msg < end; mmap_msg++) {
        b = nxt_unit_mmap_buf_get(ctx);
        if (nxt_slow_path(b == NULL)) {
            nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: failed to allocate buf",
                          recv_msg->stream);

            /* Release whatever was chained so far. */
            while (recv_msg->incoming_buf != NULL) {
                nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
            }

            return NXT_UNIT_ERROR;
        }

        nxt_unit_mmap_buf_insert(incoming_tail, b);
        incoming_tail = &b->next;
    }

    b = recv_msg->incoming_buf;
    mmap_msg = recv_msg->start;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    mmaps = &lib->incoming;

    pthread_mutex_lock(&mmaps->mutex);

    /* Resolve each descriptor to its chunk within the incoming segments.
     * nxt_unit_check_rbuf_mmap() unlocks the mutex itself on non-OK. */
    for (; mmap_msg < end; mmap_msg++) {
        res = nxt_unit_check_rbuf_mmap(ctx, mmaps,
                                       recv_msg->pid, mmap_msg->mmap_id,
                                       &hdr, rbuf);

        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            while (recv_msg->incoming_buf != NULL) {
                nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
            }

            return res;
        }

        start = nxt_port_mmap_chunk_start(hdr, mmap_msg->chunk_id);
        size = mmap_msg->size;

        /* First descriptor: redirect the message payload into shared mem. */
        if (recv_msg->start == mmap_msg) {
            recv_msg->start = start;
            recv_msg->size = size;
        }

        b->buf.start = start;
        b->buf.free = start;
        b->buf.end = b->buf.start + size;
        b->hdr = hdr;

        b = b->next;

        nxt_unit_debug(ctx, "#%"PRIu32": mmap_read: [%p,%d] %d->%d,(%d,%d,%d)",
                       recv_msg->stream,
                       start, (int) size,
                       (int) hdr->src_pid, (int) hdr->dst_pid,
                       (int) hdr->id, (int) mmap_msg->chunk_id,
                       (int) mmap_msg->size);
    }

    pthread_mutex_unlock(&mmaps->mutex);

    return NXT_UNIT_OK;
}


/*
 * Ask the router for the fd of incoming mmap segment 'id' belonging to
 * process 'pid'; the reply arrives on this context's read port.
 */
static int
nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id)
{
    ssize_t              res;
    nxt_unit_impl_t      *lib;
    nxt_unit_ctx_impl_t  *ctx_impl;

    struct {
        nxt_port_msg_t           msg;
        nxt_port_msg_get_mmap_t  get_mmap;
    } m;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    memset(&m.msg, 0, sizeof(nxt_port_msg_t));

    m.msg.pid = lib->pid;
    m.msg.reply_port = ctx_impl->read_port->id.id;
    m.msg.type = _NXT_PORT_MSG_GET_MMAP;

    m.get_mmap.id = id;

    nxt_unit_debug(ctx, "get_mmap: %d %d", (int) pid, (int) id);

    res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL);
    if (nxt_slow_path(res != sizeof(m))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Return the chunks [start, start+size) of segment 'hdr' to the free map.
 * Poisons the region (0xA5) to catch use-after-release, updates the
 * outgoing allocated-chunks counter for locally owned segments, and sends
 * an SHM_ACK when the peer flagged out-of-shared-memory on this segment.
 */
static void
nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, nxt_port_mmap_header_t *hdr,
    void *start, uint32_t size)
{
    int              freed_chunks;
    u_char           *p, *end;
    nxt_chunk_id_t   c;
    nxt_unit_impl_t  *lib;

    memset(start, 0xA5, size);

    p = start;
    end = p + size;
    c = nxt_port_mmap_chunk_id(hdr, p);
    freed_chunks = 0;

    while (p < end) {
        nxt_port_mmap_set_chunk_free(hdr->free_map, c);

        p += PORT_MMAP_CHUNK_SIZE;
        c++;
        freed_chunks++;
    }

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    if (hdr->src_pid == lib->pid && freed_chunks != 0) {
        nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, -freed_chunks);

        nxt_unit_debug(ctx, "allocated_chunks %d",
                       (int) lib->outgoing.allocated_chunks);
    }

    /* Peer set 'oosm' while waiting for chunks: acknowledge exactly once. */
    if (hdr->dst_pid == lib->pid
        && freed_chunks != 0
        && nxt_atomic_cmp_set(&hdr->oosm, 1, 0))
    {
        nxt_unit_send_shm_ack(ctx, hdr->src_pid);
    }
}


/* Notify process 'pid' (via the router) that shared memory was freed. */
static int
nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid)
{
    ssize_t          res;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_SHM_ACK;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;

    res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/* lvlhsh callback: match a stored process entry against a pid key. */
static nxt_int_t
nxt_unit_lvlhsh_pid_test(nxt_lvlhsh_query_t *lhq, void *data)
{
    nxt_process_t  *process;

    process = data;

    if (lhq->key.length == sizeof(pid_t)
        && *(pid_t *) lhq->key.start == process->pid)
    {
        return NXT_OK;
    }

    return NXT_DECLINED;
}


static const nxt_lvlhsh_proto_t  lvlhsh_processes_proto  nxt_aligned(64) = {
    NXT_LVLHSH_DEFAULT,
    nxt_unit_lvlhsh_pid_test,
    nxt_unit_lvlhsh_alloc,
    nxt_unit_lvlhsh_free,
};


/* Fill an lvlhsh query keyed by pid.  'pid' must stay valid while the
 * query is in use — the key stores a pointer to it, not a copy. */
static inline void
nxt_unit_process_lhq_pid(nxt_lvlhsh_query_t *lhq, pid_t *pid)
{
    lhq->key_hash = nxt_murmur_hash2(pid, sizeof(*pid));
    lhq->key.length = sizeof(*pid);
    lhq->key.start = (u_char *) pid;
    lhq->proto = &lvlhsh_processes_proto;
}


/*
 * Find the process entry for 'pid', creating it on first use.
 * The returned entry carries a reference the caller must release
 * (a fresh entry starts at use_count == 2: one for the hash, one for
 * the caller).  Returns NULL on allocation or insertion failure.
 */
static nxt_unit_process_t *
nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid)
{
    nxt_unit_impl_t     *lib;
    nxt_unit_process_t  *process;
    nxt_lvlhsh_query_t  lhq;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_unit_process_lhq_pid(&lhq, &pid);

    if (nxt_lvlhsh_find(&lib->processes, &lhq) == NXT_OK) {
        process = lhq.value;
        nxt_unit_process_use(process);

        return process;
    }

    process = nxt_unit_malloc(ctx, sizeof(nxt_unit_process_t));
    if (nxt_slow_path(process == NULL)) {
        nxt_unit_alert(ctx, "failed to allocate process for #%d", (int) pid);

        return NULL;
    }

    process->pid = pid;
    process->use_count = 2;
    process->next_port_id = 0;
    process->lib = lib;

    nxt_queue_init(&process->ports);

    lhq.replace = 0;
    lhq.value = process;

    switch (nxt_lvlhsh_insert(&lib->processes, &lhq)) {

    case NXT_OK:
        break;

    default:
        nxt_unit_alert(ctx, "process %d insert failed", (int) pid);

        nxt_unit_free(ctx, process);
        process = NULL;
        break;
    }

    return process;
}


/*
 * Look up the process entry for 'pid'.  With remove == 0 the entry is
 * returned with an extra reference; with remove != 0 it is detached from
 * the hash and returned still owning the hash's reference.
 */
static nxt_unit_process_t *
nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove)
{
    int                 rc;
    nxt_lvlhsh_query_t  lhq;

    nxt_unit_process_lhq_pid(&lhq, &pid);

    if (remove) {
        rc = nxt_lvlhsh_delete(&lib->processes, &lhq);

    } else {
        rc = nxt_lvlhsh_find(&lib->processes, &lhq);
    }

    if (rc == NXT_OK) {
        if (!remove) {
            nxt_unit_process_use(lhq.value);
        }

        return lhq.value;
    }

    return NULL;
}


/* Detach and return an arbitrary process entry; NULL when the hash is
 * empty.  Used during shutdown to drain the processes hash. */
static nxt_unit_process_t *
nxt_unit_process_pop_first(nxt_unit_impl_t *lib)
{
    return nxt_lvlhsh_retrieve(&lib->processes, &lvlhsh_processes_proto, NULL);
}


/* Public API: run the event loop for 'ctx' until it goes offline. */
int
nxt_unit_run(nxt_unit_ctx_t *ctx)
{
    int                  rc;
    nxt_unit_ctx_impl_t  *ctx_impl;

    nxt_unit_ctx_use(ctx);

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    rc = NXT_UNIT_OK;

    while (nxt_fast_path(ctx_impl->online)) {
        rc = nxt_unit_run_once_impl(ctx);

        if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
            nxt_unit_quit(ctx, NXT_QUIT_NORMAL);
            break;
        }
    }

    nxt_unit_ctx_release(ctx);

    return rc;
}


/* Public API: process a single message for 'ctx'. */
int
nxt_unit_run_once(nxt_unit_ctx_t *ctx)
{
    int  rc;

    nxt_unit_ctx_use(ctx);

    rc = nxt_unit_run_once_impl(ctx);

    nxt_unit_ctx_release(ctx);

    return rc;
}


/* One iteration: read a message, process it, then drain pending read
 * buffers and ready requests. */
static int
nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx)
{
    int                  rc;
    nxt_unit_read_buf_t  *rbuf;

    rbuf = nxt_unit_read_buf_get(ctx);
    if (nxt_slow_path(rbuf == NULL)) {
        return NXT_UNIT_ERROR;
    }

    rc =
nxt_unit_read_buf(ctx, rbuf);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_read_buf_release(ctx, rbuf);

        return rc;
    }

    rc = nxt_unit_process_msg(ctx, rbuf, NULL);
    if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
        return NXT_UNIT_ERROR;
    }

    rc = nxt_unit_process_pending_rbuf(ctx);
    if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
        return NXT_UNIT_ERROR;
    }

    nxt_unit_process_ready_req(ctx);

    return rc;
}


/*
 * Fill 'rbuf' with the next message for this context.  Tries the port's
 * lock-free queue first, then the shared application queue, and finally
 * blocks in poll() on the context port fd (and, when the app is ready to
 * accept requests, the shared port fd as well).
 */
static int
nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf)
{
    int                   nevents, res, err;
    nxt_uint_t            nfds;
    nxt_unit_impl_t       *lib;
    nxt_unit_ctx_impl_t   *ctx_impl;
    nxt_unit_port_impl_t  *port_impl;
    struct pollfd         fds[2];

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    /* Waiting for specific replies (or not accepting new requests):
     * read only from the context port socket. */
    if (ctx_impl->wait_items > 0 || !nxt_unit_chk_ready(ctx)) {
        return nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
    }

    port_impl = nxt_container_of(ctx_impl->read_port, nxt_unit_port_impl_t,
                                 port);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

retry:

    if (port_impl->from_socket == 0) {
        res = nxt_unit_port_queue_recv(ctx_impl->read_port, rbuf);
        if (res == NXT_UNIT_OK) {
            if (nxt_unit_is_read_socket(rbuf)) {
                /* Marker: the next message must be read from the socket. */
                port_impl->from_socket++;

                nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d",
                               (int) ctx_impl->read_port->id.pid,
                               (int) ctx_impl->read_port->id.id,
                               port_impl->from_socket);

            } else {
                nxt_unit_debug(ctx, "port{%d,%d} dequeue %d",
                               (int) ctx_impl->read_port->id.pid,
                               (int) ctx_impl->read_port->id.id,
                               (int) rbuf->size);

                return NXT_UNIT_OK;
            }
        }
    }

    if (nxt_fast_path(nxt_unit_chk_ready(ctx))) {
        res = nxt_unit_app_queue_recv(ctx, lib->shared_port, rbuf);
        if (res == NXT_UNIT_OK) {
            return NXT_UNIT_OK;
        }

        fds[1].fd = lib->shared_port->in_fd;
        fds[1].events = POLLIN;

        nfds = 2;

    } else {
        nfds = 1;
    }

    fds[0].fd = ctx_impl->read_port->in_fd;
    fds[0].events = POLLIN;
    fds[0].revents = 0;

    fds[1].revents = 0;

    nevents = poll(fds, nfds, -1);
    if (nxt_slow_path(nevents == -1)) {
        err = errno;

        if (err == EINTR) {
            goto retry;
        }

        nxt_unit_alert(ctx, "poll(%d,%d) failed: %s (%d)",
                       fds[0].fd, fds[1].fd, strerror(err), err);

        rbuf->size = -1;

        return (err == EAGAIN) ? NXT_UNIT_AGAIN : NXT_UNIT_ERROR;
    }

    nxt_unit_debug(ctx, "poll(%d,%d): %d, revents [%04X, %04X]",
                   fds[0].fd, fds[1].fd, nevents, fds[0].revents,
                   fds[1].revents);

    if ((fds[0].revents & POLLIN) != 0) {
        res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
        if (res == NXT_UNIT_AGAIN) {
            goto retry;
        }

        return res;
    }

    if ((fds[1].revents & POLLIN) != 0) {
        res = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
        if (res == NXT_UNIT_AGAIN) {
            goto retry;
        }

        return res;
    }

    nxt_unit_alert(ctx, "poll(%d,%d): %d unexpected revents [%04uXi, %04uXi]",
                   fds[0].fd, fds[1].fd, nevents, fds[0].revents,
                   fds[1].revents);

    return NXT_UNIT_ERROR;
}


/* True when this context may accept new requests (ready flag set and the
 * configured request limit, if any, not yet reached). */
static int
nxt_unit_chk_ready(nxt_unit_ctx_t *ctx)
{
    nxt_unit_impl_t      *lib;
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    return (ctx_impl->ready
            && (lib->request_limit == 0
                || lib->request_count < lib->request_limit));
}


/*
 * Process every read buffer parked on this context's pending_rbuf queue.
 * The queue is detached under the mutex and drained outside it; after the
 * first error the remaining buffers are only released.
 */
static int
nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx)
{
    int                  rc;
    nxt_queue_t          pending_rbuf;
    nxt_unit_ctx_impl_t  *ctx_impl;
    nxt_unit_read_buf_t  *rbuf;

    ctx_impl = nxt_container_of(ctx,
                                nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (nxt_queue_is_empty(&ctx_impl->pending_rbuf)) {
        pthread_mutex_unlock(&ctx_impl->mutex);

        return NXT_UNIT_OK;
    }

    nxt_queue_init(&pending_rbuf);

    nxt_queue_add(&pending_rbuf, &ctx_impl->pending_rbuf);
    nxt_queue_init(&ctx_impl->pending_rbuf);

    pthread_mutex_unlock(&ctx_impl->mutex);

    rc = NXT_UNIT_OK;

    nxt_queue_each(rbuf, &pending_rbuf, nxt_unit_read_buf_t, link) {

        if (nxt_fast_path(rc != NXT_UNIT_ERROR)) {
            rc = nxt_unit_process_msg(&ctx_impl->ctx, rbuf, NULL);

        } else {
            nxt_unit_read_buf_release(ctx, rbuf);
        }

    } nxt_queue_loop;

    if (!ctx_impl->ready) {
        nxt_unit_quit(ctx, NXT_QUIT_GRACEFUL);
    }

    return rc;
}


/*
 * Dispatch requests queued on ready_req: send the headers ack for each,
 * register requests with pending body content in the request hash, and
 * invoke the application's request_handler.
 */
static void
nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx)
{
    int                           res;
    nxt_queue_t                   ready_req;
    nxt_unit_impl_t               *lib;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (nxt_queue_is_empty(&ctx_impl->ready_req)) {
        pthread_mutex_unlock(&ctx_impl->mutex);

        return;
    }

    nxt_queue_init(&ready_req);

    /* Detach the whole queue; process it without holding the mutex. */
    nxt_queue_add(&ready_req, &ctx_impl->ready_req);
    nxt_queue_init(&ctx_impl->ready_req);

    pthread_mutex_unlock(&ctx_impl->mutex);

    nxt_queue_each(req_impl, &ready_req,
                   nxt_unit_request_info_impl_t, port_wait_link)
    {
        lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit);

        req = &req_impl->req;

        res = nxt_unit_send_req_headers_ack(req);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            nxt_unit_request_done(req, NXT_UNIT_ERROR);

            continue;
        }

        /* More body bytes expected than currently buffered. */
        if (req->content_length
            > (uint64_t) (req->content_buf->end - req->content_buf->free))
        {
            res = nxt_unit_request_hash_add(ctx, req);
            if (nxt_slow_path(res != NXT_UNIT_OK)) {
                nxt_unit_req_warn(req, "failed to add request to hash");

                nxt_unit_request_done(req, NXT_UNIT_ERROR);

                continue;
            }

            /*
             * If application have separate data handler, we may start
             * request processing and process data when it is arrived.
             */
            if (lib->callbacks.data_handler == NULL) {
                continue;
            }
        }

        lib->callbacks.request_handler(&req_impl->req);

    } nxt_queue_loop;
}


/* Public API: event loop reading only this context's own port. */
int
nxt_unit_run_ctx(nxt_unit_ctx_t *ctx)
{
    int                  rc;
    nxt_unit_read_buf_t  *rbuf;
    nxt_unit_ctx_impl_t  *ctx_impl;

    nxt_unit_ctx_use(ctx);

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    rc = NXT_UNIT_OK;

    while (nxt_fast_path(ctx_impl->online)) {
        rbuf = nxt_unit_read_buf_get(ctx);
        if (nxt_slow_path(rbuf == NULL)) {
            rc = NXT_UNIT_ERROR;
            break;
        }

    retry:

        rc = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
        if (rc == NXT_UNIT_AGAIN) {
            goto retry;
        }

        rc = nxt_unit_process_msg(ctx, rbuf, NULL);
        if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
            break;
        }

        rc = nxt_unit_process_pending_rbuf(ctx);
        if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
            break;
        }

        nxt_unit_process_ready_req(ctx);
    }

    nxt_unit_ctx_release(ctx);

    return rc;
}


/* True when 'rbuf' holds a bare READ_QUEUE control message. */
nxt_inline int
nxt_unit_is_read_queue(nxt_unit_read_buf_t *rbuf)
{
    nxt_port_msg_t  *port_msg;

    if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
        port_msg = (nxt_port_msg_t *) rbuf->buf;

        return port_msg->type == _NXT_PORT_MSG_READ_QUEUE;
    }

    return 0;
}


/* True when 'rbuf' holds a one-byte READ_SOCKET marker. */
nxt_inline int
nxt_unit_is_read_socket(nxt_unit_read_buf_t *rbuf)
{
    if
    (nxt_fast_path(rbuf->size == 1)) {
        return rbuf->buf[0] == _NXT_PORT_MSG_READ_SOCKET;
    }

    return 0;
}


/* True when 'rbuf' holds a bare SHM_ACK control message. */
nxt_inline int
nxt_unit_is_shm_ack(nxt_unit_read_buf_t *rbuf)
{
    nxt_port_msg_t  *port_msg;

    if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
        port_msg = (nxt_port_msg_t *) rbuf->buf;

        return port_msg->type == _NXT_PORT_MSG_SHM_ACK;
    }

    return 0;
}


/* True when 'rbuf' holds a bare QUIT control message. */
nxt_inline int
nxt_unit_is_quit(nxt_unit_read_buf_t *rbuf)
{
    nxt_port_msg_t  *port_msg;

    if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
        port_msg = (nxt_port_msg_t *) rbuf->buf;

        return port_msg->type == _NXT_PORT_MSG_QUIT;
    }

    return 0;
}


/* Public API: event loop reading only the shared application port; exits
 * when the context stops being ready to accept requests. */
int
nxt_unit_run_shared(nxt_unit_ctx_t *ctx)
{
    int                  rc;
    nxt_unit_impl_t      *lib;
    nxt_unit_read_buf_t  *rbuf;

    nxt_unit_ctx_use(ctx);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    rc = NXT_UNIT_OK;

    while (nxt_fast_path(nxt_unit_chk_ready(ctx))) {
        rbuf = nxt_unit_read_buf_get(ctx);
        if (nxt_slow_path(rbuf == NULL)) {
            rc = NXT_UNIT_ERROR;
            break;
        }

    retry:

        rc = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
        if (rc == NXT_UNIT_AGAIN) {
            goto retry;
        }

        if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
            nxt_unit_read_buf_release(ctx, rbuf);
            break;
        }

        rc = nxt_unit_process_msg(ctx, rbuf, NULL);
        if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
            break;
        }
    }

    nxt_unit_ctx_release(ctx);

    return rc;
}


/* Public API: non-blocking fetch of one request from the shared app queue.
 * Returns the request info or NULL when nothing is available. */
nxt_unit_request_info_t *
nxt_unit_dequeue_request(nxt_unit_ctx_t *ctx)
{
    int                      rc;
    nxt_unit_impl_t          *lib;
    nxt_unit_read_buf_t      *rbuf;
    nxt_unit_request_info_t  *req;

    nxt_unit_ctx_use(ctx);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    req = NULL;

    if (nxt_slow_path(!nxt_unit_chk_ready(ctx))) {
        goto done;
    }

    rbuf = nxt_unit_read_buf_get(ctx);
    if (nxt_slow_path(rbuf == NULL)) {
        goto done;
    }

    rc = nxt_unit_app_queue_recv(ctx, lib->shared_port, rbuf);
    if (rc != NXT_UNIT_OK) {
        nxt_unit_read_buf_release(ctx, rbuf);
        goto done;
    }

    (void) nxt_unit_process_msg(ctx, rbuf, &req);

done:

    nxt_unit_ctx_release(ctx);

    return req;
}


/* Public API: process one message arriving on 'port'. */
int
nxt_unit_process_port_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
{
    int  rc;

    nxt_unit_ctx_use(ctx);

    rc = nxt_unit_process_port_msg_impl(ctx, port);

    nxt_unit_ctx_release(ctx);

    return rc;
}


static int
nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
{
    int                  rc;
    nxt_unit_impl_t      *lib;
    nxt_unit_read_buf_t  *rbuf;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    if (port == lib->shared_port && !nxt_unit_chk_ready(ctx)) {
        return NXT_UNIT_AGAIN;
    }

    rbuf = nxt_unit_read_buf_get(ctx);
    if (nxt_slow_path(rbuf == NULL)) {
        return NXT_UNIT_ERROR;
    }

    if (port == lib->shared_port) {
        rc = nxt_unit_shared_port_recv(ctx, port, rbuf);

    } else {
        rc = nxt_unit_ctx_port_recv(ctx, port, rbuf);
    }

    if (rc != NXT_UNIT_OK) {
        nxt_unit_read_buf_release(ctx, rbuf);
        return rc;
    }

    rc = nxt_unit_process_msg(ctx, rbuf, NULL);
    if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
        return NXT_UNIT_ERROR;
    }

    rc = nxt_unit_process_pending_rbuf(ctx);
    if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
        return NXT_UNIT_ERROR;
    }

    nxt_unit_process_ready_req(ctx);

    return rc;
}


/* Public API: drop the caller's reference to the context. */
void
nxt_unit_done(nxt_unit_ctx_t *ctx)
{
    nxt_unit_ctx_release(ctx);
}


nxt_unit_ctx_t *
nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data)
{
    /*
     * Public API: create an additional worker context (typically one per
     * application thread): allocates the impl, creates a dedicated read
     * port with a shared-memory queue, and announces the new port to the
     * router.  Returns NULL on failure.
     */
    int                   rc, queue_fd;
    void                  *mem;
    nxt_unit_impl_t       *lib;
    nxt_unit_port_t       *port;
    nxt_unit_ctx_impl_t   *new_ctx;
    nxt_unit_port_impl_t  *port_impl;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    new_ctx = nxt_unit_malloc(ctx, sizeof(nxt_unit_ctx_impl_t)
                              + lib->request_data_size);
    if (nxt_slow_path(new_ctx == NULL)) {
        nxt_unit_alert(ctx, "failed to allocate context");

        return NULL;
    }

    rc = nxt_unit_ctx_init(lib, new_ctx, data);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_free(ctx, new_ctx);

        return NULL;
    }

    queue_fd = -1;

    port = nxt_unit_create_port(&new_ctx->ctx);
    if (nxt_slow_path(port == NULL)) {
        goto fail;
    }

    new_ctx->read_port = port;

    queue_fd = nxt_unit_shm_open(&new_ctx->ctx, sizeof(nxt_port_queue_t));
    if (nxt_slow_path(queue_fd == -1)) {
        goto fail;
    }

    mem = mmap(NULL, sizeof(nxt_port_queue_t),
               PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd,
                       strerror(errno), errno);

        goto fail;
    }

    nxt_port_queue_init(mem);

    port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
    port_impl->queue = mem;

    rc = nxt_unit_send_port(&new_ctx->ctx, lib->router_port, port, queue_fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        goto fail;
    }

    nxt_unit_close(queue_fd);

    return &new_ctx->ctx;

fail:

    if (queue_fd != -1) {
        nxt_unit_close(queue_fd);
    }

    nxt_unit_ctx_release(&new_ctx->ctx);

    return NULL;
}


/*
 * Tear down a context: fail any still-active requests, free cached
 * buffers/requests/websocket frames/read buffers, unregister the read
 * port, and drop the context's reference on the library.
 */
static void
nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl)
{
    nxt_unit_impl_t                  *lib;
    nxt_unit_mmap_buf_t              *mmap_buf;
    nxt_unit_read_buf_t              *rbuf;
    nxt_unit_request_info_impl_t     *req_impl;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit);

    nxt_queue_each(req_impl, &ctx_impl->active_req,
                   nxt_unit_request_info_impl_t, link)
    {
        nxt_unit_req_warn(&req_impl->req, "active request on ctx free");

        nxt_unit_request_done(&req_impl->req, NXT_UNIT_ERROR);

    } nxt_queue_loop;

    nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[0]);
    nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[1]);

    while (ctx_impl->free_buf != NULL) {
        mmap_buf = ctx_impl->free_buf;
        nxt_unit_mmap_buf_unlink(mmap_buf);
        nxt_unit_free(&ctx_impl->ctx, mmap_buf);
    }

    nxt_queue_each(req_impl, &ctx_impl->free_req,
                   nxt_unit_request_info_impl_t, link)
    {
        nxt_unit_request_info_free(req_impl);

    } nxt_queue_loop;

    nxt_queue_each(ws_impl, &ctx_impl->free_ws,
                   nxt_unit_websocket_frame_impl_t, link)
    {
        nxt_unit_websocket_frame_free(&ctx_impl->ctx, ws_impl);

    } nxt_queue_loop;

    nxt_queue_each(rbuf, &ctx_impl->free_rbuf, nxt_unit_read_buf_t, link)
    {
        /* The embedded read buffer lives inside ctx_impl; skip it. */
        if (rbuf != &ctx_impl->ctx_read_buf) {
            nxt_unit_free(&ctx_impl->ctx, rbuf);
        }
    } nxt_queue_loop;

    pthread_mutex_destroy(&ctx_impl->mutex);

    pthread_mutex_lock(&lib->mutex);

    nxt_queue_remove(&ctx_impl->link);

    pthread_mutex_unlock(&lib->mutex);

    if (nxt_fast_path(ctx_impl->read_port != NULL)) {
        nxt_unit_remove_port(lib, NULL, &ctx_impl->read_port->id);
        nxt_unit_port_release(ctx_impl->read_port);
    }

    /* The main context is embedded in 'lib' and is not freed here. */
    if (ctx_impl != &lib->main_ctx) {
        nxt_unit_free(&lib->main_ctx.ctx, ctx_impl);
    }

    nxt_unit_lib_release(lib);
}


/* SOCK_SEQPACKET is disabled to test SOCK_DGRAM on all platforms.
*/ 5237#if (0 || NXT_HAVE_AF_UNIX_SOCK_SEQPACKET) 5238#define NXT_UNIX_SOCKET SOCK_SEQPACKET 5239#else 5240#define NXT_UNIX_SOCKET SOCK_DGRAM 5241#endif 5242 5243 5244void 5245nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id) 5246{ 5247 nxt_unit_port_hash_id_t port_hash_id; 5248 5249 port_hash_id.pid = pid; 5250 port_hash_id.id = id; 5251 5252 port_id->pid = pid; 5253 port_id->hash = nxt_murmur_hash2(&port_hash_id, sizeof(port_hash_id)); 5254 port_id->id = id; 5255} 5256 5257 5258static nxt_unit_port_t * 5259nxt_unit_create_port(nxt_unit_ctx_t *ctx) 5260{ 5261 int rc, port_sockets[2]; 5262 nxt_unit_impl_t *lib; 5263 nxt_unit_port_t new_port, *port; 5264 nxt_unit_process_t *process; 5265 5266 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 5267 5268 rc = socketpair(AF_UNIX, NXT_UNIX_SOCKET, 0, port_sockets); 5269 if (nxt_slow_path(rc != 0)) { 5270 nxt_unit_warn(ctx, "create_port: socketpair() failed: %s (%d)", 5271 strerror(errno), errno); 5272 5273 return NULL; 5274 } 5275 5276#if (NXT_HAVE_SOCKOPT_SO_PASSCRED) 5277 int enable_creds = 1; 5278 5279 if (nxt_slow_path(setsockopt(port_sockets[0], SOL_SOCKET, SO_PASSCRED, 5280 &enable_creds, sizeof(enable_creds)) == -1)) 5281 { 5282 nxt_unit_warn(ctx, "failed to set SO_PASSCRED %s", strerror(errno)); 5283 return NULL; 5284 } 5285 5286 if (nxt_slow_path(setsockopt(port_sockets[1], SOL_SOCKET, SO_PASSCRED, 5287 &enable_creds, sizeof(enable_creds)) == -1)) 5288 { 5289 nxt_unit_warn(ctx, "failed to set SO_PASSCRED %s", strerror(errno)); 5290 return NULL; 5291 } 5292#endif 5293 5294 nxt_unit_debug(ctx, "create_port: new socketpair: %d->%d", 5295 port_sockets[0], port_sockets[1]); 5296 5297 pthread_mutex_lock(&lib->mutex); 5298 5299 process = nxt_unit_process_get(ctx, lib->pid); 5300 if (nxt_slow_path(process == NULL)) { 5301 pthread_mutex_unlock(&lib->mutex); 5302 5303 nxt_unit_close(port_sockets[0]); 5304 nxt_unit_close(port_sockets[1]); 5305 5306 return NULL; 5307 } 5308 5309 
nxt_unit_port_id_init(&new_port.id, lib->pid, process->next_port_id++); 5310 5311 new_port.in_fd = port_sockets[0]; 5312 new_port.out_fd = port_sockets[1]; 5313 new_port.data = NULL; 5314 5315 pthread_mutex_unlock(&lib->mutex); 5316 5317 nxt_unit_process_release(process); 5318 5319 port = nxt_unit_add_port(ctx, &new_port, NULL); 5320 if (nxt_slow_path(port == NULL)) { 5321 nxt_unit_close(port_sockets[0]); 5322 nxt_unit_close(port_sockets[1]); 5323 } 5324 5325 return port; 5326} 5327 5328 5329static int 5330nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, 5331 nxt_unit_port_t *port, int queue_fd) 5332{ 5333 ssize_t res; 5334 nxt_send_oob_t oob; 5335 nxt_unit_impl_t *lib; 5336 int fds[2] = { port->out_fd, queue_fd }; 5337 5338 struct { 5339 nxt_port_msg_t msg; 5340 nxt_port_msg_new_port_t new_port; 5341 } m; 5342 5343 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 5344 5345 m.msg.stream = 0; 5346 m.msg.pid = lib->pid; 5347 m.msg.reply_port = 0; 5348 m.msg.type = _NXT_PORT_MSG_NEW_PORT; 5349 m.msg.last = 0; 5350 m.msg.mmap = 0; 5351 m.msg.nf = 0; 5352 m.msg.mf = 0; 5353 5354 m.new_port.id = port->id.id; 5355 m.new_port.pid = port->id.pid; 5356 m.new_port.type = NXT_PROCESS_APP; 5357 m.new_port.max_size = 16 * 1024; 5358 m.new_port.max_share = 64 * 1024; 5359 5360 nxt_socket_msg_oob_init(&oob, fds); 5361 5362 res = nxt_unit_port_send(ctx, dst, &m, sizeof(m), &oob); 5363 5364 return (res == sizeof(m)) ? 
NXT_UNIT_OK : NXT_UNIT_ERROR; 5365} 5366 5367 5368nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port) 5369{ 5370 nxt_unit_port_impl_t *port_impl; 5371 5372 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 5373 5374 nxt_atomic_fetch_add(&port_impl->use_count, 1); 5375} 5376 5377 5378nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port) 5379{ 5380 long c; 5381 nxt_unit_port_impl_t *port_impl; 5382 5383 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 5384 5385 c = nxt_atomic_fetch_add(&port_impl->use_count, -1); 5386 5387 if (c == 1) { 5388 nxt_unit_debug(NULL, "destroy port{%d,%d} in_fd %d out_fd %d", 5389 (int) port->id.pid, (int) port->id.id, 5390 port->in_fd, port->out_fd); 5391 5392 nxt_unit_process_release(port_impl->process); 5393 5394 if (port->in_fd != -1) { 5395 nxt_unit_close(port->in_fd); 5396 5397 port->in_fd = -1; 5398 } 5399 5400 if (port->out_fd != -1) { 5401 nxt_unit_close(port->out_fd); 5402 5403 port->out_fd = -1; 5404 } 5405 5406 if (port_impl->queue != NULL) { 5407 munmap(port_impl->queue, (port->id.id == NXT_UNIT_SHARED_PORT_ID) 5408 ? 
/*
 * Registers "port" (optionally with its mapped message queue) in the
 * library-wide ports hash.
 *
 * Two paths:
 *   - a port with the same (pid, id) already exists: the new descriptors,
 *     data pointer and queue are merged into the existing entry, the
 *     caller's copy is overwritten with the merged state, and the existing
 *     port is returned;
 *   - otherwise a new nxt_unit_port_impl_t is allocated, hashed and linked
 *     into its owning process.
 *
 * Returns the registered public port, or NULL on allocation/hash failure.
 * Takes and releases lib->mutex internally; the callbacks.add_port()
 * application callback is always invoked with the mutex unlocked.
 */
static nxt_unit_port_t *
nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *queue)
{
    int                   rc, ready;
    nxt_queue_t           awaiting_req;
    nxt_unit_impl_t       *lib;
    nxt_unit_port_t       *old_port;
    nxt_unit_process_t    *process;
    nxt_unit_port_impl_t  *new_port, *old_port_impl;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->mutex);

    old_port = nxt_unit_port_hash_find(&lib->ports, &port->id, 0);

    if (nxt_slow_path(old_port != NULL)) {
        nxt_unit_debug(ctx, "add_port: duplicate port{%d,%d} "
                       "in_fd %d out_fd %d queue %p",
                       port->id.pid, port->id.id,
                       port->in_fd, port->out_fd, queue);

        /* Merge: adopt data/fds the existing entry is missing ... */
        if (old_port->data == NULL) {
            old_port->data = port->data;
            port->data = NULL;
        }

        if (old_port->in_fd == -1) {
            old_port->in_fd = port->in_fd;
            port->in_fd = -1;
        }

        /* ... and close descriptors it already has. */
        if (port->in_fd != -1) {
            nxt_unit_close(port->in_fd);
            port->in_fd = -1;
        }

        if (old_port->out_fd == -1) {
            old_port->out_fd = port->out_fd;
            port->out_fd = -1;
        }

        if (port->out_fd != -1) {
            nxt_unit_close(port->out_fd);
            port->out_fd = -1;
        }

        /* Hand the merged state back to the caller's struct. */
        *port = *old_port;

        nxt_queue_init(&awaiting_req);

        old_port_impl = nxt_container_of(old_port, nxt_unit_port_impl_t, port);

        if (old_port_impl->queue == NULL) {
            old_port_impl->queue = queue;
        }

        ready = (port->in_fd != -1 || port->out_fd != -1);

        /*
         * Port can be marked as 'ready' only after callbacks.add_port() call.
         * Otherwise, request may try to use the port before callback.
         */
        if (lib->callbacks.add_port == NULL && ready) {
            old_port_impl->ready = ready;

            /* Collect requests parked on this port; woken up below. */
            if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) {
                nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req);
                nxt_queue_init(&old_port_impl->awaiting_req);
            }
        }

        pthread_mutex_unlock(&lib->mutex);

        if (lib->callbacks.add_port != NULL && ready) {
            /* Application callback runs unlocked. */
            lib->callbacks.add_port(ctx, old_port);

            pthread_mutex_lock(&lib->mutex);

            old_port_impl->ready = ready;

            if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) {
                nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req);
                nxt_queue_init(&old_port_impl->awaiting_req);
            }

            pthread_mutex_unlock(&lib->mutex);
        }

        nxt_unit_process_awaiting_req(ctx, &awaiting_req);

        return old_port;
    }

    new_port = NULL;
    ready = 0;

    nxt_unit_debug(ctx, "add_port: port{%d,%d} in_fd %d out_fd %d queue %p",
                   port->id.pid, port->id.id,
                   port->in_fd, port->out_fd, queue);

    /* Find or create the owning process entry (takes a reference). */
    process = nxt_unit_process_get(ctx, port->id.pid);
    if (nxt_slow_path(process == NULL)) {
        goto unlock;
    }

    /* Keep the per-process port id allocator ahead of known ids. */
    if (port->id.id != NXT_UNIT_SHARED_PORT_ID
        && port->id.id >= process->next_port_id)
    {
        process->next_port_id = port->id.id + 1;
    }

    new_port = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
    if (nxt_slow_path(new_port == NULL)) {
        nxt_unit_alert(ctx, "add_port: %d,%d malloc() failed",
                       port->id.pid, port->id.id);

        goto unlock;
    }

    new_port->port = *port;

    rc = nxt_unit_port_hash_add(&lib->ports, &new_port->port);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_alert(ctx, "add_port: %d,%d hash_add failed",
                       port->id.pid, port->id.id);

        nxt_unit_free(ctx, new_port);

        new_port = NULL;

        goto unlock;
    }

    nxt_queue_insert_tail(&process->ports, &new_port->link);

    /* One reference held by the hash, one returned to the caller. */
    new_port->use_count = 2;
    new_port->process = process;
    new_port->queue = queue;
    new_port->from_socket = 0;
    new_port->socket_rbuf = NULL;

    nxt_queue_init(&new_port->awaiting_req);

    ready = (port->in_fd != -1 || port->out_fd != -1);

    if (lib->callbacks.add_port == NULL) {
        new_port->ready = ready;

    } else {
        /* Stays not-ready until the callback below has run. */
        new_port->ready = 0;
    }

    /* Reference ownership moved to new_port->process; skip the release. */
    process = NULL;

unlock:

    pthread_mutex_unlock(&lib->mutex);

    if (nxt_slow_path(process != NULL)) {
        nxt_unit_process_release(process);
    }

    if (lib->callbacks.add_port != NULL && new_port != NULL && ready) {
        lib->callbacks.add_port(ctx, &new_port->port);

        nxt_queue_init(&awaiting_req);

        pthread_mutex_lock(&lib->mutex);

        new_port->ready = 1;

        if (!nxt_queue_is_empty(&new_port->awaiting_req)) {
            nxt_queue_add(&awaiting_req, &new_port->awaiting_req);
            nxt_queue_init(&new_port->awaiting_req);
        }

        pthread_mutex_unlock(&lib->mutex);

        nxt_unit_process_awaiting_req(ctx, &awaiting_req);
    }

    return (new_port == NULL) ? NULL : &new_port->port;
}
/*
 * Tears down all ports of "process" and drops the process reference.
 *
 * Locking protocol: the caller must enter with lib->mutex LOCKED (see
 * nxt_unit_remove_pid()); this function UNLOCKS it once the ports are
 * detached, so remove_port callbacks and port releases run unlocked.
 */
static void
nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process)
{
    nxt_queue_t           ports;
    nxt_unit_port_impl_t  *port;

    nxt_queue_init(&ports);

    /* Steal the whole port list onto a local queue while locked. */
    nxt_queue_add(&ports, &process->ports);

    /* First pass (still locked): unlink every port from lib->ports. */
    nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {

        nxt_unit_remove_port_unsafe(lib, &port->port.id);

    } nxt_queue_loop;

    pthread_mutex_unlock(&lib->mutex);

    /* Second pass (unlocked): notify the application and drop references. */
    nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {

        nxt_queue_remove(&port->link);

        /* No specific context is associated with this removal, hence NULL. */
        if (lib->callbacks.remove_port != NULL) {
            lib->callbacks.remove_port(&lib->unit, NULL, &port->port);
        }

        nxt_unit_port_release(&port->port);

    } nxt_queue_loop;

    nxt_unit_process_release(process);
}
ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 5757 5758 nxt_unit_debug(ctx, "quit: %d/%d/%d", (int) quit_param, ctx_impl->ready, 5759 ctx_impl->online); 5760 5761 if (nxt_slow_path(!ctx_impl->online)) { 5762 return; 5763 } 5764 5765 skip_graceful_broadcast = quit_param == NXT_QUIT_GRACEFUL 5766 && !ctx_impl->ready; 5767 5768 cb = &lib->callbacks; 5769 5770 if (nxt_fast_path(ctx_impl->ready)) { 5771 ctx_impl->ready = 0; 5772 5773 if (cb->remove_port != NULL) { 5774 cb->remove_port(&lib->unit, ctx, lib->shared_port); 5775 } 5776 } 5777 5778 if (quit_param == NXT_QUIT_GRACEFUL) { 5779 pthread_mutex_lock(&ctx_impl->mutex); 5780 5781 quit = nxt_queue_is_empty(&ctx_impl->active_req) 5782 && nxt_queue_is_empty(&ctx_impl->pending_rbuf) 5783 && ctx_impl->wait_items == 0; 5784 5785 pthread_mutex_unlock(&ctx_impl->mutex); 5786 5787 } else { 5788 quit = 1; 5789 ctx_impl->quit_param = NXT_QUIT_GRACEFUL; 5790 } 5791 5792 if (quit) { 5793 ctx_impl->online = 0; 5794 5795 if (cb->quit != NULL) { 5796 cb->quit(ctx); 5797 } 5798 5799 nxt_queue_each(req_impl, &ctx_impl->active_req, 5800 nxt_unit_request_info_impl_t, link) 5801 { 5802 req = &req_impl->req; 5803 5804 nxt_unit_req_warn(req, "active request on ctx quit"); 5805 5806 if (cb->close_handler) { 5807 nxt_unit_req_debug(req, "close_handler"); 5808 5809 cb->close_handler(req); 5810 5811 } else { 5812 nxt_unit_request_done(req, NXT_UNIT_ERROR); 5813 } 5814 5815 } nxt_queue_loop; 5816 5817 if (nxt_fast_path(ctx_impl->read_port != NULL)) { 5818 nxt_unit_remove_port(lib, ctx, &ctx_impl->read_port->id); 5819 } 5820 } 5821 5822 if (ctx != &lib->main_ctx.ctx || skip_graceful_broadcast) { 5823 return; 5824 } 5825 5826 memset(&m.msg, 0, sizeof(nxt_port_msg_t)); 5827 5828 m.msg.pid = lib->pid; 5829 m.msg.type = _NXT_PORT_MSG_QUIT; 5830 m.quit_param = quit_param; 5831 5832 pthread_mutex_lock(&lib->mutex); 5833 5834 nxt_queue_each(ctx_impl, &lib->contexts, nxt_unit_ctx_impl_t, link) { 5835 5836 if (ctx == &ctx_impl->ctx 5837 || 
/*
 * Asks the router for the port identified by "port_id".
 *
 * Sends a _NXT_PORT_MSG_GET_PORT request to the router port; the router is
 * expected to reply on this context's read port (set as msg.reply_port).
 * Returns NXT_UNIT_OK when the whole message was written, NXT_UNIT_ERROR
 * otherwise.
 */
static int
nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id)
{
    ssize_t              res;
    nxt_unit_impl_t      *lib;
    nxt_unit_ctx_impl_t  *ctx_impl;

    struct {
        nxt_port_msg_t           msg;
        nxt_port_msg_get_port_t  get_port;
    } m;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    memset(&m.msg, 0, sizeof(nxt_port_msg_t));

    m.msg.pid = lib->pid;
    m.msg.reply_port = ctx_impl->read_port->id.id;  /* reply comes back here */
    m.msg.type = _NXT_PORT_MSG_GET_PORT;

    m.get_port.id = port_id->id;
    m.get_port.pid = port_id->pid;

    nxt_unit_debug(ctx, "get_port: %d %d", (int) port_id->pid,
                   (int) port_id->id);

    res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL);
    if (nxt_slow_path(res != sizeof(m))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}
(int) port->id.id); 5909 5910 return -1; 5911 } 5912 5913 nxt_unit_debug(ctx, "port{%d,%d} enqueue %d notify %d", 5914 (int) port->id.pid, (int) port->id.id, 5915 (int) buf_size, notify); 5916 5917 if (notify) { 5918 memcpy(&msg, buf, sizeof(nxt_port_msg_t)); 5919 5920 msg.type = _NXT_PORT_MSG_READ_QUEUE; 5921 5922 if (lib->callbacks.port_send == NULL) { 5923 ret = nxt_unit_sendmsg(ctx, port->out_fd, &msg, 5924 sizeof(nxt_port_msg_t), NULL); 5925 5926 nxt_unit_debug(ctx, "port{%d,%d} send %d read_queue", 5927 (int) port->id.pid, (int) port->id.id, 5928 (int) ret); 5929 5930 } else { 5931 ret = lib->callbacks.port_send(ctx, port, &msg, 5932 sizeof(nxt_port_msg_t), NULL, 0); 5933 5934 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d read_queue", 5935 (int) port->id.pid, (int) port->id.id, 5936 (int) ret); 5937 } 5938 5939 } 5940 5941 return buf_size; 5942 } 5943 5944 if (port_impl->queue != NULL) { 5945 msg.type = _NXT_PORT_MSG_READ_SOCKET; 5946 5947 rc = nxt_port_queue_send(port_impl->queue, &msg.type, 1, ¬ify); 5948 if (nxt_slow_path(rc != NXT_OK)) { 5949 nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow", 5950 (int) port->id.pid, (int) port->id.id); 5951 5952 return -1; 5953 } 5954 5955 nxt_unit_debug(ctx, "port{%d,%d} enqueue 1 read_socket notify %d", 5956 (int) port->id.pid, (int) port->id.id, notify); 5957 } 5958 5959 if (lib->callbacks.port_send != NULL) { 5960 ret = lib->callbacks.port_send(ctx, port, buf, buf_size, 5961 oob != NULL ? oob->buf : NULL, 5962 oob != NULL ? 
oob->size : 0); 5963 5964 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d", 5965 (int) port->id.pid, (int) port->id.id, 5966 (int) ret); 5967 5968 } else { 5969 ret = nxt_unit_sendmsg(ctx, port->out_fd, buf, buf_size, oob); 5970 5971 nxt_unit_debug(ctx, "port{%d,%d} sendmsg %d", 5972 (int) port->id.pid, (int) port->id.id, 5973 (int) ret); 5974 } 5975 5976 return ret; 5977} 5978 5979 5980static ssize_t 5981nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, 5982 const void *buf, size_t buf_size, const nxt_send_oob_t *oob) 5983{ 5984 int err; 5985 ssize_t n; 5986 struct iovec iov[1]; 5987 5988 iov[0].iov_base = (void *) buf; 5989 iov[0].iov_len = buf_size; 5990 5991retry: 5992 5993 n = nxt_sendmsg(fd, iov, 1, oob); 5994 5995 if (nxt_slow_path(n == -1)) { 5996 err = errno; 5997 5998 if (err == EINTR) { 5999 goto retry; 6000 } 6001 6002 /* 6003 * FIXME: This should be "alert" after router graceful shutdown 6004 * implementation. 6005 */ 6006 nxt_unit_warn(ctx, "sendmsg(%d, %d) failed: %s (%d)", 6007 fd, (int) buf_size, strerror(err), err); 6008 6009 } else { 6010 nxt_unit_debug(ctx, "sendmsg(%d, %d, %d): %d", fd, (int) buf_size, 6011 (oob != NULL ? 
(int) oob->size : 0), (int) n); 6012 } 6013 6014 return n; 6015} 6016 6017 6018static int 6019nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 6020 nxt_unit_read_buf_t *rbuf) 6021{ 6022 int res, read; 6023 nxt_unit_port_impl_t *port_impl; 6024 6025 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 6026 6027 read = 0; 6028 6029retry: 6030 6031 if (port_impl->from_socket > 0) { 6032 if (port_impl->socket_rbuf != NULL 6033 && port_impl->socket_rbuf->size > 0) 6034 { 6035 port_impl->from_socket--; 6036 6037 nxt_unit_rbuf_cpy(rbuf, port_impl->socket_rbuf); 6038 port_impl->socket_rbuf->size = 0; 6039 6040 nxt_unit_debug(ctx, "port{%d,%d} use suspended message %d", 6041 (int) port->id.pid, (int) port->id.id, 6042 (int) rbuf->size); 6043 6044 return NXT_UNIT_OK; 6045 } 6046 6047 } else { 6048 res = nxt_unit_port_queue_recv(port, rbuf); 6049 6050 if (res == NXT_UNIT_OK) { 6051 if (nxt_unit_is_read_socket(rbuf)) { 6052 port_impl->from_socket++; 6053 6054 nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d", 6055 (int) port->id.pid, (int) port->id.id, 6056 port_impl->from_socket); 6057 6058 goto retry; 6059 } 6060 6061 nxt_unit_debug(ctx, "port{%d,%d} dequeue %d", 6062 (int) port->id.pid, (int) port->id.id, 6063 (int) rbuf->size); 6064 6065 return NXT_UNIT_OK; 6066 } 6067 } 6068 6069 if (read) { 6070 return NXT_UNIT_AGAIN; 6071 } 6072 6073 res = nxt_unit_port_recv(ctx, port, rbuf); 6074 if (nxt_slow_path(res == NXT_UNIT_ERROR)) { 6075 return NXT_UNIT_ERROR; 6076 } 6077 6078 read = 1; 6079 6080 if (nxt_unit_is_read_queue(rbuf)) { 6081 nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue", 6082 (int) port->id.pid, (int) port->id.id, (int) rbuf->size); 6083 6084 goto retry; 6085 } 6086 6087 nxt_unit_debug(ctx, "port{%d,%d} recvmsg %d", 6088 (int) port->id.pid, (int) port->id.id, 6089 (int) rbuf->size); 6090 6091 if (res == NXT_UNIT_AGAIN) { 6092 return NXT_UNIT_AGAIN; 6093 } 6094 6095 if (port_impl->from_socket > 0) { 6096 
port_impl->from_socket--; 6097 6098 return NXT_UNIT_OK; 6099 } 6100 6101 nxt_unit_debug(ctx, "port{%d,%d} suspend message %d", 6102 (int) port->id.pid, (int) port->id.id, 6103 (int) rbuf->size); 6104 6105 if (port_impl->socket_rbuf == NULL) { 6106 port_impl->socket_rbuf = nxt_unit_read_buf_get(ctx); 6107 6108 if (nxt_slow_path(port_impl->socket_rbuf == NULL)) { 6109 return NXT_UNIT_ERROR; 6110 } 6111 6112 port_impl->socket_rbuf->size = 0; 6113 } 6114 6115 if (port_impl->socket_rbuf->size > 0) { 6116 nxt_unit_alert(ctx, "too many port socket messages"); 6117 6118 return NXT_UNIT_ERROR; 6119 } 6120 6121 nxt_unit_rbuf_cpy(port_impl->socket_rbuf, rbuf); 6122 6123 rbuf->oob.size = 0; 6124 6125 goto retry; 6126} 6127 6128 6129nxt_inline void 6130nxt_unit_rbuf_cpy(nxt_unit_read_buf_t *dst, nxt_unit_read_buf_t *src) 6131{ 6132 memcpy(dst->buf, src->buf, src->size); 6133 dst->size = src->size; 6134 dst->oob.size = src->oob.size; 6135 memcpy(dst->oob.buf, src->oob.buf, src->oob.size); 6136} 6137 6138 6139static int 6140nxt_unit_shared_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 6141 nxt_unit_read_buf_t *rbuf) 6142{ 6143 int res; 6144 nxt_unit_port_impl_t *port_impl; 6145 6146 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 6147 6148retry: 6149 6150 res = nxt_unit_app_queue_recv(ctx, port, rbuf); 6151 6152 if (res == NXT_UNIT_OK) { 6153 return NXT_UNIT_OK; 6154 } 6155 6156 if (res == NXT_UNIT_AGAIN) { 6157 res = nxt_unit_port_recv(ctx, port, rbuf); 6158 if (nxt_slow_path(res == NXT_UNIT_ERROR)) { 6159 return NXT_UNIT_ERROR; 6160 } 6161 6162 if (nxt_unit_is_read_queue(rbuf)) { 6163 nxt_app_queue_notification_received(port_impl->queue); 6164 6165 nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue", 6166 (int) port->id.pid, (int) port->id.id, (int) rbuf->size); 6167 6168 goto retry; 6169 } 6170 } 6171 6172 return res; 6173} 6174 6175 6176static int 6177nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 6178 nxt_unit_read_buf_t *rbuf) 6179{ 
6180 int fd, err; 6181 size_t oob_size; 6182 struct iovec iov[1]; 6183 nxt_unit_impl_t *lib; 6184 6185 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 6186 6187 if (lib->callbacks.port_recv != NULL) { 6188 oob_size = sizeof(rbuf->oob.buf); 6189 6190 rbuf->size = lib->callbacks.port_recv(ctx, port, 6191 rbuf->buf, sizeof(rbuf->buf), 6192 rbuf->oob.buf, &oob_size); 6193 6194 nxt_unit_debug(ctx, "port{%d,%d} recvcb %d", 6195 (int) port->id.pid, (int) port->id.id, (int) rbuf->size); 6196 6197 if (nxt_slow_path(rbuf->size < 0)) { 6198 return NXT_UNIT_ERROR; 6199 } 6200 6201 rbuf->oob.size = oob_size; 6202 return NXT_UNIT_OK; 6203 } 6204 6205 iov[0].iov_base = rbuf->buf; 6206 iov[0].iov_len = sizeof(rbuf->buf); 6207 6208 fd = port->in_fd; 6209 6210retry: 6211 6212 rbuf->size = nxt_recvmsg(fd, iov, 1, &rbuf->oob); 6213 6214 if (nxt_slow_path(rbuf->size == -1)) { 6215 err = errno; 6216 6217 if (err == EINTR) { 6218 goto retry; 6219 } 6220 6221 if (err == EAGAIN) { 6222 nxt_unit_debug(ctx, "recvmsg(%d) failed: %s (%d)", 6223 fd, strerror(err), err); 6224 6225 return NXT_UNIT_AGAIN; 6226 } 6227 6228 nxt_unit_alert(ctx, "recvmsg(%d) failed: %s (%d)", 6229 fd, strerror(err), err); 6230 6231 return NXT_UNIT_ERROR; 6232 } 6233 6234 nxt_unit_debug(ctx, "recvmsg(%d): %d", fd, (int) rbuf->size); 6235 6236 return NXT_UNIT_OK; 6237} 6238 6239 6240static int 6241nxt_unit_port_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf) 6242{ 6243 nxt_unit_port_impl_t *port_impl; 6244 6245 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 6246 6247 rbuf->size = nxt_port_queue_recv(port_impl->queue, rbuf->buf); 6248 6249 return (rbuf->size == -1) ? 
/*
 * Dequeues one message from the application's shared request queue.
 *
 * nxt_app_queue_recv() hands out a (message, cookie) pair, but the message
 * is only owned by this worker once nxt_app_queue_cancel() confirms the
 * cookie against the message stream; if another consumer already claimed
 * it, the read is retried.  Returns NXT_UNIT_OK with the message in rbuf,
 * or NXT_UNIT_AGAIN when the queue is empty (rbuf->size == -1).
 */
static int
nxt_unit_app_queue_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
    nxt_unit_read_buf_t *rbuf)
{
    uint32_t              cookie;
    nxt_port_msg_t        *port_msg;
    nxt_app_queue_t       *queue;
    nxt_unit_impl_t       *lib;
    nxt_unit_port_impl_t  *port_impl;

    struct {
        nxt_port_msg_t  msg;
        uint8_t         quit_param;
    } nxt_packed m;

    port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
    queue = port_impl->queue;

retry:

    rbuf->size = nxt_app_queue_recv(queue, rbuf->buf, &cookie);

    nxt_unit_debug(NULL, "app_queue_recv: %d", (int) rbuf->size);

    if (rbuf->size >= (ssize_t) sizeof(nxt_port_msg_t)) {
        port_msg = (nxt_port_msg_t *) rbuf->buf;

        /* Claim the message; failure means another worker took it. */
        if (nxt_app_queue_cancel(queue, cookie, port_msg->stream)) {
            lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

            if (lib->request_limit != 0) {
                nxt_atomic_fetch_add(&lib->request_count, 1);

                /*
                 * On reaching the configured request limit, ask the main
                 * context to shut down gracefully via a QUIT message.
                 */
                if (nxt_slow_path(lib->request_count >= lib->request_limit)) {
                    nxt_unit_debug(ctx, "request limit reached");

                    memset(&m.msg, 0, sizeof(nxt_port_msg_t));

                    m.msg.pid = lib->pid;
                    m.msg.type = _NXT_PORT_MSG_QUIT;
                    m.quit_param = NXT_QUIT_GRACEFUL;

                    (void) nxt_unit_port_send(ctx, lib->main_ctx.read_port,
                                              &m, sizeof(m), NULL);
                }
            }

            return NXT_UNIT_OK;
        }

        nxt_unit_debug(NULL, "app_queue_recv: message cancelled");

        goto retry;
    }

    return (rbuf->size == -1) ? NXT_UNIT_AGAIN : NXT_UNIT_OK;
}
6398 lhq->key.length = sizeof(nxt_unit_port_hash_id_t); 6399 lhq->key.start = (u_char *) port_hash_id; 6400 lhq->proto = &lvlhsh_ports_proto; 6401 lhq->pool = NULL; 6402} 6403 6404 6405static int 6406nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port) 6407{ 6408 nxt_int_t res; 6409 nxt_lvlhsh_query_t lhq; 6410 nxt_unit_port_hash_id_t port_hash_id; 6411 6412 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, &port->id); 6413 lhq.replace = 0; 6414 lhq.value = port; 6415 6416 res = nxt_lvlhsh_insert(port_hash, &lhq); 6417 6418 switch (res) { 6419 6420 case NXT_OK: 6421 return NXT_UNIT_OK; 6422 6423 default: 6424 return NXT_UNIT_ERROR; 6425 } 6426} 6427 6428 6429static nxt_unit_port_t * 6430nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, 6431 int remove) 6432{ 6433 nxt_int_t res; 6434 nxt_lvlhsh_query_t lhq; 6435 nxt_unit_port_hash_id_t port_hash_id; 6436 6437 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, port_id); 6438 6439 if (remove) { 6440 res = nxt_lvlhsh_delete(port_hash, &lhq); 6441 6442 } else { 6443 res = nxt_lvlhsh_find(port_hash, &lhq); 6444 } 6445 6446 switch (res) { 6447 6448 case NXT_OK: 6449 if (!remove) { 6450 nxt_unit_port_use(lhq.value); 6451 } 6452 6453 return lhq.value; 6454 6455 default: 6456 return NULL; 6457 } 6458} 6459 6460 6461static nxt_int_t 6462nxt_unit_request_hash_test(nxt_lvlhsh_query_t *lhq, void *data) 6463{ 6464 return NXT_OK; 6465} 6466 6467 6468static const nxt_lvlhsh_proto_t lvlhsh_requests_proto nxt_aligned(64) = { 6469 NXT_LVLHSH_DEFAULT, 6470 nxt_unit_request_hash_test, 6471 nxt_unit_lvlhsh_alloc, 6472 nxt_unit_lvlhsh_free, 6473}; 6474 6475 6476static int 6477nxt_unit_request_hash_add(nxt_unit_ctx_t *ctx, 6478 nxt_unit_request_info_t *req) 6479{ 6480 uint32_t *stream; 6481 nxt_int_t res; 6482 nxt_lvlhsh_query_t lhq; 6483 nxt_unit_ctx_impl_t *ctx_impl; 6484 nxt_unit_request_info_impl_t *req_impl; 6485 6486 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 6487 if 
/*
 * Looks up (and, with remove != 0, also deletes) the request registered
 * under "stream" in this context's requests hash.
 *
 * Returns the public nxt_unit_request_info_t pointer or NULL if absent.
 * The stored value is the nxt_unit_request_info_impl_t itself; since "req"
 * is its first member, lhq.value is valid as either type, which is why it
 * is returned directly while container_of() recovers the impl.
 */
static nxt_unit_request_info_t *
nxt_unit_request_hash_find(nxt_unit_ctx_t *ctx, uint32_t stream, int remove)
{
    nxt_int_t                     res;
    nxt_lvlhsh_query_t            lhq;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    lhq.key_hash = nxt_murmur_hash2(&stream, sizeof(stream));
    lhq.key.length = sizeof(stream);
    lhq.key.start = (u_char *) &stream;
    lhq.proto = &lvlhsh_requests_proto;
    lhq.pool = NULL;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (remove) {
        res = nxt_lvlhsh_delete(&ctx_impl->requests, &lhq);

    } else {
        res = nxt_lvlhsh_find(&ctx_impl->requests, &lhq);
    }

    pthread_mutex_unlock(&ctx_impl->mutex);

    switch (res) {

    case NXT_OK:
        req_impl = nxt_container_of(lhq.value, nxt_unit_request_info_impl_t,
                                    req);
        /* The entry just left the hash; keep the flag in sync. */
        if (remove) {
            req_impl->in_hash = 0;
        }

        return lhq.value;

    default:
        return NULL;
    }
}
6567{ 6568 int log_fd, n; 6569 char msg[NXT_MAX_ERROR_STR], *p, *end; 6570 pid_t pid; 6571 va_list ap; 6572 nxt_unit_impl_t *lib; 6573 6574 if (nxt_fast_path(ctx != NULL)) { 6575 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 6576 6577 pid = lib->pid; 6578 log_fd = lib->log_fd; 6579 6580 } else { 6581 pid = nxt_unit_pid; 6582 log_fd = STDERR_FILENO; 6583 } 6584 6585 p = msg; 6586 end = p + sizeof(msg) - 1; 6587 6588 p = nxt_unit_snprint_prefix(p, end, pid, level); 6589 6590 va_start(ap, fmt); 6591 p += vsnprintf(p, end - p, fmt, ap); 6592 va_end(ap); 6593 6594 if (nxt_slow_path(p > end)) { 6595 memcpy(end - 5, "[...]", 5); 6596 p = end; 6597 } 6598 6599 *p++ = '\n'; 6600 6601 n = write(log_fd, msg, p - msg); 6602 if (nxt_slow_path(n < 0)) { 6603 fprintf(stderr, "Failed to write log: %.*s", (int) (p - msg), msg); 6604 } 6605} 6606 6607 6608void 6609nxt_unit_req_log(nxt_unit_request_info_t *req, int level, const char *fmt, ...) 6610{ 6611 int log_fd, n; 6612 char msg[NXT_MAX_ERROR_STR], *p, *end; 6613 pid_t pid; 6614 va_list ap; 6615 nxt_unit_impl_t *lib; 6616 nxt_unit_request_info_impl_t *req_impl; 6617 6618 if (nxt_fast_path(req != NULL)) { 6619 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit); 6620 6621 pid = lib->pid; 6622 log_fd = lib->log_fd; 6623 6624 } else { 6625 pid = nxt_unit_pid; 6626 log_fd = STDERR_FILENO; 6627 } 6628 6629 p = msg; 6630 end = p + sizeof(msg) - 1; 6631 6632 p = nxt_unit_snprint_prefix(p, end, pid, level); 6633 6634 if (nxt_fast_path(req != NULL)) { 6635 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 6636 6637 p += snprintf(p, end - p, "#%"PRIu32": ", req_impl->stream); 6638 } 6639 6640 va_start(ap, fmt); 6641 p += vsnprintf(p, end - p, fmt, ap); 6642 va_end(ap); 6643 6644 if (nxt_slow_path(p > end)) { 6645 memcpy(end - 5, "[...]", 5); 6646 p = end; 6647 } 6648 6649 *p++ = '\n'; 6650 6651 n = write(log_fd, msg, p - msg); 6652 if (nxt_slow_path(n < 0)) { 6653 fprintf(stderr, "Failed to write 
log: %.*s", (int) (p - msg), msg); 6654 } 6655} 6656 6657 6658static const char * nxt_unit_log_levels[] = { 6659 "alert", 6660 "error", 6661 "warn", 6662 "notice", 6663 "info", 6664 "debug", 6665}; 6666 6667 6668static char *
| 201static void *nxt_unit_lvlhsh_alloc(void *data, size_t size); 202static void nxt_unit_lvlhsh_free(void *data, void *p); 203static int nxt_unit_memcasecmp(const void *p1, const void *p2, size_t length); 204 205 206struct nxt_unit_mmap_buf_s { 207 nxt_unit_buf_t buf; 208 209 nxt_unit_mmap_buf_t *next; 210 nxt_unit_mmap_buf_t **prev; 211 212 nxt_port_mmap_header_t *hdr; 213 nxt_unit_request_info_t *req; 214 nxt_unit_ctx_impl_t *ctx_impl; 215 char *free_ptr; 216 char *plain_ptr; 217}; 218 219 220struct nxt_unit_recv_msg_s { 221 uint32_t stream; 222 nxt_pid_t pid; 223 nxt_port_id_t reply_port; 224 225 uint8_t last; /* 1 bit */ 226 uint8_t mmap; /* 1 bit */ 227 228 void *start; 229 uint32_t size; 230 231 int fd[2]; 232 233 nxt_unit_mmap_buf_t *incoming_buf; 234}; 235 236 237typedef enum { 238 NXT_UNIT_RS_START = 0, 239 NXT_UNIT_RS_RESPONSE_INIT, 240 NXT_UNIT_RS_RESPONSE_HAS_CONTENT, 241 NXT_UNIT_RS_RESPONSE_SENT, 242 NXT_UNIT_RS_RELEASED, 243} nxt_unit_req_state_t; 244 245 246struct nxt_unit_request_info_impl_s { 247 nxt_unit_request_info_t req; 248 249 uint32_t stream; 250 251 nxt_unit_mmap_buf_t *outgoing_buf; 252 nxt_unit_mmap_buf_t *incoming_buf; 253 254 nxt_unit_req_state_t state; 255 uint8_t websocket; 256 uint8_t in_hash; 257 258 /* for nxt_unit_ctx_impl_t.free_req or active_req */ 259 nxt_queue_link_t link; 260 /* for nxt_unit_port_impl_t.awaiting_req */ 261 nxt_queue_link_t port_wait_link; 262 263 char extra_data[]; 264}; 265 266 267struct nxt_unit_websocket_frame_impl_s { 268 nxt_unit_websocket_frame_t ws; 269 270 nxt_unit_mmap_buf_t *buf; 271 272 nxt_queue_link_t link; 273 274 nxt_unit_ctx_impl_t *ctx_impl; 275}; 276 277 278struct nxt_unit_read_buf_s { 279 nxt_queue_link_t link; 280 nxt_unit_ctx_impl_t *ctx_impl; 281 ssize_t size; 282 nxt_recv_oob_t oob; 283 char buf[16384]; 284}; 285 286 287struct nxt_unit_ctx_impl_s { 288 nxt_unit_ctx_t ctx; 289 290 nxt_atomic_t use_count; 291 nxt_atomic_t wait_items; 292 293 pthread_mutex_t mutex; 294 295 
nxt_unit_port_t *read_port; 296 297 nxt_queue_link_t link; 298 299 nxt_unit_mmap_buf_t *free_buf; 300 301 /* of nxt_unit_request_info_impl_t */ 302 nxt_queue_t free_req; 303 304 /* of nxt_unit_websocket_frame_impl_t */ 305 nxt_queue_t free_ws; 306 307 /* of nxt_unit_request_info_impl_t */ 308 nxt_queue_t active_req; 309 310 /* of nxt_unit_request_info_impl_t */ 311 nxt_lvlhsh_t requests; 312 313 /* of nxt_unit_request_info_impl_t */ 314 nxt_queue_t ready_req; 315 316 /* of nxt_unit_read_buf_t */ 317 nxt_queue_t pending_rbuf; 318 319 /* of nxt_unit_read_buf_t */ 320 nxt_queue_t free_rbuf; 321 322 uint8_t online; /* 1 bit */ 323 uint8_t ready; /* 1 bit */ 324 uint8_t quit_param; 325 326 nxt_unit_mmap_buf_t ctx_buf[2]; 327 nxt_unit_read_buf_t ctx_read_buf; 328 329 nxt_unit_request_info_impl_t req; 330}; 331 332 333struct nxt_unit_mmap_s { 334 nxt_port_mmap_header_t *hdr; 335 pthread_t src_thread; 336 337 /* of nxt_unit_read_buf_t */ 338 nxt_queue_t awaiting_rbuf; 339}; 340 341 342struct nxt_unit_mmaps_s { 343 pthread_mutex_t mutex; 344 uint32_t size; 345 uint32_t cap; 346 nxt_atomic_t allocated_chunks; 347 nxt_unit_mmap_t *elts; 348}; 349 350 351struct nxt_unit_impl_s { 352 nxt_unit_t unit; 353 nxt_unit_callbacks_t callbacks; 354 355 nxt_atomic_t use_count; 356 nxt_atomic_t request_count; 357 358 uint32_t request_data_size; 359 uint32_t shm_mmap_limit; 360 uint32_t request_limit; 361 362 pthread_mutex_t mutex; 363 364 nxt_lvlhsh_t processes; /* of nxt_unit_process_t */ 365 nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */ 366 367 nxt_unit_port_t *router_port; 368 nxt_unit_port_t *shared_port; 369 370 nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ 371 372 nxt_unit_mmaps_t incoming; 373 nxt_unit_mmaps_t outgoing; 374 375 pid_t pid; 376 int log_fd; 377 378 nxt_unit_ctx_impl_t main_ctx; 379}; 380 381 382struct nxt_unit_port_impl_s { 383 nxt_unit_port_t port; 384 385 nxt_atomic_t use_count; 386 387 /* for nxt_unit_process_t.ports */ 388 nxt_queue_link_t link; 389 
nxt_unit_process_t   *process;

    /* of nxt_unit_request_info_impl_t */
    nxt_queue_t          awaiting_req;

    int                  ready;

    void                 *queue;

    int                  from_socket;
    nxt_unit_read_buf_t  *socket_rbuf;
};


/* A peer process and the ports connected to it. */
struct nxt_unit_process_s {
    pid_t            pid;

    nxt_queue_t      ports;     /* of nxt_unit_port_impl_t */

    nxt_unit_impl_t  *lib;

    nxt_atomic_t     use_count;

    uint32_t         next_port_id;
};


/* Explicitly using 32 bit types to avoid possible alignment. */
typedef struct {
    int32_t   pid;
    uint32_t  id;
} nxt_unit_port_hash_id_t;


static pid_t  nxt_unit_pid;   /* cached getpid(), later synced to read_port */


/*
 * Library entry point: allocates the nxt_unit_impl_t, wires up the ready,
 * router and read ports (either from 'init' or from the NXT_UNIT_INIT_ENV
 * environment), maps the port and shared queues, and reports READY to the
 * daemon.  Returns the main context, or NULL on error.
 */
nxt_unit_ctx_t *
nxt_unit_init(nxt_unit_init_t *init)
{
    int              rc, queue_fd, shared_queue_fd;
    void             *mem;
    uint32_t         ready_stream, shm_limit, request_limit;
    nxt_unit_ctx_t   *ctx;
    nxt_unit_impl_t  *lib;
    nxt_unit_port_t  ready_port, router_port, read_port, shared_port;

    nxt_unit_pid = getpid();

    lib = nxt_unit_create(init);
    if (nxt_slow_path(lib == NULL)) {
        return NULL;
    }

    queue_fd = -1;
    mem = MAP_FAILED;
    shared_port.out_fd = -1;
    shared_port.data = NULL;

    /* Ports given explicitly in 'init' take precedence over environment. */
    if (init->ready_port.id.pid != 0
        && init->ready_stream != 0
        && init->read_port.id.pid != 0)
    {
        ready_port = init->ready_port;
        ready_stream = init->ready_stream;
        router_port = init->router_port;
        read_port = init->read_port;
        lib->log_fd = init->log_fd;

        nxt_unit_port_id_init(&ready_port.id, ready_port.id.pid,
                              ready_port.id.id);
        nxt_unit_port_id_init(&router_port.id, router_port.id.pid,
                              router_port.id.id);
        nxt_unit_port_id_init(&read_port.id, read_port.id.pid,
                              read_port.id.id);

        shared_port.in_fd = init->shared_port_fd;
        shared_queue_fd = init->shared_queue_fd;

    } else {
        rc = nxt_unit_read_env(&ready_port, &router_port, &read_port,
                               &shared_port.in_fd, &shared_queue_fd,
                               &lib->log_fd, &ready_stream, &shm_limit,
                               &request_limit);
        if
(nxt_slow_path(rc != NXT_UNIT_OK)) {
            goto fail;
        }

        /* Convert the byte limit into a number of mmap segments. */
        lib->shm_mmap_limit = (shm_limit + PORT_MMAP_DATA_SIZE - 1)
                                  / PORT_MMAP_DATA_SIZE;
        lib->request_limit = request_limit;
    }

    if (nxt_slow_path(lib->shm_mmap_limit < 1)) {
        lib->shm_mmap_limit = 1;
    }

    lib->pid = read_port.id.pid;
    nxt_unit_pid = lib->pid;

    ctx = &lib->main_ctx.ctx;

    rc = nxt_unit_fd_blocking(router_port.out_fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        goto fail;
    }

    lib->router_port = nxt_unit_add_port(ctx, &router_port, NULL);
    if (nxt_slow_path(lib->router_port == NULL)) {
        nxt_unit_alert(NULL, "failed to add router_port");

        goto fail;
    }

    /* Shared-memory queue for messages addressed to this process. */
    queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t));
    if (nxt_slow_path(queue_fd == -1)) {
        goto fail;
    }

    mem = mmap(NULL, sizeof(nxt_port_queue_t),
               PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd,
                       strerror(errno), errno);

        goto fail;
    }

    nxt_port_queue_init(mem);

    rc = nxt_unit_fd_blocking(read_port.in_fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        goto fail;
    }

    lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port, mem);
    if (nxt_slow_path(lib->main_ctx.read_port == NULL)) {
        nxt_unit_alert(NULL, "failed to add read_port");

        goto fail;
    }

    rc = nxt_unit_fd_blocking(ready_port.out_fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        goto fail;
    }

    nxt_unit_port_id_init(&shared_port.id, read_port.id.pid,
                          NXT_UNIT_SHARED_PORT_ID);

    /*
     * Map the application-wide shared queue.  NOTE(review): 'mem' is reused
     * here, but the fail path munmap()s with sizeof(nxt_port_queue_t) —
     * confirm the two queue types have compatible sizes.
     */
    mem = mmap(NULL, sizeof(nxt_app_queue_t), PROT_READ | PROT_WRITE,
               MAP_SHARED, shared_queue_fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", shared_queue_fd,
                       strerror(errno), errno);

        goto fail;
    }
nxt_unit_close(shared_queue_fd);

    lib->shared_port = nxt_unit_add_port(ctx, &shared_port, mem);
    if (nxt_slow_path(lib->shared_port == NULL)) {
        nxt_unit_alert(NULL, "failed to add shared_port");

        goto fail;
    }

    /* Tell the daemon this process is up, passing our queue fd. */
    rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream, queue_fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_alert(NULL, "failed to send READY message");

        goto fail;
    }

    nxt_unit_close(ready_port.out_fd);
    nxt_unit_close(queue_fd);

    return ctx;

fail:

    if (mem != MAP_FAILED) {
        munmap(mem, sizeof(nxt_port_queue_t));
    }

    if (queue_fd != -1) {
        nxt_unit_close(queue_fd);
    }

    nxt_unit_ctx_release(&lib->main_ctx.ctx);

    return NULL;
}


/*
 * Allocates and initializes the library singleton from 'init'.
 * Returns NULL (after releasing what was acquired) on error.
 */
static nxt_unit_impl_t *
nxt_unit_create(nxt_unit_init_t *init)
{
    int                   rc;
    nxt_unit_impl_t       *lib;
    nxt_unit_callbacks_t  *cb;

    /* request_data_size extra bytes are appended to the allocation. */
    lib = nxt_unit_malloc(NULL,
                          sizeof(nxt_unit_impl_t) + init->request_data_size);
    if (nxt_slow_path(lib == NULL)) {
        nxt_unit_alert(NULL, "failed to allocate unit struct");

        return NULL;
    }

    rc = pthread_mutex_init(&lib->mutex, NULL);
    if (nxt_slow_path(rc != 0)) {
        nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc);

        goto fail;
    }

    lib->unit.data = init->data;
    lib->callbacks = init->callbacks;

    lib->request_data_size = init->request_data_size;
    lib->shm_mmap_limit = (init->shm_limit + PORT_MMAP_DATA_SIZE - 1)
                              / PORT_MMAP_DATA_SIZE;
    lib->request_limit = init->request_limit;

    lib->processes.slot = NULL;
    lib->ports.slot = NULL;

    lib->log_fd = STDERR_FILENO;

    nxt_queue_init(&lib->contexts);

    lib->use_count = 0;
    lib->request_count = 0;
    lib->router_port = NULL;
    lib->shared_port = NULL;

    rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        pthread_mutex_destroy(&lib->mutex);
goto fail;
    }

    cb = &lib->callbacks;

    /* A request handler is mandatory; the other callbacks are optional. */
    if (cb->request_handler == NULL) {
        nxt_unit_alert(NULL, "request_handler is NULL");

        pthread_mutex_destroy(&lib->mutex);
        goto fail;
    }

    nxt_unit_mmaps_init(&lib->incoming);
    nxt_unit_mmaps_init(&lib->outgoing);

    return lib;

fail:

    nxt_unit_free(NULL, lib);

    return NULL;
}


/*
 * Initializes a context: registers it in lib->contexts and seeds the free
 * lists with the preallocated buffers, request info and read buffer.
 * Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
static int
nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl,
    void *data)
{
    int  rc;

    ctx_impl->ctx.data = data;
    ctx_impl->ctx.unit = &lib->unit;

    rc = pthread_mutex_init(&ctx_impl->mutex, NULL);
    if (nxt_slow_path(rc != 0)) {
        nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc);

        return NXT_UNIT_ERROR;
    }

    nxt_unit_lib_use(lib);

    pthread_mutex_lock(&lib->mutex);

    nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link);

    pthread_mutex_unlock(&lib->mutex);

    ctx_impl->use_count = 1;
    ctx_impl->wait_items = 0;
    ctx_impl->online = 1;
    ctx_impl->ready = 0;
    ctx_impl->quit_param = NXT_QUIT_GRACEFUL;

    nxt_queue_init(&ctx_impl->free_req);
    nxt_queue_init(&ctx_impl->free_ws);
    nxt_queue_init(&ctx_impl->active_req);
    nxt_queue_init(&ctx_impl->ready_req);
    nxt_queue_init(&ctx_impl->pending_rbuf);
    nxt_queue_init(&ctx_impl->free_rbuf);

    /* Seed the free lists with the preallocated members. */
    ctx_impl->free_buf = NULL;
    nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]);
    nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[0]);

    nxt_queue_insert_tail(&ctx_impl->free_req, &ctx_impl->req.link);
    nxt_queue_insert_tail(&ctx_impl->free_rbuf, &ctx_impl->ctx_read_buf.link);

    ctx_impl->ctx_read_buf.ctx_impl = ctx_impl;

    ctx_impl->req.req.ctx = &ctx_impl->ctx;
    ctx_impl->req.req.unit = &lib->unit;

    ctx_impl->read_port = NULL;
    ctx_impl->requests.slot = 0;

    return NXT_UNIT_OK;
}


nxt_inline void
nxt_unit_ctx_use(nxt_unit_ctx_t *ctx)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_atomic_fetch_add(&ctx_impl->use_count, 1);
}


/* Drops a context reference; frees the context when it was the last one. */
nxt_inline void
nxt_unit_ctx_release(nxt_unit_ctx_t *ctx)
{
    long                 c;
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    c = nxt_atomic_fetch_add(&ctx_impl->use_count, -1);

    if (c == 1) {
        nxt_unit_ctx_free(ctx_impl);
    }
}


nxt_inline void
nxt_unit_lib_use(nxt_unit_impl_t *lib)
{
    nxt_atomic_fetch_add(&lib->use_count, 1);
}


/*
 * Drops a library reference; on the last one, tears down all processes,
 * releases the router/shared ports and mmaps, and frees the library.
 */
nxt_inline void
nxt_unit_lib_release(nxt_unit_impl_t *lib)
{
    long                c;
    nxt_unit_process_t  *process;

    c = nxt_atomic_fetch_add(&lib->use_count, -1);

    if (c == 1) {
        for ( ;; ) {
            pthread_mutex_lock(&lib->mutex);

            process = nxt_unit_process_pop_first(lib);
            if (process == NULL) {
                pthread_mutex_unlock(&lib->mutex);

                break;
            }

            /* NOTE(review): the loop re-locks each iteration, so
               nxt_unit_remove_process() apparently unlocks — confirm. */
            nxt_unit_remove_process(lib, process);
        }

        pthread_mutex_destroy(&lib->mutex);

        if (nxt_fast_path(lib->router_port != NULL)) {
            nxt_unit_port_release(lib->router_port);
        }

        if (nxt_fast_path(lib->shared_port != NULL)) {
            nxt_unit_port_release(lib->shared_port);
        }

        nxt_unit_mmaps_destroy(&lib->incoming);
        nxt_unit_mmaps_destroy(&lib->outgoing);

        nxt_unit_free(NULL, lib);
    }
}


/* Pushes 'mmap_buf' at the head of the intrusive doubly-linked list. */
nxt_inline void
nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head,
    nxt_unit_mmap_buf_t *mmap_buf)
{
    mmap_buf->next = *head;

    if (mmap_buf->next != NULL) {
        mmap_buf->next->prev = &mmap_buf->next;
    }

    *head = mmap_buf;
    mmap_buf->prev = head;
}


/* Appends 'mmap_buf' at the tail by walking to the end of the list. */
nxt_inline void
nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev,
    nxt_unit_mmap_buf_t *mmap_buf)
{
    while (*prev != NULL) {
        prev = &(*prev)->next;
    }
nxt_unit_mmap_buf_insert(prev, mmap_buf);
}


/* Unlinks 'mmap_buf' from whatever list it is on (if any). */
nxt_inline void
nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_mmap_buf_t  **prev;

    prev = mmap_buf->prev;

    if (mmap_buf->next != NULL) {
        mmap_buf->next->prev = prev;
    }

    if (prev != NULL) {
        *prev = mmap_buf->next;
    }
}


/*
 * Parses the NXT_UNIT_INIT_ENV environment variable, whose value starts
 * with the daemon version followed by ';' and 16 semicolon/comma-separated
 * fields (see the sscanf() format below).  Fills the three ports and the
 * scalar out-parameters.  Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
static int
nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port,
    nxt_unit_port_t *read_port, int *shared_port_fd, int *shared_queue_fd,
    int *log_fd, uint32_t *stream,
    uint32_t *shm_limit, uint32_t *request_limit)
{
    int       rc;
    int       ready_fd, router_fd, read_in_fd, read_out_fd;
    char      *unit_init, *version_end, *vars;
    size_t    version_length;
    int64_t   ready_pid, router_pid, read_pid;
    uint32_t  ready_stream, router_id, ready_id, read_id;

    unit_init = getenv(NXT_UNIT_INIT_ENV);
    if (nxt_slow_path(unit_init == NULL)) {
        nxt_unit_alert(NULL, "%s is not in the current environment",
                       NXT_UNIT_INIT_ENV);

        return NXT_UNIT_ERROR;
    }

    version_end = strchr(unit_init, ';');
    if (nxt_slow_path(version_end == NULL)) {
        nxt_unit_alert(NULL, "Unit version not found in %s=\"%s\"",
                       NXT_UNIT_INIT_ENV, unit_init);

        return NXT_UNIT_ERROR;
    }

    version_length = version_end - unit_init;

    /* Daemon and libunit versions must match exactly. */
    rc = version_length != nxt_length(NXT_VERSION)
         || memcmp(unit_init, NXT_VERSION, nxt_length(NXT_VERSION));

    if (nxt_slow_path(rc != 0)) {
        nxt_unit_alert(NULL, "versions mismatch: the Unit daemon has version "
                       "%.*s, while the app was compiled with libunit %s",
                       (int) version_length, unit_init, NXT_VERSION);

        return NXT_UNIT_ERROR;
    }

    vars = version_end + 1;

    rc = sscanf(vars,
                "%"PRIu32";"
                "%"PRId64",%"PRIu32",%d;"
                "%"PRId64",%"PRIu32",%d;"
                "%"PRId64",%"PRIu32",%d,%d;"
                "%d,%d;"
                "%d,%"PRIu32",%"PRIu32,
                &ready_stream,
                &ready_pid, &ready_id, &ready_fd,
&router_pid, &router_id, &router_fd,
                &read_pid, &read_id, &read_in_fd, &read_out_fd,
                shared_port_fd, shared_queue_fd,
                log_fd, shm_limit, request_limit);

    if (nxt_slow_path(rc == EOF)) {
        nxt_unit_alert(NULL, "sscanf(%s) failed: %s (%d) for %s env",
                       vars, strerror(errno), errno, NXT_UNIT_INIT_ENV);

        return NXT_UNIT_ERROR;
    }

    /* All 16 fields must be present. */
    if (nxt_slow_path(rc != 16)) {
        nxt_unit_alert(NULL, "invalid number of variables in %s env: "
                       "found %d of %d in %s", NXT_UNIT_INIT_ENV, rc, 16, vars);

        return NXT_UNIT_ERROR;
    }

    nxt_unit_debug(NULL, "%s='%s'", NXT_UNIT_INIT_ENV, unit_init);

    nxt_unit_port_id_init(&ready_port->id, (pid_t) ready_pid, ready_id);

    ready_port->in_fd = -1;
    ready_port->out_fd = ready_fd;
    ready_port->data = NULL;

    nxt_unit_port_id_init(&router_port->id, (pid_t) router_pid, router_id);

    router_port->in_fd = -1;
    router_port->out_fd = router_fd;
    router_port->data = NULL;

    nxt_unit_port_id_init(&read_port->id, (pid_t) read_pid, read_id);

    read_port->in_fd = read_in_fd;
    read_port->out_fd = read_out_fd;
    read_port->data = NULL;

    *stream = ready_stream;

    return NXT_UNIT_OK;
}


/* Sends _NXT_PORT_MSG_PROCESS_READY with the queue fd attached as OOB. */
static int
nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream, int queue_fd)
{
    ssize_t          res;
    nxt_send_oob_t   oob;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;
    int              fds[2] = {queue_fd, -1};

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = stream;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_PROCESS_READY;
    msg.last = 1;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;

    nxt_socket_msg_oob_init(&oob, fds);

    res = nxt_unit_sendmsg(ctx, ready_fd, &msg, sizeof(msg), &oob);
    if (res != sizeof(msg)) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Dispatches one message read into 'rbuf': validates the header, pulls
 * shared-memory payload when flagged, and routes by message type.  For
 * _NXT_PORT_MSG_REQ_HEADERS, '*preq' (when non-NULL) receives the new
 * request instead of the request handler being invoked.
 */
static int
nxt_unit_process_msg(nxt_unit_ctx_t
*ctx, nxt_unit_read_buf_t *rbuf,
    nxt_unit_request_info_t **preq)
{
    int                  rc;
    pid_t                pid;
    uint8_t              quit_param;
    nxt_port_msg_t       *port_msg;
    nxt_unit_impl_t      *lib;
    nxt_unit_recv_msg_t  recv_msg;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    recv_msg.incoming_buf = NULL;
    recv_msg.fd[0] = -1;
    recv_msg.fd[1] = -1;

    rc = nxt_socket_msg_oob_get_fds(&rbuf->oob, recv_msg.fd);
    if (nxt_slow_path(rc != NXT_OK)) {
        nxt_unit_alert(ctx, "failed to receive file descriptor over cmsg");
        rc = NXT_UNIT_ERROR;
        goto done;
    }

    if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) {
        /* A zero-size read means the peer closed the read port. */
        if (nxt_slow_path(rbuf->size == 0)) {
            nxt_unit_debug(ctx, "read port closed");

            nxt_unit_quit(ctx, NXT_QUIT_GRACEFUL);
            rc = NXT_UNIT_OK;
            goto done;
        }

        nxt_unit_alert(ctx, "message too small (%d bytes)", (int) rbuf->size);

        rc = NXT_UNIT_ERROR;
        goto done;
    }

    port_msg = (nxt_port_msg_t *) rbuf->buf;

    nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd[0] %d fd[1] %d",
                   port_msg->stream, (int) port_msg->type,
                   recv_msg.fd[0], recv_msg.fd[1]);

    recv_msg.stream = port_msg->stream;
    recv_msg.pid = port_msg->pid;
    recv_msg.reply_port = port_msg->reply_port;
    recv_msg.last = port_msg->last;
    recv_msg.mmap = port_msg->mmap;

    /* Payload begins immediately after the fixed port header. */
    recv_msg.start = port_msg + 1;
    recv_msg.size = rbuf->size - sizeof(nxt_port_msg_t);

    if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) {
        nxt_unit_alert(ctx, "#%"PRIu32": unknown message type (%d)",
                       port_msg->stream, (int) port_msg->type);
        rc = NXT_UNIT_ERROR;
        goto done;
    }

    /* Fragmentation is unsupported.
*/
    if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) {
        nxt_unit_alert(ctx, "#%"PRIu32": fragmented message type (%d)",
                       port_msg->stream, (int) port_msg->type);
        rc = NXT_UNIT_ERROR;
        goto done;
    }

    /* Payload lives in shared memory: pull it into the incoming_buf list. */
    if (port_msg->mmap) {
        rc = nxt_unit_mmap_read(ctx, &recv_msg, rbuf);

        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            /* On AGAIN, clear the fds so 'done:' does not close them. */
            if (rc == NXT_UNIT_AGAIN) {
                recv_msg.fd[0] = -1;
                recv_msg.fd[1] = -1;
            }

            goto done;
        }
    }

    switch (port_msg->type) {

    case _NXT_PORT_MSG_RPC_READY:
        rc = NXT_UNIT_OK;
        break;

    case _NXT_PORT_MSG_QUIT:
        /* Optional one-byte payload selects graceful vs. normal quit. */
        if (recv_msg.size == sizeof(quit_param)) {
            memcpy(&quit_param, recv_msg.start, sizeof(quit_param));

        } else {
            quit_param = NXT_QUIT_NORMAL;
        }

        nxt_unit_debug(ctx, "#%"PRIu32": %squit", port_msg->stream,
                       (quit_param == NXT_QUIT_GRACEFUL ? "graceful " : ""));

        nxt_unit_quit(ctx, quit_param);

        rc = NXT_UNIT_OK;
        break;

    case _NXT_PORT_MSG_NEW_PORT:
        rc = nxt_unit_process_new_port(ctx, &recv_msg);
        break;

    case _NXT_PORT_MSG_PORT_ACK:
        rc = nxt_unit_ctx_ready(ctx);
        break;

    case _NXT_PORT_MSG_CHANGE_FILE:
        nxt_unit_debug(ctx, "#%"PRIu32": change_file: fd %d",
                       port_msg->stream, recv_msg.fd[0]);

        /* Redirect the log to the newly received descriptor. */
        if (dup2(recv_msg.fd[0], lib->log_fd) == -1) {
            nxt_unit_alert(ctx, "#%"PRIu32": dup2(%d, %d) failed: %s (%d)",
                           port_msg->stream, recv_msg.fd[0], lib->log_fd,
                           strerror(errno), errno);

            rc = NXT_UNIT_ERROR;
            goto done;
        }

        rc = NXT_UNIT_OK;
        break;

    case _NXT_PORT_MSG_MMAP:
        if (nxt_slow_path(recv_msg.fd[0] < 0)) {
            nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap",
                           port_msg->stream, recv_msg.fd[0]);

            rc = NXT_UNIT_ERROR;
            goto done;
        }

        rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd[0]);
        break;

    case
_NXT_PORT_MSG_REQ_HEADERS:
        rc = nxt_unit_process_req_headers(ctx, &recv_msg, preq);
        break;

    case _NXT_PORT_MSG_REQ_BODY:
        rc = nxt_unit_process_req_body(ctx, &recv_msg);
        break;

    case _NXT_PORT_MSG_WEBSOCKET:
        rc = nxt_unit_process_websocket(ctx, &recv_msg);
        break;

    case _NXT_PORT_MSG_REMOVE_PID:
        if (nxt_slow_path(recv_msg.size != sizeof(pid))) {
            nxt_unit_alert(ctx, "#%"PRIu32": remove_pid: invalid message size "
                           "(%d != %d)", port_msg->stream, (int) recv_msg.size,
                           (int) sizeof(pid));

            rc = NXT_UNIT_ERROR;
            goto done;
        }

        memcpy(&pid, recv_msg.start, sizeof(pid));

        nxt_unit_debug(ctx, "#%"PRIu32": remove_pid: %d",
                       port_msg->stream, (int) pid);

        nxt_unit_remove_pid(lib, pid);

        rc = NXT_UNIT_OK;
        break;

    case _NXT_PORT_MSG_SHM_ACK:
        rc = nxt_unit_process_shm_ack(ctx);
        break;

    default:
        nxt_unit_alert(ctx, "#%"PRIu32": ignore message type: %d",
                       port_msg->stream, (int) port_msg->type);

        rc = NXT_UNIT_ERROR;
        goto done;
    }

done:

    /* Close any received fds that were not claimed by a handler. */
    if (recv_msg.fd[0] != -1) {
        nxt_unit_close(recv_msg.fd[0]);
    }

    if (recv_msg.fd[1] != -1) {
        nxt_unit_close(recv_msg.fd[1]);
    }

    /* Free unclaimed buffers; presumably nxt_unit_mmap_buf_free() unlinks
       the head so this loop terminates — confirm. */
    while (recv_msg.incoming_buf != NULL) {
        nxt_unit_mmap_buf_free(recv_msg.incoming_buf);
    }

    if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) {
#if (NXT_DEBUG)
        /* Poison the consumed buffer to catch stale reads. */
        memset(rbuf->buf, 0xAC, rbuf->size);
#endif
        nxt_unit_read_buf_release(ctx, rbuf);
    }

    return rc;
}


/*
 * Handles _NXT_PORT_MSG_NEW_PORT: registers a peer port whose send fd
 * arrives in fd[0] and whose queue shm fd arrives in fd[1].
 */
static int
nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    void                     *mem;
    nxt_unit_port_t          new_port, *port;
    nxt_port_msg_new_port_t  *new_port_msg;

    if (nxt_slow_path(recv_msg->size != sizeof(nxt_port_msg_new_port_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": new_port: "
                      "invalid message size (%d)",
recv_msg->stream, (int) recv_msg->size);

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(recv_msg->fd[0] < 0)) {
        nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for new port",
                       recv_msg->stream, recv_msg->fd[0]);

        return NXT_UNIT_ERROR;
    }

    new_port_msg = recv_msg->start;

    nxt_unit_debug(ctx, "#%"PRIu32": new_port: port{%d,%d} fd[0] %d fd[1] %d",
                   recv_msg->stream, (int) new_port_msg->pid,
                   (int) new_port_msg->id, recv_msg->fd[0], recv_msg->fd[1]);

    if (nxt_slow_path(nxt_unit_fd_blocking(recv_msg->fd[0]) != NXT_UNIT_OK)) {
        return NXT_UNIT_ERROR;
    }

    nxt_unit_port_id_init(&new_port.id, new_port_msg->pid, new_port_msg->id);

    new_port.in_fd = -1;
    new_port.out_fd = recv_msg->fd[0];

    /* Map the peer port's message queue from fd[1]. */
    mem = mmap(NULL, sizeof(nxt_port_queue_t), PROT_READ | PROT_WRITE,
               MAP_SHARED, recv_msg->fd[1], 0);

    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", recv_msg->fd[1],
                       strerror(errno), errno);

        return NXT_UNIT_ERROR;
    }

    new_port.data = NULL;

    /* fd[0] ownership moved to the port; prevent the caller's close. */
    recv_msg->fd[0] = -1;

    port = nxt_unit_add_port(ctx, &new_port, mem);
    if (nxt_slow_path(port == NULL)) {
        return NXT_UNIT_ERROR;
    }

    nxt_unit_port_release(port);

    return NXT_UNIT_OK;
}


/*
 * Marks the context ready (on _NXT_PORT_MSG_PORT_ACK) and notifies the
 * application via the ready_handler / add_port callbacks.
 */
static int
nxt_unit_ctx_ready(nxt_unit_ctx_t *ctx)
{
    nxt_unit_impl_t      *lib;
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    if (nxt_slow_path(ctx_impl->ready)) {
        return NXT_UNIT_OK;
    }

    ctx_impl->ready = 1;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    /* Call ready_handler() only for main context.
*/
    if (&lib->main_ctx == ctx_impl && lib->callbacks.ready_handler != NULL) {
        return lib->callbacks.ready_handler(ctx);
    }

    if (&lib->main_ctx != ctx_impl) {
        /* Check if the main context is already stopped or quit. */
        if (nxt_slow_path(!lib->main_ctx.ready)) {
            ctx_impl->ready = 0;

            nxt_unit_quit(ctx, lib->main_ctx.quit_param);

            return NXT_UNIT_OK;
        }

        if (lib->callbacks.add_port != NULL) {
            lib->callbacks.add_port(ctx, lib->shared_port);
        }
    }

    return NXT_UNIT_OK;
}


/*
 * Handles _NXT_PORT_MSG_REQ_HEADERS: builds a nxt_unit_request_info_t
 * around the shared-memory request data, acknowledges the headers, and
 * either invokes the request handler or hands the request back via *preq.
 */
static int
nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg,
    nxt_unit_request_info_t **preq)
{
    int                           res;
    nxt_unit_impl_t               *lib;
    nxt_unit_port_id_t            port_id;
    nxt_unit_request_t            *r;
    nxt_unit_mmap_buf_t           *b;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    /* Request headers always arrive via shared memory. */
    if (nxt_slow_path(recv_msg->mmap == 0)) {
        nxt_unit_warn(ctx, "#%"PRIu32": data is not in shared memory",
                      recv_msg->stream);

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(recv_msg->size < sizeof(nxt_unit_request_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": data too short: %d while at least "
                      "%d expected", recv_msg->stream, (int) recv_msg->size,
                      (int) sizeof(nxt_unit_request_t));

        return NXT_UNIT_ERROR;
    }

    req_impl = nxt_unit_request_info_get(ctx);
    if (nxt_slow_path(req_impl == NULL)) {
        nxt_unit_warn(ctx, "#%"PRIu32": request info allocation failed",
                      recv_msg->stream);

        return NXT_UNIT_ERROR;
    }

    req = &req_impl->req;

    req->request = recv_msg->start;

    b = recv_msg->incoming_buf;

    req->request_buf = &b->buf;
    req->response = NULL;
    req->response_buf = NULL;

    r = req->request;

    req->content_length = r->content_length;

    /* Preread body bytes start where the serialized request points. */
    req->content_buf = req->request_buf;
    req->content_buf->free =
nxt_unit_sptr_get(&r->preread_content);

    req_impl->stream = recv_msg->stream;

    req_impl->outgoing_buf = NULL;

    for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
        b->req = req;
    }

    /* "Move" incoming buffer list to req_impl. */
    req_impl->incoming_buf = recv_msg->incoming_buf;
    req_impl->incoming_buf->prev = &req_impl->incoming_buf;
    recv_msg->incoming_buf = NULL;

    /* Ownership of the body fd (if any) moves to the request. */
    req->content_fd = recv_msg->fd[0];
    recv_msg->fd[0] = -1;

    req->response_max_fields = 0;
    req_impl->state = NXT_UNIT_RS_START;
    req_impl->websocket = 0;
    req_impl->in_hash = 0;

    nxt_unit_debug(ctx, "#%"PRIu32": %.*s %.*s (%d)", recv_msg->stream,
                   (int) r->method_length,
                   (char *) nxt_unit_sptr_get(&r->method),
                   (int) r->target_length,
                   (char *) nxt_unit_sptr_get(&r->target),
                   (int) r->content_length);

    nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port);

    res = nxt_unit_request_check_response_port(req, &port_id);
    if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
        return NXT_UNIT_ERROR;
    }

    if (nxt_fast_path(res == NXT_UNIT_OK)) {
        res = nxt_unit_send_req_headers_ack(req);
        if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
            nxt_unit_request_done(req, NXT_UNIT_ERROR);

            return NXT_UNIT_ERROR;
        }

        lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

        /* More body is expected than was preread: park the request in the
           hash so later REQ_BODY messages can find it. */
        if (req->content_length
            > (uint64_t) (req->content_buf->end - req->content_buf->free))
        {
            res = nxt_unit_request_hash_add(ctx, req);
            if (nxt_slow_path(res != NXT_UNIT_OK)) {
                nxt_unit_req_warn(req, "failed to add request to hash");

                nxt_unit_request_done(req, NXT_UNIT_ERROR);

                return NXT_UNIT_ERROR;
            }

            /*
             * If application have separate data handler, we may start
             * request processing and process data when it is arrived.
*/
            if (lib->callbacks.data_handler == NULL) {
                return NXT_UNIT_OK;
            }
        }

        /* Either run the handler now or return the request to the caller. */
        if (preq == NULL) {
            lib->callbacks.request_handler(req);

        } else {
            *preq = req;
        }
    }

    return NXT_UNIT_OK;
}


/*
 * Handles _NXT_PORT_MSG_REQ_BODY: appends the received buffers to the
 * request's content chain and invokes the data/request handler.
 */
static int
nxt_unit_process_req_body(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    uint64_t                 l;
    nxt_unit_impl_t          *lib;
    nxt_unit_mmap_buf_t      *b;
    nxt_unit_request_info_t  *req;

    req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last);
    if (req == NULL) {
        return NXT_UNIT_OK;
    }

    /* Total body bytes available once the new buffers are appended. */
    l = req->content_buf->end - req->content_buf->free;

    for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
        b->req = req;
        l += b->buf.end - b->buf.free;
    }

    if (recv_msg->incoming_buf != NULL) {
        b = nxt_container_of(req->content_buf, nxt_unit_mmap_buf_t, buf);

        /* Find the current tail of the content chain. */
        while (b->next != NULL) {
            b = b->next;
        }

        /* "Move" incoming buffer list to req_impl.
*/
        b->next = recv_msg->incoming_buf;
        b->next->prev = &b->next;

        recv_msg->incoming_buf = NULL;
    }

    /* Ownership of the body fd (if any) moves to the request. */
    req->content_fd = recv_msg->fd[0];
    recv_msg->fd[0] = -1;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    if (lib->callbacks.data_handler != NULL) {
        lib->callbacks.data_handler(req);

        return NXT_UNIT_OK;
    }

    /* Without a data handler, run the request once the body is complete. */
    if (req->content_fd != -1 || l == req->content_length) {
        lib->callbacks.request_handler(req);
    }

    return NXT_UNIT_OK;
}


/*
 * Finds (or creates a not-ready placeholder for) the port the response
 * must be sent to.  Returns NXT_UNIT_OK when the port is ready,
 * NXT_UNIT_AGAIN when the request has to wait for it, or NXT_UNIT_ERROR.
 */
static int
nxt_unit_request_check_response_port(nxt_unit_request_info_t *req,
    nxt_unit_port_id_t *port_id)
{
    int                           res;
    nxt_unit_ctx_t                *ctx;
    nxt_unit_impl_t               *lib;
    nxt_unit_port_t               *port;
    nxt_unit_process_t            *process;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_port_impl_t          *port_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx = req->ctx;
    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&lib->mutex);

    port = nxt_unit_port_hash_find(&lib->ports, port_id, 0);
    port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);

    if (nxt_fast_path(port != NULL)) {
        req->response_port = port;

        if (nxt_fast_path(port_impl->ready)) {
            pthread_mutex_unlock(&lib->mutex);

            nxt_unit_debug(ctx, "check_response_port: found port{%d,%d}",
                           (int) port->id.pid, (int) port->id.id);

            return NXT_UNIT_OK;
        }

        /* Port exists but is not ready yet: queue the request on it. */
        nxt_unit_debug(ctx, "check_response_port: "
                       "port{%d,%d} already requested",
                       (int) port->id.pid, (int) port->id.id);

        req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

        nxt_queue_insert_tail(&port_impl->awaiting_req,
                              &req_impl->port_wait_link);

        pthread_mutex_unlock(&lib->mutex);

        nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);
return NXT_UNIT_AGAIN;
    }

    /* Unknown port: create a not-ready placeholder and request its fds. */
    port_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
    if (nxt_slow_path(port_impl == NULL)) {
        nxt_unit_alert(ctx, "check_response_port: malloc(%d) failed",
                       (int) sizeof(nxt_unit_port_impl_t));

        pthread_mutex_unlock(&lib->mutex);

        return NXT_UNIT_ERROR;
    }

    port = &port_impl->port;

    port->id = *port_id;
    port->in_fd = -1;
    port->out_fd = -1;
    port->data = NULL;

    res = nxt_unit_port_hash_add(&lib->ports, port);
    if (nxt_slow_path(res != NXT_UNIT_OK)) {
        nxt_unit_alert(ctx, "check_response_port: %d,%d hash_add failed",
                       port->id.pid, port->id.id);

        pthread_mutex_unlock(&lib->mutex);

        nxt_unit_free(ctx, port);

        return NXT_UNIT_ERROR;
    }

    process = nxt_unit_process_find(lib, port_id->pid, 0);
    if (nxt_slow_path(process == NULL)) {
        nxt_unit_alert(ctx, "check_response_port: process %d not found",
                       port->id.pid);

        /* Undo the hash_add (find with the remove flag set). */
        nxt_unit_port_hash_find(&lib->ports, port_id, 1);

        pthread_mutex_unlock(&lib->mutex);

        nxt_unit_free(ctx, port);

        return NXT_UNIT_ERROR;
    }

    nxt_queue_insert_tail(&process->ports, &port_impl->link);

    port_impl->process = process;
    port_impl->queue = NULL;
    port_impl->from_socket = 0;
    port_impl->socket_rbuf = NULL;

    nxt_queue_init(&port_impl->awaiting_req);

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    nxt_queue_insert_tail(&port_impl->awaiting_req, &req_impl->port_wait_link);

    /* Presumably one reference for the hash and one for the pending
       get_port exchange — confirm. */
    port_impl->use_count = 2;
    port_impl->ready = 0;

    req->response_port = port;

    pthread_mutex_unlock(&lib->mutex);

    /* Ask the daemon for the real port descriptors. */
    res = nxt_unit_get_port(ctx, port_id);
    if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
        return NXT_UNIT_ERROR;
    }

    nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);

    return NXT_UNIT_AGAIN;
}
/* Sends _NXT_PORT_MSG_REQ_HEADERS_ACK for 'req' to its response port. */
static int
nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req)
{
    ssize_t                       res;
    nxt_port_msg_t                msg;
    nxt_unit_impl_t               *lib;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
    ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    memset(&msg, 0, sizeof(nxt_port_msg_t));

    msg.stream = req_impl->stream;
    msg.pid = lib->pid;
    msg.reply_port = ctx_impl->read_port->id.id;
    msg.type = _NXT_PORT_MSG_REQ_HEADERS_ACK;

    res = nxt_unit_port_send(req->ctx, req->response_port,
                             &msg, sizeof(msg), NULL);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Handles _NXT_PORT_MSG_WEBSOCKET: wraps the frame payload in a
 * nxt_unit_websocket_frame_t and invokes the websocket handler; on the
 * last message also invokes close_handler (or finishes the request).
 */
static int
nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    size_t                           hsize;
    nxt_unit_impl_t                  *lib;
    nxt_unit_mmap_buf_t              *b;
    nxt_unit_callbacks_t             *cb;
    nxt_unit_request_info_t          *req;
    nxt_unit_request_info_impl_t     *req_impl;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last);
    if (nxt_slow_path(req == NULL)) {
        return NXT_UNIT_OK;
    }

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    cb = &lib->callbacks;

    /* A frame needs at least the two fixed websocket header bytes. */
    if (cb->websocket_handler && recv_msg->size >= 2) {
        ws_impl = nxt_unit_websocket_frame_get(ctx);
        if (nxt_slow_path(ws_impl == NULL)) {
            nxt_unit_warn(ctx, "#%"PRIu32": websocket frame allocation failed",
                          req_impl->stream);

            return NXT_UNIT_ERROR;
        }

        ws_impl->ws.req = req;

        ws_impl->buf = NULL;

        if (recv_msg->mmap) {
            for (b = recv_msg->incoming_buf; b != NULL; b =
b->next) { 1642 b->req = req; 1643 } 1644 1645 /* "Move" incoming buffer list to ws_impl. */ 1646 ws_impl->buf = recv_msg->incoming_buf; 1647 ws_impl->buf->prev = &ws_impl->buf; 1648 recv_msg->incoming_buf = NULL; 1649 1650 b = ws_impl->buf; 1651 1652 } else { 1653 b = nxt_unit_mmap_buf_get(ctx); 1654 if (nxt_slow_path(b == NULL)) { 1655 nxt_unit_alert(ctx, "#%"PRIu32": failed to allocate buf", 1656 req_impl->stream); 1657 1658 nxt_unit_websocket_frame_release(&ws_impl->ws); 1659 1660 return NXT_UNIT_ERROR; 1661 } 1662 1663 b->req = req; 1664 b->buf.start = recv_msg->start; 1665 b->buf.free = b->buf.start; 1666 b->buf.end = b->buf.start + recv_msg->size; 1667 1668 nxt_unit_mmap_buf_insert(&ws_impl->buf, b); 1669 } 1670 1671 ws_impl->ws.header = (void *) b->buf.start; 1672 ws_impl->ws.payload_len = nxt_websocket_frame_payload_len( 1673 ws_impl->ws.header); 1674 1675 hsize = nxt_websocket_frame_header_size(ws_impl->ws.header); 1676 1677 if (ws_impl->ws.header->mask) { 1678 ws_impl->ws.mask = (uint8_t *) b->buf.start + hsize - 4; 1679 1680 } else { 1681 ws_impl->ws.mask = NULL; 1682 } 1683 1684 b->buf.free += hsize; 1685 1686 ws_impl->ws.content_buf = &b->buf; 1687 ws_impl->ws.content_length = ws_impl->ws.payload_len; 1688 1689 nxt_unit_req_debug(req, "websocket_handler: opcode=%d, " 1690 "payload_len=%"PRIu64, 1691 ws_impl->ws.header->opcode, 1692 ws_impl->ws.payload_len); 1693 1694 cb->websocket_handler(&ws_impl->ws); 1695 } 1696 1697 if (recv_msg->last) { 1698 if (cb->close_handler) { 1699 nxt_unit_req_debug(req, "close_handler"); 1700 1701 cb->close_handler(req); 1702 1703 } else { 1704 nxt_unit_request_done(req, NXT_UNIT_ERROR); 1705 } 1706 } 1707 1708 return NXT_UNIT_OK; 1709} 1710 1711 1712static int 1713nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx) 1714{ 1715 nxt_unit_impl_t *lib; 1716 nxt_unit_callbacks_t *cb; 1717 1718 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1719 cb = &lib->callbacks; 1720 1721 if (cb->shm_ack_handler != NULL) { 1722 
cb->shm_ack_handler(ctx); 1723 } 1724 1725 return NXT_UNIT_OK; 1726} 1727 1728 1729static nxt_unit_request_info_impl_t * 1730nxt_unit_request_info_get(nxt_unit_ctx_t *ctx) 1731{ 1732 nxt_unit_impl_t *lib; 1733 nxt_queue_link_t *lnk; 1734 nxt_unit_ctx_impl_t *ctx_impl; 1735 nxt_unit_request_info_impl_t *req_impl; 1736 1737 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1738 1739 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1740 1741 pthread_mutex_lock(&ctx_impl->mutex); 1742 1743 if (nxt_queue_is_empty(&ctx_impl->free_req)) { 1744 pthread_mutex_unlock(&ctx_impl->mutex); 1745 1746 req_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_request_info_impl_t) 1747 + lib->request_data_size); 1748 if (nxt_slow_path(req_impl == NULL)) { 1749 return NULL; 1750 } 1751 1752 req_impl->req.unit = ctx->unit; 1753 req_impl->req.ctx = ctx; 1754 1755 pthread_mutex_lock(&ctx_impl->mutex); 1756 1757 } else { 1758 lnk = nxt_queue_first(&ctx_impl->free_req); 1759 nxt_queue_remove(lnk); 1760 1761 req_impl = nxt_container_of(lnk, nxt_unit_request_info_impl_t, link); 1762 } 1763 1764 nxt_queue_insert_tail(&ctx_impl->active_req, &req_impl->link); 1765 1766 pthread_mutex_unlock(&ctx_impl->mutex); 1767 1768 req_impl->req.data = lib->request_data_size ? 
req_impl->extra_data : NULL; 1769 1770 return req_impl; 1771} 1772 1773 1774static void 1775nxt_unit_request_info_release(nxt_unit_request_info_t *req) 1776{ 1777 nxt_unit_ctx_t *ctx; 1778 nxt_unit_ctx_impl_t *ctx_impl; 1779 nxt_unit_request_info_impl_t *req_impl; 1780 1781 ctx = req->ctx; 1782 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1783 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1784 1785 req->response = NULL; 1786 req->response_buf = NULL; 1787 1788 if (req_impl->in_hash) { 1789 nxt_unit_request_hash_find(req->ctx, req_impl->stream, 1); 1790 } 1791 1792 while (req_impl->outgoing_buf != NULL) { 1793 nxt_unit_mmap_buf_free(req_impl->outgoing_buf); 1794 } 1795 1796 while (req_impl->incoming_buf != NULL) { 1797 nxt_unit_mmap_buf_free(req_impl->incoming_buf); 1798 } 1799 1800 if (req->content_fd != -1) { 1801 nxt_unit_close(req->content_fd); 1802 1803 req->content_fd = -1; 1804 } 1805 1806 if (req->response_port != NULL) { 1807 nxt_unit_port_release(req->response_port); 1808 1809 req->response_port = NULL; 1810 } 1811 1812 req_impl->state = NXT_UNIT_RS_RELEASED; 1813 1814 pthread_mutex_lock(&ctx_impl->mutex); 1815 1816 nxt_queue_remove(&req_impl->link); 1817 1818 nxt_queue_insert_tail(&ctx_impl->free_req, &req_impl->link); 1819 1820 pthread_mutex_unlock(&ctx_impl->mutex); 1821 1822 if (nxt_slow_path(!nxt_unit_chk_ready(ctx))) { 1823 nxt_unit_quit(ctx, NXT_QUIT_GRACEFUL); 1824 } 1825} 1826 1827 1828static void 1829nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req_impl) 1830{ 1831 nxt_unit_ctx_impl_t *ctx_impl; 1832 1833 ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t, ctx); 1834 1835 nxt_queue_remove(&req_impl->link); 1836 1837 if (req_impl != &ctx_impl->req) { 1838 nxt_unit_free(&ctx_impl->ctx, req_impl); 1839 } 1840} 1841 1842 1843static nxt_unit_websocket_frame_impl_t * 1844nxt_unit_websocket_frame_get(nxt_unit_ctx_t *ctx) 1845{ 1846 nxt_queue_link_t *lnk; 1847 nxt_unit_ctx_impl_t 
*ctx_impl; 1848 nxt_unit_websocket_frame_impl_t *ws_impl; 1849 1850 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1851 1852 pthread_mutex_lock(&ctx_impl->mutex); 1853 1854 if (nxt_queue_is_empty(&ctx_impl->free_ws)) { 1855 pthread_mutex_unlock(&ctx_impl->mutex); 1856 1857 ws_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_websocket_frame_impl_t)); 1858 if (nxt_slow_path(ws_impl == NULL)) { 1859 return NULL; 1860 } 1861 1862 } else { 1863 lnk = nxt_queue_first(&ctx_impl->free_ws); 1864 nxt_queue_remove(lnk); 1865 1866 pthread_mutex_unlock(&ctx_impl->mutex); 1867 1868 ws_impl = nxt_container_of(lnk, nxt_unit_websocket_frame_impl_t, link); 1869 } 1870 1871 ws_impl->ctx_impl = ctx_impl; 1872 1873 return ws_impl; 1874} 1875 1876 1877static void 1878nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws) 1879{ 1880 nxt_unit_websocket_frame_impl_t *ws_impl; 1881 1882 ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws); 1883 1884 while (ws_impl->buf != NULL) { 1885 nxt_unit_mmap_buf_free(ws_impl->buf); 1886 } 1887 1888 ws->req = NULL; 1889 1890 pthread_mutex_lock(&ws_impl->ctx_impl->mutex); 1891 1892 nxt_queue_insert_tail(&ws_impl->ctx_impl->free_ws, &ws_impl->link); 1893 1894 pthread_mutex_unlock(&ws_impl->ctx_impl->mutex); 1895} 1896 1897 1898static void 1899nxt_unit_websocket_frame_free(nxt_unit_ctx_t *ctx, 1900 nxt_unit_websocket_frame_impl_t *ws_impl) 1901{ 1902 nxt_queue_remove(&ws_impl->link); 1903 1904 nxt_unit_free(ctx, ws_impl); 1905} 1906 1907 1908uint16_t 1909nxt_unit_field_hash(const char *name, size_t name_length) 1910{ 1911 u_char ch; 1912 uint32_t hash; 1913 const char *p, *end; 1914 1915 hash = 159406; /* Magic value copied from nxt_http_parse.c */ 1916 end = name + name_length; 1917 1918 for (p = name; p < end; p++) { 1919 ch = *p; 1920 hash = (hash << 4) + hash + nxt_lowcase(ch); 1921 } 1922 1923 hash = (hash >> 16) ^ hash; 1924 1925 return hash; 1926} 1927 1928 1929void 
1930nxt_unit_request_group_dup_fields(nxt_unit_request_info_t *req) 1931{ 1932 char *name; 1933 uint32_t i, j; 1934 nxt_unit_field_t *fields, f; 1935 nxt_unit_request_t *r; 1936 1937 static nxt_str_t content_length = nxt_string("content-length"); 1938 static nxt_str_t content_type = nxt_string("content-type"); 1939 static nxt_str_t cookie = nxt_string("cookie"); 1940 1941 nxt_unit_req_debug(req, "group_dup_fields"); 1942 1943 r = req->request; 1944 fields = r->fields; 1945 1946 for (i = 0; i < r->fields_count; i++) { 1947 name = nxt_unit_sptr_get(&fields[i].name); 1948 1949 switch (fields[i].hash) { 1950 case NXT_UNIT_HASH_CONTENT_LENGTH: 1951 if (fields[i].name_length == content_length.length 1952 && nxt_unit_memcasecmp(name, content_length.start, 1953 content_length.length) == 0) 1954 { 1955 r->content_length_field = i; 1956 } 1957 1958 break; 1959 1960 case NXT_UNIT_HASH_CONTENT_TYPE: 1961 if (fields[i].name_length == content_type.length 1962 && nxt_unit_memcasecmp(name, content_type.start, 1963 content_type.length) == 0) 1964 { 1965 r->content_type_field = i; 1966 } 1967 1968 break; 1969 1970 case NXT_UNIT_HASH_COOKIE: 1971 if (fields[i].name_length == cookie.length 1972 && nxt_unit_memcasecmp(name, cookie.start, 1973 cookie.length) == 0) 1974 { 1975 r->cookie_field = i; 1976 } 1977 1978 break; 1979 } 1980 1981 for (j = i + 1; j < r->fields_count; j++) { 1982 if (fields[i].hash != fields[j].hash 1983 || fields[i].name_length != fields[j].name_length 1984 || nxt_unit_memcasecmp(name, 1985 nxt_unit_sptr_get(&fields[j].name), 1986 fields[j].name_length) != 0) 1987 { 1988 continue; 1989 } 1990 1991 f = fields[j]; 1992 f.value.offset += (j - (i + 1)) * sizeof(f); 1993 1994 while (j > i + 1) { 1995 fields[j] = fields[j - 1]; 1996 fields[j].name.offset -= sizeof(f); 1997 fields[j].value.offset -= sizeof(f); 1998 j--; 1999 } 2000 2001 fields[j] = f; 2002 2003 /* Assign the same name pointer for further grouping simplicity. 
*/ 2004 nxt_unit_sptr_set(&fields[j].name, name); 2005 2006 i++; 2007 } 2008 } 2009} 2010 2011 2012int 2013nxt_unit_response_init(nxt_unit_request_info_t *req, 2014 uint16_t status, uint32_t max_fields_count, uint32_t max_fields_size) 2015{ 2016 uint32_t buf_size; 2017 nxt_unit_buf_t *buf; 2018 nxt_unit_request_info_impl_t *req_impl; 2019 2020 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2021 2022 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2023 nxt_unit_req_warn(req, "init: response already sent"); 2024 2025 return NXT_UNIT_ERROR; 2026 } 2027 2028 nxt_unit_req_debug(req, "init: %d, max fields %d/%d", (int) status, 2029 (int) max_fields_count, (int) max_fields_size); 2030 2031 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT)) { 2032 nxt_unit_req_debug(req, "duplicate response init"); 2033 } 2034 2035 /* 2036 * Each field name and value 0-terminated by libunit, 2037 * this is the reason of '+ 2' below. 2038 */ 2039 buf_size = sizeof(nxt_unit_response_t) 2040 + max_fields_count * (sizeof(nxt_unit_field_t) + 2) 2041 + max_fields_size; 2042 2043 if (nxt_slow_path(req->response_buf != NULL)) { 2044 buf = req->response_buf; 2045 2046 if (nxt_fast_path(buf_size <= (uint32_t) (buf->end - buf->start))) { 2047 goto init_response; 2048 } 2049 2050 nxt_unit_buf_free(buf); 2051 2052 req->response_buf = NULL; 2053 req->response = NULL; 2054 req->response_max_fields = 0; 2055 2056 req_impl->state = NXT_UNIT_RS_START; 2057 } 2058 2059 buf = nxt_unit_response_buf_alloc(req, buf_size); 2060 if (nxt_slow_path(buf == NULL)) { 2061 return NXT_UNIT_ERROR; 2062 } 2063 2064init_response: 2065 2066 memset(buf->start, 0, sizeof(nxt_unit_response_t)); 2067 2068 req->response_buf = buf; 2069 2070 req->response = (nxt_unit_response_t *) buf->start; 2071 req->response->status = status; 2072 2073 buf->free = buf->start + sizeof(nxt_unit_response_t) 2074 + max_fields_count * sizeof(nxt_unit_field_t); 2075 2076 req->response_max_fields 
= max_fields_count; 2077 req_impl->state = NXT_UNIT_RS_RESPONSE_INIT; 2078 2079 return NXT_UNIT_OK; 2080} 2081 2082 2083int 2084nxt_unit_response_realloc(nxt_unit_request_info_t *req, 2085 uint32_t max_fields_count, uint32_t max_fields_size) 2086{ 2087 char *p; 2088 uint32_t i, buf_size; 2089 nxt_unit_buf_t *buf; 2090 nxt_unit_field_t *f, *src; 2091 nxt_unit_response_t *resp; 2092 nxt_unit_request_info_impl_t *req_impl; 2093 2094 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2095 2096 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 2097 nxt_unit_req_warn(req, "realloc: response not init"); 2098 2099 return NXT_UNIT_ERROR; 2100 } 2101 2102 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2103 nxt_unit_req_warn(req, "realloc: response already sent"); 2104 2105 return NXT_UNIT_ERROR; 2106 } 2107 2108 if (nxt_slow_path(max_fields_count < req->response->fields_count)) { 2109 nxt_unit_req_warn(req, "realloc: new max_fields_count is too small"); 2110 2111 return NXT_UNIT_ERROR; 2112 } 2113 2114 /* 2115 * Each field name and value 0-terminated by libunit, 2116 * this is the reason of '+ 2' below. 
2117 */ 2118 buf_size = sizeof(nxt_unit_response_t) 2119 + max_fields_count * (sizeof(nxt_unit_field_t) + 2) 2120 + max_fields_size; 2121 2122 nxt_unit_req_debug(req, "realloc %"PRIu32"", buf_size); 2123 2124 buf = nxt_unit_response_buf_alloc(req, buf_size); 2125 if (nxt_slow_path(buf == NULL)) { 2126 nxt_unit_req_warn(req, "realloc: new buf allocation failed"); 2127 return NXT_UNIT_ERROR; 2128 } 2129 2130 resp = (nxt_unit_response_t *) buf->start; 2131 2132 memset(resp, 0, sizeof(nxt_unit_response_t)); 2133 2134 resp->status = req->response->status; 2135 resp->content_length = req->response->content_length; 2136 2137 p = buf->start + max_fields_count * sizeof(nxt_unit_field_t); 2138 f = resp->fields; 2139 2140 for (i = 0; i < req->response->fields_count; i++) { 2141 src = req->response->fields + i; 2142 2143 if (nxt_slow_path(src->skip != 0)) { 2144 continue; 2145 } 2146 2147 if (nxt_slow_path(src->name_length + src->value_length + 2 2148 > (uint32_t) (buf->end - p))) 2149 { 2150 nxt_unit_req_warn(req, "realloc: not enough space for field" 2151 " #%"PRIu32" (%p), (%"PRIu32" + %"PRIu32") required", 2152 i, src, src->name_length, src->value_length); 2153 2154 goto fail; 2155 } 2156 2157 nxt_unit_sptr_set(&f->name, p); 2158 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->name), src->name_length); 2159 *p++ = '\0'; 2160 2161 nxt_unit_sptr_set(&f->value, p); 2162 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->value), src->value_length); 2163 *p++ = '\0'; 2164 2165 f->hash = src->hash; 2166 f->skip = 0; 2167 f->name_length = src->name_length; 2168 f->value_length = src->value_length; 2169 2170 resp->fields_count++; 2171 f++; 2172 } 2173 2174 if (req->response->piggyback_content_length > 0) { 2175 if (nxt_slow_path(req->response->piggyback_content_length 2176 > (uint32_t) (buf->end - p))) 2177 { 2178 nxt_unit_req_warn(req, "realloc: not enought space for content" 2179 " #%"PRIu32", %"PRIu32" required", 2180 i, req->response->piggyback_content_length); 2181 2182 goto fail; 2183 } 
2184 2185 resp->piggyback_content_length = 2186 req->response->piggyback_content_length; 2187 2188 nxt_unit_sptr_set(&resp->piggyback_content, p); 2189 p = nxt_cpymem(p, nxt_unit_sptr_get(&req->response->piggyback_content), 2190 req->response->piggyback_content_length); 2191 } 2192 2193 buf->free = p; 2194 2195 nxt_unit_buf_free(req->response_buf); 2196 2197 req->response = resp; 2198 req->response_buf = buf; 2199 req->response_max_fields = max_fields_count; 2200 2201 return NXT_UNIT_OK; 2202 2203fail: 2204 2205 nxt_unit_buf_free(buf); 2206 2207 return NXT_UNIT_ERROR; 2208} 2209 2210 2211int 2212nxt_unit_response_is_init(nxt_unit_request_info_t *req) 2213{ 2214 nxt_unit_request_info_impl_t *req_impl; 2215 2216 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2217 2218 return req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT; 2219} 2220 2221 2222int 2223nxt_unit_response_add_field(nxt_unit_request_info_t *req, 2224 const char *name, uint8_t name_length, 2225 const char *value, uint32_t value_length) 2226{ 2227 nxt_unit_buf_t *buf; 2228 nxt_unit_field_t *f; 2229 nxt_unit_response_t *resp; 2230 nxt_unit_request_info_impl_t *req_impl; 2231 2232 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2233 2234 if (nxt_slow_path(req_impl->state != NXT_UNIT_RS_RESPONSE_INIT)) { 2235 nxt_unit_req_warn(req, "add_field: response not initialized or " 2236 "already sent"); 2237 2238 return NXT_UNIT_ERROR; 2239 } 2240 2241 resp = req->response; 2242 2243 if (nxt_slow_path(resp->fields_count >= req->response_max_fields)) { 2244 nxt_unit_req_warn(req, "add_field: too many response fields (%d)", 2245 (int) resp->fields_count); 2246 2247 return NXT_UNIT_ERROR; 2248 } 2249 2250 buf = req->response_buf; 2251 2252 if (nxt_slow_path(name_length + value_length + 2 2253 > (uint32_t) (buf->end - buf->free))) 2254 { 2255 nxt_unit_req_warn(req, "add_field: response buffer overflow"); 2256 2257 return NXT_UNIT_ERROR; 2258 } 2259 2260 nxt_unit_req_debug(req, 
"add_field #%"PRIu32": %.*s: %.*s", 2261 resp->fields_count, 2262 (int) name_length, name, 2263 (int) value_length, value); 2264 2265 f = resp->fields + resp->fields_count; 2266 2267 nxt_unit_sptr_set(&f->name, buf->free); 2268 buf->free = nxt_cpymem(buf->free, name, name_length); 2269 *buf->free++ = '\0'; 2270 2271 nxt_unit_sptr_set(&f->value, buf->free); 2272 buf->free = nxt_cpymem(buf->free, value, value_length); 2273 *buf->free++ = '\0'; 2274 2275 f->hash = nxt_unit_field_hash(name, name_length); 2276 f->skip = 0; 2277 f->name_length = name_length; 2278 f->value_length = value_length; 2279 2280 resp->fields_count++; 2281 2282 return NXT_UNIT_OK; 2283} 2284 2285 2286int 2287nxt_unit_response_add_content(nxt_unit_request_info_t *req, 2288 const void* src, uint32_t size) 2289{ 2290 nxt_unit_buf_t *buf; 2291 nxt_unit_response_t *resp; 2292 nxt_unit_request_info_impl_t *req_impl; 2293 2294 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2295 2296 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 2297 nxt_unit_req_warn(req, "add_content: response not initialized yet"); 2298 2299 return NXT_UNIT_ERROR; 2300 } 2301 2302 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2303 nxt_unit_req_warn(req, "add_content: response already sent"); 2304 2305 return NXT_UNIT_ERROR; 2306 } 2307 2308 buf = req->response_buf; 2309 2310 if (nxt_slow_path(size > (uint32_t) (buf->end - buf->free))) { 2311 nxt_unit_req_warn(req, "add_content: buffer overflow"); 2312 2313 return NXT_UNIT_ERROR; 2314 } 2315 2316 resp = req->response; 2317 2318 if (resp->piggyback_content_length == 0) { 2319 nxt_unit_sptr_set(&resp->piggyback_content, buf->free); 2320 req_impl->state = NXT_UNIT_RS_RESPONSE_HAS_CONTENT; 2321 } 2322 2323 resp->piggyback_content_length += size; 2324 2325 buf->free = nxt_cpymem(buf->free, src, size); 2326 2327 return NXT_UNIT_OK; 2328} 2329 2330 2331int 2332nxt_unit_response_send(nxt_unit_request_info_t *req) 2333{ 2334 int rc; 
2335 nxt_unit_mmap_buf_t *mmap_buf; 2336 nxt_unit_request_info_impl_t *req_impl; 2337 2338 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2339 2340 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 2341 nxt_unit_req_warn(req, "send: response is not initialized yet"); 2342 2343 return NXT_UNIT_ERROR; 2344 } 2345 2346 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 2347 nxt_unit_req_warn(req, "send: response already sent"); 2348 2349 return NXT_UNIT_ERROR; 2350 } 2351 2352 if (req->request->websocket_handshake && req->response->status == 101) { 2353 nxt_unit_response_upgrade(req); 2354 } 2355 2356 nxt_unit_req_debug(req, "send: %"PRIu32" fields, %d bytes", 2357 req->response->fields_count, 2358 (int) (req->response_buf->free 2359 - req->response_buf->start)); 2360 2361 mmap_buf = nxt_container_of(req->response_buf, nxt_unit_mmap_buf_t, buf); 2362 2363 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0); 2364 if (nxt_fast_path(rc == NXT_UNIT_OK)) { 2365 req->response = NULL; 2366 req->response_buf = NULL; 2367 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT; 2368 2369 nxt_unit_mmap_buf_free(mmap_buf); 2370 } 2371 2372 return rc; 2373} 2374 2375 2376int 2377nxt_unit_response_is_sent(nxt_unit_request_info_t *req) 2378{ 2379 nxt_unit_request_info_impl_t *req_impl; 2380 2381 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2382 2383 return req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT; 2384} 2385 2386 2387nxt_unit_buf_t * 2388nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size) 2389{ 2390 int rc; 2391 nxt_unit_mmap_buf_t *mmap_buf; 2392 nxt_unit_request_info_impl_t *req_impl; 2393 2394 if (nxt_slow_path(size > PORT_MMAP_DATA_SIZE)) { 2395 nxt_unit_req_warn(req, "response_buf_alloc: " 2396 "requested buffer (%"PRIu32") too big", size); 2397 2398 return NULL; 2399 } 2400 2401 nxt_unit_req_debug(req, "response_buf_alloc: %"PRIu32, size); 2402 2403 req_impl = nxt_container_of(req, 
nxt_unit_request_info_impl_t, req); 2404 2405 mmap_buf = nxt_unit_mmap_buf_get(req->ctx); 2406 if (nxt_slow_path(mmap_buf == NULL)) { 2407 nxt_unit_req_alert(req, "response_buf_alloc: failed to allocate buf"); 2408 2409 return NULL; 2410 } 2411 2412 mmap_buf->req = req; 2413 2414 nxt_unit_mmap_buf_insert_tail(&req_impl->outgoing_buf, mmap_buf); 2415 2416 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, 2417 size, size, mmap_buf, 2418 NULL); 2419 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 2420 nxt_unit_mmap_buf_release(mmap_buf); 2421 2422 nxt_unit_req_alert(req, "response_buf_alloc: failed to get out buf"); 2423 2424 return NULL; 2425 } 2426 2427 return &mmap_buf->buf; 2428} 2429 2430 2431static nxt_unit_mmap_buf_t * 2432nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx) 2433{ 2434 nxt_unit_mmap_buf_t *mmap_buf; 2435 nxt_unit_ctx_impl_t *ctx_impl; 2436 2437 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 2438 2439 pthread_mutex_lock(&ctx_impl->mutex); 2440 2441 if (ctx_impl->free_buf == NULL) { 2442 pthread_mutex_unlock(&ctx_impl->mutex); 2443 2444 mmap_buf = nxt_unit_malloc(ctx, sizeof(nxt_unit_mmap_buf_t)); 2445 if (nxt_slow_path(mmap_buf == NULL)) { 2446 return NULL; 2447 } 2448 2449 } else { 2450 mmap_buf = ctx_impl->free_buf; 2451 2452 nxt_unit_mmap_buf_unlink(mmap_buf); 2453 2454 pthread_mutex_unlock(&ctx_impl->mutex); 2455 } 2456 2457 mmap_buf->ctx_impl = ctx_impl; 2458 2459 mmap_buf->hdr = NULL; 2460 mmap_buf->free_ptr = NULL; 2461 2462 return mmap_buf; 2463} 2464 2465 2466static void 2467nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf) 2468{ 2469 nxt_unit_mmap_buf_unlink(mmap_buf); 2470 2471 pthread_mutex_lock(&mmap_buf->ctx_impl->mutex); 2472 2473 nxt_unit_mmap_buf_insert(&mmap_buf->ctx_impl->free_buf, mmap_buf); 2474 2475 pthread_mutex_unlock(&mmap_buf->ctx_impl->mutex); 2476} 2477 2478 2479int 2480nxt_unit_request_is_websocket_handshake(nxt_unit_request_info_t *req) 2481{ 2482 return req->request->websocket_handshake; 2483} 2484 

/*
 * Upgrades the request to a websocket connection: registers the request
 * in the stream hash (so later websocket frames can find it), marks it
 * upgraded and forces status 101.  Idempotent; fails if the response is
 * not initialized, already sent, or the hash insert fails.
 */
int
nxt_unit_response_upgrade(nxt_unit_request_info_t *req)
{
    int                           rc;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->websocket != 0)) {
        nxt_unit_req_debug(req, "upgrade: already upgraded");

        return NXT_UNIT_OK;
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "upgrade: response is not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "upgrade: response already sent");

        return NXT_UNIT_ERROR;
    }

    rc = nxt_unit_request_hash_add(req->ctx, req);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_req_warn(req, "upgrade: failed to add request to hash");

        return NXT_UNIT_ERROR;
    }

    req_impl->websocket = 1;

    req->response->status = 101;

    return NXT_UNIT_OK;
}


/*
 * Returns non-zero once the request has been upgraded to a websocket.
 */
int
nxt_unit_response_is_websocket(nxt_unit_request_info_t *req)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    return req_impl->websocket;
}


/*
 * Maps an application data pointer (the extra_data area handed out by
 * nxt_unit_request_info_get) back to its owning request.
 */
nxt_unit_request_info_t *
nxt_unit_get_request_info_from_data(void *data)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(data, nxt_unit_request_info_impl_t, extra_data);

    return &req_impl->req;
}


/*
 * Sends one response body buffer for its request (headers must already
 * have been sent) and frees the buffer on success.  Empty buffers are
 * not transmitted but are still freed.
 */
int
nxt_unit_buf_send(nxt_unit_buf_t *buf)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    nxt_unit_req_debug(req, "buf_send: %d bytes",
                       (int) (buf->free - buf->start));

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "buf_send: response not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "buf_send: headers not sent yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_fast_path(buf->free > buf->start)) {
        rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return rc;
        }
    }

    nxt_unit_mmap_buf_free(mmap_buf);

    return NXT_UNIT_OK;
}


/*
 * Sends the buffer as the last message of the response, then releases
 * the request on success or finishes it with the error code on failure.
 */
static void
nxt_unit_buf_send_done(nxt_unit_buf_t *buf)
{
    int                      rc;
    nxt_unit_mmap_buf_t      *mmap_buf;
    nxt_unit_request_info_t  *req;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;

    rc = nxt_unit_mmap_buf_send(req, mmap_buf, 1);
    if (nxt_slow_path(rc == NXT_UNIT_OK)) {
        nxt_unit_mmap_buf_free(mmap_buf);

        nxt_unit_request_info_release(req);

    } else {
        nxt_unit_request_done(req, rc);
    }
}


/*
 * Low-level send of one buffer as a DATA message on the request's
 * response port.
 *
 * For shared-mmap buffers, only a small descriptor (mmap id, chunk id,
 * size) is sent; afterwards the buffer is shrunk to the unsent chunk
 * remainder (or detached when less than a chunk remains) and the
 * outgoing allocated-chunks counter is adjusted for the chunks handed
 * to the peer.  For plain buffers, the port message header is written
 * into the space reserved immediately before buf->start (plain_ptr) and
 * header plus payload are sent as one block.  In every case the buffer's
 * remaining outgoing storage is released before returning.
 */
static int
nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req,
    nxt_unit_mmap_buf_t *mmap_buf, int last)
{
    struct {
        nxt_port_msg_t       msg;
        nxt_port_mmap_msg_t  mmap_msg;
    } m;

    int                           rc;
    u_char                        *last_used, *first_free;
    ssize_t                       res;
    nxt_chunk_id_t                first_free_chunk;
    nxt_unit_buf_t                *buf;
    nxt_unit_impl_t               *lib;
    nxt_port_mmap_header_t        *hdr;
    nxt_unit_request_info_impl_t  *req_impl;

    lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    buf = &mmap_buf->buf;
    hdr = mmap_buf->hdr;

    m.mmap_msg.size = buf->free - buf->start;

    m.msg.stream = req_impl->stream;
    m.msg.pid = lib->pid;
    m.msg.reply_port = 0;
    m.msg.type = _NXT_PORT_MSG_DATA;
    m.msg.last = last != 0;
    m.msg.mmap = hdr != NULL && m.mmap_msg.size > 0;
    m.msg.nf = 0;
    m.msg.mf = 0;

    rc = NXT_UNIT_ERROR;

    if (m.msg.mmap) {
        m.mmap_msg.mmap_id = hdr->id;
        m.mmap_msg.chunk_id = nxt_port_mmap_chunk_id(hdr,
                                                     (u_char *) buf->start);

        nxt_unit_debug(req->ctx, "#%"PRIu32": send mmap: (%d,%d,%d)",
                       req_impl->stream,
                       (int) m.mmap_msg.mmap_id,
                       (int) m.mmap_msg.chunk_id,
                       (int) m.mmap_msg.size);

        res = nxt_unit_port_send(req->ctx, req->response_port, &m, sizeof(m),
                                 NULL);
        if (nxt_slow_path(res != sizeof(m))) {
            goto free_buf;
        }

        /* First chunk that holds no sent data; it stays ours. */
        last_used = (u_char *) buf->free - 1;
        first_free_chunk = nxt_port_mmap_chunk_id(hdr, last_used) + 1;

        if (buf->end - buf->free >= PORT_MMAP_CHUNK_SIZE) {
            /* Keep the unused whole-chunk tail as the buffer's new extent. */
            first_free = nxt_port_mmap_chunk_start(hdr, first_free_chunk);

            buf->start = (char *) first_free;
            buf->free = buf->start;

            if (buf->end < buf->start) {
                buf->end = buf->start;
            }

        } else {
            /* Everything consumed: detach the buffer from the mmap. */
            buf->start = NULL;
            buf->free = NULL;
            buf->end = NULL;

            mmap_buf->hdr = NULL;
        }

        /*
         * Chunks up to first_free_chunk now belong to the peer;
         * the delta here is negative or zero.
         */
        nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks,
                             (int) m.mmap_msg.chunk_id - (int) first_free_chunk);

        nxt_unit_debug(req->ctx, "allocated_chunks %d",
                       (int) lib->outgoing.allocated_chunks);

    } else {
        /* Plain buffer: the port msg header must fit before buf->start. */
        if (nxt_slow_path(mmap_buf->plain_ptr == NULL
                          || mmap_buf->plain_ptr > buf->start - sizeof(m.msg)))
        {
            nxt_unit_alert(req->ctx,
                           "#%"PRIu32": failed to send plain memory buffer"
                           ": no space reserved for message header",
                           req_impl->stream);

            goto free_buf;
        }

        memcpy(buf->start - sizeof(m.msg), &m.msg, sizeof(m.msg));

        nxt_unit_debug(req->ctx, "#%"PRIu32": send plain: %d",
                       req_impl->stream,
                       (int) (sizeof(m.msg) + m.mmap_msg.size));

        res = nxt_unit_port_send(req->ctx, req->response_port,
                                 buf->start - sizeof(m.msg),
                                 m.mmap_msg.size + sizeof(m.msg), NULL);

        if (nxt_slow_path(res != (ssize_t) (m.mmap_msg.size + sizeof(m.msg)))) {
            goto free_buf;
        }
    }

    rc = NXT_UNIT_OK;

free_buf:

    nxt_unit_free_outgoing_buf(mmap_buf);

    return rc;
}


/*
 * Public wrapper: frees a buffer without sending it.
 */
void
nxt_unit_buf_free(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_free(nxt_container_of(buf, nxt_unit_mmap_buf_t, buf));
}


/*
 * Releases a buffer's outgoing storage and returns the descriptor to
 * the context's free list.
 */
static void
nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_free_outgoing_buf(mmap_buf);

    nxt_unit_mmap_buf_release(mmap_buf);
}


/*
 * Releases whatever storage an outgoing buffer still holds: either its
 * shared-mmap chunk range (returned to the outgoing mmap) or its
 * heap-allocated plain region.  Safe to call more than once — both
 * pointers are cleared after release.
 */
static void
nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf)
{
    if (mmap_buf->hdr != NULL) {
        nxt_unit_mmap_release(&mmap_buf->ctx_impl->ctx,
                              mmap_buf->hdr, mmap_buf->buf.start,
                              mmap_buf->buf.end - mmap_buf->buf.start);

        mmap_buf->hdr = NULL;

        return;
    }

    if (mmap_buf->free_ptr != NULL) {
        nxt_unit_free(&mmap_buf->ctx_impl->ctx, mmap_buf->free_ptr);

        mmap_buf->free_ptr = NULL;
    }
}


/*
 * Gets a read buffer for the context (pooled or freshly allocated) with
 * its out-of-band data size reset.
 * NOTE(review): when nxt_unit_read_buf_get_impl() fails, rbuf is NULL
 * and the oob.size store below would dereference it — presumably
 * allocation failure aborts elsewhere; verify.
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_ctx_impl_t  *ctx_impl;
    nxt_unit_read_buf_t  *rbuf;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    rbuf = nxt_unit_read_buf_get_impl(ctx_impl);

    pthread_mutex_unlock(&ctx_impl->mutex);

    rbuf->oob.size = 0;

    return rbuf;
}


/*
 * Lock-free part of read-buffer acquisition: pops one from free_rbuf if
 * available, otherwise allocates a new one bound to the context.
 * Caller holds ctx_impl->mutex.  Returns NULL on allocation failure.
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get_impl(nxt_unit_ctx_impl_t *ctx_impl)
{
    nxt_queue_link_t     *link;
    nxt_unit_read_buf_t  *rbuf;

    if (!nxt_queue_is_empty(&ctx_impl->free_rbuf)) {
        link = nxt_queue_first(&ctx_impl->free_rbuf);
        nxt_queue_remove(link);

        rbuf = nxt_container_of(link, nxt_unit_read_buf_t, link);

        return rbuf;
    }

    rbuf = nxt_unit_malloc(&ctx_impl->ctx, sizeof(nxt_unit_read_buf_t));

    if (nxt_fast_path(rbuf != NULL)) {
        rbuf->ctx_impl = ctx_impl;
    }

    return rbuf;
}


/*
 * Returns a read buffer to the head of the context's free list.
 */
static void
nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
    nxt_unit_read_buf_t *rbuf)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    nxt_queue_insert_head(&ctx_impl->free_rbuf, &rbuf->link);

    pthread_mutex_unlock(&ctx_impl->mutex);
}


/*
 * Returns the next buffer in a chain, or NULL at the end.
 */
nxt_unit_buf_t *
nxt_unit_buf_next(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_t  *mmap_buf;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    if (mmap_buf->next == NULL) {
        return NULL;
    }

    return &mmap_buf->next->buf;
}


/*
 * Maximum payload a single outgoing buffer can carry.
 */
uint32_t
nxt_unit_buf_max(void)
{
    return PORT_MMAP_DATA_SIZE;
}


/*
 * Minimum allocation granularity of outgoing buffers (one mmap chunk).
 */
uint32_t
nxt_unit_buf_min(void)
{
    return PORT_MMAP_CHUNK_SIZE;
}


/*
 * Blocking-style response write: sends the whole region or fails.
 * Returns NXT_UNIT_OK, or the (positive) error code from the
 * non-blocking variant.
 */
int
nxt_unit_response_write(nxt_unit_request_info_t *req, const void *start,
    size_t size)
{
    ssize_t  res;

    res = nxt_unit_response_write_nb(req, start, size, size);

    /* write_nb returns a negated error code on failure. */
    return res < 0 ? -res : NXT_UNIT_OK;
}


/*
 * Non-blocking response body write.  Fills any remaining room in the
 * unsent header buffer first (sending the headers as a side effect),
 * then streams the rest in mmap-sized parts; each part requests at
 * least min_size (capped to one chunk) from the outgoing mmap pool.
 * Returns the number of bytes written (possibly short when no buffer
 * space was available), or a negated NXT_UNIT_* error code.
 */
ssize_t
nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start,
    size_t size, size_t min_size)
{
    int                           rc;
    ssize_t                       sent;
    uint32_t                      part_size, min_part_size, buf_size;
    const char                    *part_start;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    nxt_unit_req_debug(req, "write: %d", (int) size);

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    part_start = start;
    sent = 0;

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_alert(req, "write: response not initialized yet");

        return -NXT_UNIT_ERROR;
    }

    /* Check if the response has not been sent yet. */
    if (nxt_slow_path(req->response_buf != NULL)) {
        part_size = req->response_buf->end - req->response_buf->free;
        part_size = nxt_min(size, part_size);

        rc = nxt_unit_response_add_content(req, part_start, part_size);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        rc = nxt_unit_response_send(req);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        min_size -= nxt_min(min_size, part_size);
    }

    while (size > 0) {
        part_size = nxt_min(size, PORT_MMAP_DATA_SIZE);
        min_part_size = nxt_min(min_size, part_size);
        min_part_size = nxt_min(min_part_size, PORT_MMAP_CHUNK_SIZE);

        rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, part_size,
                                       min_part_size, &mmap_buf, local_buf);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        buf_size = mmap_buf.buf.end - mmap_buf.buf.free;
        if (nxt_slow_path(buf_size == 0)) {
            /* No space available right now: report the short write. */
            return sent;
        }
        part_size = nxt_min(buf_size, part_size);

        mmap_buf.buf.free = nxt_cpymem(mmap_buf.buf.free,
                                       part_start, part_size);

        rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        min_size -= nxt_min(min_size, part_size);
    }

    return sent;
}


/*
 * Callback-driven response body write: repeatedly pulls data from
 * read_info->read() until eof.  If the headers have not been sent, the
 * header buffer's free space is filled first (as piggyback content) and
 * the headers are sent; afterwards data is streamed through outgoing
 * buffers of read_info->buf_size (capped to PORT_MMAP_DATA_SIZE).
 */
int
nxt_unit_response_write_cb(nxt_unit_request_info_t *req,
    nxt_unit_read_info_t *read_info)
{
    int                           rc;
    ssize_t                       n;
    uint32_t                      buf_size;
    nxt_unit_buf_t                *buf;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_alert(req, "write: response not initialized yet");

        return NXT_UNIT_ERROR;
    }

    /* Check if the response has not been sent yet. */
    if (nxt_slow_path(req->response_buf != NULL)) {

        /* Enable content in headers buf. */
        rc = nxt_unit_response_add_content(req, "", 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            nxt_unit_req_error(req, "Failed to add piggyback content");

            return rc;
        }

        buf = req->response_buf;

        while (buf->end - buf->free > 0) {
            n = read_info->read(read_info, buf->free, buf->end - buf->free);
            if (nxt_slow_path(n < 0)) {
                nxt_unit_req_error(req, "Read error");

                return NXT_UNIT_ERROR;
            }

            /* Manually increase sizes. */
            buf->free += n;
            req->response->piggyback_content_length += n;

            if (read_info->eof) {
                break;
            }
        }

        rc = nxt_unit_response_send(req);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            nxt_unit_req_error(req, "Failed to send headers with content");

            return rc;
        }

        if (read_info->eof) {
            return NXT_UNIT_OK;
        }
    }

    while (!read_info->eof) {
        nxt_unit_req_debug(req, "write_cb, alloc %"PRIu32"",
                           read_info->buf_size);

        buf_size = nxt_min(read_info->buf_size, PORT_MMAP_DATA_SIZE);

        rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
                                       buf_size, buf_size,
                                       &mmap_buf, local_buf);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return rc;
        }

        buf = &mmap_buf.buf;

        while (!read_info->eof && buf->end > buf->free) {
            n = read_info->read(read_info, buf->free, buf->end - buf->free);
            if (nxt_slow_path(n < 0)) {
                nxt_unit_req_error(req, "Read error");

                nxt_unit_free_outgoing_buf(&mmap_buf);

                return NXT_UNIT_ERROR;
            }

            buf->free += n;
        }

        rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            nxt_unit_req_error(req, "Failed to send content");

            return rc;
        }
    }

    return NXT_UNIT_OK;
}


/*
 * Reads request body bytes: first from the in-memory content buffers,
 * then, when those are exhausted, from the spooled content fd (closed
 * once it returns short).  [Definition continues past this view.]
 */
ssize_t
nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size)
{
    ssize_t  buf_res, res;

    buf_res = nxt_unit_buf_read(&req->content_buf, &req->content_length,
                                dst, size);

    if (buf_res < (ssize_t) size && req->content_fd != -1) {
        res = read(req->content_fd, dst, size);
        if (nxt_slow_path(res < 0)) {
            nxt_unit_req_alert(req, "failed to read content: %s (%d)",
                               strerror(errno), errno);

            return res;
        }

        if (res < (ssize_t) size) {
            nxt_unit_close(req->content_fd);

            req->content_fd = -1;
        }

req->content_length -= res; 3079 size -= res; 3080 3081 dst = nxt_pointer_to(dst, res); 3082 3083 } else { 3084 res = 0; 3085 } 3086 3087 return buf_res + res; 3088} 3089 3090 3091ssize_t 3092nxt_unit_request_readline_size(nxt_unit_request_info_t *req, size_t max_size) 3093{ 3094 char *p; 3095 size_t l_size, b_size; 3096 nxt_unit_buf_t *b; 3097 nxt_unit_mmap_buf_t *mmap_buf, *preread_buf; 3098 3099 if (req->content_length == 0) { 3100 return 0; 3101 } 3102 3103 l_size = 0; 3104 3105 b = req->content_buf; 3106 3107 while (b != NULL) { 3108 b_size = b->end - b->free; 3109 p = memchr(b->free, '\n', b_size); 3110 3111 if (p != NULL) { 3112 p++; 3113 l_size += p - b->free; 3114 break; 3115 } 3116 3117 l_size += b_size; 3118 3119 if (max_size <= l_size) { 3120 break; 3121 } 3122 3123 mmap_buf = nxt_container_of(b, nxt_unit_mmap_buf_t, buf); 3124 if (mmap_buf->next == NULL 3125 && req->content_fd != -1 3126 && l_size < req->content_length) 3127 { 3128 preread_buf = nxt_unit_request_preread(req, 16384); 3129 if (nxt_slow_path(preread_buf == NULL)) { 3130 return -1; 3131 } 3132 3133 nxt_unit_mmap_buf_insert(&mmap_buf->next, preread_buf); 3134 } 3135 3136 b = nxt_unit_buf_next(b); 3137 } 3138 3139 return nxt_min(max_size, l_size); 3140} 3141 3142 3143static nxt_unit_mmap_buf_t * 3144nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size) 3145{ 3146 ssize_t res; 3147 nxt_unit_mmap_buf_t *mmap_buf; 3148 3149 if (req->content_fd == -1) { 3150 nxt_unit_req_alert(req, "preread: content_fd == -1"); 3151 return NULL; 3152 } 3153 3154 mmap_buf = nxt_unit_mmap_buf_get(req->ctx); 3155 if (nxt_slow_path(mmap_buf == NULL)) { 3156 nxt_unit_req_alert(req, "preread: failed to allocate buf"); 3157 return NULL; 3158 } 3159 3160 mmap_buf->free_ptr = nxt_unit_malloc(req->ctx, size); 3161 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) { 3162 nxt_unit_req_alert(req, "preread: failed to allocate buf memory"); 3163 nxt_unit_mmap_buf_release(mmap_buf); 3164 return NULL; 3165 } 3166 3167 
mmap_buf->plain_ptr = mmap_buf->free_ptr; 3168 3169 mmap_buf->hdr = NULL; 3170 mmap_buf->buf.start = mmap_buf->free_ptr; 3171 mmap_buf->buf.free = mmap_buf->buf.start; 3172 mmap_buf->buf.end = mmap_buf->buf.start + size; 3173 3174 res = read(req->content_fd, mmap_buf->free_ptr, size); 3175 if (res < 0) { 3176 nxt_unit_req_alert(req, "failed to read content: %s (%d)", 3177 strerror(errno), errno); 3178 3179 nxt_unit_mmap_buf_free(mmap_buf); 3180 3181 return NULL; 3182 } 3183 3184 if (res < (ssize_t) size) { 3185 nxt_unit_close(req->content_fd); 3186 3187 req->content_fd = -1; 3188 } 3189 3190 nxt_unit_req_debug(req, "preread: read %d", (int) res); 3191 3192 mmap_buf->buf.end = mmap_buf->buf.free + res; 3193 3194 return mmap_buf; 3195} 3196 3197 3198static ssize_t 3199nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, size_t size) 3200{ 3201 u_char *p; 3202 size_t rest, copy, read; 3203 nxt_unit_buf_t *buf, *last_buf; 3204 3205 p = dst; 3206 rest = size; 3207 3208 buf = *b; 3209 last_buf = buf; 3210 3211 while (buf != NULL) { 3212 last_buf = buf; 3213 3214 copy = buf->end - buf->free; 3215 copy = nxt_min(rest, copy); 3216 3217 p = nxt_cpymem(p, buf->free, copy); 3218 3219 buf->free += copy; 3220 rest -= copy; 3221 3222 if (rest == 0) { 3223 if (buf->end == buf->free) { 3224 buf = nxt_unit_buf_next(buf); 3225 } 3226 3227 break; 3228 } 3229 3230 buf = nxt_unit_buf_next(buf); 3231 } 3232 3233 *b = last_buf; 3234 3235 read = size - rest; 3236 3237 *len -= read; 3238 3239 return read; 3240} 3241 3242 3243void 3244nxt_unit_request_done(nxt_unit_request_info_t *req, int rc) 3245{ 3246 uint32_t size; 3247 nxt_port_msg_t msg; 3248 nxt_unit_impl_t *lib; 3249 nxt_unit_request_info_impl_t *req_impl; 3250 3251 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 3252 3253 nxt_unit_req_debug(req, "done: %d", rc); 3254 3255 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3256 goto skip_response_send; 3257 } 3258 3259 if (nxt_slow_path(req_impl->state < 
NXT_UNIT_RS_RESPONSE_INIT)) { 3260 3261 size = nxt_length("Content-Type") + nxt_length("text/plain"); 3262 3263 rc = nxt_unit_response_init(req, 200, 1, size); 3264 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3265 goto skip_response_send; 3266 } 3267 3268 rc = nxt_unit_response_add_field(req, "Content-Type", 3269 nxt_length("Content-Type"), 3270 "text/plain", nxt_length("text/plain")); 3271 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3272 goto skip_response_send; 3273 } 3274 } 3275 3276 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) { 3277 3278 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT; 3279 3280 nxt_unit_buf_send_done(req->response_buf); 3281 3282 return; 3283 } 3284 3285skip_response_send: 3286 3287 lib = nxt_container_of(req->unit, nxt_unit_impl_t, unit); 3288 3289 msg.stream = req_impl->stream; 3290 msg.pid = lib->pid; 3291 msg.reply_port = 0; 3292 msg.type = (rc == NXT_UNIT_OK) ? _NXT_PORT_MSG_DATA 3293 : _NXT_PORT_MSG_RPC_ERROR; 3294 msg.last = 1; 3295 msg.mmap = 0; 3296 msg.nf = 0; 3297 msg.mf = 0; 3298 3299 (void) nxt_unit_port_send(req->ctx, req->response_port, 3300 &msg, sizeof(msg), NULL); 3301 3302 nxt_unit_request_info_release(req); 3303} 3304 3305 3306int 3307nxt_unit_websocket_send(nxt_unit_request_info_t *req, uint8_t opcode, 3308 uint8_t last, const void *start, size_t size) 3309{ 3310 const struct iovec iov = { (void *) start, size }; 3311 3312 return nxt_unit_websocket_sendv(req, opcode, last, &iov, 1); 3313} 3314 3315 3316int 3317nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode, 3318 uint8_t last, const struct iovec *iov, int iovcnt) 3319{ 3320 int i, rc; 3321 size_t l, copy; 3322 uint32_t payload_len, buf_size, alloc_size; 3323 const uint8_t *b; 3324 nxt_unit_buf_t *buf; 3325 nxt_unit_mmap_buf_t mmap_buf; 3326 nxt_websocket_header_t *wh; 3327 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE]; 3328 3329 payload_len = 0; 3330 3331 for (i = 0; i < iovcnt; i++) { 3332 payload_len += iov[i].iov_len; 3333 } 3334 3335 buf_size = 
10 + payload_len; 3336 alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE); 3337 3338 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, 3339 alloc_size, alloc_size, 3340 &mmap_buf, local_buf); 3341 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3342 return rc; 3343 } 3344 3345 buf = &mmap_buf.buf; 3346 3347 buf->start[0] = 0; 3348 buf->start[1] = 0; 3349 3350 buf_size -= buf->end - buf->start; 3351 3352 wh = (void *) buf->free; 3353 3354 buf->free = nxt_websocket_frame_init(wh, payload_len); 3355 wh->fin = last; 3356 wh->opcode = opcode; 3357 3358 for (i = 0; i < iovcnt; i++) { 3359 b = iov[i].iov_base; 3360 l = iov[i].iov_len; 3361 3362 while (l > 0) { 3363 copy = buf->end - buf->free; 3364 copy = nxt_min(l, copy); 3365 3366 buf->free = nxt_cpymem(buf->free, b, copy); 3367 b += copy; 3368 l -= copy; 3369 3370 if (l > 0) { 3371 if (nxt_fast_path(buf->free > buf->start)) { 3372 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0); 3373 3374 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3375 return rc; 3376 } 3377 } 3378 3379 alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE); 3380 3381 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, 3382 alloc_size, alloc_size, 3383 &mmap_buf, local_buf); 3384 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3385 return rc; 3386 } 3387 3388 buf_size -= buf->end - buf->start; 3389 } 3390 } 3391 } 3392 3393 if (buf->free > buf->start) { 3394 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0); 3395 } 3396 3397 return rc; 3398} 3399 3400 3401ssize_t 3402nxt_unit_websocket_read(nxt_unit_websocket_frame_t *ws, void *dst, 3403 size_t size) 3404{ 3405 ssize_t res; 3406 uint8_t *b; 3407 uint64_t i, d; 3408 3409 res = nxt_unit_buf_read(&ws->content_buf, &ws->content_length, 3410 dst, size); 3411 3412 if (ws->mask == NULL) { 3413 return res; 3414 } 3415 3416 b = dst; 3417 d = (ws->payload_len - ws->content_length - res) % 4; 3418 3419 for (i = 0; i < (uint64_t) res; i++) { 3420 b[i] ^= ws->mask[ (i + d) % 4 ]; 3421 } 3422 3423 return res; 3424} 3425 

/*
 * Detach a websocket frame from its shared-memory/plain backing by copying
 * the frame into a private malloc'ed buffer, so the frame may outlive the
 * original buffer.  No-op if the frame already owns its memory.
 */
int
nxt_unit_websocket_retain(nxt_unit_websocket_frame_t *ws)
{
    char                             *b;
    size_t                           size, hsize;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);

    if (ws_impl->buf->free_ptr != NULL || ws_impl->buf->hdr != NULL) {
        return NXT_UNIT_OK;
    }

    size = ws_impl->buf->buf.end - ws_impl->buf->buf.start;

    b = nxt_unit_malloc(ws->req->ctx, size);
    if (nxt_slow_path(b == NULL)) {
        return NXT_UNIT_ERROR;
    }

    memcpy(b, ws_impl->buf->buf.start, size);

    hsize = nxt_websocket_frame_header_size(b);

    /* Re-point buffer and header/mask views into the private copy. */
    ws_impl->buf->buf.start = b;
    ws_impl->buf->buf.free = b + hsize;
    ws_impl->buf->buf.end = b + size;

    ws_impl->buf->free_ptr = b;

    ws_impl->ws.header = (nxt_websocket_header_t *) b;

    if (ws_impl->ws.header->mask) {
        /* Mask is the last 4 bytes of the frame header. */
        ws_impl->ws.mask = (uint8_t *) b + hsize - 4;

    } else {
        ws_impl->ws.mask = NULL;
    }

    return NXT_UNIT_OK;
}


/* Release a fully processed websocket frame. */
void
nxt_unit_websocket_done(nxt_unit_websocket_frame_t *ws)
{
    nxt_unit_websocket_frame_release(ws);
}


/*
 * Find (or create) an outgoing shared-memory segment with at least min_n
 * contiguous free chunks, preferring *n chunks.  On success returns the
 * segment header with *c set to the first chunk id and *n to the number of
 * chunks actually reserved; returns NULL on failure.  Takes and releases
 * lib->outgoing.mutex internally; may block waiting for a router SHM ACK.
 */
static nxt_port_mmap_header_t *
nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
    nxt_chunk_id_t *c, int *n, int min_n)
{
    int                     res, nchunks, i;
    uint32_t                outgoing_size;
    nxt_unit_mmap_t         *mm, *mm_end;
    nxt_unit_impl_t         *lib;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->outgoing.mutex);

retry:

    outgoing_size = lib->outgoing.size;

    mm_end = lib->outgoing.elts + outgoing_size;

    for (mm = lib->outgoing.elts; mm < mm_end; mm++) {
        hdr = mm->hdr;

        /*
         * Use only segments already shared with every port (0xFFFF) or
         * created by this thread for this specific port.
         */
        if (hdr->sent_over != 0xFFFFu
            && (hdr->sent_over != port->id.id
                || mm->src_thread != pthread_self()))
        {
            continue;
        }

        *c = 0;

        /* Grow a run of busy chunks starting at each free chunk found. */
        while (nxt_port_mmap_get_free_chunk(hdr->free_map, c)) {
            nchunks = 1;

            while (nchunks < *n) {
                res = nxt_port_mmap_chk_set_chunk_busy(hdr->free_map,
                                                       *c + nchunks);

                if (res == 0) {
                    /* Run interrupted; accept it if it meets min_n. */
                    if (nchunks >= min_n) {
                        *n = nchunks;

                        goto unlock;
                    }

                    /* Too short: free the run and restart past the gap. */
                    for (i = 0; i < nchunks; i++) {
                        nxt_port_mmap_set_chunk_free(hdr->free_map, *c + i);
                    }

                    *c += nchunks + 1;
                    nchunks = 0;
                    break;
                }

                nchunks++;
            }

            if (nchunks >= min_n) {
                *n = nchunks;

                goto unlock;
            }
        }

        /* Segment has no suitable run: flag it as out of shared memory. */
        hdr->oosm = 1;
    }

    if (outgoing_size >= lib->shm_mmap_limit) {
        /* Cannot allocate more shared memory. */
        pthread_mutex_unlock(&lib->outgoing.mutex);

        if (min_n == 0) {
            *n = 0;
        }

        if (nxt_slow_path(lib->outgoing.allocated_chunks + min_n
                          >= lib->shm_mmap_limit * PORT_MMAP_CHUNK_COUNT))
        {
            /* Memory allocated by application, but not send to router. */
            return NULL;
        }

        /* Notify router about OOSM condition. */

        res = nxt_unit_send_oosm(ctx, port);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            return NULL;
        }

        /* Return if caller can handle OOSM condition. Non-blocking mode.
         */

        if (min_n == 0) {
            return NULL;
        }

        nxt_unit_debug(ctx, "oosm: waiting for ACK");

        /* Blocking mode: wait for the router to free shared memory. */
        res = nxt_unit_wait_shm_ack(ctx);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            return NULL;
        }

        nxt_unit_debug(ctx, "oosm: retry");

        pthread_mutex_lock(&lib->outgoing.mutex);

        goto retry;
    }

    /* Below the mmap limit: create a brand-new outgoing segment. */
    *c = 0;
    hdr = nxt_unit_new_mmap(ctx, port, *n);

unlock:

    nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, *n);

    nxt_unit_debug(ctx, "allocated_chunks %d",
                   (int) lib->outgoing.allocated_chunks);

    pthread_mutex_unlock(&lib->outgoing.mutex);

    return hdr;
}


/* Tell the router this process ran out of outgoing shared memory. */
static int
nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
{
    ssize_t          res;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_OOSM;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;

    res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Block reading the context port until a SHM_ACK message arrives.  Other
 * messages received meanwhile are queued on pending_rbuf for later
 * processing; a QUIT message aborts the wait with NXT_UNIT_ERROR.
 */
static int
nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx)
{
    int                  res;
    nxt_unit_ctx_impl_t  *ctx_impl;
    nxt_unit_read_buf_t  *rbuf;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    while (1) {
        rbuf = nxt_unit_read_buf_get(ctx);
        if (nxt_slow_path(rbuf == NULL)) {
            return NXT_UNIT_ERROR;
        }

        do {
            res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
        } while (res == NXT_UNIT_AGAIN);

        if (res == NXT_UNIT_ERROR) {
            nxt_unit_read_buf_release(ctx, rbuf);

            return NXT_UNIT_ERROR;
        }

        if (nxt_unit_is_shm_ack(rbuf)) {
            nxt_unit_read_buf_release(ctx, rbuf);

            break;
        }

        /* Not the ACK: park the message for the normal processing loop. */
        pthread_mutex_lock(&ctx_impl->mutex);

        nxt_queue_insert_tail(&ctx_impl->pending_rbuf, &rbuf->link);

        pthread_mutex_unlock(&ctx_impl->mutex);

        if (nxt_unit_is_quit(rbuf)) {
            nxt_unit_debug(ctx, "oosm: quit received");

            return NXT_UNIT_ERROR;
        }
    }

    return NXT_UNIT_OK;
}


/*
 * Return the element at index i of the mmaps array, growing the array
 * (geometrically, via realloc) as needed.  New elements get a NULL hdr and
 * an empty awaiting_rbuf queue.  Returns NULL on allocation failure.
 * Caller is expected to hold mmaps->mutex.
 */
static nxt_unit_mmap_t *
nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i)
{
    uint32_t         cap, n;
    nxt_unit_mmap_t  *e;

    if (nxt_fast_path(mmaps->size > i)) {
        return mmaps->elts + i;
    }

    cap = mmaps->cap;

    if (cap == 0) {
        cap = i + 1;
    }

    /* Grow capacity: double while small, then by 1.5x. */
    while (i + 1 > cap) {

        if (cap < 16) {
            cap = cap * 2;

        } else {
            cap = cap + cap / 2;
        }
    }

    if (cap != mmaps->cap) {

        e = realloc(mmaps->elts, cap * sizeof(nxt_unit_mmap_t));
        if (nxt_slow_path(e == NULL)) {
            return NULL;
        }

        mmaps->elts = e;

        for (n = mmaps->cap; n < cap; n++) {
            e = mmaps->elts + n;

            e->hdr = NULL;
            nxt_queue_init(&e->awaiting_rbuf);
        }

        mmaps->cap = cap;
    }

    if (i + 1 > mmaps->size) {
        mmaps->size = i + 1;
    }

    return mmaps->elts + i;
}


/*
 * Create a new outgoing shared-memory segment, send its fd to the router,
 * and reserve its first n chunks.  Called with lib->outgoing.mutex held;
 * the mutex is temporarily released around the port send.  Returns the
 * mapped segment header or NULL on failure.
 */
static nxt_port_mmap_header_t *
nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n)
{
    int                     i, fd, rc;
    void                    *mem;
    nxt_unit_mmap_t         *mm;
    nxt_unit_impl_t         *lib;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    mm = nxt_unit_mmap_at(&lib->outgoing, lib->outgoing.size);
    if (nxt_slow_path(mm == NULL)) {
        nxt_unit_alert(ctx, "failed to add mmap to outgoing array");

        return NULL;
    }

    fd = nxt_unit_shm_open(ctx, PORT_MMAP_SIZE);
    if (nxt_slow_path(fd == -1)) {
        goto remove_fail;
    }

    mem = mmap(NULL, PORT_MMAP_SIZE, PROT_READ | PROT_WRITE,
               MAP_SHARED, fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", fd,
                       strerror(errno), errno);

        nxt_unit_close(fd);

        goto remove_fail;
    }

    mm->hdr = mem;
    hdr = mem;

    /* All chunks start out free (bitmaps are "1 == free"). */
    memset(hdr->free_map, 0xFFU, sizeof(hdr->free_map));
    memset(hdr->free_tracking_map, 0xFFU, sizeof(hdr->free_tracking_map));

    hdr->id = lib->outgoing.size - 1;
    hdr->src_pid = lib->pid;
    hdr->dst_pid = port->id.pid;
    hdr->sent_over = port->id.id;
    mm->src_thread = pthread_self();

    /* Mark first n chunk(s) as busy */
    for (i = 0; i < n; i++) {
        nxt_port_mmap_set_chunk_busy(hdr->free_map, i);
    }

    /* Mark as busy chunk followed the last available chunk. */
    nxt_port_mmap_set_chunk_busy(hdr->free_map, PORT_MMAP_CHUNK_COUNT);
    nxt_port_mmap_set_chunk_busy(hdr->free_tracking_map, PORT_MMAP_CHUNK_COUNT);

    /* Drop the lock while passing the fd to the router over the port. */
    pthread_mutex_unlock(&lib->outgoing.mutex);

    rc = nxt_unit_send_mmap(ctx, port, fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        munmap(mem, PORT_MMAP_SIZE);
        hdr = NULL;

    } else {
        nxt_unit_debug(ctx, "new mmap #%"PRIu32" created for %d -> %d",
                       hdr->id, (int) lib->pid, (int) port->id.pid);
    }

    nxt_unit_close(fd);

    pthread_mutex_lock(&lib->outgoing.mutex);

    if (nxt_fast_path(hdr != NULL)) {
        return hdr;
    }

remove_fail:

    /* Undo the array slot reserved by nxt_unit_mmap_at() above. */
    lib->outgoing.size--;

    return NULL;
}


/*
 * Create an anonymous shared-memory file descriptor of the given size.
 * Uses memfd_create, shm_open(SHM_ANON) or shm_open+shm_unlink depending
 * on platform support.  Returns the fd or -1 on failure.
 */
static int
nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size)
{
    int              fd;
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

#if (NXT_HAVE_MEMFD_CREATE || NXT_HAVE_SHM_OPEN)
    char    name[64];

    /* Name is informational only; pid + thread make it unique. */
    snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p",
             lib->pid, (void *) (uintptr_t) pthread_self());
#endif

#if (NXT_HAVE_MEMFD_CREATE)

    fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name,
                       strerror(errno), errno);

        return -1;
    }

    nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd);

#elif (NXT_HAVE_SHM_OPEN_ANON)

    fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)",
                       strerror(errno), errno);

        return -1;
    }

#elif (NXT_HAVE_SHM_OPEN)

    /* Just in case. */
    shm_unlink(name);

    fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name,
                       strerror(errno), errno);

        return -1;
    }

    /* Unlink immediately; the fd keeps the object alive. */
    if (nxt_slow_path(shm_unlink(name) == -1)) {
        nxt_unit_alert(ctx, "shm_unlink(%s) failed: %s (%d)", name,
                       strerror(errno), errno);
    }

#else

#error No working shared memory implementation.

#endif

    if (nxt_slow_path(ftruncate(fd, size) == -1)) {
        nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd,
                       strerror(errno), errno);

        nxt_unit_close(fd);

        return -1;
    }

    return fd;
}


/*
 * Send a shared-memory fd to the peer via the port using out-of-band
 * (SCM_RIGHTS-style) data.  Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
static int
nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int fd)
{
    ssize_t          res;
    nxt_send_oob_t   oob;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;
    int              fds[2] = {fd, -1};

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_MMAP;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;

    nxt_socket_msg_oob_init(&oob, fds);

    res = nxt_unit_port_send(ctx, port, &msg, sizeof(msg), &oob);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Prepare an outgoing buffer of "size" bytes in mmap_buf.  Small payloads
 * (<= NXT_UNIT_MAX_PLAIN_SIZE) use a plain buffer — the caller-provided
 * local_buf if any, otherwise malloc.  Larger payloads reserve shared-memory
 * chunks; if none are available and min_size is 0, an empty (zero-length)
 * buffer is returned so the caller can handle OOSM itself.
 */
static int
nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
    uint32_t size, uint32_t min_size,
    nxt_unit_mmap_buf_t *mmap_buf, char *local_buf)
{
    int                     nchunks, min_nchunks;
    nxt_chunk_id_t          c;
    nxt_port_mmap_header_t  *hdr;

    if (size <= NXT_UNIT_MAX_PLAIN_SIZE) {
        if (local_buf != NULL) {
            mmap_buf->free_ptr = NULL;
            mmap_buf->plain_ptr = local_buf;

        } else {
            /* Room for the port message header precedes the payload. */
            mmap_buf->free_ptr = nxt_unit_malloc(ctx,
                                                 size + sizeof(nxt_port_msg_t));
            if (nxt_slow_path(mmap_buf->free_ptr == NULL)) {
                return NXT_UNIT_ERROR;
            }

            mmap_buf->plain_ptr = mmap_buf->free_ptr;
        }

        mmap_buf->hdr = NULL;
        mmap_buf->buf.start = mmap_buf->plain_ptr + sizeof(nxt_port_msg_t);
        mmap_buf->buf.free = mmap_buf->buf.start;
        mmap_buf->buf.end = mmap_buf->buf.start + size;

        nxt_unit_debug(ctx, "outgoing plain buffer allocation: (%p, %d)",
                       mmap_buf->buf.start, (int) size);

        return NXT_UNIT_OK;
    }

    /* Round both sizes up to whole shared-memory chunks. */
    nchunks = (size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;
    min_nchunks = (min_size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;

    hdr = nxt_unit_mmap_get(ctx, port, &c, &nchunks, min_nchunks);
    if (nxt_slow_path(hdr == NULL)) {
        if (nxt_fast_path(min_nchunks == 0 && nchunks == 0)) {
            /* Non-blocking caller: report OOSM via an empty buffer. */
            mmap_buf->hdr = NULL;
            mmap_buf->buf.start = NULL;
            mmap_buf->buf.free = NULL;
            mmap_buf->buf.end = NULL;
            mmap_buf->free_ptr = NULL;

            return NXT_UNIT_OK;
        }

        return NXT_UNIT_ERROR;
    }

    mmap_buf->hdr = hdr;
    mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c);
    mmap_buf->buf.free = mmap_buf->buf.start;
    mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE;
    mmap_buf->free_ptr = NULL;
    mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_unit_debug(ctx, "outgoing mmap allocation: (%d,%d,%d)",
                   (int) hdr->id, (int) c,
                   (int) (nchunks * PORT_MMAP_CHUNK_SIZE));

    return NXT_UNIT_OK;
}


/*
 * Map an incoming shared-memory segment received from process "pid" and
 * register it in lib->incoming; wake any contexts whose read buffers were
 * waiting for this segment to arrive.
 */
static int
nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd)
{
    int                     rc;
    void                    *mem;
    nxt_queue_t             awaiting_rbuf;
    struct stat             mmap_stat;
    nxt_unit_mmap_t         *mm;
    nxt_unit_impl_t         *lib;
    nxt_unit_ctx_impl_t     *ctx_impl;
    nxt_unit_read_buf_t     *rbuf;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_unit_debug(ctx, "incoming_mmap: fd %d from process %d", fd, (int) pid);

    if (fstat(fd, &mmap_stat) == -1) {
        nxt_unit_alert(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd,
                       strerror(errno), errno);

        return NXT_UNIT_ERROR;
    }

    mem = mmap(NULL, mmap_stat.st_size, PROT_READ | PROT_WRITE,
               MAP_SHARED, fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "incoming_mmap: mmap() failed: %s (%d)",
strerror(errno), errno); 4014 4015 return NXT_UNIT_ERROR; 4016 } 4017 4018 hdr = mem; 4019 4020 if (nxt_slow_path(hdr->src_pid != pid)) { 4021 4022 nxt_unit_alert(ctx, "incoming_mmap: unexpected pid in mmap header " 4023 "detected: %d != %d or %d != %d", (int) hdr->src_pid, 4024 (int) pid, (int) hdr->dst_pid, (int) lib->pid); 4025 4026 munmap(mem, PORT_MMAP_SIZE); 4027 4028 return NXT_UNIT_ERROR; 4029 } 4030 4031 nxt_queue_init(&awaiting_rbuf); 4032 4033 pthread_mutex_lock(&lib->incoming.mutex); 4034 4035 mm = nxt_unit_mmap_at(&lib->incoming, hdr->id); 4036 if (nxt_slow_path(mm == NULL)) { 4037 nxt_unit_alert(ctx, "incoming_mmap: failed to add to incoming array"); 4038 4039 munmap(mem, PORT_MMAP_SIZE); 4040 4041 rc = NXT_UNIT_ERROR; 4042 4043 } else { 4044 mm->hdr = hdr; 4045 4046 hdr->sent_over = 0xFFFFu; 4047 4048 nxt_queue_add(&awaiting_rbuf, &mm->awaiting_rbuf); 4049 nxt_queue_init(&mm->awaiting_rbuf); 4050 4051 rc = NXT_UNIT_OK; 4052 } 4053 4054 pthread_mutex_unlock(&lib->incoming.mutex); 4055 4056 nxt_queue_each(rbuf, &awaiting_rbuf, nxt_unit_read_buf_t, link) { 4057 4058 ctx_impl = rbuf->ctx_impl; 4059 4060 pthread_mutex_lock(&ctx_impl->mutex); 4061 4062 nxt_queue_insert_head(&ctx_impl->pending_rbuf, &rbuf->link); 4063 4064 pthread_mutex_unlock(&ctx_impl->mutex); 4065 4066 nxt_atomic_fetch_add(&ctx_impl->wait_items, -1); 4067 4068 nxt_unit_awake_ctx(ctx, ctx_impl); 4069 4070 } nxt_queue_loop; 4071 4072 return rc; 4073} 4074 4075 4076static void 4077nxt_unit_awake_ctx(nxt_unit_ctx_t *ctx, nxt_unit_ctx_impl_t *ctx_impl) 4078{ 4079 nxt_port_msg_t msg; 4080 4081 if (nxt_fast_path(ctx == &ctx_impl->ctx)) { 4082 return; 4083 } 4084 4085 if (nxt_slow_path(ctx_impl->read_port == NULL 4086 || ctx_impl->read_port->out_fd == -1)) 4087 { 4088 nxt_unit_alert(ctx, "target context read_port is NULL or not writable"); 4089 4090 return; 4091 } 4092 4093 memset(&msg, 0, sizeof(nxt_port_msg_t)); 4094 4095 msg.type = _NXT_PORT_MSG_RPC_READY; 4096 4097 (void) 
nxt_unit_port_send(ctx, ctx_impl->read_port, 4098 &msg, sizeof(msg), NULL); 4099} 4100 4101 4102static void 4103nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps) 4104{ 4105 pthread_mutex_init(&mmaps->mutex, NULL); 4106 4107 mmaps->size = 0; 4108 mmaps->cap = 0; 4109 mmaps->elts = NULL; 4110 mmaps->allocated_chunks = 0; 4111} 4112 4113 4114nxt_inline void 4115nxt_unit_process_use(nxt_unit_process_t *process) 4116{ 4117 nxt_atomic_fetch_add(&process->use_count, 1); 4118} 4119 4120 4121nxt_inline void 4122nxt_unit_process_release(nxt_unit_process_t *process) 4123{ 4124 long c; 4125 4126 c = nxt_atomic_fetch_add(&process->use_count, -1); 4127 4128 if (c == 1) { 4129 nxt_unit_debug(NULL, "destroy process #%d", (int) process->pid); 4130 4131 nxt_unit_free(NULL, process); 4132 } 4133} 4134 4135 4136static void 4137nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps) 4138{ 4139 nxt_unit_mmap_t *mm, *end; 4140 4141 if (mmaps->elts != NULL) { 4142 end = mmaps->elts + mmaps->size; 4143 4144 for (mm = mmaps->elts; mm < end; mm++) { 4145 munmap(mm->hdr, PORT_MMAP_SIZE); 4146 } 4147 4148 nxt_unit_free(NULL, mmaps->elts); 4149 } 4150 4151 pthread_mutex_destroy(&mmaps->mutex); 4152} 4153 4154 4155static int 4156nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps, 4157 pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr, 4158 nxt_unit_read_buf_t *rbuf) 4159{ 4160 int res, need_rbuf; 4161 nxt_unit_mmap_t *mm; 4162 nxt_unit_ctx_impl_t *ctx_impl; 4163 4164 mm = nxt_unit_mmap_at(mmaps, id); 4165 if (nxt_slow_path(mm == NULL)) { 4166 nxt_unit_alert(ctx, "failed to allocate mmap"); 4167 4168 pthread_mutex_unlock(&mmaps->mutex); 4169 4170 *hdr = NULL; 4171 4172 return NXT_UNIT_ERROR; 4173 } 4174 4175 *hdr = mm->hdr; 4176 4177 if (nxt_fast_path(*hdr != NULL)) { 4178 return NXT_UNIT_OK; 4179 } 4180 4181 need_rbuf = nxt_queue_is_empty(&mm->awaiting_rbuf); 4182 4183 nxt_queue_insert_tail(&mm->awaiting_rbuf, &rbuf->link); 4184 4185 pthread_mutex_unlock(&mmaps->mutex); 4186 4187 
    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);

    if (need_rbuf) {
        res = nxt_unit_get_mmap(ctx, pid, id);
        if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
            return NXT_UNIT_ERROR;
        }
    }

    return NXT_UNIT_AGAIN;
}


/*
 * Translate the nxt_port_mmap_msg_t records of a received message into a
 * chain of incoming mmap buffers attached to recv_msg, resolving each
 * record's (mmap_id, chunk_id) into a shared-memory pointer.  Returns
 * NXT_UNIT_OK, NXT_UNIT_AGAIN when a referenced segment has not arrived
 * yet, or NXT_UNIT_ERROR.
 */
static int
nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg,
    nxt_unit_read_buf_t *rbuf)
{
    int                     res;
    void                    *start;
    uint32_t                size;
    nxt_unit_impl_t         *lib;
    nxt_unit_mmaps_t        *mmaps;
    nxt_unit_mmap_buf_t     *b, **incoming_tail;
    nxt_port_mmap_msg_t     *mmap_msg, *end;
    nxt_port_mmap_header_t  *hdr;

    if (nxt_slow_path(recv_msg->size < sizeof(nxt_port_mmap_msg_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: too small message (%d)",
                      recv_msg->stream, (int) recv_msg->size);

        return NXT_UNIT_ERROR;
    }

    mmap_msg = recv_msg->start;
    end = nxt_pointer_to(recv_msg->start, recv_msg->size);

    incoming_tail = &recv_msg->incoming_buf;

    /* Allocating buffer structures. */
    for (; mmap_msg < end; mmap_msg++) {
        b = nxt_unit_mmap_buf_get(ctx);
        if (nxt_slow_path(b == NULL)) {
            nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: failed to allocate buf",
                          recv_msg->stream);

            /* Unwind every buffer allocated so far. */
            while (recv_msg->incoming_buf != NULL) {
                nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
            }

            return NXT_UNIT_ERROR;
        }

        nxt_unit_mmap_buf_insert(incoming_tail, b);
        incoming_tail = &b->next;
    }

    b = recv_msg->incoming_buf;
    mmap_msg = recv_msg->start;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    mmaps = &lib->incoming;

    pthread_mutex_lock(&mmaps->mutex);

    /* Resolve each record into a pointer inside its incoming segment. */
    for (; mmap_msg < end; mmap_msg++) {
        res = nxt_unit_check_rbuf_mmap(ctx, mmaps,
                                       recv_msg->pid, mmap_msg->mmap_id,
                                       &hdr, rbuf);

        /* On AGAIN/ERROR check_rbuf_mmap() has already dropped the mutex. */
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            while (recv_msg->incoming_buf != NULL) {
                nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
            }

            return res;
        }

        start = nxt_port_mmap_chunk_start(hdr, mmap_msg->chunk_id);
        size = mmap_msg->size;

        /* Redirect the message payload to the first resolved chunk. */
        if (recv_msg->start == mmap_msg) {
            recv_msg->start = start;
            recv_msg->size = size;
        }

        b->buf.start = start;
        b->buf.free = start;
        b->buf.end = b->buf.start + size;
        b->hdr = hdr;

        b = b->next;

        nxt_unit_debug(ctx, "#%"PRIu32": mmap_read: [%p,%d] %d->%d,(%d,%d,%d)",
                       recv_msg->stream,
                       start, (int) size,
                       (int) hdr->src_pid, (int) hdr->dst_pid,
                       (int) hdr->id, (int) mmap_msg->chunk_id,
                       (int) mmap_msg->size);
    }

    pthread_mutex_unlock(&mmaps->mutex);

    return NXT_UNIT_OK;
}


/*
 * Ask the router (via a GET_MMAP message) to resend the shared-memory fd
 * for segment "id" of process "pid"; the reply goes to this context's
 * read port.  Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
static int
nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id)
{
    ssize_t              res;
    nxt_unit_impl_t      *lib;
    nxt_unit_ctx_impl_t  *ctx_impl;

    struct {
        nxt_port_msg_t           msg;
        nxt_port_msg_get_mmap_t  get_mmap;
    } m;

    lib =
nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4309 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 4310 4311 memset(&m.msg, 0, sizeof(nxt_port_msg_t)); 4312 4313 m.msg.pid = lib->pid; 4314 m.msg.reply_port = ctx_impl->read_port->id.id; 4315 m.msg.type = _NXT_PORT_MSG_GET_MMAP; 4316 4317 m.get_mmap.id = id; 4318 4319 nxt_unit_debug(ctx, "get_mmap: %d %d", (int) pid, (int) id); 4320 4321 res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL); 4322 if (nxt_slow_path(res != sizeof(m))) { 4323 return NXT_UNIT_ERROR; 4324 } 4325 4326 return NXT_UNIT_OK; 4327} 4328 4329 4330static void 4331nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, nxt_port_mmap_header_t *hdr, 4332 void *start, uint32_t size) 4333{ 4334 int freed_chunks; 4335 u_char *p, *end; 4336 nxt_chunk_id_t c; 4337 nxt_unit_impl_t *lib; 4338 4339 memset(start, 0xA5, size); 4340 4341 p = start; 4342 end = p + size; 4343 c = nxt_port_mmap_chunk_id(hdr, p); 4344 freed_chunks = 0; 4345 4346 while (p < end) { 4347 nxt_port_mmap_set_chunk_free(hdr->free_map, c); 4348 4349 p += PORT_MMAP_CHUNK_SIZE; 4350 c++; 4351 freed_chunks++; 4352 } 4353 4354 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4355 4356 if (hdr->src_pid == lib->pid && freed_chunks != 0) { 4357 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, -freed_chunks); 4358 4359 nxt_unit_debug(ctx, "allocated_chunks %d", 4360 (int) lib->outgoing.allocated_chunks); 4361 } 4362 4363 if (hdr->dst_pid == lib->pid 4364 && freed_chunks != 0 4365 && nxt_atomic_cmp_set(&hdr->oosm, 1, 0)) 4366 { 4367 nxt_unit_send_shm_ack(ctx, hdr->src_pid); 4368 } 4369} 4370 4371 4372static int 4373nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid) 4374{ 4375 ssize_t res; 4376 nxt_port_msg_t msg; 4377 nxt_unit_impl_t *lib; 4378 4379 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4380 4381 msg.stream = 0; 4382 msg.pid = lib->pid; 4383 msg.reply_port = 0; 4384 msg.type = _NXT_PORT_MSG_SHM_ACK; 4385 msg.last = 0; 4386 msg.mmap = 0; 4387 
/*
 * lvlhsh test callback for the processes hash: an entry matches when the
 * queried key is a pid_t equal to the stored entry's pid.
 *
 * NOTE(review): entries are inserted as nxt_unit_process_t * (see
 * nxt_unit_process_get()), but "data" is read here through
 * nxt_process_t * — verify the pid members of both types line up.
 */
static nxt_int_t
nxt_unit_lvlhsh_pid_test(nxt_lvlhsh_query_t *lhq, void *data)
{
    nxt_process_t  *process;

    process = data;

    if (lhq->key.length == sizeof(pid_t)
        && *(pid_t *) lhq->key.start == process->pid)
    {
        return NXT_OK;
    }

    return NXT_DECLINED;
}
(nxt_lvlhsh_insert(&lib->processes, &lhq)) { 4470 4471 case NXT_OK: 4472 break; 4473 4474 default: 4475 nxt_unit_alert(ctx, "process %d insert failed", (int) pid); 4476 4477 nxt_unit_free(ctx, process); 4478 process = NULL; 4479 break; 4480 } 4481 4482 return process; 4483} 4484 4485 4486static nxt_unit_process_t * 4487nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove) 4488{ 4489 int rc; 4490 nxt_lvlhsh_query_t lhq; 4491 4492 nxt_unit_process_lhq_pid(&lhq, &pid); 4493 4494 if (remove) { 4495 rc = nxt_lvlhsh_delete(&lib->processes, &lhq); 4496 4497 } else { 4498 rc = nxt_lvlhsh_find(&lib->processes, &lhq); 4499 } 4500 4501 if (rc == NXT_OK) { 4502 if (!remove) { 4503 nxt_unit_process_use(lhq.value); 4504 } 4505 4506 return lhq.value; 4507 } 4508 4509 return NULL; 4510} 4511 4512 4513static nxt_unit_process_t * 4514nxt_unit_process_pop_first(nxt_unit_impl_t *lib) 4515{ 4516 return nxt_lvlhsh_retrieve(&lib->processes, &lvlhsh_processes_proto, NULL); 4517} 4518 4519 4520int 4521nxt_unit_run(nxt_unit_ctx_t *ctx) 4522{ 4523 int rc; 4524 nxt_unit_ctx_impl_t *ctx_impl; 4525 4526 nxt_unit_ctx_use(ctx); 4527 4528 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 4529 4530 rc = NXT_UNIT_OK; 4531 4532 while (nxt_fast_path(ctx_impl->online)) { 4533 rc = nxt_unit_run_once_impl(ctx); 4534 4535 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { 4536 nxt_unit_quit(ctx, NXT_QUIT_NORMAL); 4537 break; 4538 } 4539 } 4540 4541 nxt_unit_ctx_release(ctx); 4542 4543 return rc; 4544} 4545 4546 4547int 4548nxt_unit_run_once(nxt_unit_ctx_t *ctx) 4549{ 4550 int rc; 4551 4552 nxt_unit_ctx_use(ctx); 4553 4554 rc = nxt_unit_run_once_impl(ctx); 4555 4556 nxt_unit_ctx_release(ctx); 4557 4558 return rc; 4559} 4560 4561 4562static int 4563nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx) 4564{ 4565 int rc; 4566 nxt_unit_read_buf_t *rbuf; 4567 4568 rbuf = nxt_unit_read_buf_get(ctx); 4569 if (nxt_slow_path(rbuf == NULL)) { 4570 return NXT_UNIT_ERROR; 4571 } 4572 4573 rc = 
/*
 * Receives one message into "rbuf" for this context, preferring the
 * lock-free shared-memory queues over socket reads and falling back to
 * poll(2) on the context port and (when the context is ready) the
 * shared application port.  Returns NXT_UNIT_OK with rbuf filled,
 * NXT_UNIT_AGAIN, or NXT_UNIT_ERROR.
 */
static int
nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf)
{
    int                   nevents, res, err;
    nxt_uint_t            nfds;
    nxt_unit_impl_t       *lib;
    nxt_unit_ctx_impl_t   *ctx_impl;
    nxt_unit_port_impl_t  *port_impl;
    struct pollfd         fds[2];

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    /*
     * While replies are awaited, or the context is not ready for new
     * requests, read only from the private context port.
     */
    if (ctx_impl->wait_items > 0 || !nxt_unit_chk_ready(ctx)) {
        return nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
    }

    port_impl = nxt_container_of(ctx_impl->read_port, nxt_unit_port_impl_t,
                                 port);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

retry:

    if (port_impl->from_socket == 0) {
        /* Try the port's shared-memory queue first. */
        res = nxt_unit_port_queue_recv(ctx_impl->read_port, rbuf);
        if (res == NXT_UNIT_OK) {
            if (nxt_unit_is_read_socket(rbuf)) {
                /*
                 * READ_SOCKET marker: the next real message must come
                 * from the socket rather than the queue.
                 */
                port_impl->from_socket++;

                nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d",
                               (int) ctx_impl->read_port->id.pid,
                               (int) ctx_impl->read_port->id.id,
                               port_impl->from_socket);

            } else {
                nxt_unit_debug(ctx, "port{%d,%d} dequeue %d",
                               (int) ctx_impl->read_port->id.pid,
                               (int) ctx_impl->read_port->id.id,
                               (int) rbuf->size);

                return NXT_UNIT_OK;
            }
        }
    }

    if (nxt_fast_path(nxt_unit_chk_ready(ctx))) {
        /* Also try the shared application queue without blocking. */
        res = nxt_unit_app_queue_recv(ctx, lib->shared_port, rbuf);
        if (res == NXT_UNIT_OK) {
            return NXT_UNIT_OK;
        }

        fds[1].fd = lib->shared_port->in_fd;
        fds[1].events = POLLIN;

        nfds = 2;

    } else {
        nfds = 1;
    }

    fds[0].fd = ctx_impl->read_port->in_fd;
    fds[0].events = POLLIN;
    fds[0].revents = 0;

    /*
     * NOTE(review): when nfds == 1, fds[1].fd stays unset and is only
     * read by the debug/alert format calls below — confirm intended.
     */
    fds[1].revents = 0;

    /* Block until either descriptor becomes readable. */
    nevents = poll(fds, nfds, -1);
    if (nxt_slow_path(nevents == -1)) {
        err = errno;

        if (err == EINTR) {
            goto retry;
        }

        nxt_unit_alert(ctx, "poll(%d,%d) failed: %s (%d)",
                       fds[0].fd, fds[1].fd, strerror(err), err);

        rbuf->size = -1;

        return (err == EAGAIN) ? NXT_UNIT_AGAIN : NXT_UNIT_ERROR;
    }

    nxt_unit_debug(ctx, "poll(%d,%d): %d, revents [%04X, %04X]",
                   fds[0].fd, fds[1].fd, nevents, fds[0].revents,
                   fds[1].revents);

    if ((fds[0].revents & POLLIN) != 0) {
        res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
        if (res == NXT_UNIT_AGAIN) {
            goto retry;
        }

        return res;
    }

    if ((fds[1].revents & POLLIN) != 0) {
        res = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
        if (res == NXT_UNIT_AGAIN) {
            goto retry;
        }

        return res;
    }

    nxt_unit_alert(ctx, "poll(%d,%d): %d unexpected revents [%04uXi, %04uXi]",
                   fds[0].fd, fds[1].fd, nevents, fds[0].revents,
                   fds[1].revents);

    return NXT_UNIT_ERROR;
}
/*
 * Drains ctx_impl->ready_req: for each request whose port became ready,
 * sends a headers ACK and either defers to the data handler (when more
 * body bytes are expected) or invokes the application's request handler.
 * Requests whose ACK or hash insertion fails are finished with an error.
 */
static void
nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx)
{
    int                           res;
    nxt_queue_t                   ready_req;
    nxt_unit_impl_t               *lib;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (nxt_queue_is_empty(&ctx_impl->ready_req)) {
        pthread_mutex_unlock(&ctx_impl->mutex);

        return;
    }

    /* Move the whole list to a local queue so the lock is held briefly. */
    nxt_queue_init(&ready_req);

    nxt_queue_add(&ready_req, &ctx_impl->ready_req);
    nxt_queue_init(&ctx_impl->ready_req);

    pthread_mutex_unlock(&ctx_impl->mutex);

    nxt_queue_each(req_impl, &ready_req,
                   nxt_unit_request_info_impl_t, port_wait_link)
    {
        lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit);

        req = &req_impl->req;

        res = nxt_unit_send_req_headers_ack(req);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            nxt_unit_request_done(req, NXT_UNIT_ERROR);

            continue;
        }

        /* More body bytes expected than fit in the preread buffer. */
        if (req->content_length
            > (uint64_t) (req->content_buf->end - req->content_buf->free))
        {
            res = nxt_unit_request_hash_add(ctx, req);
            if (nxt_slow_path(res != NXT_UNIT_OK)) {
                nxt_unit_req_warn(req, "failed to add request to hash");

                nxt_unit_request_done(req, NXT_UNIT_ERROR);

                continue;
            }

            /*
             * If application have separate data handler, we may start
             * request processing and process data when it is arrived.
             */
            if (lib->callbacks.data_handler == NULL) {
                continue;
            }
        }

        lib->callbacks.request_handler(&req_impl->req);

    } nxt_queue_loop;
}
/*
 * Worker loop that consumes only the shared application port.  Runs
 * until the context stops being ready (see nxt_unit_chk_ready()) or an
 * error occurs.  Returns the last NXT_UNIT_* result.
 */
int
nxt_unit_run_shared(nxt_unit_ctx_t *ctx)
{
    int                  rc;
    nxt_unit_impl_t      *lib;
    nxt_unit_read_buf_t  *rbuf;

    nxt_unit_ctx_use(ctx);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    rc = NXT_UNIT_OK;

    while (nxt_fast_path(nxt_unit_chk_ready(ctx))) {
        rbuf = nxt_unit_read_buf_get(ctx);
        if (nxt_slow_path(rbuf == NULL)) {
            rc = NXT_UNIT_ERROR;
            break;
        }

    retry:

        rc = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
        if (rc == NXT_UNIT_AGAIN) {
            goto retry;
        }

        if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
            nxt_unit_read_buf_release(ctx, rbuf);
            break;
        }

        /*
         * rbuf is not released here on success/error — presumably
         * nxt_unit_process_msg() takes ownership of it; verify against
         * that function's definition.
         */
        rc = nxt_unit_process_msg(ctx, rbuf, NULL);
        if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
            break;
        }
    }

    nxt_unit_ctx_release(ctx);

    return rc;
}
4995 req = NULL; 4996 4997 if (nxt_slow_path(!nxt_unit_chk_ready(ctx))) { 4998 goto done; 4999 } 5000 5001 rbuf = nxt_unit_read_buf_get(ctx); 5002 if (nxt_slow_path(rbuf == NULL)) { 5003 goto done; 5004 } 5005 5006 rc = nxt_unit_app_queue_recv(ctx, lib->shared_port, rbuf); 5007 if (rc != NXT_UNIT_OK) { 5008 nxt_unit_read_buf_release(ctx, rbuf); 5009 goto done; 5010 } 5011 5012 (void) nxt_unit_process_msg(ctx, rbuf, &req); 5013 5014done: 5015 5016 nxt_unit_ctx_release(ctx); 5017 5018 return req; 5019} 5020 5021 5022int 5023nxt_unit_process_port_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) 5024{ 5025 int rc; 5026 5027 nxt_unit_ctx_use(ctx); 5028 5029 rc = nxt_unit_process_port_msg_impl(ctx, port); 5030 5031 nxt_unit_ctx_release(ctx); 5032 5033 return rc; 5034} 5035 5036 5037static int 5038nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) 5039{ 5040 int rc; 5041 nxt_unit_impl_t *lib; 5042 nxt_unit_read_buf_t *rbuf; 5043 5044 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 5045 5046 if (port == lib->shared_port && !nxt_unit_chk_ready(ctx)) { 5047 return NXT_UNIT_AGAIN; 5048 } 5049 5050 rbuf = nxt_unit_read_buf_get(ctx); 5051 if (nxt_slow_path(rbuf == NULL)) { 5052 return NXT_UNIT_ERROR; 5053 } 5054 5055 if (port == lib->shared_port) { 5056 rc = nxt_unit_shared_port_recv(ctx, port, rbuf); 5057 5058 } else { 5059 rc = nxt_unit_ctx_port_recv(ctx, port, rbuf); 5060 } 5061 5062 if (rc != NXT_UNIT_OK) { 5063 nxt_unit_read_buf_release(ctx, rbuf); 5064 return rc; 5065 } 5066 5067 rc = nxt_unit_process_msg(ctx, rbuf, NULL); 5068 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { 5069 return NXT_UNIT_ERROR; 5070 } 5071 5072 rc = nxt_unit_process_pending_rbuf(ctx); 5073 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { 5074 return NXT_UNIT_ERROR; 5075 } 5076 5077 nxt_unit_process_ready_req(ctx); 5078 5079 return rc; 5080} 5081 5082 5083void 5084nxt_unit_done(nxt_unit_ctx_t *ctx) 5085{ 5086 nxt_unit_ctx_release(ctx); 5087} 5088 5089 5090nxt_unit_ctx_t * 
/*
 * Allocates and initializes a new context (typically one per worker
 * thread): creates a dedicated read port, backs it with a shared-memory
 * message queue, and announces the port to the router.  Returns the new
 * context or NULL on failure.  "data" becomes the context's user data.
 */
nxt_unit_ctx_t *
nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data)
{
    int                   rc, queue_fd;
    void                  *mem;
    nxt_unit_impl_t       *lib;
    nxt_unit_port_t       *port;
    nxt_unit_ctx_impl_t   *new_ctx;
    nxt_unit_port_impl_t  *port_impl;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    /* Trailing bytes hold per-request application data. */
    new_ctx = nxt_unit_malloc(ctx, sizeof(nxt_unit_ctx_impl_t)
                              + lib->request_data_size);
    if (nxt_slow_path(new_ctx == NULL)) {
        nxt_unit_alert(ctx, "failed to allocate context");

        return NULL;
    }

    rc = nxt_unit_ctx_init(lib, new_ctx, data);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_free(ctx, new_ctx);

        return NULL;
    }

    queue_fd = -1;

    port = nxt_unit_create_port(&new_ctx->ctx);
    if (nxt_slow_path(port == NULL)) {
        goto fail;
    }

    new_ctx->read_port = port;

    /* Shared-memory segment backing the port's message queue. */
    queue_fd = nxt_unit_shm_open(&new_ctx->ctx, sizeof(nxt_port_queue_t));
    if (nxt_slow_path(queue_fd == -1)) {
        goto fail;
    }

    mem = mmap(NULL, sizeof(nxt_port_queue_t),
               PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd,
                       strerror(errno), errno);

        goto fail;
    }

    nxt_port_queue_init(mem);

    port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
    port_impl->queue = mem;

    /* Hand the new port (and its queue fd) to the router. */
    rc = nxt_unit_send_port(&new_ctx->ctx, lib->router_port, port, queue_fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        goto fail;
    }

    nxt_unit_close(queue_fd);

    return &new_ctx->ctx;

fail:

    if (queue_fd != -1) {
        nxt_unit_close(queue_fd);
    }

    /* Releasing the context also tears down the read port if created. */
    nxt_unit_ctx_release(&new_ctx->ctx);

    return NULL;
}
nxt_unit_request_info_impl_t *req_impl; 5173 nxt_unit_websocket_frame_impl_t *ws_impl; 5174 5175 lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit); 5176 5177 nxt_queue_each(req_impl, &ctx_impl->active_req, 5178 nxt_unit_request_info_impl_t, link) 5179 { 5180 nxt_unit_req_warn(&req_impl->req, "active request on ctx free"); 5181 5182 nxt_unit_request_done(&req_impl->req, NXT_UNIT_ERROR); 5183 5184 } nxt_queue_loop; 5185 5186 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[0]); 5187 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[1]); 5188 5189 while (ctx_impl->free_buf != NULL) { 5190 mmap_buf = ctx_impl->free_buf; 5191 nxt_unit_mmap_buf_unlink(mmap_buf); 5192 nxt_unit_free(&ctx_impl->ctx, mmap_buf); 5193 } 5194 5195 nxt_queue_each(req_impl, &ctx_impl->free_req, 5196 nxt_unit_request_info_impl_t, link) 5197 { 5198 nxt_unit_request_info_free(req_impl); 5199 5200 } nxt_queue_loop; 5201 5202 nxt_queue_each(ws_impl, &ctx_impl->free_ws, 5203 nxt_unit_websocket_frame_impl_t, link) 5204 { 5205 nxt_unit_websocket_frame_free(&ctx_impl->ctx, ws_impl); 5206 5207 } nxt_queue_loop; 5208 5209 nxt_queue_each(rbuf, &ctx_impl->free_rbuf, nxt_unit_read_buf_t, link) 5210 { 5211 if (rbuf != &ctx_impl->ctx_read_buf) { 5212 nxt_unit_free(&ctx_impl->ctx, rbuf); 5213 } 5214 } nxt_queue_loop; 5215 5216 pthread_mutex_destroy(&ctx_impl->mutex); 5217 5218 pthread_mutex_lock(&lib->mutex); 5219 5220 nxt_queue_remove(&ctx_impl->link); 5221 5222 pthread_mutex_unlock(&lib->mutex); 5223 5224 if (nxt_fast_path(ctx_impl->read_port != NULL)) { 5225 nxt_unit_remove_port(lib, NULL, &ctx_impl->read_port->id); 5226 nxt_unit_port_release(ctx_impl->read_port); 5227 } 5228 5229 if (ctx_impl != &lib->main_ctx) { 5230 nxt_unit_free(&lib->main_ctx.ctx, ctx_impl); 5231 } 5232 5233 nxt_unit_lib_release(lib); 5234} 5235 5236 5237/* SOCK_SEQPACKET is disabled to test SOCK_DGRAM on all platforms. 
*/ 5238#if (0 || NXT_HAVE_AF_UNIX_SOCK_SEQPACKET) 5239#define NXT_UNIX_SOCKET SOCK_SEQPACKET 5240#else 5241#define NXT_UNIX_SOCKET SOCK_DGRAM 5242#endif 5243 5244 5245void 5246nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id) 5247{ 5248 nxt_unit_port_hash_id_t port_hash_id; 5249 5250 port_hash_id.pid = pid; 5251 port_hash_id.id = id; 5252 5253 port_id->pid = pid; 5254 port_id->hash = nxt_murmur_hash2(&port_hash_id, sizeof(port_hash_id)); 5255 port_id->id = id; 5256} 5257 5258 5259static nxt_unit_port_t * 5260nxt_unit_create_port(nxt_unit_ctx_t *ctx) 5261{ 5262 int rc, port_sockets[2]; 5263 nxt_unit_impl_t *lib; 5264 nxt_unit_port_t new_port, *port; 5265 nxt_unit_process_t *process; 5266 5267 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 5268 5269 rc = socketpair(AF_UNIX, NXT_UNIX_SOCKET, 0, port_sockets); 5270 if (nxt_slow_path(rc != 0)) { 5271 nxt_unit_warn(ctx, "create_port: socketpair() failed: %s (%d)", 5272 strerror(errno), errno); 5273 5274 return NULL; 5275 } 5276 5277#if (NXT_HAVE_SOCKOPT_SO_PASSCRED) 5278 int enable_creds = 1; 5279 5280 if (nxt_slow_path(setsockopt(port_sockets[0], SOL_SOCKET, SO_PASSCRED, 5281 &enable_creds, sizeof(enable_creds)) == -1)) 5282 { 5283 nxt_unit_warn(ctx, "failed to set SO_PASSCRED %s", strerror(errno)); 5284 return NULL; 5285 } 5286 5287 if (nxt_slow_path(setsockopt(port_sockets[1], SOL_SOCKET, SO_PASSCRED, 5288 &enable_creds, sizeof(enable_creds)) == -1)) 5289 { 5290 nxt_unit_warn(ctx, "failed to set SO_PASSCRED %s", strerror(errno)); 5291 return NULL; 5292 } 5293#endif 5294 5295 nxt_unit_debug(ctx, "create_port: new socketpair: %d->%d", 5296 port_sockets[0], port_sockets[1]); 5297 5298 pthread_mutex_lock(&lib->mutex); 5299 5300 process = nxt_unit_process_get(ctx, lib->pid); 5301 if (nxt_slow_path(process == NULL)) { 5302 pthread_mutex_unlock(&lib->mutex); 5303 5304 nxt_unit_close(port_sockets[0]); 5305 nxt_unit_close(port_sockets[1]); 5306 5307 return NULL; 5308 } 5309 5310 
/*
 * Sends a NEW_PORT message describing "port" to "dst" (the router).
 * The port's out_fd and the queue shared-memory fd travel as ancillary
 * (out-of-band) data.  Returns NXT_UNIT_OK when the whole message was
 * written, NXT_UNIT_ERROR otherwise.
 */
static int
nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst,
    nxt_unit_port_t *port, int queue_fd)
{
    ssize_t          res;
    nxt_send_oob_t   oob;
    nxt_unit_impl_t  *lib;
    /* Both descriptors are passed via the OOB control message. */
    int              fds[2] = { port->out_fd, queue_fd };

    struct {
        nxt_port_msg_t           msg;
        nxt_port_msg_new_port_t  new_port;
    } m;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    m.msg.stream = 0;
    m.msg.pid = lib->pid;
    m.msg.reply_port = 0;
    m.msg.type = _NXT_PORT_MSG_NEW_PORT;
    m.msg.last = 0;
    m.msg.mmap = 0;
    m.msg.nf = 0;
    m.msg.mf = 0;

    m.new_port.id = port->id.id;
    m.new_port.pid = port->id.pid;
    m.new_port.type = NXT_PROCESS_APP;
    m.new_port.max_size = 16 * 1024;
    m.new_port.max_share = 64 * 1024;

    nxt_socket_msg_oob_init(&oob, fds);

    res = nxt_unit_port_send(ctx, dst, &m, sizeof(m), &oob);

    return (res == sizeof(m)) ? NXT_UNIT_OK : NXT_UNIT_ERROR;
}
/*
 * Registers "port" in the library's port hash.  If a port with the same
 * id already exists, the new descriptors/data/queue are merged into the
 * existing entry and surplus descriptors are closed; otherwise a new
 * nxt_unit_port_impl_t is created and linked to its owner process.
 * In both cases, once the port has usable descriptors (and after the
 * optional add_port callback) requests waiting for this port are moved
 * to their contexts' ready queues.  Returns the registered port or NULL.
 */
static nxt_unit_port_t *
nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *queue)
{
    int                   rc, ready;
    nxt_queue_t           awaiting_req;
    nxt_unit_impl_t       *lib;
    nxt_unit_port_t       *old_port;
    nxt_unit_process_t    *process;
    nxt_unit_port_impl_t  *new_port, *old_port_impl;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->mutex);

    old_port = nxt_unit_port_hash_find(&lib->ports, &port->id, 0);

    if (nxt_slow_path(old_port != NULL)) {
        nxt_unit_debug(ctx, "add_port: duplicate port{%d,%d} "
                       "in_fd %d out_fd %d queue %p",
                       port->id.pid, port->id.id,
                       port->in_fd, port->out_fd, queue);

        /* Merge data/fds into the existing entry; close leftovers. */
        if (old_port->data == NULL) {
            old_port->data = port->data;
            port->data = NULL;
        }

        if (old_port->in_fd == -1) {
            old_port->in_fd = port->in_fd;
            port->in_fd = -1;
        }

        if (port->in_fd != -1) {
            nxt_unit_close(port->in_fd);
            port->in_fd = -1;
        }

        if (old_port->out_fd == -1) {
            old_port->out_fd = port->out_fd;
            port->out_fd = -1;
        }

        if (port->out_fd != -1) {
            nxt_unit_close(port->out_fd);
            port->out_fd = -1;
        }

        /* Reflect the merged state back to the caller's struct. */
        *port = *old_port;

        nxt_queue_init(&awaiting_req);

        old_port_impl = nxt_container_of(old_port, nxt_unit_port_impl_t, port);

        if (old_port_impl->queue == NULL) {
            old_port_impl->queue = queue;
        }

        ready = (port->in_fd != -1 || port->out_fd != -1);

        /*
         * Port can be marked as 'ready' only after callbacks.add_port() call.
         * Otherwise, request may try to use the port before callback.
         */
        if (lib->callbacks.add_port == NULL && ready) {
            old_port_impl->ready = ready;

            if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) {
                nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req);
                nxt_queue_init(&old_port_impl->awaiting_req);
            }
        }

        pthread_mutex_unlock(&lib->mutex);

        if (lib->callbacks.add_port != NULL && ready) {
            /* Callback runs unlocked; re-lock to flip 'ready'. */
            lib->callbacks.add_port(ctx, old_port);

            pthread_mutex_lock(&lib->mutex);

            old_port_impl->ready = ready;

            if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) {
                nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req);
                nxt_queue_init(&old_port_impl->awaiting_req);
            }

            pthread_mutex_unlock(&lib->mutex);
        }

        nxt_unit_process_awaiting_req(ctx, &awaiting_req);

        return old_port;
    }

    new_port = NULL;
    ready = 0;

    nxt_unit_debug(ctx, "add_port: port{%d,%d} in_fd %d out_fd %d queue %p",
                   port->id.pid, port->id.id,
                   port->in_fd, port->out_fd, queue);

    process = nxt_unit_process_get(ctx, port->id.pid);
    if (nxt_slow_path(process == NULL)) {
        goto unlock;
    }

    /* Keep the per-process port id allocator ahead of seen ids. */
    if (port->id.id != NXT_UNIT_SHARED_PORT_ID
        && port->id.id >= process->next_port_id)
    {
        process->next_port_id = port->id.id + 1;
    }

    new_port = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
    if (nxt_slow_path(new_port == NULL)) {
        nxt_unit_alert(ctx, "add_port: %d,%d malloc() failed",
                       port->id.pid, port->id.id);

        goto unlock;
    }

    new_port->port = *port;

    rc = nxt_unit_port_hash_add(&lib->ports, &new_port->port);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_alert(ctx, "add_port: %d,%d hash_add failed",
                       port->id.pid, port->id.id);

        nxt_unit_free(ctx, new_port);

        new_port = NULL;

        goto unlock;
    }

    nxt_queue_insert_tail(&process->ports, &new_port->link);

    new_port->use_count = 2;
    new_port->process = process;
    new_port->queue = queue;
    new_port->from_socket = 0;
    new_port->socket_rbuf = NULL;

    nxt_queue_init(&new_port->awaiting_req);

    ready = (port->in_fd != -1 || port->out_fd != -1);

    if (lib->callbacks.add_port == NULL) {
        new_port->ready = ready;

    } else {
        /* Deferred until the add_port callback has run (below). */
        new_port->ready = 0;
    }

    /* Reference ownership transferred to new_port->process. */
    process = NULL;

unlock:

    pthread_mutex_unlock(&lib->mutex);

    if (nxt_slow_path(process != NULL)) {
        nxt_unit_process_release(process);
    }

    if (lib->callbacks.add_port != NULL && new_port != NULL && ready) {
        lib->callbacks.add_port(ctx, &new_port->port);

        nxt_queue_init(&awaiting_req);

        pthread_mutex_lock(&lib->mutex);

        new_port->ready = 1;

        if (!nxt_queue_is_empty(&new_port->awaiting_req)) {
            nxt_queue_add(&awaiting_req, &new_port->awaiting_req);
            nxt_queue_init(&new_port->awaiting_req);
        }

        pthread_mutex_unlock(&lib->mutex);

        nxt_unit_process_awaiting_req(ctx, &awaiting_req);
    }

    return (new_port == NULL) ? NULL : &new_port->port;
}
%d,%d, data %p", 5676 (int) port_id->pid, (int) port_id->id, 5677 port->in_fd, port->out_fd, port->data); 5678 5679 return port; 5680} 5681 5682 5683static void 5684nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid) 5685{ 5686 nxt_unit_process_t *process; 5687 5688 pthread_mutex_lock(&lib->mutex); 5689 5690 process = nxt_unit_process_find(lib, pid, 1); 5691 if (nxt_slow_path(process == NULL)) { 5692 nxt_unit_debug(NULL, "remove_pid: process %d not found", (int) pid); 5693 5694 pthread_mutex_unlock(&lib->mutex); 5695 5696 return; 5697 } 5698 5699 nxt_unit_remove_process(lib, process); 5700 5701 if (lib->callbacks.remove_pid != NULL) { 5702 lib->callbacks.remove_pid(&lib->unit, pid); 5703 } 5704} 5705 5706 5707static void 5708nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process) 5709{ 5710 nxt_queue_t ports; 5711 nxt_unit_port_impl_t *port; 5712 5713 nxt_queue_init(&ports); 5714 5715 nxt_queue_add(&ports, &process->ports); 5716 5717 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) { 5718 5719 nxt_unit_remove_port_unsafe(lib, &port->port.id); 5720 5721 } nxt_queue_loop; 5722 5723 pthread_mutex_unlock(&lib->mutex); 5724 5725 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) { 5726 5727 nxt_queue_remove(&port->link); 5728 5729 if (lib->callbacks.remove_port != NULL) { 5730 lib->callbacks.remove_port(&lib->unit, NULL, &port->port); 5731 } 5732 5733 nxt_unit_port_release(&port->port); 5734 5735 } nxt_queue_loop; 5736 5737 nxt_unit_process_release(process); 5738} 5739 5740 5741static void 5742nxt_unit_quit(nxt_unit_ctx_t *ctx, uint8_t quit_param) 5743{ 5744 nxt_bool_t skip_graceful_broadcast, quit; 5745 nxt_unit_impl_t *lib; 5746 nxt_unit_ctx_impl_t *ctx_impl; 5747 nxt_unit_callbacks_t *cb; 5748 nxt_unit_request_info_t *req; 5749 nxt_unit_request_info_impl_t *req_impl; 5750 5751 struct { 5752 nxt_port_msg_t msg; 5753 uint8_t quit_param; 5754 } nxt_packed m; 5755 5756 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 5757 
ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 5758 5759 nxt_unit_debug(ctx, "quit: %d/%d/%d", (int) quit_param, ctx_impl->ready, 5760 ctx_impl->online); 5761 5762 if (nxt_slow_path(!ctx_impl->online)) { 5763 return; 5764 } 5765 5766 skip_graceful_broadcast = quit_param == NXT_QUIT_GRACEFUL 5767 && !ctx_impl->ready; 5768 5769 cb = &lib->callbacks; 5770 5771 if (nxt_fast_path(ctx_impl->ready)) { 5772 ctx_impl->ready = 0; 5773 5774 if (cb->remove_port != NULL) { 5775 cb->remove_port(&lib->unit, ctx, lib->shared_port); 5776 } 5777 } 5778 5779 if (quit_param == NXT_QUIT_GRACEFUL) { 5780 pthread_mutex_lock(&ctx_impl->mutex); 5781 5782 quit = nxt_queue_is_empty(&ctx_impl->active_req) 5783 && nxt_queue_is_empty(&ctx_impl->pending_rbuf) 5784 && ctx_impl->wait_items == 0; 5785 5786 pthread_mutex_unlock(&ctx_impl->mutex); 5787 5788 } else { 5789 quit = 1; 5790 ctx_impl->quit_param = NXT_QUIT_GRACEFUL; 5791 } 5792 5793 if (quit) { 5794 ctx_impl->online = 0; 5795 5796 if (cb->quit != NULL) { 5797 cb->quit(ctx); 5798 } 5799 5800 nxt_queue_each(req_impl, &ctx_impl->active_req, 5801 nxt_unit_request_info_impl_t, link) 5802 { 5803 req = &req_impl->req; 5804 5805 nxt_unit_req_warn(req, "active request on ctx quit"); 5806 5807 if (cb->close_handler) { 5808 nxt_unit_req_debug(req, "close_handler"); 5809 5810 cb->close_handler(req); 5811 5812 } else { 5813 nxt_unit_request_done(req, NXT_UNIT_ERROR); 5814 } 5815 5816 } nxt_queue_loop; 5817 5818 if (nxt_fast_path(ctx_impl->read_port != NULL)) { 5819 nxt_unit_remove_port(lib, ctx, &ctx_impl->read_port->id); 5820 } 5821 } 5822 5823 if (ctx != &lib->main_ctx.ctx || skip_graceful_broadcast) { 5824 return; 5825 } 5826 5827 memset(&m.msg, 0, sizeof(nxt_port_msg_t)); 5828 5829 m.msg.pid = lib->pid; 5830 m.msg.type = _NXT_PORT_MSG_QUIT; 5831 m.quit_param = quit_param; 5832 5833 pthread_mutex_lock(&lib->mutex); 5834 5835 nxt_queue_each(ctx_impl, &lib->contexts, nxt_unit_ctx_impl_t, link) { 5836 5837 if (ctx == &ctx_impl->ctx 5838 || 
ctx_impl->read_port == NULL 5839 || ctx_impl->read_port->out_fd == -1) 5840 { 5841 continue; 5842 } 5843 5844 (void) nxt_unit_port_send(ctx, ctx_impl->read_port, 5845 &m, sizeof(m), NULL); 5846 5847 } nxt_queue_loop; 5848 5849 pthread_mutex_unlock(&lib->mutex); 5850} 5851 5852 5853static int 5854nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) 5855{ 5856 ssize_t res; 5857 nxt_unit_impl_t *lib; 5858 nxt_unit_ctx_impl_t *ctx_impl; 5859 5860 struct { 5861 nxt_port_msg_t msg; 5862 nxt_port_msg_get_port_t get_port; 5863 } m; 5864 5865 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 5866 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 5867 5868 memset(&m.msg, 0, sizeof(nxt_port_msg_t)); 5869 5870 m.msg.pid = lib->pid; 5871 m.msg.reply_port = ctx_impl->read_port->id.id; 5872 m.msg.type = _NXT_PORT_MSG_GET_PORT; 5873 5874 m.get_port.id = port_id->id; 5875 m.get_port.pid = port_id->pid; 5876 5877 nxt_unit_debug(ctx, "get_port: %d %d", (int) port_id->pid, 5878 (int) port_id->id); 5879 5880 res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL); 5881 if (nxt_slow_path(res != sizeof(m))) { 5882 return NXT_UNIT_ERROR; 5883 } 5884 5885 return NXT_UNIT_OK; 5886} 5887 5888 5889static ssize_t 5890nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 5891 const void *buf, size_t buf_size, const nxt_send_oob_t *oob) 5892{ 5893 int notify; 5894 ssize_t ret; 5895 nxt_int_t rc; 5896 nxt_port_msg_t msg; 5897 nxt_unit_impl_t *lib; 5898 nxt_unit_port_impl_t *port_impl; 5899 5900 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 5901 5902 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 5903 if (port_impl->queue != NULL && (oob == NULL || oob->size == 0) 5904 && buf_size <= NXT_PORT_QUEUE_MSG_SIZE) 5905 { 5906 rc = nxt_port_queue_send(port_impl->queue, buf, buf_size, ¬ify); 5907 if (nxt_slow_path(rc != NXT_OK)) { 5908 nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow", 5909 (int) port->id.pid, 
(int) port->id.id); 5910 5911 return -1; 5912 } 5913 5914 nxt_unit_debug(ctx, "port{%d,%d} enqueue %d notify %d", 5915 (int) port->id.pid, (int) port->id.id, 5916 (int) buf_size, notify); 5917 5918 if (notify) { 5919 memcpy(&msg, buf, sizeof(nxt_port_msg_t)); 5920 5921 msg.type = _NXT_PORT_MSG_READ_QUEUE; 5922 5923 if (lib->callbacks.port_send == NULL) { 5924 ret = nxt_unit_sendmsg(ctx, port->out_fd, &msg, 5925 sizeof(nxt_port_msg_t), NULL); 5926 5927 nxt_unit_debug(ctx, "port{%d,%d} send %d read_queue", 5928 (int) port->id.pid, (int) port->id.id, 5929 (int) ret); 5930 5931 } else { 5932 ret = lib->callbacks.port_send(ctx, port, &msg, 5933 sizeof(nxt_port_msg_t), NULL, 0); 5934 5935 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d read_queue", 5936 (int) port->id.pid, (int) port->id.id, 5937 (int) ret); 5938 } 5939 5940 } 5941 5942 return buf_size; 5943 } 5944 5945 if (port_impl->queue != NULL) { 5946 msg.type = _NXT_PORT_MSG_READ_SOCKET; 5947 5948 rc = nxt_port_queue_send(port_impl->queue, &msg.type, 1, ¬ify); 5949 if (nxt_slow_path(rc != NXT_OK)) { 5950 nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow", 5951 (int) port->id.pid, (int) port->id.id); 5952 5953 return -1; 5954 } 5955 5956 nxt_unit_debug(ctx, "port{%d,%d} enqueue 1 read_socket notify %d", 5957 (int) port->id.pid, (int) port->id.id, notify); 5958 } 5959 5960 if (lib->callbacks.port_send != NULL) { 5961 ret = lib->callbacks.port_send(ctx, port, buf, buf_size, 5962 oob != NULL ? oob->buf : NULL, 5963 oob != NULL ? 
oob->size : 0); 5964 5965 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d", 5966 (int) port->id.pid, (int) port->id.id, 5967 (int) ret); 5968 5969 } else { 5970 ret = nxt_unit_sendmsg(ctx, port->out_fd, buf, buf_size, oob); 5971 5972 nxt_unit_debug(ctx, "port{%d,%d} sendmsg %d", 5973 (int) port->id.pid, (int) port->id.id, 5974 (int) ret); 5975 } 5976 5977 return ret; 5978} 5979 5980 5981static ssize_t 5982nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, 5983 const void *buf, size_t buf_size, const nxt_send_oob_t *oob) 5984{ 5985 int err; 5986 ssize_t n; 5987 struct iovec iov[1]; 5988 5989 iov[0].iov_base = (void *) buf; 5990 iov[0].iov_len = buf_size; 5991 5992retry: 5993 5994 n = nxt_sendmsg(fd, iov, 1, oob); 5995 5996 if (nxt_slow_path(n == -1)) { 5997 err = errno; 5998 5999 if (err == EINTR) { 6000 goto retry; 6001 } 6002 6003 /* 6004 * FIXME: This should be "alert" after router graceful shutdown 6005 * implementation. 6006 */ 6007 nxt_unit_warn(ctx, "sendmsg(%d, %d) failed: %s (%d)", 6008 fd, (int) buf_size, strerror(err), err); 6009 6010 } else { 6011 nxt_unit_debug(ctx, "sendmsg(%d, %d, %d): %d", fd, (int) buf_size, 6012 (oob != NULL ? 
(int) oob->size : 0), (int) n); 6013 } 6014 6015 return n; 6016} 6017 6018 6019static int 6020nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 6021 nxt_unit_read_buf_t *rbuf) 6022{ 6023 int res, read; 6024 nxt_unit_port_impl_t *port_impl; 6025 6026 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 6027 6028 read = 0; 6029 6030retry: 6031 6032 if (port_impl->from_socket > 0) { 6033 if (port_impl->socket_rbuf != NULL 6034 && port_impl->socket_rbuf->size > 0) 6035 { 6036 port_impl->from_socket--; 6037 6038 nxt_unit_rbuf_cpy(rbuf, port_impl->socket_rbuf); 6039 port_impl->socket_rbuf->size = 0; 6040 6041 nxt_unit_debug(ctx, "port{%d,%d} use suspended message %d", 6042 (int) port->id.pid, (int) port->id.id, 6043 (int) rbuf->size); 6044 6045 return NXT_UNIT_OK; 6046 } 6047 6048 } else { 6049 res = nxt_unit_port_queue_recv(port, rbuf); 6050 6051 if (res == NXT_UNIT_OK) { 6052 if (nxt_unit_is_read_socket(rbuf)) { 6053 port_impl->from_socket++; 6054 6055 nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d", 6056 (int) port->id.pid, (int) port->id.id, 6057 port_impl->from_socket); 6058 6059 goto retry; 6060 } 6061 6062 nxt_unit_debug(ctx, "port{%d,%d} dequeue %d", 6063 (int) port->id.pid, (int) port->id.id, 6064 (int) rbuf->size); 6065 6066 return NXT_UNIT_OK; 6067 } 6068 } 6069 6070 if (read) { 6071 return NXT_UNIT_AGAIN; 6072 } 6073 6074 res = nxt_unit_port_recv(ctx, port, rbuf); 6075 if (nxt_slow_path(res == NXT_UNIT_ERROR)) { 6076 return NXT_UNIT_ERROR; 6077 } 6078 6079 read = 1; 6080 6081 if (nxt_unit_is_read_queue(rbuf)) { 6082 nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue", 6083 (int) port->id.pid, (int) port->id.id, (int) rbuf->size); 6084 6085 goto retry; 6086 } 6087 6088 nxt_unit_debug(ctx, "port{%d,%d} recvmsg %d", 6089 (int) port->id.pid, (int) port->id.id, 6090 (int) rbuf->size); 6091 6092 if (res == NXT_UNIT_AGAIN) { 6093 return NXT_UNIT_AGAIN; 6094 } 6095 6096 if (port_impl->from_socket > 0) { 6097 
port_impl->from_socket--; 6098 6099 return NXT_UNIT_OK; 6100 } 6101 6102 nxt_unit_debug(ctx, "port{%d,%d} suspend message %d", 6103 (int) port->id.pid, (int) port->id.id, 6104 (int) rbuf->size); 6105 6106 if (port_impl->socket_rbuf == NULL) { 6107 port_impl->socket_rbuf = nxt_unit_read_buf_get(ctx); 6108 6109 if (nxt_slow_path(port_impl->socket_rbuf == NULL)) { 6110 return NXT_UNIT_ERROR; 6111 } 6112 6113 port_impl->socket_rbuf->size = 0; 6114 } 6115 6116 if (port_impl->socket_rbuf->size > 0) { 6117 nxt_unit_alert(ctx, "too many port socket messages"); 6118 6119 return NXT_UNIT_ERROR; 6120 } 6121 6122 nxt_unit_rbuf_cpy(port_impl->socket_rbuf, rbuf); 6123 6124 rbuf->oob.size = 0; 6125 6126 goto retry; 6127} 6128 6129 6130nxt_inline void 6131nxt_unit_rbuf_cpy(nxt_unit_read_buf_t *dst, nxt_unit_read_buf_t *src) 6132{ 6133 memcpy(dst->buf, src->buf, src->size); 6134 dst->size = src->size; 6135 dst->oob.size = src->oob.size; 6136 memcpy(dst->oob.buf, src->oob.buf, src->oob.size); 6137} 6138 6139 6140static int 6141nxt_unit_shared_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 6142 nxt_unit_read_buf_t *rbuf) 6143{ 6144 int res; 6145 nxt_unit_port_impl_t *port_impl; 6146 6147 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 6148 6149retry: 6150 6151 res = nxt_unit_app_queue_recv(ctx, port, rbuf); 6152 6153 if (res == NXT_UNIT_OK) { 6154 return NXT_UNIT_OK; 6155 } 6156 6157 if (res == NXT_UNIT_AGAIN) { 6158 res = nxt_unit_port_recv(ctx, port, rbuf); 6159 if (nxt_slow_path(res == NXT_UNIT_ERROR)) { 6160 return NXT_UNIT_ERROR; 6161 } 6162 6163 if (nxt_unit_is_read_queue(rbuf)) { 6164 nxt_app_queue_notification_received(port_impl->queue); 6165 6166 nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue", 6167 (int) port->id.pid, (int) port->id.id, (int) rbuf->size); 6168 6169 goto retry; 6170 } 6171 } 6172 6173 return res; 6174} 6175 6176 6177static int 6178nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 6179 nxt_unit_read_buf_t *rbuf) 6180{ 
6181 int fd, err; 6182 size_t oob_size; 6183 struct iovec iov[1]; 6184 nxt_unit_impl_t *lib; 6185 6186 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 6187 6188 if (lib->callbacks.port_recv != NULL) { 6189 oob_size = sizeof(rbuf->oob.buf); 6190 6191 rbuf->size = lib->callbacks.port_recv(ctx, port, 6192 rbuf->buf, sizeof(rbuf->buf), 6193 rbuf->oob.buf, &oob_size); 6194 6195 nxt_unit_debug(ctx, "port{%d,%d} recvcb %d", 6196 (int) port->id.pid, (int) port->id.id, (int) rbuf->size); 6197 6198 if (nxt_slow_path(rbuf->size < 0)) { 6199 return NXT_UNIT_ERROR; 6200 } 6201 6202 rbuf->oob.size = oob_size; 6203 return NXT_UNIT_OK; 6204 } 6205 6206 iov[0].iov_base = rbuf->buf; 6207 iov[0].iov_len = sizeof(rbuf->buf); 6208 6209 fd = port->in_fd; 6210 6211retry: 6212 6213 rbuf->size = nxt_recvmsg(fd, iov, 1, &rbuf->oob); 6214 6215 if (nxt_slow_path(rbuf->size == -1)) { 6216 err = errno; 6217 6218 if (err == EINTR) { 6219 goto retry; 6220 } 6221 6222 if (err == EAGAIN) { 6223 nxt_unit_debug(ctx, "recvmsg(%d) failed: %s (%d)", 6224 fd, strerror(err), err); 6225 6226 return NXT_UNIT_AGAIN; 6227 } 6228 6229 nxt_unit_alert(ctx, "recvmsg(%d) failed: %s (%d)", 6230 fd, strerror(err), err); 6231 6232 return NXT_UNIT_ERROR; 6233 } 6234 6235 nxt_unit_debug(ctx, "recvmsg(%d): %d", fd, (int) rbuf->size); 6236 6237 return NXT_UNIT_OK; 6238} 6239 6240 6241static int 6242nxt_unit_port_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf) 6243{ 6244 nxt_unit_port_impl_t *port_impl; 6245 6246 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 6247 6248 rbuf->size = nxt_port_queue_recv(port_impl->queue, rbuf->buf); 6249 6250 return (rbuf->size == -1) ? 
NXT_UNIT_AGAIN : NXT_UNIT_OK; 6251} 6252 6253 6254static int 6255nxt_unit_app_queue_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, 6256 nxt_unit_read_buf_t *rbuf) 6257{ 6258 uint32_t cookie; 6259 nxt_port_msg_t *port_msg; 6260 nxt_app_queue_t *queue; 6261 nxt_unit_impl_t *lib; 6262 nxt_unit_port_impl_t *port_impl; 6263 6264 struct { 6265 nxt_port_msg_t msg; 6266 uint8_t quit_param; 6267 } nxt_packed m; 6268 6269 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); 6270 queue = port_impl->queue; 6271 6272retry: 6273 6274 rbuf->size = nxt_app_queue_recv(queue, rbuf->buf, &cookie); 6275 6276 nxt_unit_debug(NULL, "app_queue_recv: %d", (int) rbuf->size); 6277 6278 if (rbuf->size >= (ssize_t) sizeof(nxt_port_msg_t)) { 6279 port_msg = (nxt_port_msg_t *) rbuf->buf; 6280 6281 if (nxt_app_queue_cancel(queue, cookie, port_msg->stream)) { 6282 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 6283 6284 if (lib->request_limit != 0) { 6285 nxt_atomic_fetch_add(&lib->request_count, 1); 6286 6287 if (nxt_slow_path(lib->request_count >= lib->request_limit)) { 6288 nxt_unit_debug(ctx, "request limit reached"); 6289 6290 memset(&m.msg, 0, sizeof(nxt_port_msg_t)); 6291 6292 m.msg.pid = lib->pid; 6293 m.msg.type = _NXT_PORT_MSG_QUIT; 6294 m.quit_param = NXT_QUIT_GRACEFUL; 6295 6296 (void) nxt_unit_port_send(ctx, lib->main_ctx.read_port, 6297 &m, sizeof(m), NULL); 6298 } 6299 } 6300 6301 return NXT_UNIT_OK; 6302 } 6303 6304 nxt_unit_debug(NULL, "app_queue_recv: message cancelled"); 6305 6306 goto retry; 6307 } 6308 6309 return (rbuf->size == -1) ? 
NXT_UNIT_AGAIN : NXT_UNIT_OK; 6310} 6311 6312 6313nxt_inline int 6314nxt_unit_close(int fd) 6315{ 6316 int res; 6317 6318 res = close(fd); 6319 6320 if (nxt_slow_path(res == -1)) { 6321 nxt_unit_alert(NULL, "close(%d) failed: %s (%d)", 6322 fd, strerror(errno), errno); 6323 6324 } else { 6325 nxt_unit_debug(NULL, "close(%d): %d", fd, res); 6326 } 6327 6328 return res; 6329} 6330 6331 6332static int 6333nxt_unit_fd_blocking(int fd) 6334{ 6335 int nb; 6336 6337 nb = 0; 6338 6339 if (nxt_slow_path(ioctl(fd, FIONBIO, &nb) == -1)) { 6340 nxt_unit_alert(NULL, "ioctl(%d, FIONBIO, 0) failed: %s (%d)", 6341 fd, strerror(errno), errno); 6342 6343 return NXT_UNIT_ERROR; 6344 } 6345 6346 return NXT_UNIT_OK; 6347} 6348 6349 6350static nxt_int_t 6351nxt_unit_port_hash_test(nxt_lvlhsh_query_t *lhq, void *data) 6352{ 6353 nxt_unit_port_t *port; 6354 nxt_unit_port_hash_id_t *port_id; 6355 6356 port = data; 6357 port_id = (nxt_unit_port_hash_id_t *) lhq->key.start; 6358 6359 if (lhq->key.length == sizeof(nxt_unit_port_hash_id_t) 6360 && port_id->pid == port->id.pid 6361 && port_id->id == port->id.id) 6362 { 6363 return NXT_OK; 6364 } 6365 6366 return NXT_DECLINED; 6367} 6368 6369 6370static const nxt_lvlhsh_proto_t lvlhsh_ports_proto nxt_aligned(64) = { 6371 NXT_LVLHSH_DEFAULT, 6372 nxt_unit_port_hash_test, 6373 nxt_unit_lvlhsh_alloc, 6374 nxt_unit_lvlhsh_free, 6375}; 6376 6377 6378static inline void 6379nxt_unit_port_hash_lhq(nxt_lvlhsh_query_t *lhq, 6380 nxt_unit_port_hash_id_t *port_hash_id, 6381 nxt_unit_port_id_t *port_id) 6382{ 6383 port_hash_id->pid = port_id->pid; 6384 port_hash_id->id = port_id->id; 6385 6386 if (nxt_fast_path(port_id->hash != 0)) { 6387 lhq->key_hash = port_id->hash; 6388 6389 } else { 6390 lhq->key_hash = nxt_murmur_hash2(port_hash_id, sizeof(*port_hash_id)); 6391 6392 port_id->hash = lhq->key_hash; 6393 6394 nxt_unit_debug(NULL, "calculate hash for port_id (%d, %d): %04X", 6395 (int) port_id->pid, (int) port_id->id, 6396 (int) port_id->hash); 6397 } 6398 
6399 lhq->key.length = sizeof(nxt_unit_port_hash_id_t); 6400 lhq->key.start = (u_char *) port_hash_id; 6401 lhq->proto = &lvlhsh_ports_proto; 6402 lhq->pool = NULL; 6403} 6404 6405 6406static int 6407nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port) 6408{ 6409 nxt_int_t res; 6410 nxt_lvlhsh_query_t lhq; 6411 nxt_unit_port_hash_id_t port_hash_id; 6412 6413 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, &port->id); 6414 lhq.replace = 0; 6415 lhq.value = port; 6416 6417 res = nxt_lvlhsh_insert(port_hash, &lhq); 6418 6419 switch (res) { 6420 6421 case NXT_OK: 6422 return NXT_UNIT_OK; 6423 6424 default: 6425 return NXT_UNIT_ERROR; 6426 } 6427} 6428 6429 6430static nxt_unit_port_t * 6431nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, 6432 int remove) 6433{ 6434 nxt_int_t res; 6435 nxt_lvlhsh_query_t lhq; 6436 nxt_unit_port_hash_id_t port_hash_id; 6437 6438 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, port_id); 6439 6440 if (remove) { 6441 res = nxt_lvlhsh_delete(port_hash, &lhq); 6442 6443 } else { 6444 res = nxt_lvlhsh_find(port_hash, &lhq); 6445 } 6446 6447 switch (res) { 6448 6449 case NXT_OK: 6450 if (!remove) { 6451 nxt_unit_port_use(lhq.value); 6452 } 6453 6454 return lhq.value; 6455 6456 default: 6457 return NULL; 6458 } 6459} 6460 6461 6462static nxt_int_t 6463nxt_unit_request_hash_test(nxt_lvlhsh_query_t *lhq, void *data) 6464{ 6465 return NXT_OK; 6466} 6467 6468 6469static const nxt_lvlhsh_proto_t lvlhsh_requests_proto nxt_aligned(64) = { 6470 NXT_LVLHSH_DEFAULT, 6471 nxt_unit_request_hash_test, 6472 nxt_unit_lvlhsh_alloc, 6473 nxt_unit_lvlhsh_free, 6474}; 6475 6476 6477static int 6478nxt_unit_request_hash_add(nxt_unit_ctx_t *ctx, 6479 nxt_unit_request_info_t *req) 6480{ 6481 uint32_t *stream; 6482 nxt_int_t res; 6483 nxt_lvlhsh_query_t lhq; 6484 nxt_unit_ctx_impl_t *ctx_impl; 6485 nxt_unit_request_info_impl_t *req_impl; 6486 6487 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 6488 if 
(req_impl->in_hash) { 6489 return NXT_UNIT_OK; 6490 } 6491 6492 stream = &req_impl->stream; 6493 6494 lhq.key_hash = nxt_murmur_hash2(stream, sizeof(*stream)); 6495 lhq.key.length = sizeof(*stream); 6496 lhq.key.start = (u_char *) stream; 6497 lhq.proto = &lvlhsh_requests_proto; 6498 lhq.pool = NULL; 6499 lhq.replace = 0; 6500 lhq.value = req_impl; 6501 6502 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 6503 6504 pthread_mutex_lock(&ctx_impl->mutex); 6505 6506 res = nxt_lvlhsh_insert(&ctx_impl->requests, &lhq); 6507 6508 pthread_mutex_unlock(&ctx_impl->mutex); 6509 6510 switch (res) { 6511 6512 case NXT_OK: 6513 req_impl->in_hash = 1; 6514 return NXT_UNIT_OK; 6515 6516 default: 6517 return NXT_UNIT_ERROR; 6518 } 6519} 6520 6521 6522static nxt_unit_request_info_t * 6523nxt_unit_request_hash_find(nxt_unit_ctx_t *ctx, uint32_t stream, int remove) 6524{ 6525 nxt_int_t res; 6526 nxt_lvlhsh_query_t lhq; 6527 nxt_unit_ctx_impl_t *ctx_impl; 6528 nxt_unit_request_info_impl_t *req_impl; 6529 6530 lhq.key_hash = nxt_murmur_hash2(&stream, sizeof(stream)); 6531 lhq.key.length = sizeof(stream); 6532 lhq.key.start = (u_char *) &stream; 6533 lhq.proto = &lvlhsh_requests_proto; 6534 lhq.pool = NULL; 6535 6536 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 6537 6538 pthread_mutex_lock(&ctx_impl->mutex); 6539 6540 if (remove) { 6541 res = nxt_lvlhsh_delete(&ctx_impl->requests, &lhq); 6542 6543 } else { 6544 res = nxt_lvlhsh_find(&ctx_impl->requests, &lhq); 6545 } 6546 6547 pthread_mutex_unlock(&ctx_impl->mutex); 6548 6549 switch (res) { 6550 6551 case NXT_OK: 6552 req_impl = nxt_container_of(lhq.value, nxt_unit_request_info_impl_t, 6553 req); 6554 if (remove) { 6555 req_impl->in_hash = 0; 6556 } 6557 6558 return lhq.value; 6559 6560 default: 6561 return NULL; 6562 } 6563} 6564 6565 6566void 6567nxt_unit_log(nxt_unit_ctx_t *ctx, int level, const char *fmt, ...) 
6568{ 6569 int log_fd, n; 6570 char msg[NXT_MAX_ERROR_STR], *p, *end; 6571 pid_t pid; 6572 va_list ap; 6573 nxt_unit_impl_t *lib; 6574 6575 if (nxt_fast_path(ctx != NULL)) { 6576 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 6577 6578 pid = lib->pid; 6579 log_fd = lib->log_fd; 6580 6581 } else { 6582 pid = nxt_unit_pid; 6583 log_fd = STDERR_FILENO; 6584 } 6585 6586 p = msg; 6587 end = p + sizeof(msg) - 1; 6588 6589 p = nxt_unit_snprint_prefix(p, end, pid, level); 6590 6591 va_start(ap, fmt); 6592 p += vsnprintf(p, end - p, fmt, ap); 6593 va_end(ap); 6594 6595 if (nxt_slow_path(p > end)) { 6596 memcpy(end - 5, "[...]", 5); 6597 p = end; 6598 } 6599 6600 *p++ = '\n'; 6601 6602 n = write(log_fd, msg, p - msg); 6603 if (nxt_slow_path(n < 0)) { 6604 fprintf(stderr, "Failed to write log: %.*s", (int) (p - msg), msg); 6605 } 6606} 6607 6608 6609void 6610nxt_unit_req_log(nxt_unit_request_info_t *req, int level, const char *fmt, ...) 6611{ 6612 int log_fd, n; 6613 char msg[NXT_MAX_ERROR_STR], *p, *end; 6614 pid_t pid; 6615 va_list ap; 6616 nxt_unit_impl_t *lib; 6617 nxt_unit_request_info_impl_t *req_impl; 6618 6619 if (nxt_fast_path(req != NULL)) { 6620 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit); 6621 6622 pid = lib->pid; 6623 log_fd = lib->log_fd; 6624 6625 } else { 6626 pid = nxt_unit_pid; 6627 log_fd = STDERR_FILENO; 6628 } 6629 6630 p = msg; 6631 end = p + sizeof(msg) - 1; 6632 6633 p = nxt_unit_snprint_prefix(p, end, pid, level); 6634 6635 if (nxt_fast_path(req != NULL)) { 6636 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 6637 6638 p += snprintf(p, end - p, "#%"PRIu32": ", req_impl->stream); 6639 } 6640 6641 va_start(ap, fmt); 6642 p += vsnprintf(p, end - p, fmt, ap); 6643 va_end(ap); 6644 6645 if (nxt_slow_path(p > end)) { 6646 memcpy(end - 5, "[...]", 5); 6647 p = end; 6648 } 6649 6650 *p++ = '\n'; 6651 6652 n = write(log_fd, msg, p - msg); 6653 if (nxt_slow_path(n < 0)) { 6654 fprintf(stderr, "Failed to write 
log: %.*s", (int) (p - msg), msg); 6655 } 6656} 6657 6658 6659static const char * nxt_unit_log_levels[] = { 6660 "alert", 6661 "error", 6662 "warn", 6663 "notice", 6664 "info", 6665 "debug", 6666}; 6667 6668 6669static char *
|