/*
 * Copyright (C) NGINX, Inc.
 */

#include <stdlib.h>

#include "nxt_main.h"
#include "nxt_port_memory_int.h"

#include "nxt_unit.h"
#include "nxt_unit_request.h"
#include "nxt_unit_response.h"
#include "nxt_unit_websocket.h"

#include "nxt_websocket.h"

#if (NXT_HAVE_MEMFD_CREATE)
#include <linux/memfd.h>
#endif

/*
 * Responses up to NXT_UNIT_MAX_PLAIN_SIZE bytes are sent inline in the
 * port message instead of through shared memory; the local buffer must
 * also hold the port message header itself.
 */
#define NXT_UNIT_MAX_PLAIN_SIZE  1024
#define NXT_UNIT_LOCAL_BUF_SIZE  \
    (NXT_UNIT_MAX_PLAIN_SIZE + sizeof(nxt_port_msg_t))

/* Opaque implementation structures; definitions follow below. */
typedef struct nxt_unit_impl_s                  nxt_unit_impl_t;
typedef struct nxt_unit_mmap_s                  nxt_unit_mmap_t;
typedef struct nxt_unit_mmaps_s                 nxt_unit_mmaps_t;
typedef struct nxt_unit_process_s               nxt_unit_process_t;
typedef struct nxt_unit_mmap_buf_s              nxt_unit_mmap_buf_t;
typedef struct nxt_unit_recv_msg_s              nxt_unit_recv_msg_t;
typedef struct nxt_unit_read_buf_s              nxt_unit_read_buf_t;
typedef struct nxt_unit_ctx_impl_s              nxt_unit_ctx_impl_t;
typedef struct nxt_unit_port_impl_s             nxt_unit_port_impl_t;
typedef struct nxt_unit_request_info_impl_s     nxt_unit_request_info_impl_t;
typedef struct nxt_unit_websocket_frame_impl_s  nxt_unit_websocket_frame_impl_t;

/* Library and context setup. */
static nxt_unit_impl_t *nxt_unit_create(nxt_unit_init_t *init);
static int nxt_unit_ctx_init(nxt_unit_impl_t *lib,
    nxt_unit_ctx_impl_t *ctx_impl, void *data);

/* Intrusive doubly-linked buffer list primitives. */
nxt_inline void nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head,
    nxt_unit_mmap_buf_t *mmap_buf);
nxt_inline void nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev,
    nxt_unit_mmap_buf_t *mmap_buf);
nxt_inline void nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf);

/* Startup handshake with the Unit master process. */
static int nxt_unit_read_env(nxt_unit_port_t *ready_port,
    nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream,
    uint32_t *shm_limit);
static int nxt_unit_ready(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id,
    uint32_t stream);

/* Handlers for individual port message types. */
static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx,
    nxt_unit_recv_msg_t *recv_msg);
static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx,
    nxt_unit_recv_msg_t *recv_msg);
static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx,
    nxt_unit_recv_msg_t *recv_msg);
static int nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx);

/* Request-info and websocket-frame object pools. */
static nxt_unit_request_info_impl_t *nxt_unit_request_info_get(
    nxt_unit_ctx_t *ctx);
static void nxt_unit_request_info_release(nxt_unit_request_info_t *req);
static void nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req);
static nxt_unit_websocket_frame_impl_t *nxt_unit_websocket_frame_get(
    nxt_unit_ctx_t *ctx);
static void nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws);
static void nxt_unit_websocket_frame_free(nxt_unit_websocket_frame_impl_t *ws);

/* Shared-memory buffer management. */
static nxt_unit_process_t *nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx,
    nxt_unit_recv_msg_t *recv_msg);
static nxt_unit_mmap_buf_t *nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx);
static void nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf);
static int nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream,
    nxt_unit_mmap_buf_t *mmap_buf, int last);
static void nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf);
static void nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf);
static nxt_unit_read_buf_t *nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx);
static nxt_unit_read_buf_t *nxt_unit_read_buf_get_impl(
    nxt_unit_ctx_impl_t *ctx_impl);
static void nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
    nxt_unit_read_buf_t *rbuf);
static nxt_unit_mmap_buf_t *nxt_unit_request_preread(
    nxt_unit_request_info_t *req, size_t size);
static ssize_t nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst,
    size_t size);
static nxt_port_mmap_header_t *nxt_unit_mmap_get(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process, nxt_unit_port_id_t *port_id,
    nxt_chunk_id_t *c, int *n, int min_n);
static int nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id);
static int nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx);
static nxt_unit_mmap_t *nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i);
static nxt_port_mmap_header_t *nxt_unit_new_mmap(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, int n);
static int nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id,
    int fd);
static int nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, uint32_t size,
    uint32_t min_size, nxt_unit_mmap_buf_t *mmap_buf, char *local_buf);
static int nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd);

static void nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps);
static void nxt_unit_process_use(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process, int i);
static void nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps);
static nxt_port_mmap_header_t *nxt_unit_get_incoming_mmap(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process, uint32_t id);
static int nxt_unit_tracking_read(nxt_unit_ctx_t *ctx,
    nxt_unit_recv_msg_t *recv_msg);
static int nxt_unit_mmap_read(nxt_unit_ctx_t *ctx,
    nxt_unit_recv_msg_t *recv_msg);
static void nxt_unit_mmap_release(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process,
    nxt_port_mmap_header_t *hdr, void *start, uint32_t size);
static int nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid);

/* Process registry and port plumbing. */
static nxt_unit_process_t *nxt_unit_process_get(nxt_unit_ctx_t *ctx,
    pid_t pid);
static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_ctx_t *ctx,
    pid_t pid, int remove);
static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib);
static void nxt_unit_read_buf(nxt_unit_ctx_t *ctx,
    nxt_unit_read_buf_t *rbuf);
static int nxt_unit_create_port(nxt_unit_ctx_t *ctx,
    nxt_unit_port_id_t *port_id, int *fd);

static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst,
    nxt_unit_port_id_t *new_port, int fd);

126static void nxt_unit_remove_port_unsafe(nxt_unit_ctx_t *ctx, 127 nxt_unit_port_id_t *port_id, nxt_unit_port_t *r_port, 128 nxt_unit_process_t **process); 129static void nxt_unit_remove_process(nxt_unit_ctx_t *ctx, 130 nxt_unit_process_t *process); 131 132static ssize_t nxt_unit_port_send_default(nxt_unit_ctx_t *ctx, 133 nxt_unit_port_id_t *port_id, const void *buf, size_t buf_size, 134 const void *oob, size_t oob_size); 135static ssize_t nxt_unit_port_recv_default(nxt_unit_ctx_t *ctx, 136 nxt_unit_port_id_t *port_id, void *buf, size_t buf_size, 137 void *oob, size_t oob_size); 138 139static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, 140 nxt_unit_port_t *port); 141static nxt_unit_port_impl_t *nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, 142 nxt_unit_port_id_t *port_id, int remove); 143 144static int nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, 145 nxt_unit_request_info_impl_t *req_impl); 146static nxt_unit_request_info_impl_t *nxt_unit_request_hash_find( 147 nxt_lvlhsh_t *request_hash, uint32_t stream, int remove); 148 149static char * nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level); 150 151 152struct nxt_unit_mmap_buf_s { 153 nxt_unit_buf_t buf; 154 155 nxt_unit_mmap_buf_t *next; 156 nxt_unit_mmap_buf_t **prev; 157 158 nxt_port_mmap_header_t *hdr; 159 nxt_unit_port_id_t port_id; 160 nxt_unit_request_info_t *req; 161 nxt_unit_ctx_impl_t *ctx_impl; 162 nxt_unit_process_t *process; 163 char *free_ptr; 164 char *plain_ptr; 165}; 166 167 168struct nxt_unit_recv_msg_s { 169 uint32_t stream; 170 nxt_pid_t pid; 171 nxt_port_id_t reply_port; 172 173 uint8_t last; /* 1 bit */ 174 uint8_t mmap; /* 1 bit */ 175 176 void *start; 177 uint32_t size; 178 179 int fd; 180 nxt_unit_process_t *process; 181 182 nxt_unit_mmap_buf_t *incoming_buf; 183}; 184 185 186typedef enum { 187 NXT_UNIT_RS_START = 0, 188 NXT_UNIT_RS_RESPONSE_INIT, 189 NXT_UNIT_RS_RESPONSE_HAS_CONTENT, 190 NXT_UNIT_RS_RESPONSE_SENT, 191 NXT_UNIT_RS_RELEASED, 192} 
nxt_unit_req_state_t; 193 194 195struct nxt_unit_request_info_impl_s { 196 nxt_unit_request_info_t req; 197 198 uint32_t stream; 199 200 nxt_unit_process_t *process; 201 202 nxt_unit_mmap_buf_t *outgoing_buf; 203 nxt_unit_mmap_buf_t *incoming_buf; 204 205 nxt_unit_req_state_t state; 206 uint8_t websocket; 207 208 nxt_queue_link_t link; 209 210 char extra_data[]; 211}; 212 213 214struct nxt_unit_websocket_frame_impl_s { 215 nxt_unit_websocket_frame_t ws; 216 217 nxt_unit_mmap_buf_t *buf; 218 219 nxt_queue_link_t link; 220 221 nxt_unit_ctx_impl_t *ctx_impl; 222}; 223 224 225struct nxt_unit_read_buf_s { 226 nxt_unit_read_buf_t *next; 227 ssize_t size; 228 char buf[16384]; 229 char oob[256]; 230}; 231 232 233struct nxt_unit_ctx_impl_s { 234 nxt_unit_ctx_t ctx; 235 236 pthread_mutex_t mutex; 237 238 nxt_unit_port_id_t read_port_id; 239 int read_port_fd; 240 241 nxt_queue_link_t link; 242 243 nxt_unit_mmap_buf_t *free_buf; 244 245 /* of nxt_unit_request_info_impl_t */ 246 nxt_queue_t free_req; 247 248 /* of nxt_unit_websocket_frame_impl_t */ 249 nxt_queue_t free_ws; 250 251 /* of nxt_unit_request_info_impl_t */ 252 nxt_queue_t active_req; 253 254 /* of nxt_unit_request_info_impl_t */ 255 nxt_lvlhsh_t requests; 256 257 nxt_unit_read_buf_t *pending_read_head; 258 nxt_unit_read_buf_t **pending_read_tail; 259 nxt_unit_read_buf_t *free_read_buf; 260 261 nxt_unit_mmap_buf_t ctx_buf[2]; 262 nxt_unit_read_buf_t ctx_read_buf; 263 264 nxt_unit_request_info_impl_t req; 265}; 266 267 268struct nxt_unit_impl_s { 269 nxt_unit_t unit; 270 nxt_unit_callbacks_t callbacks; 271 272 uint32_t request_data_size; 273 uint32_t shm_mmap_limit; 274 275 pthread_mutex_t mutex; 276 277 nxt_lvlhsh_t processes; /* of nxt_unit_process_t */ 278 nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */ 279 280 nxt_unit_port_id_t ready_port_id; 281 282 nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ 283 284 pid_t pid; 285 int log_fd; 286 int online; 287 288 nxt_unit_ctx_impl_t main_ctx; 289}; 290 291 292struct 
nxt_unit_port_impl_s { 293 nxt_unit_port_t port; 294 295 nxt_queue_link_t link; 296 nxt_unit_process_t *process; 297}; 298 299 300struct nxt_unit_mmap_s { 301 nxt_port_mmap_header_t *hdr; 302}; 303 304 305struct nxt_unit_mmaps_s { 306 pthread_mutex_t mutex; 307 uint32_t size; 308 uint32_t cap; 309 nxt_atomic_t allocated_chunks; 310 nxt_unit_mmap_t *elts; 311}; 312 313 314struct nxt_unit_process_s { 315 pid_t pid; 316 317 nxt_queue_t ports; 318 319 nxt_unit_mmaps_t incoming; 320 nxt_unit_mmaps_t outgoing; 321 322 nxt_unit_impl_t *lib; 323 324 nxt_atomic_t use_count; 325 326 uint32_t next_port_id; 327}; 328 329 330/* Explicitly using 32 bit types to avoid possible alignment. */ 331typedef struct { 332 int32_t pid; 333 uint32_t id; 334} nxt_unit_port_hash_id_t; 335 336 337nxt_unit_ctx_t * 338nxt_unit_init(nxt_unit_init_t *init) 339{ 340 int rc; 341 uint32_t ready_stream, shm_limit; 342 nxt_unit_ctx_t *ctx; 343 nxt_unit_impl_t *lib; 344 nxt_unit_port_t ready_port, read_port; 345 346 lib = nxt_unit_create(init); 347 if (nxt_slow_path(lib == NULL)) { 348 return NULL; 349 } 350 351 if (init->ready_port.id.pid != 0 352 && init->ready_stream != 0 353 && init->read_port.id.pid != 0) 354 { 355 ready_port = init->ready_port; 356 ready_stream = init->ready_stream; 357 read_port = init->read_port; 358 lib->log_fd = init->log_fd; 359 360 nxt_unit_port_id_init(&ready_port.id, ready_port.id.pid, 361 ready_port.id.id); 362 nxt_unit_port_id_init(&read_port.id, read_port.id.pid, 363 read_port.id.id); 364 365 } else { 366 rc = nxt_unit_read_env(&ready_port, &read_port, &lib->log_fd, 367 &ready_stream, &shm_limit); 368 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 369 goto fail; 370 } 371 372 lib->shm_mmap_limit = (shm_limit + PORT_MMAP_DATA_SIZE - 1) 373 / PORT_MMAP_DATA_SIZE; 374 } 375 376 if (nxt_slow_path(lib->shm_mmap_limit < 1)) { 377 lib->shm_mmap_limit = 1; 378 } 379 380 lib->pid = read_port.id.pid; 381 ctx = &lib->main_ctx.ctx; 382 383 rc = lib->callbacks.add_port(ctx, &ready_port); 
384 if (rc != NXT_UNIT_OK) { 385 nxt_unit_alert(NULL, "failed to add ready_port"); 386 387 goto fail; 388 } 389 390 rc = lib->callbacks.add_port(ctx, &read_port); 391 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 392 nxt_unit_alert(NULL, "failed to add read_port"); 393 394 goto fail; 395 } 396 397 lib->main_ctx.read_port_id = read_port.id; 398 lib->ready_port_id = ready_port.id; 399 400 rc = nxt_unit_ready(ctx, &ready_port.id, ready_stream); 401 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 402 nxt_unit_alert(NULL, "failed to send READY message"); 403 404 goto fail; 405 } 406 407 return ctx; 408 409fail: 410 411 free(lib); 412 413 return NULL; 414} 415 416 417static nxt_unit_impl_t * 418nxt_unit_create(nxt_unit_init_t *init) 419{ 420 int rc; 421 nxt_unit_impl_t *lib; 422 nxt_unit_callbacks_t *cb; 423 424 lib = malloc(sizeof(nxt_unit_impl_t) + init->request_data_size); 425 if (nxt_slow_path(lib == NULL)) { 426 nxt_unit_alert(NULL, "failed to allocate unit struct"); 427 428 return NULL; 429 } 430 431 rc = pthread_mutex_init(&lib->mutex, NULL); 432 if (nxt_slow_path(rc != 0)) { 433 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc); 434 435 goto fail; 436 } 437 438 lib->unit.data = init->data; 439 lib->callbacks = init->callbacks; 440 441 lib->request_data_size = init->request_data_size; 442 lib->shm_mmap_limit = (init->shm_limit + PORT_MMAP_DATA_SIZE - 1) 443 / PORT_MMAP_DATA_SIZE; 444 445 lib->processes.slot = NULL; 446 lib->ports.slot = NULL; 447 448 lib->log_fd = STDERR_FILENO; 449 lib->online = 1; 450 451 nxt_queue_init(&lib->contexts); 452 453 rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data); 454 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 455 goto fail; 456 } 457 458 cb = &lib->callbacks; 459 460 if (cb->request_handler == NULL) { 461 nxt_unit_alert(NULL, "request_handler is NULL"); 462 463 goto fail; 464 } 465 466 if (cb->add_port == NULL) { 467 cb->add_port = nxt_unit_add_port; 468 } 469 470 if (cb->remove_port == NULL) { 471 cb->remove_port = 
nxt_unit_remove_port; 472 } 473 474 if (cb->remove_pid == NULL) { 475 cb->remove_pid = nxt_unit_remove_pid; 476 } 477 478 if (cb->quit == NULL) { 479 cb->quit = nxt_unit_quit; 480 } 481 482 if (cb->port_send == NULL) { 483 cb->port_send = nxt_unit_port_send_default; 484 } 485 486 if (cb->port_recv == NULL) { 487 cb->port_recv = nxt_unit_port_recv_default; 488 } 489 490 return lib; 491 492fail: 493 494 free(lib); 495 496 return NULL; 497} 498 499 500static int 501nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, 502 void *data) 503{ 504 int rc; 505 506 ctx_impl->ctx.data = data; 507 ctx_impl->ctx.unit = &lib->unit; 508 509 nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link); 510 511 rc = pthread_mutex_init(&ctx_impl->mutex, NULL); 512 if (nxt_slow_path(rc != 0)) { 513 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc); 514 515 return NXT_UNIT_ERROR; 516 } 517 518 nxt_queue_init(&ctx_impl->free_req); 519 nxt_queue_init(&ctx_impl->free_ws); 520 nxt_queue_init(&ctx_impl->active_req); 521 522 ctx_impl->free_buf = NULL; 523 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]); 524 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[0]); 525 526 nxt_queue_insert_tail(&ctx_impl->free_req, &ctx_impl->req.link); 527 528 ctx_impl->pending_read_head = NULL; 529 ctx_impl->pending_read_tail = &ctx_impl->pending_read_head; 530 ctx_impl->free_read_buf = &ctx_impl->ctx_read_buf; 531 ctx_impl->ctx_read_buf.next = NULL; 532 533 ctx_impl->req.req.ctx = &ctx_impl->ctx; 534 ctx_impl->req.req.unit = &lib->unit; 535 536 ctx_impl->read_port_fd = -1; 537 ctx_impl->requests.slot = 0; 538 539 return NXT_UNIT_OK; 540} 541 542 543nxt_inline void 544nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head, 545 nxt_unit_mmap_buf_t *mmap_buf) 546{ 547 mmap_buf->next = *head; 548 549 if (mmap_buf->next != NULL) { 550 mmap_buf->next->prev = &mmap_buf->next; 551 } 552 553 *head = mmap_buf; 554 mmap_buf->prev = head; 555} 556 557 558nxt_inline 
void 559nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev, 560 nxt_unit_mmap_buf_t *mmap_buf) 561{ 562 while (*prev != NULL) { 563 prev = &(*prev)->next; 564 } 565 566 nxt_unit_mmap_buf_insert(prev, mmap_buf); 567} 568 569 570nxt_inline void 571nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf) 572{ 573 nxt_unit_mmap_buf_t **prev; 574 575 prev = mmap_buf->prev; 576 577 if (mmap_buf->next != NULL) { 578 mmap_buf->next->prev = prev; 579 } 580 581 if (prev != NULL) { 582 *prev = mmap_buf->next; 583 } 584} 585 586 587static int 588nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *read_port, 589 int *log_fd, uint32_t *stream, uint32_t *shm_limit) 590{ 591 int rc; 592 int ready_fd, read_fd; 593 char *unit_init, *version_end; 594 long version_length; 595 int64_t ready_pid, read_pid; 596 uint32_t ready_stream, ready_id, read_id; 597 598 unit_init = getenv(NXT_UNIT_INIT_ENV); 599 if (nxt_slow_path(unit_init == NULL)) { 600 nxt_unit_alert(NULL, "%s is not in the current environment", 601 NXT_UNIT_INIT_ENV); 602 603 return NXT_UNIT_ERROR; 604 } 605 606 nxt_unit_debug(NULL, "%s='%s'", NXT_UNIT_INIT_ENV, unit_init); 607 608 version_length = nxt_length(NXT_VERSION); 609 610 version_end = strchr(unit_init, ';'); 611 if (version_end == NULL 612 || version_end - unit_init != version_length 613 || memcmp(unit_init, NXT_VERSION, version_length) != 0) 614 { 615 nxt_unit_alert(NULL, "version check error"); 616 617 return NXT_UNIT_ERROR; 618 } 619 620 rc = sscanf(version_end + 1, 621 "%"PRIu32";" 622 "%"PRId64",%"PRIu32",%d;" 623 "%"PRId64",%"PRIu32",%d;" 624 "%d,%"PRIu32, 625 &ready_stream, 626 &ready_pid, &ready_id, &ready_fd, 627 &read_pid, &read_id, &read_fd, 628 log_fd, shm_limit); 629 630 if (nxt_slow_path(rc != 9)) { 631 nxt_unit_alert(NULL, "failed to scan variables: %d", rc); 632 633 return NXT_UNIT_ERROR; 634 } 635 636 nxt_unit_port_id_init(&ready_port->id, (pid_t) ready_pid, ready_id); 637 638 ready_port->in_fd = -1; 639 ready_port->out_fd = 
ready_fd; 640 ready_port->data = NULL; 641 642 nxt_unit_port_id_init(&read_port->id, (pid_t) read_pid, read_id); 643 644 read_port->in_fd = read_fd; 645 read_port->out_fd = -1; 646 read_port->data = NULL; 647 648 *stream = ready_stream; 649 650 return NXT_UNIT_OK; 651} 652 653 654static int 655nxt_unit_ready(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 656 uint32_t stream) 657{ 658 ssize_t res; 659 nxt_port_msg_t msg; 660 nxt_unit_impl_t *lib; 661 662 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 663 664 msg.stream = stream; 665 msg.pid = lib->pid; 666 msg.reply_port = 0; 667 msg.type = _NXT_PORT_MSG_PROCESS_READY; 668 msg.last = 1; 669 msg.mmap = 0; 670 msg.nf = 0; 671 msg.mf = 0; 672 msg.tracking = 0; 673 674 res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0); 675 if (res != sizeof(msg)) { 676 return NXT_UNIT_ERROR; 677 } 678 679 return NXT_UNIT_OK; 680} 681 682 683int 684nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 685 void *buf, size_t buf_size, void *oob, size_t oob_size) 686{ 687 int rc; 688 pid_t pid; 689 struct cmsghdr *cm; 690 nxt_port_msg_t *port_msg; 691 nxt_unit_impl_t *lib; 692 nxt_unit_recv_msg_t recv_msg; 693 nxt_unit_callbacks_t *cb; 694 695 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 696 697 rc = NXT_UNIT_ERROR; 698 recv_msg.fd = -1; 699 recv_msg.process = NULL; 700 port_msg = buf; 701 cm = oob; 702 703 if (oob_size >= CMSG_SPACE(sizeof(int)) 704 && cm->cmsg_len == CMSG_LEN(sizeof(int)) 705 && cm->cmsg_level == SOL_SOCKET 706 && cm->cmsg_type == SCM_RIGHTS) 707 { 708 memcpy(&recv_msg.fd, CMSG_DATA(cm), sizeof(int)); 709 } 710 711 recv_msg.incoming_buf = NULL; 712 713 if (nxt_slow_path(buf_size < sizeof(nxt_port_msg_t))) { 714 nxt_unit_warn(ctx, "message too small (%d bytes)", (int) buf_size); 715 goto fail; 716 } 717 718 recv_msg.stream = port_msg->stream; 719 recv_msg.pid = port_msg->pid; 720 recv_msg.reply_port = port_msg->reply_port; 721 recv_msg.last = 
port_msg->last; 722 recv_msg.mmap = port_msg->mmap; 723 724 recv_msg.start = port_msg + 1; 725 recv_msg.size = buf_size - sizeof(nxt_port_msg_t); 726 727 if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) { 728 nxt_unit_warn(ctx, "#%"PRIu32": unknown message type (%d)", 729 port_msg->stream, (int) port_msg->type); 730 goto fail; 731 } 732 733 if (port_msg->tracking && nxt_unit_tracking_read(ctx, &recv_msg) == 0) { 734 rc = NXT_UNIT_OK; 735 736 goto fail; 737 } 738 739 /* Fragmentation is unsupported. */ 740 if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) { 741 nxt_unit_warn(ctx, "#%"PRIu32": fragmented message type (%d)", 742 port_msg->stream, (int) port_msg->type); 743 goto fail; 744 } 745 746 if (port_msg->mmap) { 747 if (nxt_unit_mmap_read(ctx, &recv_msg) != NXT_UNIT_OK) { 748 goto fail; 749 } 750 } 751 752 cb = &lib->callbacks; 753 754 switch (port_msg->type) { 755 756 case _NXT_PORT_MSG_QUIT: 757 nxt_unit_debug(ctx, "#%"PRIu32": quit", port_msg->stream); 758 759 cb->quit(ctx); 760 rc = NXT_UNIT_OK; 761 break; 762 763 case _NXT_PORT_MSG_NEW_PORT: 764 rc = nxt_unit_process_new_port(ctx, &recv_msg); 765 break; 766 767 case _NXT_PORT_MSG_CHANGE_FILE: 768 nxt_unit_debug(ctx, "#%"PRIu32": change_file: fd %d", 769 port_msg->stream, recv_msg.fd); 770 771 if (dup2(recv_msg.fd, lib->log_fd) == -1) { 772 nxt_unit_alert(ctx, "#%"PRIu32": dup2(%d, %d) failed: %s (%d)", 773 port_msg->stream, recv_msg.fd, lib->log_fd, 774 strerror(errno), errno); 775 776 goto fail; 777 } 778 779 rc = NXT_UNIT_OK; 780 break; 781 782 case _NXT_PORT_MSG_MMAP: 783 if (nxt_slow_path(recv_msg.fd < 0)) { 784 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap", 785 port_msg->stream, recv_msg.fd); 786 787 goto fail; 788 } 789 790 rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd); 791 break; 792 793 case _NXT_PORT_MSG_REQ_HEADERS: 794 rc = nxt_unit_process_req_headers(ctx, &recv_msg); 795 break; 796 797 case _NXT_PORT_MSG_WEBSOCKET: 798 rc = 
nxt_unit_process_websocket(ctx, &recv_msg); 799 break; 800 801 case _NXT_PORT_MSG_REMOVE_PID: 802 if (nxt_slow_path(recv_msg.size != sizeof(pid))) { 803 nxt_unit_warn(ctx, "#%"PRIu32": remove_pid: invalid message size " 804 "(%d != %d)", port_msg->stream, (int) recv_msg.size, 805 (int) sizeof(pid)); 806 807 goto fail; 808 } 809 810 memcpy(&pid, recv_msg.start, sizeof(pid)); 811 812 nxt_unit_debug(ctx, "#%"PRIu32": remove_pid: %d", 813 port_msg->stream, (int) pid); 814 815 cb->remove_pid(ctx, pid); 816 817 rc = NXT_UNIT_OK; 818 break; 819 820 case _NXT_PORT_MSG_SHM_ACK: 821 rc = nxt_unit_process_shm_ack(ctx); 822 break; 823 824 default: 825 nxt_unit_debug(ctx, "#%"PRIu32": ignore message type: %d", 826 port_msg->stream, (int) port_msg->type); 827 828 goto fail; 829 } 830 831fail: 832 833 if (recv_msg.fd != -1) { 834 close(recv_msg.fd); 835 } 836 837 while (recv_msg.incoming_buf != NULL) { 838 nxt_unit_mmap_buf_free(recv_msg.incoming_buf); 839 } 840 841 if (recv_msg.process != NULL) { 842 nxt_unit_process_use(ctx, recv_msg.process, -1); 843 } 844 845 return rc; 846} 847 848 849static int 850nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) 851{ 852 int nb; 853 nxt_unit_impl_t *lib; 854 nxt_unit_port_t new_port; 855 nxt_port_msg_new_port_t *new_port_msg; 856 857 if (nxt_slow_path(recv_msg->size != sizeof(nxt_port_msg_new_port_t))) { 858 nxt_unit_warn(ctx, "#%"PRIu32": new_port: " 859 "invalid message size (%d)", 860 recv_msg->stream, (int) recv_msg->size); 861 862 return NXT_UNIT_ERROR; 863 } 864 865 if (nxt_slow_path(recv_msg->fd < 0)) { 866 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for new port", 867 recv_msg->stream, recv_msg->fd); 868 869 return NXT_UNIT_ERROR; 870 } 871 872 new_port_msg = recv_msg->start; 873 874 nxt_unit_debug(ctx, "#%"PRIu32": new_port: %d,%d fd %d", 875 recv_msg->stream, (int) new_port_msg->pid, 876 (int) new_port_msg->id, recv_msg->fd); 877 878 nb = 0; 879 880 if (nxt_slow_path(ioctl(recv_msg->fd, FIONBIO, 
&nb) == -1)) { 881 nxt_unit_alert(ctx, "#%"PRIu32": new_port: ioctl(%d, FIONBIO, 0) " 882 "failed: %s (%d)", 883 recv_msg->stream, recv_msg->fd, strerror(errno), errno); 884 885 return NXT_UNIT_ERROR; 886 } 887 888 nxt_unit_port_id_init(&new_port.id, new_port_msg->pid, 889 new_port_msg->id); 890 891 new_port.in_fd = -1; 892 new_port.out_fd = recv_msg->fd; 893 new_port.data = NULL; 894 895 recv_msg->fd = -1; 896 897 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 898 899 return lib->callbacks.add_port(ctx, &new_port); 900} 901 902 903static int 904nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) 905{ 906 nxt_unit_impl_t *lib; 907 nxt_unit_request_t *r; 908 nxt_unit_mmap_buf_t *b; 909 nxt_unit_request_info_t *req; 910 nxt_unit_request_info_impl_t *req_impl; 911 912 if (nxt_slow_path(recv_msg->mmap == 0)) { 913 nxt_unit_warn(ctx, "#%"PRIu32": data is not in shared memory", 914 recv_msg->stream); 915 916 return NXT_UNIT_ERROR; 917 } 918 919 if (nxt_slow_path(recv_msg->size < sizeof(nxt_unit_request_t))) { 920 nxt_unit_warn(ctx, "#%"PRIu32": data too short: %d while at least " 921 "%d expected", recv_msg->stream, (int) recv_msg->size, 922 (int) sizeof(nxt_unit_request_t)); 923 924 return NXT_UNIT_ERROR; 925 } 926 927 req_impl = nxt_unit_request_info_get(ctx); 928 if (nxt_slow_path(req_impl == NULL)) { 929 nxt_unit_warn(ctx, "#%"PRIu32": request info allocation failed", 930 recv_msg->stream); 931 932 return NXT_UNIT_ERROR; 933 } 934 935 req = &req_impl->req; 936 937 nxt_unit_port_id_init(&req->response_port, recv_msg->pid, 938 recv_msg->reply_port); 939 940 req->request = recv_msg->start; 941 942 b = recv_msg->incoming_buf; 943 944 req->request_buf = &b->buf; 945 req->response = NULL; 946 req->response_buf = NULL; 947 948 r = req->request; 949 950 req->content_length = r->content_length; 951 952 req->content_buf = req->request_buf; 953 req->content_buf->free = nxt_unit_sptr_get(&r->preread_content); 954 955 /* "Move" process 
reference to req_impl. */ 956 req_impl->process = nxt_unit_msg_get_process(ctx, recv_msg); 957 if (nxt_slow_path(req_impl->process == NULL)) { 958 return NXT_UNIT_ERROR; 959 } 960 961 recv_msg->process = NULL; 962 963 req_impl->stream = recv_msg->stream; 964 965 req_impl->outgoing_buf = NULL; 966 967 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) { 968 b->req = req; 969 } 970 971 /* "Move" incoming buffer list to req_impl. */ 972 req_impl->incoming_buf = recv_msg->incoming_buf; 973 req_impl->incoming_buf->prev = &req_impl->incoming_buf; 974 recv_msg->incoming_buf = NULL; 975 976 req->content_fd = recv_msg->fd; 977 recv_msg->fd = -1; 978 979 req->response_max_fields = 0; 980 req_impl->state = NXT_UNIT_RS_START; 981 req_impl->websocket = 0; 982 983 nxt_unit_debug(ctx, "#%"PRIu32": %.*s %.*s (%d)", recv_msg->stream, 984 (int) r->method_length, 985 (char *) nxt_unit_sptr_get(&r->method), 986 (int) r->target_length, 987 (char *) nxt_unit_sptr_get(&r->target), 988 (int) r->content_length); 989 990 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 991 992 lib->callbacks.request_handler(req); 993 994 return NXT_UNIT_OK; 995} 996 997 998static int 999nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) 1000{ 1001 size_t hsize; 1002 nxt_unit_impl_t *lib; 1003 nxt_unit_mmap_buf_t *b; 1004 nxt_unit_ctx_impl_t *ctx_impl; 1005 nxt_unit_callbacks_t *cb; 1006 nxt_unit_request_info_t *req; 1007 nxt_unit_request_info_impl_t *req_impl; 1008 nxt_unit_websocket_frame_impl_t *ws_impl; 1009 1010 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 1011 1012 req_impl = nxt_unit_request_hash_find(&ctx_impl->requests, recv_msg->stream, 1013 recv_msg->last); 1014 if (req_impl == NULL) { 1015 return NXT_UNIT_OK; 1016 } 1017 1018 req = &req_impl->req; 1019 1020 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 1021 cb = &lib->callbacks; 1022 1023 if (cb->websocket_handler && recv_msg->size >= 2) { 1024 ws_impl = 
nxt_unit_websocket_frame_get(ctx); 1025 if (nxt_slow_path(ws_impl == NULL)) { 1026 nxt_unit_warn(ctx, "#%"PRIu32": websocket frame allocation failed", 1027 req_impl->stream); 1028 1029 return NXT_UNIT_ERROR; 1030 } 1031 1032 ws_impl->ws.req = req; 1033 1034 ws_impl->buf = NULL; 1035 1036 if (recv_msg->mmap) { 1037 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) { 1038 b->req = req; 1039 } 1040 1041 /* "Move" incoming buffer list to ws_impl. */ 1042 ws_impl->buf = recv_msg->incoming_buf; 1043 ws_impl->buf->prev = &ws_impl->buf; 1044 recv_msg->incoming_buf = NULL; 1045 1046 b = ws_impl->buf; 1047 1048 } else { 1049 b = nxt_unit_mmap_buf_get(ctx); 1050 if (nxt_slow_path(b == NULL)) { 1051 nxt_unit_alert(ctx, "#%"PRIu32": failed to allocate buf", 1052 req_impl->stream); 1053 1054 nxt_unit_websocket_frame_release(&ws_impl->ws); 1055 1056 return NXT_UNIT_ERROR; 1057 } 1058 1059 b->req = req; 1060 b->buf.start = recv_msg->start; 1061 b->buf.free = b->buf.start; 1062 b->buf.end = b->buf.start + recv_msg->size; 1063 1064 nxt_unit_mmap_buf_insert(&ws_impl->buf, b); 1065 } 1066 1067 ws_impl->ws.header = (void *) b->buf.start; 1068 ws_impl->ws.payload_len = nxt_websocket_frame_payload_len( 1069 ws_impl->ws.header); 1070 1071 hsize = nxt_websocket_frame_header_size(ws_impl->ws.header); 1072 1073 if (ws_impl->ws.header->mask) { 1074 ws_impl->ws.mask = (uint8_t *) b->buf.start + hsize - 4; 1075 1076 } else { 1077 ws_impl->ws.mask = NULL; 1078 } 1079 1080 b->buf.free += hsize; 1081 1082 ws_impl->ws.content_buf = &b->buf; 1083 ws_impl->ws.content_length = ws_impl->ws.payload_len; 1084 1085 nxt_unit_req_debug(req, "websocket_handler: opcode=%d, " 1086 "payload_len=%"PRIu64, 1087 ws_impl->ws.header->opcode, 1088 ws_impl->ws.payload_len); 1089 1090 cb->websocket_handler(&ws_impl->ws); 1091 } 1092 1093 if (recv_msg->last) { 1094 req_impl->websocket = 0; 1095 1096 if (cb->close_handler) { 1097 nxt_unit_req_debug(req, "close_handler"); 1098 1099 cb->close_handler(req); 1100 1101 
        } else {
            nxt_unit_request_done(req, NXT_UNIT_ERROR);
        }
    }

    return NXT_UNIT_OK;
}


/*
 * Run the library's "shm_ack" callback, if the application registered one.
 * Always reports success.
 */
static int
nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx)
{
    nxt_unit_impl_t       *lib;
    nxt_unit_callbacks_t  *cb;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    cb = &lib->callbacks;

    if (cb->shm_ack_handler != NULL) {
        cb->shm_ack_handler(ctx);
    }

    return NXT_UNIT_OK;
}


/*
 * Get a request info object for this context: reuse one from the per-context
 * free list or, when the list is empty, malloc() a fresh one with
 * lib->request_data_size extra bytes of application data appended.  The
 * object is linked into ctx_impl->active_req under ctx_impl->mutex.
 * Returns NULL on allocation failure.
 */
static nxt_unit_request_info_impl_t *
nxt_unit_request_info_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_impl_t               *lib;
    nxt_queue_link_t              *lnk;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (nxt_queue_is_empty(&ctx_impl->free_req)) {
        /* Drop the lock around malloc(); re-acquire it for queue insert. */
        pthread_mutex_unlock(&ctx_impl->mutex);

        req_impl = malloc(sizeof(nxt_unit_request_info_impl_t)
                          + lib->request_data_size);
        if (nxt_slow_path(req_impl == NULL)) {
            return NULL;
        }

        req_impl->req.unit = ctx->unit;
        req_impl->req.ctx = ctx;

        pthread_mutex_lock(&ctx_impl->mutex);

    } else {
        lnk = nxt_queue_first(&ctx_impl->free_req);
        nxt_queue_remove(lnk);

        req_impl = nxt_container_of(lnk, nxt_unit_request_info_impl_t, link);
    }

    nxt_queue_insert_tail(&ctx_impl->active_req, &req_impl->link);

    pthread_mutex_unlock(&ctx_impl->mutex);

    /*
     * Point req.data at the trailing extra_data area only when the
     * application requested per-request data.
     */
    req_impl->req.data = lib->request_data_size ? req_impl->extra_data : NULL;

    return req_impl;
}


/*
 * Return a request info object to the context free list: releases all
 * outgoing and incoming buffers, closes the request body fd, drops the
 * process reference and moves the object from active_req to free_req.
 */
static void
nxt_unit_request_info_release(nxt_unit_request_info_t *req)
{
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    req->response = NULL;
    req->response_buf = NULL;

    if (req_impl->websocket) {
        /*
         * NOTE(review): the third argument appears to be a "remove" flag,
         * dropping the request from the websocket request hash; confirm
         * against nxt_unit_request_hash_find().
         */
        nxt_unit_request_hash_find(&ctx_impl->requests, req_impl->stream, 1);

        req_impl->websocket = 0;
    }

    /* nxt_unit_mmap_buf_free() unlinks the head, so these loops terminate. */
    while (req_impl->outgoing_buf != NULL) {
        nxt_unit_mmap_buf_free(req_impl->outgoing_buf);
    }

    while (req_impl->incoming_buf != NULL) {
        nxt_unit_mmap_buf_free(req_impl->incoming_buf);
    }

    if (req->content_fd != -1) {
        close(req->content_fd);

        req->content_fd = -1;
    }

    /*
     * Process release should go after buffers release to guarantee mmap
     * existence.
     */
    if (req_impl->process != NULL) {
        nxt_unit_process_use(req->ctx, req_impl->process, -1);

        req_impl->process = NULL;
    }

    pthread_mutex_lock(&ctx_impl->mutex);

    nxt_queue_remove(&req_impl->link);

    nxt_queue_insert_tail(&ctx_impl->free_req, &req_impl->link);

    pthread_mutex_unlock(&ctx_impl->mutex);

    req_impl->state = NXT_UNIT_RS_RELEASED;
}


/*
 * Destroy a request info object: unlink it and free() it unless it is the
 * statically embedded per-context object (ctx_impl->req).
 */
static void
nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req_impl)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_queue_remove(&req_impl->link);

    if (req_impl != &ctx_impl->req) {
        free(req_impl);
    }
}


/*
 * Get a websocket frame object: reuse one from the per-context free list,
 * or malloc() a new one when the list is empty.  Returns NULL on
 * allocation failure.
 */
static nxt_unit_websocket_frame_impl_t *
nxt_unit_websocket_frame_get(nxt_unit_ctx_t *ctx)
{
    nxt_queue_link_t                 *lnk;
    nxt_unit_ctx_impl_t              *ctx_impl;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (nxt_queue_is_empty(&ctx_impl->free_ws)) {
        pthread_mutex_unlock(&ctx_impl->mutex);

        ws_impl = malloc(sizeof(nxt_unit_websocket_frame_impl_t));
        if (nxt_slow_path(ws_impl == NULL)) {
            return NULL;
        }

    } else {
        lnk = nxt_queue_first(&ctx_impl->free_ws);
        nxt_queue_remove(lnk);

        pthread_mutex_unlock(&ctx_impl->mutex);

        ws_impl = nxt_container_of(lnk, nxt_unit_websocket_frame_impl_t, link);
    }

    ws_impl->ctx_impl = ctx_impl;

    return ws_impl;
}


/*
 * Return a websocket frame object to the context free list, releasing any
 * buffers still attached to the frame.
 */
static void
nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws)
{
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);

    while (ws_impl->buf != NULL) {
        nxt_unit_mmap_buf_free(ws_impl->buf);
    }

    ws->req = NULL;

    pthread_mutex_lock(&ws_impl->ctx_impl->mutex);

    nxt_queue_insert_tail(&ws_impl->ctx_impl->free_ws, &ws_impl->link);

    pthread_mutex_unlock(&ws_impl->ctx_impl->mutex);
}


/* Destroy a websocket frame object: unlink it from its list and free() it. */
static void
nxt_unit_websocket_frame_free(nxt_unit_websocket_frame_impl_t *ws_impl)
{
    nxt_queue_remove(&ws_impl->link);

    free(ws_impl);
}


/*
 * Case-insensitive hash of a header field name, folded to 16 bits.
 * Uses the same magic constant as the router so the hashes agree.
 */
uint16_t
nxt_unit_field_hash(const char *name, size_t name_length)
{
    u_char      ch;
    uint32_t    hash;
    const char  *p, *end;

    hash = 159406;  /* Magic value copied from nxt_http_parse.c */
    end = name + name_length;

    for (p = name; p < end; p++) {
        ch = *p;
        hash = (hash << 4) + hash + nxt_lowcase(ch);
    }

    /* Fold the high 16 bits into the low 16 before the uint16_t return. */
    hash = (hash >> 16) ^ hash;

    return hash;
}


/*
 * Rearrange the request fields so that fields with equal hashes become
 * adjacent, letting applications treat repeated headers as one group.
 * Also records the indexes of several well-known fields in the request.
 */
void
nxt_unit_request_group_dup_fields(nxt_unit_request_info_t *req)
{
    uint32_t            i, j;
    nxt_unit_field_t    *fields, f;
    nxt_unit_request_t  *r;

    nxt_unit_req_debug(req, "group_dup_fields");

    r = req->request;
    fields = r->fields;

    for (i = 0; i < r->fields_count; i++) {

        switch (fields[i].hash) {
        case NXT_UNIT_HASH_CONTENT_LENGTH:
            r->content_length_field = i;
            break;

        case NXT_UNIT_HASH_CONTENT_TYPE:
            r->content_type_field = i;
            break;

        case NXT_UNIT_HASH_COOKIE:
            r->cookie_field = i;
            break;
        };

        /* Pull every later field with the same hash up next to field i. */
        for (j = i + 1; j < r->fields_count; j++) {
            if (fields[i].hash != fields[j].hash) {
                continue;
            }

            if (j == i + 1) {
                /* Already adjacent; nothing to move. */
                continue;
            }

            /*
             * Field sptr offsets are relative to the field entry itself,
             * so moving an entry by k slots must adjust its offsets by
             * k * sizeof(nxt_unit_field_t).
             */
            f = fields[j];
            f.name.offset += (j - (i + 1)) * sizeof(f);
            f.value.offset += (j - (i + 1)) * sizeof(f);

            /* Shift the intervening entries one slot down. */
            while (j > i + 1) {
                fields[j] = fields[j - 1];
                fields[j].name.offset -= sizeof(f);
                fields[j].value.offset -= sizeof(f);
                j--;
            }

            fields[j] = f;

            i++;
        }
    }
}


/*
 * Initialize the response: allocate (or reuse) a buffer laid out as the
 * nxt_unit_response_t header, an array of max_fields_count field entries,
 * and then max_fields_size bytes of name/value string storage.  Sets the
 * response status and moves the request into the RESPONSE_INIT state.
 * Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
int
nxt_unit_response_init(nxt_unit_request_info_t *req,
    uint16_t status, uint32_t max_fields_count, uint32_t max_fields_size)
{
    uint32_t                      buf_size;
    nxt_unit_buf_t                *buf;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "init: response already sent");

        return NXT_UNIT_ERROR;
    }

    nxt_unit_req_debug(req, "init: %d, max fields %d/%d", (int) status,
                       (int) max_fields_count, (int) max_fields_size);

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_debug(req, "duplicate response init");
    }

    /*
     * Each field name and value 0-terminated by libunit,
     * this is the reason of '+ 2' below.
     */
    buf_size = sizeof(nxt_unit_response_t)
               + max_fields_count * (sizeof(nxt_unit_field_t) + 2)
               + max_fields_size;

    if (nxt_slow_path(req->response_buf != NULL)) {
        buf = req->response_buf;

        /* Reuse the existing buffer when it is large enough. */
        if (nxt_fast_path(buf_size <= (uint32_t) (buf->end - buf->start))) {
            goto init_response;
        }

        nxt_unit_buf_free(buf);

        req->response_buf = NULL;
        req->response = NULL;
        req->response_max_fields = 0;

        req_impl->state = NXT_UNIT_RS_START;
    }

    buf = nxt_unit_response_buf_alloc(req, buf_size);
    if (nxt_slow_path(buf == NULL)) {
        return NXT_UNIT_ERROR;
    }

init_response:

    memset(buf->start, 0, sizeof(nxt_unit_response_t));

    req->response_buf = buf;

    req->response = (nxt_unit_response_t *) buf->start;
    req->response->status = status;

    /* String storage begins after the header and the fields array. */
    buf->free = buf->start + sizeof(nxt_unit_response_t)
                + max_fields_count * sizeof(nxt_unit_field_t);

    req->response_max_fields = max_fields_count;
    req_impl->state = NXT_UNIT_RS_RESPONSE_INIT;

    return NXT_UNIT_OK;
1450} 1451 1452 1453int 1454nxt_unit_response_realloc(nxt_unit_request_info_t *req, 1455 uint32_t max_fields_count, uint32_t max_fields_size) 1456{ 1457 char *p; 1458 uint32_t i, buf_size; 1459 nxt_unit_buf_t *buf; 1460 nxt_unit_field_t *f, *src; 1461 nxt_unit_response_t *resp; 1462 nxt_unit_request_info_impl_t *req_impl; 1463 1464 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1465 1466 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 1467 nxt_unit_req_warn(req, "realloc: response not init"); 1468 1469 return NXT_UNIT_ERROR; 1470 } 1471 1472 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 1473 nxt_unit_req_warn(req, "realloc: response already sent"); 1474 1475 return NXT_UNIT_ERROR; 1476 } 1477 1478 if (nxt_slow_path(max_fields_count < req->response->fields_count)) { 1479 nxt_unit_req_warn(req, "realloc: new max_fields_count is too small"); 1480 1481 return NXT_UNIT_ERROR; 1482 } 1483 1484 /* 1485 * Each field name and value 0-terminated by libunit, 1486 * this is the reason of '+ 2' below. 
1487 */ 1488 buf_size = sizeof(nxt_unit_response_t) 1489 + max_fields_count * (sizeof(nxt_unit_field_t) + 2) 1490 + max_fields_size; 1491 1492 nxt_unit_req_debug(req, "realloc %"PRIu32"", buf_size); 1493 1494 buf = nxt_unit_response_buf_alloc(req, buf_size); 1495 if (nxt_slow_path(buf == NULL)) { 1496 nxt_unit_req_warn(req, "realloc: new buf allocation failed"); 1497 return NXT_UNIT_ERROR; 1498 } 1499 1500 resp = (nxt_unit_response_t *) buf->start; 1501 1502 memset(resp, 0, sizeof(nxt_unit_response_t)); 1503 1504 resp->status = req->response->status; 1505 resp->content_length = req->response->content_length; 1506 1507 p = buf->start + max_fields_count * sizeof(nxt_unit_field_t); 1508 f = resp->fields; 1509 1510 for (i = 0; i < req->response->fields_count; i++) { 1511 src = req->response->fields + i; 1512 1513 if (nxt_slow_path(src->skip != 0)) { 1514 continue; 1515 } 1516 1517 if (nxt_slow_path(src->name_length + src->value_length + 2 1518 > (uint32_t) (buf->end - p))) 1519 { 1520 nxt_unit_req_warn(req, "realloc: not enough space for field" 1521 " #%"PRIu32" (%p), (%"PRIu32" + %"PRIu32") required", 1522 i, src, src->name_length, src->value_length); 1523 1524 goto fail; 1525 } 1526 1527 nxt_unit_sptr_set(&f->name, p); 1528 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->name), src->name_length); 1529 *p++ = '\0'; 1530 1531 nxt_unit_sptr_set(&f->value, p); 1532 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->value), src->value_length); 1533 *p++ = '\0'; 1534 1535 f->hash = src->hash; 1536 f->skip = 0; 1537 f->name_length = src->name_length; 1538 f->value_length = src->value_length; 1539 1540 resp->fields_count++; 1541 f++; 1542 } 1543 1544 if (req->response->piggyback_content_length > 0) { 1545 if (nxt_slow_path(req->response->piggyback_content_length 1546 > (uint32_t) (buf->end - p))) 1547 { 1548 nxt_unit_req_warn(req, "realloc: not enought space for content" 1549 " #%"PRIu32", %"PRIu32" required", 1550 i, req->response->piggyback_content_length); 1551 1552 goto fail; 1553 } 
1554 1555 resp->piggyback_content_length = 1556 req->response->piggyback_content_length; 1557 1558 nxt_unit_sptr_set(&resp->piggyback_content, p); 1559 p = nxt_cpymem(p, nxt_unit_sptr_get(&req->response->piggyback_content), 1560 req->response->piggyback_content_length); 1561 } 1562 1563 buf->free = p; 1564 1565 nxt_unit_buf_free(req->response_buf); 1566 1567 req->response = resp; 1568 req->response_buf = buf; 1569 req->response_max_fields = max_fields_count; 1570 1571 return NXT_UNIT_OK; 1572 1573fail: 1574 1575 nxt_unit_buf_free(buf); 1576 1577 return NXT_UNIT_ERROR; 1578} 1579 1580 1581int 1582nxt_unit_response_is_init(nxt_unit_request_info_t *req) 1583{ 1584 nxt_unit_request_info_impl_t *req_impl; 1585 1586 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1587 1588 return req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT; 1589} 1590 1591 1592int 1593nxt_unit_response_add_field(nxt_unit_request_info_t *req, 1594 const char *name, uint8_t name_length, 1595 const char *value, uint32_t value_length) 1596{ 1597 nxt_unit_buf_t *buf; 1598 nxt_unit_field_t *f; 1599 nxt_unit_response_t *resp; 1600 nxt_unit_request_info_impl_t *req_impl; 1601 1602 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1603 1604 if (nxt_slow_path(req_impl->state != NXT_UNIT_RS_RESPONSE_INIT)) { 1605 nxt_unit_req_warn(req, "add_field: response not initialized or " 1606 "already sent"); 1607 1608 return NXT_UNIT_ERROR; 1609 } 1610 1611 resp = req->response; 1612 1613 if (nxt_slow_path(resp->fields_count >= req->response_max_fields)) { 1614 nxt_unit_req_warn(req, "add_field: too many response fields"); 1615 1616 return NXT_UNIT_ERROR; 1617 } 1618 1619 buf = req->response_buf; 1620 1621 if (nxt_slow_path(name_length + value_length + 2 1622 > (uint32_t) (buf->end - buf->free))) 1623 { 1624 nxt_unit_req_warn(req, "add_field: response buffer overflow"); 1625 1626 return NXT_UNIT_ERROR; 1627 } 1628 1629 nxt_unit_req_debug(req, "add_field #%"PRIu32": %.*s: %.*s", 1630 
                       resp->fields_count,
                       (int) name_length, name,
                       (int) value_length, value);

    f = resp->fields + resp->fields_count;

    /* Copy name and value into the string area, each 0-terminated. */
    nxt_unit_sptr_set(&f->name, buf->free);
    buf->free = nxt_cpymem(buf->free, name, name_length);
    *buf->free++ = '\0';

    nxt_unit_sptr_set(&f->value, buf->free);
    buf->free = nxt_cpymem(buf->free, value, value_length);
    *buf->free++ = '\0';

    f->hash = nxt_unit_field_hash(name, name_length);
    f->skip = 0;
    f->name_length = name_length;
    f->value_length = value_length;

    resp->fields_count++;

    return NXT_UNIT_OK;
}


/*
 * Append response body bytes directly into the response buffer
 * ("piggyback" content sent together with the headers).  Must be called
 * after nxt_unit_response_init() and before the response is sent.
 * Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
int
nxt_unit_response_add_content(nxt_unit_request_info_t *req,
    const void* src, uint32_t size)
{
    nxt_unit_buf_t                *buf;
    nxt_unit_response_t           *resp;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "add_content: response not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "add_content: response already sent");

        return NXT_UNIT_ERROR;
    }

    buf = req->response_buf;

    if (nxt_slow_path(size > (uint32_t) (buf->end - buf->free))) {
        nxt_unit_req_warn(req, "add_content: buffer overflow");

        return NXT_UNIT_ERROR;
    }

    resp = req->response;

    /* First chunk: record where the piggyback content starts. */
    if (resp->piggyback_content_length == 0) {
        nxt_unit_sptr_set(&resp->piggyback_content, buf->free);
        req_impl->state = NXT_UNIT_RS_RESPONSE_HAS_CONTENT;
    }

    resp->piggyback_content_length += size;

    buf->free = nxt_cpymem(buf->free, src, size);

    return NXT_UNIT_OK;
}


/*
 * Send the response headers (plus any piggyback content) to the router.
 * Upgrades the connection first when the request was a websocket handshake
 * answered with status 101.  On success the response buffer is consumed
 * and the request moves to the RESPONSE_SENT state.
 */
int
nxt_unit_response_send(nxt_unit_request_info_t *req)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "send: response is not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "send: response already sent");

        return NXT_UNIT_ERROR;
    }

    if (req->request->websocket_handshake && req->response->status == 101) {
        nxt_unit_response_upgrade(req);
    }

    nxt_unit_req_debug(req, "send: %"PRIu32" fields, %d bytes",
                       req->response->fields_count,
                       (int) (req->response_buf->free
                              - req->response_buf->start));

    mmap_buf = nxt_container_of(req->response_buf, nxt_unit_mmap_buf_t, buf);

    rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 0);
    if (nxt_fast_path(rc == NXT_UNIT_OK)) {
        req->response = NULL;
        req->response_buf = NULL;
        req_impl->state = NXT_UNIT_RS_RESPONSE_SENT;

        nxt_unit_mmap_buf_free(mmap_buf);
    }

    return rc;
}


/* Return non-zero once the response headers have been sent. */
int
nxt_unit_response_is_sent(nxt_unit_request_info_t *req)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    return req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT;
}


/*
 * Allocate an outgoing buffer of up to "size" bytes (bounded by
 * PORT_MMAP_DATA_SIZE) for the response, linked into the request's
 * outgoing buffer list.  Returns NULL on failure.
 */
nxt_unit_buf_t *
nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;

    if (nxt_slow_path(size > PORT_MMAP_DATA_SIZE)) {
        nxt_unit_req_warn(req, "response_buf_alloc: "
                          "requested buffer (%"PRIu32") too big", size);

        return NULL;
    }

    nxt_unit_req_debug(req, "response_buf_alloc: %"PRIu32, size);

    req_impl = nxt_container_of(req,
                                nxt_unit_request_info_impl_t, req);

    mmap_buf = nxt_unit_mmap_buf_get(req->ctx);
    if (nxt_slow_path(mmap_buf == NULL)) {
        nxt_unit_req_alert(req, "response_buf_alloc: failed to allocate buf");

        return NULL;
    }

    mmap_buf->req = req;

    nxt_unit_mmap_buf_insert_tail(&req_impl->outgoing_buf, mmap_buf);

    rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                   &req->response_port, size, size, mmap_buf,
                                   NULL);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_mmap_buf_release(mmap_buf);

        return NULL;
    }

    return &mmap_buf->buf;
}


/*
 * Resolve (and cache) the process a received message came from.  Looks the
 * pid up under lib->mutex on first use; logs a warning and returns NULL
 * when the process is unknown.
 */
static nxt_unit_process_t *
nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    nxt_unit_impl_t  *lib;

    if (recv_msg->process != NULL) {
        return recv_msg->process;
    }

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->mutex);

    recv_msg->process = nxt_unit_process_find(ctx, recv_msg->pid, 0);

    pthread_mutex_unlock(&lib->mutex);

    if (recv_msg->process == NULL) {
        nxt_unit_warn(ctx, "#%"PRIu32": process %d not found",
                      recv_msg->stream, (int) recv_msg->pid);
    }

    return recv_msg->process;
}


/*
 * Get an mmap buffer wrapper from the per-context free list, or malloc()
 * one when the list is empty.  The returned wrapper has no mmap header and
 * no plain-memory allocation attached.  Returns NULL on failure.
 */
static nxt_unit_mmap_buf_t *
nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_mmap_buf_t  *mmap_buf;
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (ctx_impl->free_buf == NULL) {
        pthread_mutex_unlock(&ctx_impl->mutex);

        mmap_buf = malloc(sizeof(nxt_unit_mmap_buf_t));
        if (nxt_slow_path(mmap_buf == NULL)) {
            return NULL;
        }

    } else {
        mmap_buf = ctx_impl->free_buf;

        nxt_unit_mmap_buf_unlink(mmap_buf);

        pthread_mutex_unlock(&ctx_impl->mutex);
    }

    mmap_buf->ctx_impl = ctx_impl;

    mmap_buf->hdr = NULL;
    mmap_buf->free_ptr = NULL;

    return mmap_buf;
}


/* Return an mmap buffer wrapper to its context's free list. */
static void
nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_mmap_buf_unlink(mmap_buf);

    pthread_mutex_lock(&mmap_buf->ctx_impl->mutex);

    nxt_unit_mmap_buf_insert(&mmap_buf->ctx_impl->free_buf, mmap_buf);

    pthread_mutex_unlock(&mmap_buf->ctx_impl->mutex);
}


/* Length-prefixed constant string, initialized via nxt_unit_str(). */
typedef struct {
    size_t      len;
    const char  *str;
} nxt_unit_str_t;


#define nxt_unit_str(str)  { nxt_length(str), str }


/* Return non-zero when the request is a websocket handshake. */
int
nxt_unit_request_is_websocket_handshake(nxt_unit_request_info_t *req)
{
    return req->request->websocket_handshake;
}


/*
 * Upgrade the request to a websocket connection: registers the request in
 * the per-context request hash (so incoming frames can find it) and forces
 * response status 101.  Idempotent; requires an initialized, not yet sent
 * response.  Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
int
nxt_unit_response_upgrade(nxt_unit_request_info_t *req)
{
    int                           rc;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->websocket != 0)) {
        nxt_unit_req_debug(req, "upgrade: already upgraded");

        return NXT_UNIT_OK;
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "upgrade: response is not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "upgrade: response already sent");

        return NXT_UNIT_ERROR;
    }

    ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);

    rc = nxt_unit_request_hash_add(&ctx_impl->requests, req_impl);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_req_warn(req, "upgrade: failed to add request to hash");

        return NXT_UNIT_ERROR;
    }

    req_impl->websocket = 1;

    req->response->status = 101;

    return NXT_UNIT_OK;
}


int
nxt_unit_response_is_websocket(nxt_unit_request_info_t *req)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    return req_impl->websocket;
}


/*
 * Recover the request info object from the application data pointer that
 * nxt_unit_request_info_get() handed out (req->data points at the
 * extra_data member of the impl struct).
 */
nxt_unit_request_info_t *
nxt_unit_get_request_info_from_data(void *data)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(data, nxt_unit_request_info_impl_t, extra_data);

    return &req_impl->req;
}


/*
 * Send a response body buffer to the router (non-final message) and free
 * it.  Headers must have been sent already.  Empty buffers are freed
 * without sending anything.  Returns NXT_UNIT_OK or an error code.
 */
int
nxt_unit_buf_send(nxt_unit_buf_t *buf)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    nxt_unit_req_debug(req, "buf_send: %d bytes",
                       (int) (buf->free - buf->start));

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "buf_send: response not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "buf_send: headers not sent yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_fast_path(buf->free > buf->start)) {
        rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return rc;
        }
    }

    nxt_unit_mmap_buf_free(mmap_buf);

    return NXT_UNIT_OK;
}


/*
 * Send a buffer as the final ("last") message of the response and release
 * the request.  On send failure the request is finished with the error
 * code instead.
 */
static void
nxt_unit_buf_send_done(nxt_unit_buf_t *buf)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 1);
    if (nxt_slow_path(rc == NXT_UNIT_OK)) {
        nxt_unit_mmap_buf_free(mmap_buf);

        nxt_unit_request_info_release(req);

    } else {
        nxt_unit_request_done(req, rc);
    }
}


/*
 * Send the buffer contents to the router as one port message.
 *
 * Shared-memory path: sends a small (mmap_id, chunk_id, size) reference,
 * then trims the buffer in place so it covers only the still-unsent chunk
 * space, and adjusts the process's allocated_chunks counter for the chunks
 * handed over.  Plain path: the port message header is copied into the
 * space reserved immediately before buf->start (plain_ptr) and the whole
 * region is sent in one write.
 *
 * In all cases the buffer's backing storage is released via
 * nxt_unit_free_outgoing_buf() before returning.
 */
static int
nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream,
    nxt_unit_mmap_buf_t *mmap_buf, int last)
{
    struct {
        nxt_port_msg_t       msg;
        nxt_port_mmap_msg_t  mmap_msg;
    } m;

    int                     rc;
    u_char                  *last_used, *first_free;
    ssize_t                 res;
    nxt_chunk_id_t          first_free_chunk;
    nxt_unit_buf_t          *buf;
    nxt_unit_impl_t         *lib;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    buf = &mmap_buf->buf;
    hdr = mmap_buf->hdr;

    m.mmap_msg.size = buf->free - buf->start;

    m.msg.stream = stream;
    m.msg.pid = lib->pid;
    m.msg.reply_port = 0;
    m.msg.type = _NXT_PORT_MSG_DATA;
    m.msg.last = last != 0;
    /* Use the shm path only for non-empty buffers backed by an mmap. */
    m.msg.mmap = hdr != NULL && m.mmap_msg.size > 0;
    m.msg.nf = 0;
    m.msg.mf = 0;
    m.msg.tracking = 0;

    rc = NXT_UNIT_ERROR;

    if (m.msg.mmap) {
        m.mmap_msg.mmap_id = hdr->id;
        m.mmap_msg.chunk_id = nxt_port_mmap_chunk_id(hdr,
                                                     (u_char *) buf->start);

        nxt_unit_debug(ctx, "#%"PRIu32": send mmap: (%d,%d,%d)",
                       stream,
                       (int) m.mmap_msg.mmap_id,
                       (int) m.mmap_msg.chunk_id,
                       (int) m.mmap_msg.size);

        res = lib->callbacks.port_send(ctx, &mmap_buf->port_id, &m, sizeof(m),
                                       NULL, 0);
        if (nxt_slow_path(res != sizeof(m))) {
            goto free_buf;
        }

        /* First chunk past the bytes just sent. */
        last_used = (u_char *) buf->free - 1;
        first_free_chunk = nxt_port_mmap_chunk_id(hdr, last_used) + 1;

        if (buf->end - buf->free >= PORT_MMAP_CHUNK_SIZE) {
            /* Keep the remaining whole chunks for further writes. */
            first_free = nxt_port_mmap_chunk_start(hdr, first_free_chunk);

            buf->start = (char *) first_free;
            buf->free = buf->start;

            if (buf->end < buf->start) {
                buf->end = buf->start;
            }

        } else {
            /* Nothing usable remains; detach the buffer from the mmap. */
            buf->start = NULL;
            buf->free = NULL;
            buf->end = NULL;

            mmap_buf->hdr = NULL;
        }

        /*
         * NOTE(review): chunk_id - first_free_chunk is negative (or zero),
         * so this decrements allocated_chunks by the number of chunks
         * transferred to the router.
         */
        nxt_atomic_fetch_add(&mmap_buf->process->outgoing.allocated_chunks,
                             (int) m.mmap_msg.chunk_id
                             - (int) first_free_chunk);

        nxt_unit_debug(ctx, "process %d allocated_chunks %d",
                       mmap_buf->process->pid,
                       (int) mmap_buf->process->outgoing.allocated_chunks);

    } else {
        /* Plain path needs room for the message header before buf->start. */
        if (nxt_slow_path(mmap_buf->plain_ptr == NULL
                          || mmap_buf->plain_ptr > buf->start - sizeof(m.msg)))
        {
            nxt_unit_warn(ctx, "#%"PRIu32": failed to send plain memory buffer"
                          ": no space reserved for message header", stream);

            goto free_buf;
        }

        memcpy(buf->start - sizeof(m.msg), &m.msg, sizeof(m.msg));

        nxt_unit_debug(ctx, "#%"PRIu32": send plain: %d",
                       stream,
                       (int) (sizeof(m.msg) + m.mmap_msg.size));

        res = lib->callbacks.port_send(ctx, &mmap_buf->port_id,
                                       buf->start - sizeof(m.msg),
                                       m.mmap_msg.size + sizeof(m.msg),
                                       NULL, 0);
        if (nxt_slow_path(res != (ssize_t) (m.mmap_msg.size + sizeof(m.msg)))) {
            goto free_buf;
        }
    }

    rc = NXT_UNIT_OK;

free_buf:

    nxt_unit_free_outgoing_buf(mmap_buf);

    return rc;
}


/* Public wrapper: free a buffer and return its wrapper to the free list. */
void
nxt_unit_buf_free(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_free(nxt_container_of(buf, nxt_unit_mmap_buf_t, buf));
}


/* Release the buffer's backing storage and recycle the wrapper. */
static void
nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_free_outgoing_buf(mmap_buf);

    nxt_unit_mmap_buf_release(mmap_buf);
}


/*
 * Release whatever backs an outgoing buffer: shared-memory chunks (when
 * hdr is set) or plain malloc()ed memory (free_ptr).
 */
static void
nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf)
{
    if (mmap_buf->hdr != NULL) {
        nxt_unit_mmap_release(&mmap_buf->ctx_impl->ctx,
                              mmap_buf->process,
                              mmap_buf->hdr, mmap_buf->buf.start,
                              mmap_buf->buf.end -
                              mmap_buf->buf.start);

        mmap_buf->hdr = NULL;

        return;
    }

    if (mmap_buf->free_ptr != NULL) {
        free(mmap_buf->free_ptr);

        mmap_buf->free_ptr = NULL;
    }
}


/*
 * Get a read buffer for the context.  Takes ctx_impl->mutex; the lock is
 * released inside nxt_unit_read_buf_get_impl().
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    return nxt_unit_read_buf_get_impl(ctx_impl);
}


/*
 * Pop a read buffer off the free list or malloc() a new one.  Expects
 * ctx_impl->mutex to be held on entry and unlocks it on every path.
 * Returns NULL on allocation failure.
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get_impl(nxt_unit_ctx_impl_t *ctx_impl)
{
    nxt_unit_read_buf_t  *rbuf;

    if (ctx_impl->free_read_buf != NULL) {
        rbuf = ctx_impl->free_read_buf;
        ctx_impl->free_read_buf = rbuf->next;

        pthread_mutex_unlock(&ctx_impl->mutex);

        return rbuf;
    }

    pthread_mutex_unlock(&ctx_impl->mutex);

    rbuf = malloc(sizeof(nxt_unit_read_buf_t));

    return rbuf;
}


/* Push a read buffer back onto the context free list. */
static void
nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
    nxt_unit_read_buf_t *rbuf)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    rbuf->next = ctx_impl->free_read_buf;
    ctx_impl->free_read_buf = rbuf;

    pthread_mutex_unlock(&ctx_impl->mutex);
}


/* Return the next buffer in the chain, or NULL at the end. */
nxt_unit_buf_t *
nxt_unit_buf_next(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_t  *mmap_buf;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    if (mmap_buf->next == NULL) {
        return NULL;
    }

    return &mmap_buf->next->buf;
}


/* Largest single outgoing buffer the library can allocate. */
uint32_t
nxt_unit_buf_max(void)
{
    return PORT_MMAP_DATA_SIZE;
}


/* Smallest shared-memory allocation granularity. */
uint32_t
nxt_unit_buf_min(void)
{
    return PORT_MMAP_CHUNK_SIZE;
}


/*
 * Write the whole body chunk, blocking until all of it is sent.
 * Returns NXT_UNIT_OK or a positive NXT_UNIT_* error code.
 */
int
nxt_unit_response_write(nxt_unit_request_info_t *req, const void *start,
    size_t size)
{
    ssize_t  res;

    res = nxt_unit_response_write_nb(req, start, size, size);

    /* write_nb() returns bytes sent (>= 0) or a negated error code. */
    return res < 0 ? -res : NXT_UNIT_OK;
}


/*
 * Non-blocking-ish body write: sends at least min_size bytes (when
 * possible) and at most size bytes.  Any space left in the not-yet-sent
 * headers buffer is filled first (which also sends the headers).  Returns
 * the number of bytes sent, or a negated NXT_UNIT_* error code.
 */
ssize_t
nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start,
    size_t size, size_t min_size)
{
    int                           rc;
    ssize_t                       sent;
    uint32_t                      part_size, min_part_size, buf_size;
    const char                    *part_start;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    part_start = start;
    sent = 0;

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "write: response not initialized yet");

        return -NXT_UNIT_ERROR;
    }

    /* Check if response is not send yet. */
    if (nxt_slow_path(req->response_buf != NULL)) {
        /* Piggyback as much as fits into the headers buffer, then send it. */
        part_size = req->response_buf->end - req->response_buf->free;
        part_size = nxt_min(size, part_size);

        rc = nxt_unit_response_add_content(req, part_start, part_size);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        rc = nxt_unit_response_send(req);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        min_size -= nxt_min(min_size, part_size);
    }

    while (size > 0) {
        part_size = nxt_min(size, PORT_MMAP_DATA_SIZE);
        min_part_size = nxt_min(min_size, part_size);
        min_part_size = nxt_min(min_part_size, PORT_MMAP_CHUNK_SIZE);

        rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                       &req->response_port, part_size,
                                       min_part_size, &mmap_buf, local_buf);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        buf_size = mmap_buf.buf.end - mmap_buf.buf.free;
        if (nxt_slow_path(buf_size == 0)) {
            /*
             * NOTE(review): the just-acquired mmap_buf is not explicitly
             * freed on this early return - verify nxt_unit_get_outgoing_buf()
             * cannot hand back an empty buffer that owns resources.
             */
            return sent;
        }
        part_size = nxt_min(buf_size, part_size);

        mmap_buf.buf.free = nxt_cpymem(mmap_buf.buf.free,
                                       part_start, part_size);

        rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, &mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        min_size -= nxt_min(min_size, part_size);
    }

    return sent;
}


/*
 * Stream the response body from a caller-supplied read() callback until it
 * reports EOF.  Any remaining space in the not-yet-sent headers buffer is
 * filled first (piggyback content); subsequent data goes out in buffers of
 * read_info->buf_size (capped at PORT_MMAP_DATA_SIZE).  Returns
 * NXT_UNIT_OK or an NXT_UNIT_* error code.
 */
int
nxt_unit_response_write_cb(nxt_unit_request_info_t *req,
    nxt_unit_read_info_t *read_info)
{
    int                           rc;
    ssize_t                       n;
    uint32_t                      buf_size;
    nxt_unit_buf_t                *buf;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    /* Check if response is not send yet. */
    if (nxt_slow_path(req->response_buf)) {

        /* Enable content in headers buf. */
        rc = nxt_unit_response_add_content(req, "", 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            nxt_unit_req_error(req, "Failed to add piggyback content");

            return rc;
        }

        buf = req->response_buf;

        while (buf->end - buf->free > 0) {
            n = read_info->read(read_info, buf->free, buf->end - buf->free);
            if (nxt_slow_path(n < 0)) {
                nxt_unit_req_error(req, "Read error");

                return NXT_UNIT_ERROR;
            }

            /*
             * Manually increase sizes: the data was read directly into the
             * headers buffer, bypassing nxt_unit_response_add_content().
             */
            buf->free += n;
            req->response->piggyback_content_length += n;

            if (read_info->eof) {
                break;
            }
        }

        rc = nxt_unit_response_send(req);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            nxt_unit_req_error(req, "Failed to send headers with content");

            return rc;
        }

        if (read_info->eof) {
            return NXT_UNIT_OK;
        }
    }

    while (!read_info->eof) {
        nxt_unit_req_debug(req, "write_cb, alloc %"PRIu32"",
                           read_info->buf_size);

        buf_size = nxt_min(read_info->buf_size, PORT_MMAP_DATA_SIZE);

        rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                       &req->response_port,
                                       buf_size, buf_size,
                                       &mmap_buf, local_buf);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return rc;
        }

        buf = &mmap_buf.buf;

        while (!read_info->eof && buf->end > buf->free) {
            n = read_info->read(read_info, buf->free, buf->end - buf->free);
            if (nxt_slow_path(n < 0)) {
                nxt_unit_req_error(req, "Read error");

                nxt_unit_free_outgoing_buf(&mmap_buf);

                return NXT_UNIT_ERROR;
            }

            buf->free += n;
        }

        rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, &mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            nxt_unit_req_error(req, "Failed to send content");

            return rc;
        }
    }

    return NXT_UNIT_OK;
}


/*
 * Read up to "size" bytes of the request body into dst: first from the
 * buffered content chain, then from the spill-over file descriptor
 * (content_fd), which is closed once drained.  Returns bytes read or a
 * negative value on read(2) failure.
 */
ssize_t
nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size)
{
    ssize_t  buf_res, res;

    buf_res = nxt_unit_buf_read(&req->content_buf, &req->content_length,
                                dst, size);

    if (buf_res < (ssize_t) size && req->content_fd != -1) {
        /*
         * NOTE(review): dst/size are not advanced by buf_res before this
         * read(), so when both the buffer chain and content_fd supply data
         * the fd bytes would overwrite the buffered bytes just copied.
         * Presumably content_fd is only used once the chain is empty
         * (buf_res == 0) - confirm against the body-spill logic.
         */
        res = read(req->content_fd, dst, size);
        if (res < 0) {
            nxt_unit_req_alert(req, "failed to read content: %s (%d)",
                               strerror(errno), errno);

            return res;
        }

        /* Short read: the spill file is exhausted. */
        if (res < (ssize_t) size) {
            close(req->content_fd);

            req->content_fd = -1;
2467 } 2468 2469 req->content_length -= res; 2470 size -= res; 2471 2472 dst = nxt_pointer_to(dst, res); 2473 2474 } else { 2475 res = 0; 2476 } 2477 2478 return buf_res + res; 2479} 2480 2481 2482ssize_t 2483nxt_unit_request_readline_size(nxt_unit_request_info_t *req, size_t max_size) 2484{ 2485 char *p; 2486 size_t l_size, b_size; 2487 nxt_unit_buf_t *b; 2488 nxt_unit_mmap_buf_t *mmap_buf, *preread_buf; 2489 2490 if (req->content_length == 0) { 2491 return 0; 2492 } 2493 2494 l_size = 0; 2495 2496 b = req->content_buf; 2497 2498 while (b != NULL) { 2499 b_size = b->end - b->free; 2500 p = memchr(b->free, '\n', b_size); 2501 2502 if (p != NULL) { 2503 p++; 2504 l_size += p - b->free; 2505 break; 2506 } 2507 2508 l_size += b_size; 2509 2510 if (max_size <= l_size) { 2511 break; 2512 } 2513 2514 mmap_buf = nxt_container_of(b, nxt_unit_mmap_buf_t, buf); 2515 if (mmap_buf->next == NULL 2516 && req->content_fd != -1 2517 && l_size < req->content_length) 2518 { 2519 preread_buf = nxt_unit_request_preread(req, 16384); 2520 if (nxt_slow_path(preread_buf == NULL)) { 2521 return -1; 2522 } 2523 2524 nxt_unit_mmap_buf_insert(&mmap_buf->next, preread_buf); 2525 } 2526 2527 b = nxt_unit_buf_next(b); 2528 } 2529 2530 return nxt_min(max_size, l_size); 2531} 2532 2533 2534static nxt_unit_mmap_buf_t * 2535nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size) 2536{ 2537 ssize_t res; 2538 nxt_unit_mmap_buf_t *mmap_buf; 2539 2540 if (req->content_fd == -1) { 2541 nxt_unit_req_alert(req, "preread: content_fd == -1"); 2542 return NULL; 2543 } 2544 2545 mmap_buf = nxt_unit_mmap_buf_get(req->ctx); 2546 if (nxt_slow_path(mmap_buf == NULL)) { 2547 nxt_unit_req_alert(req, "preread: failed to allocate buf"); 2548 return NULL; 2549 } 2550 2551 mmap_buf->free_ptr = malloc(size); 2552 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) { 2553 nxt_unit_req_alert(req, "preread: failed to allocate buf memory"); 2554 nxt_unit_mmap_buf_release(mmap_buf); 2555 return NULL; 2556 } 2557 2558 
    mmap_buf->plain_ptr = mmap_buf->free_ptr;

    /* Plain buffer: not backed by a shared-memory header. */
    mmap_buf->hdr = NULL;
    mmap_buf->buf.start = mmap_buf->free_ptr;
    mmap_buf->buf.free = mmap_buf->buf.start;
    mmap_buf->buf.end = mmap_buf->buf.start + size;
    mmap_buf->process = NULL;

    res = read(req->content_fd, mmap_buf->free_ptr, size);
    if (res < 0) {
        nxt_unit_req_alert(req, "failed to read content: %s (%d)",
                           strerror(errno), errno);

        nxt_unit_mmap_buf_free(mmap_buf);

        return NULL;
    }

    /* A short read means the spooled content is exhausted. */
    if (res < (ssize_t) size) {
        close(req->content_fd);

        req->content_fd = -1;
    }

    nxt_unit_req_debug(req, "preread: read %d", (int) res);

    /* Shrink the buffer to the bytes actually read. */
    mmap_buf->buf.end = mmap_buf->buf.free + res;

    return mmap_buf;
}


/*
 * Copies up to 'size' bytes from the buffer chain '*b' into 'dst',
 * consuming each buffer's [free, end) region.  Advances '*b' to the last
 * buffer touched, decrements '*len' by the amount copied, and returns
 * that amount.
 */
static ssize_t
nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, size_t size)
{
    u_char          *p;
    size_t          rest, copy, read;
    nxt_unit_buf_t  *buf, *last_buf;

    p = dst;
    rest = size;

    buf = *b;
    last_buf = buf;

    while (buf != NULL) {
        last_buf = buf;

        copy = buf->end - buf->free;
        copy = nxt_min(rest, copy);

        p = nxt_cpymem(p, buf->free, copy);

        buf->free += copy;
        rest -= copy;

        if (rest == 0) {
            /* If the buffer is fully drained, step past it. */
            if (buf->end == buf->free) {
                buf = nxt_unit_buf_next(buf);
            }

            break;
        }

        buf = nxt_unit_buf_next(buf);
    }

    *b = last_buf;

    read = size - rest;

    *len -= read;

    return read;
}


/*
 * Completes request processing: sends a default 200 "text/plain" response
 * if none was initialized, flushes a pending response buffer, and
 * otherwise notifies the router with a final DATA or RPC_ERROR message
 * before releasing the request.
 */
void
nxt_unit_request_done(nxt_unit_request_info_t *req, int rc)
{
    uint32_t                      size;
    nxt_port_msg_t                msg;
    nxt_unit_impl_t               *lib;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    nxt_unit_req_debug(req, "done: %d", rc);

    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        goto skip_response_send;
    }

    if
(nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {

        /* No response initialized by the app: synthesize a minimal one. */
        size = nxt_length("Content-Type") + nxt_length("text/plain");

        rc = nxt_unit_response_init(req, 200, 1, size);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            goto skip_response_send;
        }

        rc = nxt_unit_response_add_field(req, "Content-Type",
                                         nxt_length("Content-Type"),
                                         "text/plain", nxt_length("text/plain"));
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            goto skip_response_send;
        }
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {

        req_impl->state = NXT_UNIT_RS_RESPONSE_SENT;

        /* Sending the headers buffer with 'last' also finishes the request. */
        nxt_unit_buf_send_done(req->response_buf);

        return;
    }

skip_response_send:

    lib = nxt_container_of(req->unit, nxt_unit_impl_t, unit);

    /* Response already sent (or error): emit a bare final message. */
    msg.stream = req_impl->stream;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = (rc == NXT_UNIT_OK) ? _NXT_PORT_MSG_DATA
                                   : _NXT_PORT_MSG_RPC_ERROR;
    msg.last = 1;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    (void) lib->callbacks.port_send(req->ctx, &req->response_port,
                                    &msg, sizeof(msg), NULL, 0);

    nxt_unit_request_info_release(req);
}


/*
 * Sends a single websocket frame; thin wrapper over
 * nxt_unit_websocket_sendv() with a one-element iovec.
 */
int
nxt_unit_websocket_send(nxt_unit_request_info_t *req, uint8_t opcode,
    uint8_t last, const void *start, size_t size)
{
    const struct iovec  iov = { (void *) start, size };

    return nxt_unit_websocket_sendv(req, opcode, last, &iov, 1);
}


/*
 * Sends one websocket frame whose payload is gathered from 'iov';
 * the frame header plus payload are split over as many outgoing
 * buffers as needed.
 */
int
nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode,
    uint8_t last, const struct iovec *iov, int iovcnt)
{
    int                           i, rc;
    size_t                        l, copy;
    uint32_t                      payload_len, buf_size, alloc_size;
    const uint8_t                 *b;
    nxt_unit_buf_t                *buf;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_websocket_header_t        *wh;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    payload_len = 0;

    for (i = 0; i < iovcnt; i++) {
        payload_len += iov[i].iov_len;
    }

    /* 10 bytes is the maximum websocket frame header size here. */
    buf_size = 10 + payload_len;
    alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);

    rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                   &req->response_port,
                                   alloc_size, alloc_size,
                                   &mmap_buf, local_buf);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        return rc;
    }

    buf = &mmap_buf.buf;

    buf->start[0] = 0;
    buf->start[1] = 0;

    buf_size -= buf->end - buf->start;

    wh = (void *) buf->free;

    buf->free = nxt_websocket_frame_init(wh, payload_len);
    wh->fin = last;
    wh->opcode = opcode;

    for (i = 0; i < iovcnt; i++) {
        b = iov[i].iov_base;
        l = iov[i].iov_len;

        while (l > 0) {
            copy = buf->end - buf->free;
            copy = nxt_min(l, copy);

            buf->free = nxt_cpymem(buf->free, b, copy);
            b += copy;
            l -= copy;

            if (l > 0) {
                /* Current buffer full: flush it and allocate the next. */
                if (nxt_fast_path(buf->free > buf->start)) {
                    rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream,
                                                &mmap_buf, 0);

                    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
                        return rc;
                    }
                }

                alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);

                rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                               &req->response_port,
                                               alloc_size, alloc_size,
                                               &mmap_buf, local_buf);
                if (nxt_slow_path(rc != NXT_UNIT_OK)) {
                    return rc;
                }

                buf_size -= buf->end - buf->start;
            }
        }
    }

    if (buf->free > buf->start) {
        rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream,
                                    &mmap_buf, 0);
    }

    return rc;
}


/*
 * Reads up to 'size' bytes of websocket frame payload into 'dst' and
 * unmasks them in place when the frame carries a mask.
 */
ssize_t
nxt_unit_websocket_read(nxt_unit_websocket_frame_t *ws, void *dst,
    size_t size)
{
    ssize_t   res;
    uint8_t   *b;
    uint64_t  i, d;

    res = nxt_unit_buf_read(&ws->content_buf,
&ws->content_length,
                            dst, size);

    if (ws->mask == NULL) {
        return res;
    }

    b = dst;
    /* Mask offset of the first byte just read within the payload. */
    d = (ws->payload_len - ws->content_length - res) % 4;

    for (i = 0; i < (uint64_t) res; i++) {
        b[i] ^= ws->mask[ (i + d) % 4 ];
    }

    return res;
}


/*
 * Detaches the frame from its shared-memory buffer by copying the data
 * into a private malloc'd buffer, so the frame can outlive message
 * processing.  No-op if the buffer is already private or mmap-backed.
 */
int
nxt_unit_websocket_retain(nxt_unit_websocket_frame_t *ws)
{
    char                             *b;
    size_t                           size;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);

    if (ws_impl->buf->free_ptr != NULL || ws_impl->buf->hdr != NULL) {
        return NXT_UNIT_OK;
    }

    size = ws_impl->buf->buf.end - ws_impl->buf->buf.start;

    b = malloc(size);
    if (nxt_slow_path(b == NULL)) {
        return NXT_UNIT_ERROR;
    }

    memcpy(b, ws_impl->buf->buf.start, size);

    ws_impl->buf->buf.start = b;
    ws_impl->buf->buf.free = b;
    ws_impl->buf->buf.end = b + size;

    /* Ownership of 'b' passes to the buffer; freed with it. */
    ws_impl->buf->free_ptr = b;

    return NXT_UNIT_OK;
}


/* Releases a processed websocket frame back to the context. */
void
nxt_unit_websocket_done(nxt_unit_websocket_frame_t *ws)
{
    nxt_unit_websocket_frame_release(ws);
}


/*
 * Finds (or creates) an outgoing shared-memory segment with at least
 * 'min_n' free chunks, reserving up to '*n' contiguous chunks starting at
 * '*c'.  May block waiting for a SHM_ACK when over the mmap limit.
 * Called with no locks held; takes process->outgoing.mutex internally.
 */
static nxt_port_mmap_header_t *
nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process,
    nxt_unit_port_id_t *port_id, nxt_chunk_id_t *c, int *n, int min_n)
{
    int                     res, nchunks, i;
    uint32_t                outgoing_size;
    nxt_unit_mmap_t         *mm, *mm_end;
    nxt_unit_impl_t         *lib;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&process->outgoing.mutex);

retry:

    outgoing_size = process->outgoing.size;

    mm_end = process->outgoing.elts + outgoing_size;

    for (mm = process->outgoing.elts; mm < mm_end; mm++) {
        hdr = mm->hdr;

        /* Skip segments pinned to a different destination port. */
        if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port_id->id) {
            continue;
        }

        *c = 0;

        while
(nxt_port_mmap_get_free_chunk(hdr->free_map, c)) {
            nchunks = 1;

            /* Try to grow the reservation to '*n' contiguous chunks. */
            while (nchunks < *n) {
                res = nxt_port_mmap_chk_set_chunk_busy(hdr->free_map,
                                                       *c + nchunks);

                if (res == 0) {
                    /* Run broken; accept it if it already meets min_n. */
                    if (nchunks >= min_n) {
                        *n = nchunks;

                        goto unlock;
                    }

                    /* Too short: release the partial run and scan on. */
                    for (i = 0; i < nchunks; i++) {
                        nxt_port_mmap_set_chunk_free(hdr->free_map, *c + i);
                    }

                    *c += nchunks + 1;
                    nchunks = 0;
                    break;
                }

                nchunks++;
            }

            if (nchunks >= min_n) {
                *n = nchunks;

                goto unlock;
            }
        }

        /* Segment exhausted: flag out-of-shared-memory on it. */
        hdr->oosm = 1;
    }

    if (outgoing_size >= lib->shm_mmap_limit) {
        /* Cannot allocate more shared memory. */
        pthread_mutex_unlock(&process->outgoing.mutex);

        if (min_n == 0) {
            *n = 0;
        }

        if (nxt_slow_path(process->outgoing.allocated_chunks + min_n
                          >= lib->shm_mmap_limit * PORT_MMAP_CHUNK_COUNT))
        {
            /* Memory allocated by application, but not send to router. */
            return NULL;
        }

        /* Notify router about OOSM condition. */

        res = nxt_unit_send_oosm(ctx, port_id);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            return NULL;
        }

        /* Return if caller can handle OOSM condition.  Non-blocking mode. */

        if (min_n == 0) {
            return NULL;
        }

        nxt_unit_debug(ctx, "oosm: waiting for ACK");

        res = nxt_unit_wait_shm_ack(ctx);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            return NULL;
        }

        nxt_unit_debug(ctx, "oosm: retry");

        pthread_mutex_lock(&process->outgoing.mutex);

        goto retry;
    }

    /* Under the limit: create a fresh segment with '*n' chunks busy. */
    *c = 0;
    hdr = nxt_unit_new_mmap(ctx, process, port_id, *n);

unlock:

    nxt_atomic_fetch_add(&process->outgoing.allocated_chunks, *n);

    nxt_unit_debug(ctx, "process %d allocated_chunks %d",
                   process->pid,
                   (int) process->outgoing.allocated_chunks);

    pthread_mutex_unlock(&process->outgoing.mutex);

    return hdr;
}


/*
 * Sends an out-of-shared-memory notification to the router so it
 * releases chunks and replies with SHM_ACK.
 */
static int
nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id)
{
    ssize_t          res;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_OOSM;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Blocks reading port messages until a SHM_ACK arrives; other messages
 * are queued on the context's pending-read list for later processing.
 * Returns NXT_UNIT_ERROR on read failure or QUIT.
 */
static int
nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx)
{
    nxt_port_msg_t       *port_msg;
    nxt_unit_ctx_impl_t  *ctx_impl;
    nxt_unit_read_buf_t  *rbuf;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    while (1) {
        rbuf = nxt_unit_read_buf_get(ctx);
        if (nxt_slow_path(rbuf == NULL)) {
            return NXT_UNIT_ERROR;
        }

        nxt_unit_read_buf(ctx, rbuf);

        if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) {
            nxt_unit_read_buf_release(ctx, rbuf);

            return NXT_UNIT_ERROR;
        }

        port_msg =
(nxt_port_msg_t *) rbuf->buf; 3042 3043 if (port_msg->type == _NXT_PORT_MSG_SHM_ACK) { 3044 nxt_unit_read_buf_release(ctx, rbuf); 3045 3046 break; 3047 } 3048 3049 pthread_mutex_lock(&ctx_impl->mutex); 3050 3051 *ctx_impl->pending_read_tail = rbuf; 3052 ctx_impl->pending_read_tail = &rbuf->next; 3053 rbuf->next = NULL; 3054 3055 pthread_mutex_unlock(&ctx_impl->mutex); 3056 3057 if (port_msg->type == _NXT_PORT_MSG_QUIT) { 3058 nxt_unit_debug(ctx, "oosm: quit received"); 3059 3060 return NXT_UNIT_ERROR; 3061 } 3062 } 3063 3064 return NXT_UNIT_OK; 3065} 3066 3067 3068static nxt_unit_mmap_t * 3069nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i) 3070{ 3071 uint32_t cap; 3072 3073 cap = mmaps->cap; 3074 3075 if (cap == 0) { 3076 cap = i + 1; 3077 } 3078 3079 while (i + 1 > cap) { 3080 3081 if (cap < 16) { 3082 cap = cap * 2; 3083 3084 } else { 3085 cap = cap + cap / 2; 3086 } 3087 } 3088 3089 if (cap != mmaps->cap) { 3090 3091 mmaps->elts = realloc(mmaps->elts, cap * sizeof(*mmaps->elts)); 3092 if (nxt_slow_path(mmaps->elts == NULL)) { 3093 return NULL; 3094 } 3095 3096 memset(mmaps->elts + mmaps->cap, 0, 3097 sizeof(*mmaps->elts) * (cap - mmaps->cap)); 3098 3099 mmaps->cap = cap; 3100 } 3101 3102 if (i + 1 > mmaps->size) { 3103 mmaps->size = i + 1; 3104 } 3105 3106 return mmaps->elts + i; 3107} 3108 3109 3110static nxt_port_mmap_header_t * 3111nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, 3112 nxt_unit_port_id_t *port_id, int n) 3113{ 3114 int i, fd, rc; 3115 void *mem; 3116 char name[64]; 3117 nxt_unit_mmap_t *mm; 3118 nxt_unit_impl_t *lib; 3119 nxt_port_mmap_header_t *hdr; 3120 3121 lib = process->lib; 3122 3123 mm = nxt_unit_mmap_at(&process->outgoing, process->outgoing.size); 3124 if (nxt_slow_path(mm == NULL)) { 3125 nxt_unit_warn(ctx, "failed to add mmap to outgoing array"); 3126 3127 return NULL; 3128 } 3129 3130 snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p", 3131 lib->pid, (void *) pthread_self()); 3132 3133#if 
(NXT_HAVE_MEMFD_CREATE) 3134 3135 fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC); 3136 if (nxt_slow_path(fd == -1)) { 3137 nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name, 3138 strerror(errno), errno); 3139 3140 goto remove_fail; 3141 } 3142 3143 nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd); 3144 3145#elif (NXT_HAVE_SHM_OPEN_ANON) 3146 3147 fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR); 3148 if (nxt_slow_path(fd == -1)) { 3149 nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)", 3150 strerror(errno), errno); 3151 3152 goto remove_fail; 3153 } 3154 3155#elif (NXT_HAVE_SHM_OPEN) 3156 3157 /* Just in case. */ 3158 shm_unlink(name); 3159 3160 fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR); 3161 if (nxt_slow_path(fd == -1)) { 3162 nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name, 3163 strerror(errno), errno); 3164 3165 goto remove_fail; 3166 } 3167 3168 if (nxt_slow_path(shm_unlink(name) == -1)) { 3169 nxt_unit_warn(ctx, "shm_unlink(%s) failed: %s (%d)", name, 3170 strerror(errno), errno); 3171 } 3172 3173#else 3174 3175#error No working shared memory implementation. 
3176 3177#endif 3178 3179 if (nxt_slow_path(ftruncate(fd, PORT_MMAP_SIZE) == -1)) { 3180 nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd, 3181 strerror(errno), errno); 3182 3183 goto remove_fail; 3184 } 3185 3186 mem = mmap(NULL, PORT_MMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 3187 if (nxt_slow_path(mem == MAP_FAILED)) { 3188 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", fd, 3189 strerror(errno), errno); 3190 3191 goto remove_fail; 3192 } 3193 3194 mm->hdr = mem; 3195 hdr = mem; 3196 3197 memset(hdr->free_map, 0xFFU, sizeof(hdr->free_map)); 3198 memset(hdr->free_tracking_map, 0xFFU, sizeof(hdr->free_tracking_map)); 3199 3200 hdr->id = process->outgoing.size - 1; 3201 hdr->src_pid = lib->pid; 3202 hdr->dst_pid = process->pid; 3203 hdr->sent_over = port_id->id; 3204 3205 /* Mark first n chunk(s) as busy */ 3206 for (i = 0; i < n; i++) { 3207 nxt_port_mmap_set_chunk_busy(hdr->free_map, i); 3208 } 3209 3210 /* Mark as busy chunk followed the last available chunk. */ 3211 nxt_port_mmap_set_chunk_busy(hdr->free_map, PORT_MMAP_CHUNK_COUNT); 3212 nxt_port_mmap_set_chunk_busy(hdr->free_tracking_map, PORT_MMAP_CHUNK_COUNT); 3213 3214 pthread_mutex_unlock(&process->outgoing.mutex); 3215 3216 rc = nxt_unit_send_mmap(ctx, port_id, fd); 3217 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3218 munmap(mem, PORT_MMAP_SIZE); 3219 hdr = NULL; 3220 3221 } else { 3222 nxt_unit_debug(ctx, "new mmap #%"PRIu32" created for %d -> %d", 3223 hdr->id, (int) lib->pid, (int) process->pid); 3224 } 3225 3226 close(fd); 3227 3228 pthread_mutex_lock(&process->outgoing.mutex); 3229 3230 if (nxt_fast_path(hdr != NULL)) { 3231 return hdr; 3232 } 3233 3234remove_fail: 3235 3236 process->outgoing.size--; 3237 3238 return NULL; 3239} 3240 3241 3242static int 3243nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int fd) 3244{ 3245 ssize_t res; 3246 nxt_port_msg_t msg; 3247 nxt_unit_impl_t *lib; 3248 union { 3249 struct cmsghdr cm; 3250 char space[CMSG_SPACE(sizeof(int))]; 
    } cmsg;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_MMAP;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    /*
     * Fill all padding fields with 0.
     * Code in Go 1.11 validate cmsghdr using padding field as part of len.
     * See Cmsghdr definition and socketControlMessageHeaderAndData function.
     */
    memset(&cmsg, 0, sizeof(cmsg));

    cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
    cmsg.cm.cmsg_level = SOL_SOCKET;
    cmsg.cm.cmsg_type = SCM_RIGHTS;

    /*
     * memcpy() is used instead of simple
     *   *(int *) CMSG_DATA(&cmsg.cm) = fd;
     * because GCC 4.4 with -O2/3/s optimization may issue a warning:
     *   dereferencing type-punned pointer will break strict-aliasing rules
     *
     * Fortunately, GCC with -O1 compiles this nxt_memcpy()
     * in the same simple assignment as in the code above.
     */
    memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int));

    res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg),
                                   &cmsg, sizeof(cmsg));
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Prepares an outgoing buffer of 'size' bytes in 'mmap_buf'.  Small
 * payloads (<= NXT_UNIT_MAX_PLAIN_SIZE) use a plain buffer (the caller's
 * local_buf or malloc); larger ones reserve shared-memory chunks.
 * Returns NXT_UNIT_OK / NXT_UNIT_ERROR; may return OK with an empty
 * buffer when min_size is 0 and no memory is available (OOSM).
 */
static int
nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process,
    nxt_unit_port_id_t *port_id, uint32_t size, uint32_t min_size,
    nxt_unit_mmap_buf_t *mmap_buf, char *local_buf)
{
    int                     nchunks, min_nchunks;
    nxt_chunk_id_t          c;
    nxt_port_mmap_header_t  *hdr;

    if (size <= NXT_UNIT_MAX_PLAIN_SIZE) {
        if (local_buf != NULL) {
            mmap_buf->free_ptr = NULL;
            mmap_buf->plain_ptr = local_buf;

        } else {
            /* Room for the port message header precedes the data. */
            mmap_buf->free_ptr = malloc(size + sizeof(nxt_port_msg_t));
            if (nxt_slow_path(mmap_buf->free_ptr == NULL)) {
                return NXT_UNIT_ERROR;
            }

            mmap_buf->plain_ptr = mmap_buf->free_ptr;
        }

        mmap_buf->hdr = NULL;
        mmap_buf->buf.start = mmap_buf->plain_ptr + sizeof(nxt_port_msg_t);
        mmap_buf->buf.free = mmap_buf->buf.start;
        mmap_buf->buf.end = mmap_buf->buf.start + size;
        mmap_buf->port_id = *port_id;
        mmap_buf->process = process;

        nxt_unit_debug(ctx, "outgoing plain buffer allocation: (%p, %d)",
                       mmap_buf->buf.start, (int) size);

        return NXT_UNIT_OK;
    }

    nchunks = (size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;
    min_nchunks = (min_size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;

    hdr = nxt_unit_mmap_get(ctx, process, port_id, &c, &nchunks, min_nchunks);
    if (nxt_slow_path(hdr == NULL)) {
        if (nxt_fast_path(min_nchunks == 0 && nchunks == 0)) {
            /* Non-blocking OOSM: report success with an empty buffer. */
            mmap_buf->hdr = NULL;
            mmap_buf->buf.start = NULL;
            mmap_buf->buf.free = NULL;
            mmap_buf->buf.end = NULL;
            mmap_buf->free_ptr = NULL;

            return NXT_UNIT_OK;
        }

        return NXT_UNIT_ERROR;
    }

mmap_buf->hdr = hdr;
    mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c);
    mmap_buf->buf.free = mmap_buf->buf.start;
    mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE;
    mmap_buf->port_id = *port_id;
    mmap_buf->process = process;
    mmap_buf->free_ptr = NULL;
    mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_unit_debug(ctx, "outgoing mmap allocation: (%d,%d,%d)",
                   (int) hdr->id, (int) c,
                   (int) (nchunks * PORT_MMAP_CHUNK_SIZE));

    return NXT_UNIT_OK;
}


/*
 * Maps a shared-memory segment received from process 'pid' (descriptor
 * 'fd' from an MMAP message) and registers it in that process's incoming
 * mmap array, validating the src/dst pids recorded in the header.
 */
static int
nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd)
{
    int                     rc;
    void                    *mem;
    struct stat             mmap_stat;
    nxt_unit_mmap_t         *mm;
    nxt_unit_impl_t         *lib;
    nxt_unit_process_t      *process;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_unit_debug(ctx, "incoming_mmap: fd %d from process %d", fd, (int) pid);

    pthread_mutex_lock(&lib->mutex);

    process = nxt_unit_process_find(ctx, pid, 0);

    pthread_mutex_unlock(&lib->mutex);

    if (nxt_slow_path(process == NULL)) {
        nxt_unit_warn(ctx, "incoming_mmap: process %d not found, fd %d",
                      (int) pid, fd);

        return NXT_UNIT_ERROR;
    }

    rc = NXT_UNIT_ERROR;

    if (fstat(fd, &mmap_stat) == -1) {
        nxt_unit_warn(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd,
                      strerror(errno), errno);

        goto fail;
    }

    mem = mmap(NULL, mmap_stat.st_size, PROT_READ | PROT_WRITE,
               MAP_SHARED, fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_warn(ctx, "incoming_mmap: mmap() failed: %s (%d)",
                      strerror(errno), errno);

        goto fail;
    }

    hdr = mem;

    if (nxt_slow_path(hdr->src_pid != pid || hdr->dst_pid != lib->pid)) {

        nxt_unit_warn(ctx, "incoming_mmap: unexpected pid in mmap header "
                      "detected: %d != %d or %d != %d", (int) hdr->src_pid,
                      (int) pid, (int) hdr->dst_pid, (int) lib->pid);

        munmap(mem, PORT_MMAP_SIZE);

        goto fail;
    }

    pthread_mutex_lock(&process->incoming.mutex);

    mm = nxt_unit_mmap_at(&process->incoming, hdr->id);
    if (nxt_slow_path(mm == NULL)) {
        nxt_unit_warn(ctx, "incoming_mmap: failed to add to incoming array");

        munmap(mem, PORT_MMAP_SIZE);

    } else {
        mm->hdr = hdr;

        /* 0xFFFF: the segment is no longer pinned to a single port. */
        hdr->sent_over = 0xFFFFu;

        rc = NXT_UNIT_OK;
    }

    pthread_mutex_unlock(&process->incoming.mutex);

fail:

    /* Drop the reference taken by nxt_unit_process_find(). */
    nxt_unit_process_use(ctx, process, -1);

    return rc;
}


/* Initializes an empty mmap array with its mutex. */
static void
nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps)
{
    pthread_mutex_init(&mmaps->mutex, NULL);

    mmaps->size = 0;
    mmaps->cap = 0;
    mmaps->elts = NULL;
    mmaps->allocated_chunks = 0;
}


/*
 * Adjusts the process reference count by 'i'; destroys the process (and
 * both mmap arrays) when the last reference is released.
 */
static void
nxt_unit_process_use(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, int i)
{
    long  c;

    c = nxt_atomic_fetch_add(&process->use_count, i);

    if (i < 0 && c == -i) {
        nxt_unit_debug(ctx, "destroy process #%d", (int) process->pid);

        nxt_unit_mmaps_destroy(&process->incoming);
        nxt_unit_mmaps_destroy(&process->outgoing);

        free(process);
    }
}


/* Unmaps all segments, frees the array, and destroys the mutex. */
static void
nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps)
{
    nxt_unit_mmap_t  *mm, *end;

    if (mmaps->elts != NULL) {
        end = mmaps->elts + mmaps->size;

        for (mm = mmaps->elts; mm < end; mm++) {
            munmap(mm->hdr, PORT_MMAP_SIZE);
        }

        free(mmaps->elts);
    }

    pthread_mutex_destroy(&mmaps->mutex);
}


/*
 * Looks up an incoming segment header by id.  Caller must hold
 * process->incoming.mutex.  Returns NULL when the id is out of range.
 */
static nxt_port_mmap_header_t *
nxt_unit_get_incoming_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process,
    uint32_t id)
{
    nxt_port_mmap_header_t  *hdr;

    if (nxt_fast_path(process->incoming.size > id)) {
        hdr =
process->incoming.elts[id].hdr;

    } else {
        hdr = NULL;
    }

    return hdr;
}


/*
 * Consumes the tracking prologue of a received message and checks whether
 * the stream was cancelled by the sender.  Returns non-zero when the
 * message should be processed, 0 when it must be dropped.
 */
static int
nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    int                           rc;
    nxt_chunk_id_t                c;
    nxt_unit_process_t            *process;
    nxt_port_mmap_header_t        *hdr;
    nxt_port_mmap_tracking_msg_t  *tracking_msg;

    if (recv_msg->size < (int) sizeof(nxt_port_mmap_tracking_msg_t)) {
        nxt_unit_warn(ctx, "#%"PRIu32": tracking_read: too small message (%d)",
                      recv_msg->stream, (int) recv_msg->size);

        return 0;
    }

    tracking_msg = recv_msg->start;

    /* Step past the tracking prologue. */
    recv_msg->start = tracking_msg + 1;
    recv_msg->size -= sizeof(nxt_port_mmap_tracking_msg_t);

    process = nxt_unit_msg_get_process(ctx, recv_msg);
    if (nxt_slow_path(process == NULL)) {
        return 0;
    }

    pthread_mutex_lock(&process->incoming.mutex);

    hdr = nxt_unit_get_incoming_mmap(ctx, process, tracking_msg->mmap_id);
    if (nxt_slow_path(hdr == NULL)) {
        pthread_mutex_unlock(&process->incoming.mutex);

        nxt_unit_warn(ctx, "#%"PRIu32": tracking_read: "
                      "invalid mmap id %d,%"PRIu32,
                      recv_msg->stream, (int) process->pid,
                      tracking_msg->mmap_id);

        return 0;
    }

    c = tracking_msg->tracking_id;
    /* rc == 0 means the sender already cancelled this stream. */
    rc = nxt_atomic_cmp_set(hdr->tracking + c, recv_msg->stream, 0);

    if (rc == 0) {
        nxt_unit_debug(ctx, "#%"PRIu32": tracking cancelled",
                       recv_msg->stream);

        nxt_port_mmap_set_chunk_free(hdr->free_tracking_map, c);
    }

    pthread_mutex_unlock(&process->incoming.mutex);

    return rc;
}


/*
 * Translates the nxt_port_mmap_msg_t array of a received message into a
 * chain of incoming buffers pointing at the shared-memory chunks, and
 * redirects recv_msg->start/size to the first chunk's data.
 */
static int
nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    void                    *start;
    uint32_t                size;
    nxt_unit_process_t      *process;
    nxt_unit_mmap_buf_t     *b, **incoming_tail;
    nxt_port_mmap_msg_t     *mmap_msg, *end;
    nxt_port_mmap_header_t  *hdr;

    if
(nxt_slow_path(recv_msg->size < sizeof(nxt_port_mmap_msg_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: too small message (%d)",
                      recv_msg->stream, (int) recv_msg->size);

        return NXT_UNIT_ERROR;
    }

    process = nxt_unit_msg_get_process(ctx, recv_msg);
    if (nxt_slow_path(process == NULL)) {
        return NXT_UNIT_ERROR;
    }

    mmap_msg = recv_msg->start;
    end = nxt_pointer_to(recv_msg->start, recv_msg->size);

    incoming_tail = &recv_msg->incoming_buf;

    /* First pass: allocate one buffer per mmap message. */
    for (; mmap_msg < end; mmap_msg++) {
        b = nxt_unit_mmap_buf_get(ctx);
        if (nxt_slow_path(b == NULL)) {
            nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: failed to allocate buf",
                          recv_msg->stream);

            return NXT_UNIT_ERROR;
        }

        nxt_unit_mmap_buf_insert(incoming_tail, b);
        incoming_tail = &b->next;
    }

    b = recv_msg->incoming_buf;
    mmap_msg = recv_msg->start;

    pthread_mutex_lock(&process->incoming.mutex);

    /* Second pass: resolve each mmap id/chunk into buffer pointers. */
    for (; mmap_msg < end; mmap_msg++) {
        hdr = nxt_unit_get_incoming_mmap(ctx, process, mmap_msg->mmap_id);
        if (nxt_slow_path(hdr == NULL)) {
            pthread_mutex_unlock(&process->incoming.mutex);

            nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: "
                          "invalid mmap id %d,%"PRIu32,
                          recv_msg->stream, (int) process->pid,
                          mmap_msg->mmap_id);

            return NXT_UNIT_ERROR;
        }

        start = nxt_port_mmap_chunk_start(hdr, mmap_msg->chunk_id);
        size = mmap_msg->size;

        /* Point the message payload at the first chunk's data. */
        if (recv_msg->start == mmap_msg) {
            recv_msg->start = start;
            recv_msg->size = size;
        }

        b->buf.start = start;
        b->buf.free = start;
        b->buf.end = b->buf.start + size;
        b->hdr = hdr;
        b->process = process;

        b = b->next;

        nxt_unit_debug(ctx, "#%"PRIu32": mmap_read: [%p,%d] %d->%d,(%d,%d,%d)",
                       recv_msg->stream,
                       start, (int) size,
                       (int) hdr->src_pid, (int) hdr->dst_pid,
                       (int) hdr->id, (int) mmap_msg->chunk_id,
                       (int) mmap_msg->size);
    }

    pthread_mutex_unlock(&process->incoming.mutex);

    return NXT_UNIT_OK;
}


/*
 * Returns the chunks covering [start, start + size) to the segment's free
 * map, updates the outgoing allocated-chunks counter for our own
 * segments, and sends SHM_ACK to the source when it was waiting (oosm).
 */
static void
nxt_unit_mmap_release(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process, nxt_port_mmap_header_t *hdr,
    void *start, uint32_t size)
{
    int              freed_chunks;
    u_char           *p, *end;
    nxt_chunk_id_t   c;
    nxt_unit_impl_t  *lib;

    /* Poison released memory to catch use-after-release. */
    memset(start, 0xA5, size);

    p = start;
    end = p + size;
    c = nxt_port_mmap_chunk_id(hdr, p);
    freed_chunks = 0;

    while (p < end) {
        nxt_port_mmap_set_chunk_free(hdr->free_map, c);

        p += PORT_MMAP_CHUNK_SIZE;
        c++;
        freed_chunks++;
    }

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    if (hdr->src_pid == lib->pid && freed_chunks != 0) {
        nxt_atomic_fetch_add(&process->outgoing.allocated_chunks,
                             -freed_chunks);

        nxt_unit_debug(ctx, "process %d allocated_chunks %d",
                       process->pid,
                       (int) process->outgoing.allocated_chunks);
    }

    /* Only one SHM_ACK per oosm flag: cmp_set clears it atomically. */
    if (hdr->dst_pid == lib->pid
        && freed_chunks != 0
        && nxt_atomic_cmp_set(&hdr->oosm, 1, 0))
    {
        nxt_unit_send_shm_ack(ctx, hdr->src_pid);
    }
}


/*
 * Sends a SHM_ACK message to process 'pid' (port 0), unblocking a sender
 * waiting in nxt_unit_wait_shm_ack().
 */
static int
nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid)
{
    ssize_t             res;
    nxt_port_msg_t      msg;
    nxt_unit_impl_t     *lib;
    nxt_unit_port_id_t  port_id;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_unit_port_id_init(&port_id, pid, 0);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_SHM_ACK;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    res = lib->callbacks.port_send(ctx, &port_id, &msg, sizeof(msg), NULL, 0);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


static nxt_int_t
nxt_unit_lvlhsh_pid_test(nxt_lvlhsh_query_t *lhq, void *data)
{
    nxt_process_t  *process;

    process = data;

    /* Hash-table key comparison: match on pid. */
    if (lhq->key.length == sizeof(pid_t)
        && *(pid_t *) lhq->key.start == process->pid)
    {
        return NXT_OK;
    }

    return NXT_DECLINED;
}


static const nxt_lvlhsh_proto_t  lvlhsh_processes_proto  nxt_aligned(64) = {
    NXT_LVLHSH_DEFAULT,
    nxt_unit_lvlhsh_pid_test,
    nxt_lvlhsh_alloc,
    nxt_lvlhsh_free,
};


/* Fills a hash query keyed by pid for the processes table. */
static inline void
nxt_unit_process_lhq_pid(nxt_lvlhsh_query_t *lhq, pid_t *pid)
{
    lhq->key_hash = nxt_murmur_hash2(pid, sizeof(*pid));
    lhq->key.length = sizeof(*pid);
    lhq->key.start = (u_char *) pid;
    lhq->proto = &lvlhsh_processes_proto;
}


/*
 * Returns the process object for 'pid' with an extra reference, creating
 * and registering a new one when absent.  Caller is expected to hold
 * lib->mutex (the hash table is not otherwise protected here).
 */
static nxt_unit_process_t *
nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid)
{
    nxt_unit_impl_t     *lib;
    nxt_unit_process_t  *process;
    nxt_lvlhsh_query_t  lhq;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_unit_process_lhq_pid(&lhq, &pid);

    if (nxt_lvlhsh_find(&lib->processes, &lhq) == NXT_OK) {
        process = lhq.value;
        nxt_unit_process_use(ctx, process, 1);

        return process;
    }

    process = malloc(sizeof(nxt_unit_process_t));
    if (nxt_slow_path(process == NULL)) {
        nxt_unit_warn(ctx, "failed to allocate process for #%d", (int) pid);

        return NULL;
    }

    process->pid = pid;
    process->use_count = 1;
    process->next_port_id = 0;
    process->lib = lib;

    nxt_queue_init(&process->ports);

    nxt_unit_mmaps_init(&process->incoming);
    nxt_unit_mmaps_init(&process->outgoing);

    lhq.replace = 0;
    lhq.value = process;

    switch (nxt_lvlhsh_insert(&lib->processes, &lhq)) {

    case NXT_OK:
        break;

    default:
        nxt_unit_warn(ctx, "process %d insert failed", (int) pid);

pthread_mutex_destroy(&process->outgoing.mutex); 3821 pthread_mutex_destroy(&process->incoming.mutex); 3822 free(process); 3823 process = NULL; 3824 break; 3825 } 3826 3827 nxt_unit_process_use(ctx, process, 1); 3828 3829 return process; 3830} 3831 3832 3833static nxt_unit_process_t * 3834nxt_unit_process_find(nxt_unit_ctx_t *ctx, pid_t pid, int remove) 3835{ 3836 int rc; 3837 nxt_unit_impl_t *lib; 3838 nxt_unit_process_t *process; 3839 nxt_lvlhsh_query_t lhq; 3840 3841 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3842 3843 nxt_unit_process_lhq_pid(&lhq, &pid); 3844 3845 if (remove) { 3846 rc = nxt_lvlhsh_delete(&lib->processes, &lhq); 3847 3848 } else { 3849 rc = nxt_lvlhsh_find(&lib->processes, &lhq); 3850 } 3851 3852 if (rc == NXT_OK) { 3853 process = lhq.value; 3854 3855 if (!remove) { 3856 nxt_unit_process_use(ctx, process, 1); 3857 } 3858 3859 return process; 3860 } 3861 3862 return NULL; 3863} 3864 3865 3866static nxt_unit_process_t * 3867nxt_unit_process_pop_first(nxt_unit_impl_t *lib) 3868{ 3869 return nxt_lvlhsh_retrieve(&lib->processes, &lvlhsh_processes_proto, NULL); 3870} 3871 3872 3873int 3874nxt_unit_run(nxt_unit_ctx_t *ctx) 3875{ 3876 int rc; 3877 nxt_unit_impl_t *lib; 3878 3879 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3880 rc = NXT_UNIT_OK; 3881 3882 while (nxt_fast_path(lib->online)) { 3883 rc = nxt_unit_run_once(ctx); 3884 3885 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3886 break; 3887 } 3888 } 3889 3890 return rc; 3891} 3892 3893 3894int 3895nxt_unit_run_once(nxt_unit_ctx_t *ctx) 3896{ 3897 int rc; 3898 nxt_unit_ctx_impl_t *ctx_impl; 3899 nxt_unit_read_buf_t *rbuf; 3900 3901 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 3902 3903 pthread_mutex_lock(&ctx_impl->mutex); 3904 3905 if (ctx_impl->pending_read_head != NULL) { 3906 rbuf = ctx_impl->pending_read_head; 3907 ctx_impl->pending_read_head = rbuf->next; 3908 3909 if (ctx_impl->pending_read_tail == &rbuf->next) { 3910 ctx_impl->pending_read_tail = 
&ctx_impl->pending_read_head; 3911 } 3912 3913 pthread_mutex_unlock(&ctx_impl->mutex); 3914 3915 } else { 3916 rbuf = nxt_unit_read_buf_get_impl(ctx_impl); 3917 if (nxt_slow_path(rbuf == NULL)) { 3918 return NXT_UNIT_ERROR; 3919 } 3920 3921 nxt_unit_read_buf(ctx, rbuf); 3922 } 3923 3924 if (nxt_fast_path(rbuf->size > 0)) { 3925 rc = nxt_unit_process_msg(ctx, &ctx_impl->read_port_id, 3926 rbuf->buf, rbuf->size, 3927 rbuf->oob, sizeof(rbuf->oob)); 3928 3929#if (NXT_DEBUG) 3930 memset(rbuf->buf, 0xAC, rbuf->size); 3931#endif 3932 3933 } else { 3934 rc = NXT_UNIT_ERROR; 3935 } 3936 3937 nxt_unit_read_buf_release(ctx, rbuf); 3938 3939 return rc; 3940} 3941 3942 3943static void 3944nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) 3945{ 3946 nxt_unit_impl_t *lib; 3947 nxt_unit_ctx_impl_t *ctx_impl; 3948 3949 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 3950 3951 memset(rbuf->oob, 0, sizeof(struct cmsghdr)); 3952 3953 if (ctx_impl->read_port_fd != -1) { 3954 rbuf->size = nxt_unit_port_recv(ctx, ctx_impl->read_port_fd, 3955 rbuf->buf, sizeof(rbuf->buf), 3956 rbuf->oob, sizeof(rbuf->oob)); 3957 3958 } else { 3959 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3960 3961 rbuf->size = lib->callbacks.port_recv(ctx, &ctx_impl->read_port_id, 3962 rbuf->buf, sizeof(rbuf->buf), 3963 rbuf->oob, sizeof(rbuf->oob)); 3964 } 3965} 3966 3967 3968void 3969nxt_unit_done(nxt_unit_ctx_t *ctx) 3970{ 3971 nxt_unit_impl_t *lib; 3972 nxt_unit_process_t *process; 3973 nxt_unit_ctx_impl_t *ctx_impl; 3974 3975 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3976 3977 nxt_queue_each(ctx_impl, &lib->contexts, nxt_unit_ctx_impl_t, link) { 3978 3979 nxt_unit_ctx_free(&ctx_impl->ctx); 3980 3981 } nxt_queue_loop; 3982 3983 for ( ;; ) { 3984 pthread_mutex_lock(&lib->mutex); 3985 3986 process = nxt_unit_process_pop_first(lib); 3987 if (process == NULL) { 3988 pthread_mutex_unlock(&lib->mutex); 3989 3990 break; 3991 } 3992 3993 nxt_unit_remove_process(ctx, 
process); 3994 } 3995 3996 pthread_mutex_destroy(&lib->mutex); 3997 3998 free(lib); 3999} 4000 4001 4002nxt_unit_ctx_t * 4003nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) 4004{ 4005 int rc, fd; 4006 nxt_unit_impl_t *lib; 4007 nxt_unit_port_id_t new_port_id; 4008 nxt_unit_ctx_impl_t *new_ctx; 4009 4010 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4011 4012 new_ctx = malloc(sizeof(nxt_unit_ctx_impl_t) + lib->request_data_size); 4013 if (nxt_slow_path(new_ctx == NULL)) { 4014 nxt_unit_warn(ctx, "failed to allocate context"); 4015 4016 return NULL; 4017 } 4018 4019 rc = nxt_unit_create_port(ctx, &new_port_id, &fd); 4020 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4021 free(new_ctx); 4022 4023 return NULL; 4024 } 4025 4026 rc = nxt_unit_send_port(ctx, &lib->ready_port_id, &new_port_id, fd); 4027 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4028 lib->callbacks.remove_port(ctx, &new_port_id); 4029 4030 close(fd); 4031 4032 free(new_ctx); 4033 4034 return NULL; 4035 } 4036 4037 close(fd); 4038 4039 rc = nxt_unit_ctx_init(lib, new_ctx, data); 4040 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4041 lib->callbacks.remove_port(ctx, &new_port_id); 4042 4043 free(new_ctx); 4044 4045 return NULL; 4046 } 4047 4048 new_ctx->read_port_id = new_port_id; 4049 4050 return &new_ctx->ctx; 4051} 4052 4053 4054void 4055nxt_unit_ctx_free(nxt_unit_ctx_t *ctx) 4056{ 4057 nxt_unit_impl_t *lib; 4058 nxt_unit_ctx_impl_t *ctx_impl; 4059 nxt_unit_mmap_buf_t *mmap_buf; 4060 nxt_unit_request_info_impl_t *req_impl; 4061 nxt_unit_websocket_frame_impl_t *ws_impl; 4062 4063 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 4064 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4065 4066 nxt_queue_each(req_impl, &ctx_impl->active_req, 4067 nxt_unit_request_info_impl_t, link) 4068 { 4069 nxt_unit_req_warn(&req_impl->req, "active request on ctx free"); 4070 4071 nxt_unit_request_done(&req_impl->req, NXT_UNIT_ERROR); 4072 4073 } nxt_queue_loop; 4074 4075 
nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[0]); 4076 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[1]); 4077 4078 while (ctx_impl->free_buf != NULL) { 4079 mmap_buf = ctx_impl->free_buf; 4080 nxt_unit_mmap_buf_unlink(mmap_buf); 4081 free(mmap_buf); 4082 } 4083 4084 nxt_queue_each(req_impl, &ctx_impl->free_req, 4085 nxt_unit_request_info_impl_t, link) 4086 { 4087 nxt_unit_request_info_free(req_impl); 4088 4089 } nxt_queue_loop; 4090 4091 nxt_queue_each(ws_impl, &ctx_impl->free_ws, 4092 nxt_unit_websocket_frame_impl_t, link) 4093 { 4094 nxt_unit_websocket_frame_free(ws_impl); 4095 4096 } nxt_queue_loop; 4097 4098 pthread_mutex_destroy(&ctx_impl->mutex); 4099 4100 nxt_queue_remove(&ctx_impl->link); 4101 4102 if (ctx_impl != &lib->main_ctx) { 4103 free(ctx_impl); 4104 } 4105} 4106 4107 4108/* SOCK_SEQPACKET is disabled to test SOCK_DGRAM on all platforms. */ 4109#if (0 || NXT_HAVE_AF_UNIX_SOCK_SEQPACKET) 4110#define NXT_UNIX_SOCKET SOCK_SEQPACKET 4111#else 4112#define NXT_UNIX_SOCKET SOCK_DGRAM 4113#endif 4114 4115 4116void 4117nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id) 4118{ 4119 nxt_unit_port_hash_id_t port_hash_id; 4120 4121 port_hash_id.pid = pid; 4122 port_hash_id.id = id; 4123 4124 port_id->pid = pid; 4125 port_id->hash = nxt_murmur_hash2(&port_hash_id, sizeof(port_hash_id)); 4126 port_id->id = id; 4127} 4128 4129 4130int 4131nxt_unit_create_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, 4132 nxt_unit_port_id_t *port_id) 4133{ 4134 int rc, fd; 4135 nxt_unit_impl_t *lib; 4136 nxt_unit_port_id_t new_port_id; 4137 4138 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4139 4140 rc = nxt_unit_create_port(ctx, &new_port_id, &fd); 4141 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4142 return rc; 4143 } 4144 4145 rc = nxt_unit_send_port(ctx, dst, &new_port_id, fd); 4146 4147 if (nxt_fast_path(rc == NXT_UNIT_OK)) { 4148 *port_id = new_port_id; 4149 4150 } else { 4151 lib->callbacks.remove_port(ctx, &new_port_id); 4152 } 4153 
4154 close(fd); 4155 4156 return rc; 4157} 4158 4159 4160static int 4161nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) 4162{ 4163 int rc, port_sockets[2]; 4164 nxt_unit_impl_t *lib; 4165 nxt_unit_port_t new_port; 4166 nxt_unit_process_t *process; 4167 4168 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4169 4170 rc = socketpair(AF_UNIX, NXT_UNIX_SOCKET, 0, port_sockets); 4171 if (nxt_slow_path(rc != 0)) { 4172 nxt_unit_warn(ctx, "create_port: socketpair() failed: %s (%d)", 4173 strerror(errno), errno); 4174 4175 return NXT_UNIT_ERROR; 4176 } 4177 4178 nxt_unit_debug(ctx, "create_port: new socketpair: %d->%d", 4179 port_sockets[0], port_sockets[1]); 4180 4181 pthread_mutex_lock(&lib->mutex); 4182 4183 process = nxt_unit_process_get(ctx, lib->pid); 4184 if (nxt_slow_path(process == NULL)) { 4185 pthread_mutex_unlock(&lib->mutex); 4186 4187 close(port_sockets[0]); 4188 close(port_sockets[1]); 4189 4190 return NXT_UNIT_ERROR; 4191 } 4192 4193 nxt_unit_port_id_init(&new_port.id, lib->pid, process->next_port_id++); 4194 4195 new_port.in_fd = port_sockets[0]; 4196 new_port.out_fd = -1; 4197 new_port.data = NULL; 4198 4199 pthread_mutex_unlock(&lib->mutex); 4200 4201 nxt_unit_process_use(ctx, process, -1); 4202 4203 rc = lib->callbacks.add_port(ctx, &new_port); 4204 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4205 nxt_unit_warn(ctx, "create_port: add_port() failed"); 4206 4207 close(port_sockets[0]); 4208 close(port_sockets[1]); 4209 4210 return rc; 4211 } 4212 4213 *port_id = new_port.id; 4214 *fd = port_sockets[1]; 4215 4216 return rc; 4217} 4218 4219 4220static int 4221nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, 4222 nxt_unit_port_id_t *new_port, int fd) 4223{ 4224 ssize_t res; 4225 nxt_unit_impl_t *lib; 4226 4227 struct { 4228 nxt_port_msg_t msg; 4229 nxt_port_msg_new_port_t new_port; 4230 } m; 4231 4232 union { 4233 struct cmsghdr cm; 4234 char space[CMSG_SPACE(sizeof(int))]; 4235 } cmsg; 4236 4237 lib = 
nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4238 4239 m.msg.stream = 0; 4240 m.msg.pid = lib->pid; 4241 m.msg.reply_port = 0; 4242 m.msg.type = _NXT_PORT_MSG_NEW_PORT; 4243 m.msg.last = 0; 4244 m.msg.mmap = 0; 4245 m.msg.nf = 0; 4246 m.msg.mf = 0; 4247 m.msg.tracking = 0; 4248 4249 m.new_port.id = new_port->id; 4250 m.new_port.pid = new_port->pid; 4251 m.new_port.type = NXT_PROCESS_WORKER; 4252 m.new_port.max_size = 16 * 1024; 4253 m.new_port.max_share = 64 * 1024; 4254 4255 memset(&cmsg, 0, sizeof(cmsg)); 4256 4257 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); 4258 cmsg.cm.cmsg_level = SOL_SOCKET; 4259 cmsg.cm.cmsg_type = SCM_RIGHTS; 4260 4261 /* 4262 * memcpy() is used instead of simple 4263 * *(int *) CMSG_DATA(&cmsg.cm) = fd; 4264 * because GCC 4.4 with -O2/3/s optimization may issue a warning: 4265 * dereferencing type-punned pointer will break strict-aliasing rules 4266 * 4267 * Fortunately, GCC with -O1 compiles this nxt_memcpy() 4268 * in the same simple assignment as in the code above. 4269 */ 4270 memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int)); 4271 4272 res = lib->callbacks.port_send(ctx, dst, &m, sizeof(m), 4273 &cmsg, sizeof(cmsg)); 4274 4275 return res == sizeof(m) ? 
NXT_UNIT_OK : NXT_UNIT_ERROR; 4276} 4277 4278 4279int 4280nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) 4281{ 4282 int rc; 4283 nxt_unit_impl_t *lib; 4284 nxt_unit_process_t *process; 4285 nxt_unit_port_impl_t *new_port; 4286 4287 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4288 4289 nxt_unit_debug(ctx, "add_port: %d,%d in_fd %d out_fd %d", 4290 port->id.pid, port->id.id, 4291 port->in_fd, port->out_fd); 4292 4293 pthread_mutex_lock(&lib->mutex); 4294 4295 process = nxt_unit_process_get(ctx, port->id.pid); 4296 if (nxt_slow_path(process == NULL)) { 4297 rc = NXT_UNIT_ERROR; 4298 goto unlock; 4299 } 4300 4301 if (port->id.id >= process->next_port_id) { 4302 process->next_port_id = port->id.id + 1; 4303 } 4304 4305 new_port = malloc(sizeof(nxt_unit_port_impl_t)); 4306 if (nxt_slow_path(new_port == NULL)) { 4307 rc = NXT_UNIT_ERROR; 4308 goto unlock; 4309 } 4310 4311 new_port->port = *port; 4312 4313 rc = nxt_unit_port_hash_add(&lib->ports, &new_port->port); 4314 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
| 1 2/* 3 * Copyright (C) NGINX, Inc. 4 */ 5 6#include <stdlib.h> 7 8#include "nxt_main.h" 9#include "nxt_port_memory_int.h" 10 11#include "nxt_unit.h" 12#include "nxt_unit_request.h" 13#include "nxt_unit_response.h" 14#include "nxt_unit_websocket.h" 15 16#include "nxt_websocket.h" 17 18#if (NXT_HAVE_MEMFD_CREATE) 19#include <linux/memfd.h> 20#endif 21 22#define NXT_UNIT_MAX_PLAIN_SIZE 1024 23#define NXT_UNIT_LOCAL_BUF_SIZE \ 24 (NXT_UNIT_MAX_PLAIN_SIZE + sizeof(nxt_port_msg_t)) 25 26typedef struct nxt_unit_impl_s nxt_unit_impl_t; 27typedef struct nxt_unit_mmap_s nxt_unit_mmap_t; 28typedef struct nxt_unit_mmaps_s nxt_unit_mmaps_t; 29typedef struct nxt_unit_process_s nxt_unit_process_t; 30typedef struct nxt_unit_mmap_buf_s nxt_unit_mmap_buf_t; 31typedef struct nxt_unit_recv_msg_s nxt_unit_recv_msg_t; 32typedef struct nxt_unit_read_buf_s nxt_unit_read_buf_t; 33typedef struct nxt_unit_ctx_impl_s nxt_unit_ctx_impl_t; 34typedef struct nxt_unit_port_impl_s nxt_unit_port_impl_t; 35typedef struct nxt_unit_request_info_impl_s nxt_unit_request_info_impl_t; 36typedef struct nxt_unit_websocket_frame_impl_s nxt_unit_websocket_frame_impl_t; 37 38static nxt_unit_impl_t *nxt_unit_create(nxt_unit_init_t *init); 39static int nxt_unit_ctx_init(nxt_unit_impl_t *lib, 40 nxt_unit_ctx_impl_t *ctx_impl, void *data); 41nxt_inline void nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head, 42 nxt_unit_mmap_buf_t *mmap_buf); 43nxt_inline void nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev, 44 nxt_unit_mmap_buf_t *mmap_buf); 45nxt_inline void nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf); 46static int nxt_unit_read_env(nxt_unit_port_t *ready_port, 47 nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream, 48 uint32_t *shm_limit); 49static int nxt_unit_ready(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 50 uint32_t stream); 51static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, 52 nxt_unit_recv_msg_t *recv_msg); 53static int 
nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, 54 nxt_unit_recv_msg_t *recv_msg); 55static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, 56 nxt_unit_recv_msg_t *recv_msg); 57static int nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx); 58static nxt_unit_request_info_impl_t *nxt_unit_request_info_get( 59 nxt_unit_ctx_t *ctx); 60static void nxt_unit_request_info_release(nxt_unit_request_info_t *req); 61static void nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req); 62static nxt_unit_websocket_frame_impl_t *nxt_unit_websocket_frame_get( 63 nxt_unit_ctx_t *ctx); 64static void nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws); 65static void nxt_unit_websocket_frame_free(nxt_unit_websocket_frame_impl_t *ws); 66static nxt_unit_process_t *nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx, 67 nxt_unit_recv_msg_t *recv_msg); 68static nxt_unit_mmap_buf_t *nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx); 69static void nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf); 70static int nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, 71 nxt_unit_mmap_buf_t *mmap_buf, int last); 72static void nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf); 73static void nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf); 74static nxt_unit_read_buf_t *nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx); 75static nxt_unit_read_buf_t *nxt_unit_read_buf_get_impl( 76 nxt_unit_ctx_impl_t *ctx_impl); 77static void nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx, 78 nxt_unit_read_buf_t *rbuf); 79static nxt_unit_mmap_buf_t *nxt_unit_request_preread( 80 nxt_unit_request_info_t *req, size_t size); 81static ssize_t nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, 82 size_t size); 83static nxt_port_mmap_header_t *nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, 84 nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, 85 nxt_chunk_id_t *c, int *n, int min_n); 86static int nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id); 87static int 
nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx); 88static nxt_unit_mmap_t *nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i); 89static nxt_port_mmap_header_t *nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, 90 nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, int n); 91static int nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 92 int fd); 93static int nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, 94 nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, uint32_t size, 95 uint32_t min_size, nxt_unit_mmap_buf_t *mmap_buf, char *local_buf); 96static int nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd); 97 98static void nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps); 99static void nxt_unit_process_use(nxt_unit_ctx_t *ctx, 100 nxt_unit_process_t *process, int i); 101static void nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps); 102static nxt_port_mmap_header_t *nxt_unit_get_incoming_mmap(nxt_unit_ctx_t *ctx, 103 nxt_unit_process_t *process, uint32_t id); 104static int nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, 105 nxt_unit_recv_msg_t *recv_msg); 106static int nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, 107 nxt_unit_recv_msg_t *recv_msg); 108static void nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, 109 nxt_unit_process_t *process, 110 nxt_port_mmap_header_t *hdr, void *start, uint32_t size); 111static int nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid); 112 113static nxt_unit_process_t *nxt_unit_process_get(nxt_unit_ctx_t *ctx, 114 pid_t pid); 115static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_ctx_t *ctx, 116 pid_t pid, int remove); 117static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib); 118static void nxt_unit_read_buf(nxt_unit_ctx_t *ctx, 119 nxt_unit_read_buf_t *rbuf); 120static int nxt_unit_create_port(nxt_unit_ctx_t *ctx, 121 nxt_unit_port_id_t *port_id, int *fd); 122 123static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, 124 nxt_unit_port_id_t *new_port, int fd); 125 
126static void nxt_unit_remove_port_unsafe(nxt_unit_ctx_t *ctx, 127 nxt_unit_port_id_t *port_id, nxt_unit_port_t *r_port, 128 nxt_unit_process_t **process); 129static void nxt_unit_remove_process(nxt_unit_ctx_t *ctx, 130 nxt_unit_process_t *process); 131 132static ssize_t nxt_unit_port_send_default(nxt_unit_ctx_t *ctx, 133 nxt_unit_port_id_t *port_id, const void *buf, size_t buf_size, 134 const void *oob, size_t oob_size); 135static ssize_t nxt_unit_port_recv_default(nxt_unit_ctx_t *ctx, 136 nxt_unit_port_id_t *port_id, void *buf, size_t buf_size, 137 void *oob, size_t oob_size); 138 139static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, 140 nxt_unit_port_t *port); 141static nxt_unit_port_impl_t *nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, 142 nxt_unit_port_id_t *port_id, int remove); 143 144static int nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, 145 nxt_unit_request_info_impl_t *req_impl); 146static nxt_unit_request_info_impl_t *nxt_unit_request_hash_find( 147 nxt_lvlhsh_t *request_hash, uint32_t stream, int remove); 148 149static char * nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level); 150 151 152struct nxt_unit_mmap_buf_s { 153 nxt_unit_buf_t buf; 154 155 nxt_unit_mmap_buf_t *next; 156 nxt_unit_mmap_buf_t **prev; 157 158 nxt_port_mmap_header_t *hdr; 159 nxt_unit_port_id_t port_id; 160 nxt_unit_request_info_t *req; 161 nxt_unit_ctx_impl_t *ctx_impl; 162 nxt_unit_process_t *process; 163 char *free_ptr; 164 char *plain_ptr; 165}; 166 167 168struct nxt_unit_recv_msg_s { 169 uint32_t stream; 170 nxt_pid_t pid; 171 nxt_port_id_t reply_port; 172 173 uint8_t last; /* 1 bit */ 174 uint8_t mmap; /* 1 bit */ 175 176 void *start; 177 uint32_t size; 178 179 int fd; 180 nxt_unit_process_t *process; 181 182 nxt_unit_mmap_buf_t *incoming_buf; 183}; 184 185 186typedef enum { 187 NXT_UNIT_RS_START = 0, 188 NXT_UNIT_RS_RESPONSE_INIT, 189 NXT_UNIT_RS_RESPONSE_HAS_CONTENT, 190 NXT_UNIT_RS_RESPONSE_SENT, 191 NXT_UNIT_RS_RELEASED, 192} 
nxt_unit_req_state_t; 193 194 195struct nxt_unit_request_info_impl_s { 196 nxt_unit_request_info_t req; 197 198 uint32_t stream; 199 200 nxt_unit_process_t *process; 201 202 nxt_unit_mmap_buf_t *outgoing_buf; 203 nxt_unit_mmap_buf_t *incoming_buf; 204 205 nxt_unit_req_state_t state; 206 uint8_t websocket; 207 208 nxt_queue_link_t link; 209 210 char extra_data[]; 211}; 212 213 214struct nxt_unit_websocket_frame_impl_s { 215 nxt_unit_websocket_frame_t ws; 216 217 nxt_unit_mmap_buf_t *buf; 218 219 nxt_queue_link_t link; 220 221 nxt_unit_ctx_impl_t *ctx_impl; 222}; 223 224 225struct nxt_unit_read_buf_s { 226 nxt_unit_read_buf_t *next; 227 ssize_t size; 228 char buf[16384]; 229 char oob[256]; 230}; 231 232 233struct nxt_unit_ctx_impl_s { 234 nxt_unit_ctx_t ctx; 235 236 pthread_mutex_t mutex; 237 238 nxt_unit_port_id_t read_port_id; 239 int read_port_fd; 240 241 nxt_queue_link_t link; 242 243 nxt_unit_mmap_buf_t *free_buf; 244 245 /* of nxt_unit_request_info_impl_t */ 246 nxt_queue_t free_req; 247 248 /* of nxt_unit_websocket_frame_impl_t */ 249 nxt_queue_t free_ws; 250 251 /* of nxt_unit_request_info_impl_t */ 252 nxt_queue_t active_req; 253 254 /* of nxt_unit_request_info_impl_t */ 255 nxt_lvlhsh_t requests; 256 257 nxt_unit_read_buf_t *pending_read_head; 258 nxt_unit_read_buf_t **pending_read_tail; 259 nxt_unit_read_buf_t *free_read_buf; 260 261 nxt_unit_mmap_buf_t ctx_buf[2]; 262 nxt_unit_read_buf_t ctx_read_buf; 263 264 nxt_unit_request_info_impl_t req; 265}; 266 267 268struct nxt_unit_impl_s { 269 nxt_unit_t unit; 270 nxt_unit_callbacks_t callbacks; 271 272 uint32_t request_data_size; 273 uint32_t shm_mmap_limit; 274 275 pthread_mutex_t mutex; 276 277 nxt_lvlhsh_t processes; /* of nxt_unit_process_t */ 278 nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */ 279 280 nxt_unit_port_id_t ready_port_id; 281 282 nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ 283 284 pid_t pid; 285 int log_fd; 286 int online; 287 288 nxt_unit_ctx_impl_t main_ctx; 289}; 290 291 292struct 
nxt_unit_port_impl_s { 293 nxt_unit_port_t port; 294 295 nxt_queue_link_t link; 296 nxt_unit_process_t *process; 297}; 298 299 300struct nxt_unit_mmap_s { 301 nxt_port_mmap_header_t *hdr; 302}; 303 304 305struct nxt_unit_mmaps_s { 306 pthread_mutex_t mutex; 307 uint32_t size; 308 uint32_t cap; 309 nxt_atomic_t allocated_chunks; 310 nxt_unit_mmap_t *elts; 311}; 312 313 314struct nxt_unit_process_s { 315 pid_t pid; 316 317 nxt_queue_t ports; 318 319 nxt_unit_mmaps_t incoming; 320 nxt_unit_mmaps_t outgoing; 321 322 nxt_unit_impl_t *lib; 323 324 nxt_atomic_t use_count; 325 326 uint32_t next_port_id; 327}; 328 329 330/* Explicitly using 32 bit types to avoid possible alignment. */ 331typedef struct { 332 int32_t pid; 333 uint32_t id; 334} nxt_unit_port_hash_id_t; 335 336 337nxt_unit_ctx_t * 338nxt_unit_init(nxt_unit_init_t *init) 339{ 340 int rc; 341 uint32_t ready_stream, shm_limit; 342 nxt_unit_ctx_t *ctx; 343 nxt_unit_impl_t *lib; 344 nxt_unit_port_t ready_port, read_port; 345 346 lib = nxt_unit_create(init); 347 if (nxt_slow_path(lib == NULL)) { 348 return NULL; 349 } 350 351 if (init->ready_port.id.pid != 0 352 && init->ready_stream != 0 353 && init->read_port.id.pid != 0) 354 { 355 ready_port = init->ready_port; 356 ready_stream = init->ready_stream; 357 read_port = init->read_port; 358 lib->log_fd = init->log_fd; 359 360 nxt_unit_port_id_init(&ready_port.id, ready_port.id.pid, 361 ready_port.id.id); 362 nxt_unit_port_id_init(&read_port.id, read_port.id.pid, 363 read_port.id.id); 364 365 } else { 366 rc = nxt_unit_read_env(&ready_port, &read_port, &lib->log_fd, 367 &ready_stream, &shm_limit); 368 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 369 goto fail; 370 } 371 372 lib->shm_mmap_limit = (shm_limit + PORT_MMAP_DATA_SIZE - 1) 373 / PORT_MMAP_DATA_SIZE; 374 } 375 376 if (nxt_slow_path(lib->shm_mmap_limit < 1)) { 377 lib->shm_mmap_limit = 1; 378 } 379 380 lib->pid = read_port.id.pid; 381 ctx = &lib->main_ctx.ctx; 382 383 rc = lib->callbacks.add_port(ctx, &ready_port); 
384 if (rc != NXT_UNIT_OK) { 385 nxt_unit_alert(NULL, "failed to add ready_port"); 386 387 goto fail; 388 } 389 390 rc = lib->callbacks.add_port(ctx, &read_port); 391 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 392 nxt_unit_alert(NULL, "failed to add read_port"); 393 394 goto fail; 395 } 396 397 lib->main_ctx.read_port_id = read_port.id; 398 lib->ready_port_id = ready_port.id; 399 400 rc = nxt_unit_ready(ctx, &ready_port.id, ready_stream); 401 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 402 nxt_unit_alert(NULL, "failed to send READY message"); 403 404 goto fail; 405 } 406 407 return ctx; 408 409fail: 410 411 free(lib); 412 413 return NULL; 414} 415 416 417static nxt_unit_impl_t * 418nxt_unit_create(nxt_unit_init_t *init) 419{ 420 int rc; 421 nxt_unit_impl_t *lib; 422 nxt_unit_callbacks_t *cb; 423 424 lib = malloc(sizeof(nxt_unit_impl_t) + init->request_data_size); 425 if (nxt_slow_path(lib == NULL)) { 426 nxt_unit_alert(NULL, "failed to allocate unit struct"); 427 428 return NULL; 429 } 430 431 rc = pthread_mutex_init(&lib->mutex, NULL); 432 if (nxt_slow_path(rc != 0)) { 433 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc); 434 435 goto fail; 436 } 437 438 lib->unit.data = init->data; 439 lib->callbacks = init->callbacks; 440 441 lib->request_data_size = init->request_data_size; 442 lib->shm_mmap_limit = (init->shm_limit + PORT_MMAP_DATA_SIZE - 1) 443 / PORT_MMAP_DATA_SIZE; 444 445 lib->processes.slot = NULL; 446 lib->ports.slot = NULL; 447 448 lib->log_fd = STDERR_FILENO; 449 lib->online = 1; 450 451 nxt_queue_init(&lib->contexts); 452 453 rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data); 454 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 455 goto fail; 456 } 457 458 cb = &lib->callbacks; 459 460 if (cb->request_handler == NULL) { 461 nxt_unit_alert(NULL, "request_handler is NULL"); 462 463 goto fail; 464 } 465 466 if (cb->add_port == NULL) { 467 cb->add_port = nxt_unit_add_port; 468 } 469 470 if (cb->remove_port == NULL) { 471 cb->remove_port = 
nxt_unit_remove_port; 472 } 473 474 if (cb->remove_pid == NULL) { 475 cb->remove_pid = nxt_unit_remove_pid; 476 } 477 478 if (cb->quit == NULL) { 479 cb->quit = nxt_unit_quit; 480 } 481 482 if (cb->port_send == NULL) { 483 cb->port_send = nxt_unit_port_send_default; 484 } 485 486 if (cb->port_recv == NULL) { 487 cb->port_recv = nxt_unit_port_recv_default; 488 } 489 490 return lib; 491 492fail: 493 494 free(lib); 495 496 return NULL; 497} 498 499 500static int 501nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, 502 void *data) 503{ 504 int rc; 505 506 ctx_impl->ctx.data = data; 507 ctx_impl->ctx.unit = &lib->unit; 508 509 nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link); 510 511 rc = pthread_mutex_init(&ctx_impl->mutex, NULL); 512 if (nxt_slow_path(rc != 0)) { 513 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc); 514 515 return NXT_UNIT_ERROR; 516 } 517 518 nxt_queue_init(&ctx_impl->free_req); 519 nxt_queue_init(&ctx_impl->free_ws); 520 nxt_queue_init(&ctx_impl->active_req); 521 522 ctx_impl->free_buf = NULL; 523 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]); 524 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[0]); 525 526 nxt_queue_insert_tail(&ctx_impl->free_req, &ctx_impl->req.link); 527 528 ctx_impl->pending_read_head = NULL; 529 ctx_impl->pending_read_tail = &ctx_impl->pending_read_head; 530 ctx_impl->free_read_buf = &ctx_impl->ctx_read_buf; 531 ctx_impl->ctx_read_buf.next = NULL; 532 533 ctx_impl->req.req.ctx = &ctx_impl->ctx; 534 ctx_impl->req.req.unit = &lib->unit; 535 536 ctx_impl->read_port_fd = -1; 537 ctx_impl->requests.slot = 0; 538 539 return NXT_UNIT_OK; 540} 541 542 543nxt_inline void 544nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head, 545 nxt_unit_mmap_buf_t *mmap_buf) 546{ 547 mmap_buf->next = *head; 548 549 if (mmap_buf->next != NULL) { 550 mmap_buf->next->prev = &mmap_buf->next; 551 } 552 553 *head = mmap_buf; 554 mmap_buf->prev = head; 555} 556 557 558nxt_inline 
void
nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev,
    nxt_unit_mmap_buf_t *mmap_buf)
{
    /* Walk to the terminating NULL link, then reuse the head-insert helper. */
    while (*prev != NULL) {
        prev = &(*prev)->next;
    }

    nxt_unit_mmap_buf_insert(prev, mmap_buf);
}


/*
 * Unlink a buffer from its intrusive list.  A buffer with prev == NULL is
 * treated as not linked and only its successor's back-pointer is patched.
 */
nxt_inline void
nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_mmap_buf_t  **prev;

    prev = mmap_buf->prev;

    if (mmap_buf->next != NULL) {
        mmap_buf->next->prev = prev;
    }

    if (prev != NULL) {
        *prev = mmap_buf->next;
    }
}


/*
 * Parse the NXT_UNIT_INIT environment variable passed by the Unit master
 * process.  Expected format (after an exact NXT_VERSION prefix and ';'):
 *   "<stream>;<ready_pid>,<ready_id>,<ready_fd>;
 *    <read_pid>,<read_id>,<read_fd>;<log_fd>,<shm_limit>"
 * Fills in the ready (write-only) and read (read-only) ports and the
 * out-parameters; returns NXT_UNIT_ERROR on any mismatch.
 */
static int
nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *read_port,
    int *log_fd, uint32_t *stream, uint32_t *shm_limit)
{
    int       rc;
    int       ready_fd, read_fd;
    char      *unit_init, *version_end;
    long      version_length;
    int64_t   ready_pid, read_pid;
    uint32_t  ready_stream, ready_id, read_id;

    unit_init = getenv(NXT_UNIT_INIT_ENV);
    if (nxt_slow_path(unit_init == NULL)) {
        nxt_unit_alert(NULL, "%s is not in the current environment",
                       NXT_UNIT_INIT_ENV);

        return NXT_UNIT_ERROR;
    }

    nxt_unit_debug(NULL, "%s='%s'", NXT_UNIT_INIT_ENV, unit_init);

    version_length = nxt_length(NXT_VERSION);

    /* The library must match the running Unit daemon version exactly. */
    version_end = strchr(unit_init, ';');
    if (version_end == NULL
        || version_end - unit_init != version_length
        || memcmp(unit_init, NXT_VERSION, version_length) != 0)
    {
        nxt_unit_alert(NULL, "version check error");

        return NXT_UNIT_ERROR;
    }

    rc = sscanf(version_end + 1,
                "%"PRIu32";"
                "%"PRId64",%"PRIu32",%d;"
                "%"PRId64",%"PRIu32",%d;"
                "%d,%"PRIu32,
                &ready_stream,
                &ready_pid, &ready_id, &ready_fd,
                &read_pid, &read_id, &read_fd,
                log_fd, shm_limit);

    if (nxt_slow_path(rc != 9)) {
        nxt_unit_alert(NULL, "failed to scan variables: %d", rc);

        return NXT_UNIT_ERROR;
    }

    nxt_unit_port_id_init(&ready_port->id, (pid_t) ready_pid, ready_id);

    /* Ready port is used only for sending the READY message. */
    ready_port->in_fd = -1;
    ready_port->out_fd = ready_fd;
    ready_port->data = NULL;

    nxt_unit_port_id_init(&read_port->id, (pid_t) read_pid, read_id);

    /* Read port is the application's incoming message channel. */
    read_port->in_fd = read_fd;
    read_port->out_fd = -1;
    read_port->data = NULL;

    *stream = ready_stream;

    return NXT_UNIT_OK;
}


/*
 * Send the _NXT_PORT_MSG_PROCESS_READY handshake message to the master
 * process over the given port.  Returns NXT_UNIT_ERROR if the full
 * message could not be written.
 */
static int
nxt_unit_ready(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id,
    uint32_t stream)
{
    ssize_t          res;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = stream;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_PROCESS_READY;
    msg.last = 1;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0);
    if (res != sizeof(msg)) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Parse and dispatch one message received from the master process.
 * buf/buf_size hold the nxt_port_msg_t header plus payload; oob/oob_size
 * may carry a single passed file descriptor via SCM_RIGHTS.  Ownership of
 * that descriptor, of any shared-memory buffers read from the message, and
 * of the referenced process object is released in the common 'fail' path
 * unless a handler "moves" them elsewhere first.
 */
int
nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id,
    void *buf, size_t buf_size, void *oob, size_t oob_size)
{
    int                   rc;
    pid_t                 pid;
    struct cmsghdr        *cm;
    nxt_port_msg_t        *port_msg;
    nxt_unit_impl_t       *lib;
    nxt_unit_recv_msg_t   recv_msg;
    nxt_unit_callbacks_t  *cb;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    rc = NXT_UNIT_ERROR;
    recv_msg.fd = -1;
    recv_msg.process = NULL;
    port_msg = buf;
    cm = oob;

    /* Extract a passed descriptor from the ancillary data, if present. */
    if (oob_size >= CMSG_SPACE(sizeof(int))
        && cm->cmsg_len == CMSG_LEN(sizeof(int))
        && cm->cmsg_level == SOL_SOCKET
        && cm->cmsg_type == SCM_RIGHTS)
    {
        memcpy(&recv_msg.fd, CMSG_DATA(cm), sizeof(int));
    }

    recv_msg.incoming_buf = NULL;

    if (nxt_slow_path(buf_size < sizeof(nxt_port_msg_t))) {
        nxt_unit_warn(ctx, "message too small (%d bytes)", (int) buf_size);
        goto fail;
    }

    recv_msg.stream = port_msg->stream;
    recv_msg.pid = port_msg->pid;
    recv_msg.reply_port = port_msg->reply_port;
    recv_msg.last =
port_msg->last;
    recv_msg.mmap = port_msg->mmap;

    /* Payload starts right after the fixed header. */
    recv_msg.start = port_msg + 1;
    recv_msg.size = buf_size - sizeof(nxt_port_msg_t);

    if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) {
        nxt_unit_warn(ctx, "#%"PRIu32": unknown message type (%d)",
                      port_msg->stream, (int) port_msg->type);
        goto fail;
    }

    /*
     * Tracking read returning 0 means the request was cancelled; treat it
     * as successfully handled and let the 'fail' path release resources.
     */
    if (port_msg->tracking && nxt_unit_tracking_read(ctx, &recv_msg) == 0) {
        rc = NXT_UNIT_OK;

        goto fail;
    }

    /* Fragmentation is unsupported. */
    if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) {
        nxt_unit_warn(ctx, "#%"PRIu32": fragmented message type (%d)",
                      port_msg->stream, (int) port_msg->type);
        goto fail;
    }

    /* Payload may live in shared memory; resolve it into incoming_buf. */
    if (port_msg->mmap) {
        if (nxt_unit_mmap_read(ctx, &recv_msg) != NXT_UNIT_OK) {
            goto fail;
        }
    }

    cb = &lib->callbacks;

    switch (port_msg->type) {

    case _NXT_PORT_MSG_QUIT:
        nxt_unit_debug(ctx, "#%"PRIu32": quit", port_msg->stream);

        cb->quit(ctx);
        rc = NXT_UNIT_OK;
        break;

    case _NXT_PORT_MSG_NEW_PORT:
        rc = nxt_unit_process_new_port(ctx, &recv_msg);
        break;

    case _NXT_PORT_MSG_CHANGE_FILE:
        nxt_unit_debug(ctx, "#%"PRIu32": change_file: fd %d",
                       port_msg->stream, recv_msg.fd);

        /* Redirect the log descriptor to the newly passed file. */
        if (dup2(recv_msg.fd, lib->log_fd) == -1) {
            nxt_unit_alert(ctx, "#%"PRIu32": dup2(%d, %d) failed: %s (%d)",
                           port_msg->stream, recv_msg.fd, lib->log_fd,
                           strerror(errno), errno);

            goto fail;
        }

        rc = NXT_UNIT_OK;
        break;

    case _NXT_PORT_MSG_MMAP:
        if (nxt_slow_path(recv_msg.fd < 0)) {
            nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap",
                           port_msg->stream, recv_msg.fd);

            goto fail;
        }

        rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd);
        break;

    case _NXT_PORT_MSG_REQ_HEADERS:
        rc = nxt_unit_process_req_headers(ctx, &recv_msg);
        break;

    case _NXT_PORT_MSG_WEBSOCKET:
        rc = nxt_unit_process_websocket(ctx, &recv_msg);
        break;

    case _NXT_PORT_MSG_REMOVE_PID:
        if (nxt_slow_path(recv_msg.size != sizeof(pid))) {
            nxt_unit_warn(ctx, "#%"PRIu32": remove_pid: invalid message size "
                          "(%d != %d)", port_msg->stream, (int) recv_msg.size,
                          (int) sizeof(pid));

            goto fail;
        }

        memcpy(&pid, recv_msg.start, sizeof(pid));

        nxt_unit_debug(ctx, "#%"PRIu32": remove_pid: %d",
                       port_msg->stream, (int) pid);

        cb->remove_pid(ctx, pid);

        rc = NXT_UNIT_OK;
        break;

    case _NXT_PORT_MSG_SHM_ACK:
        rc = nxt_unit_process_shm_ack(ctx);
        break;

    default:
        nxt_unit_debug(ctx, "#%"PRIu32": ignore message type: %d",
                       port_msg->stream, (int) port_msg->type);

        goto fail;
    }

fail:

    /* Release anything a handler did not take ownership of. */
    if (recv_msg.fd != -1) {
        close(recv_msg.fd);
    }

    /* mmap_buf_free unlinks each buffer, advancing the list head. */
    while (recv_msg.incoming_buf != NULL) {
        nxt_unit_mmap_buf_free(recv_msg.incoming_buf);
    }

    if (recv_msg.process != NULL) {
        nxt_unit_process_use(ctx, recv_msg.process, -1);
    }

    return rc;
}


/*
 * Handle a NEW_PORT message: validate the payload, switch the passed
 * descriptor to blocking mode, and register the new write-only port via
 * the add_port callback.  On success the descriptor ownership moves to
 * the new port (recv_msg->fd is reset to -1).
 */
static int
nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    int                      nb;
    nxt_unit_impl_t          *lib;
    nxt_unit_port_t          new_port;
    nxt_port_msg_new_port_t  *new_port_msg;

    if (nxt_slow_path(recv_msg->size != sizeof(nxt_port_msg_new_port_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": new_port: "
                      "invalid message size (%d)",
                      recv_msg->stream, (int) recv_msg->size);

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(recv_msg->fd < 0)) {
        nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for new port",
                       recv_msg->stream, recv_msg->fd);

        return NXT_UNIT_ERROR;
    }

    new_port_msg = recv_msg->start;

    nxt_unit_debug(ctx, "#%"PRIu32": new_port: %d,%d fd %d",
                   recv_msg->stream, (int) new_port_msg->pid,
                   (int) new_port_msg->id, recv_msg->fd);

    /* nb == 0: clear non-blocking mode on the port descriptor. */
    nb = 0;

    if (nxt_slow_path(ioctl(recv_msg->fd, FIONBIO, &nb) == -1)) {
        nxt_unit_alert(ctx, "#%"PRIu32": new_port: ioctl(%d, FIONBIO, 0) "
                       "failed: %s (%d)",
                       recv_msg->stream, recv_msg->fd, strerror(errno), errno);

        return NXT_UNIT_ERROR;
    }

    nxt_unit_port_id_init(&new_port.id, new_port_msg->pid,
                          new_port_msg->id);

    new_port.in_fd = -1;
    new_port.out_fd = recv_msg->fd;
    new_port.data = NULL;

    /* Ownership of the descriptor moves to new_port. */
    recv_msg->fd = -1;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    return lib->callbacks.add_port(ctx, &new_port);
}


/*
 * Handle REQ_HEADERS: build a request info object around the shared-memory
 * request data and invoke the application's request_handler.  The process
 * reference, incoming buffer list, and content descriptor are all "moved"
 * from recv_msg into req_impl so the caller's cleanup path skips them.
 */
static int
nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    nxt_unit_impl_t               *lib;
    nxt_unit_request_t            *r;
    nxt_unit_mmap_buf_t           *b;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    if (nxt_slow_path(recv_msg->mmap == 0)) {
        nxt_unit_warn(ctx, "#%"PRIu32": data is not in shared memory",
                      recv_msg->stream);

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(recv_msg->size < sizeof(nxt_unit_request_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": data too short: %d while at least "
                      "%d expected", recv_msg->stream, (int) recv_msg->size,
                      (int) sizeof(nxt_unit_request_t));

        return NXT_UNIT_ERROR;
    }

    req_impl = nxt_unit_request_info_get(ctx);
    if (nxt_slow_path(req_impl == NULL)) {
        nxt_unit_warn(ctx, "#%"PRIu32": request info allocation failed",
                      recv_msg->stream);

        return NXT_UNIT_ERROR;
    }

    req = &req_impl->req;

    nxt_unit_port_id_init(&req->response_port, recv_msg->pid,
                          recv_msg->reply_port);

    req->request = recv_msg->start;

    b = recv_msg->incoming_buf;

    req->request_buf = &b->buf;
    req->response = NULL;
    req->response_buf = NULL;

    r = req->request;

    req->content_length = r->content_length;

    /* Body bytes already received follow the headers in the first buffer. */
    req->content_buf = req->request_buf;
    req->content_buf->free = nxt_unit_sptr_get(&r->preread_content);

    /* "Move" process
       reference to req_impl. */
    req_impl->process = nxt_unit_msg_get_process(ctx, recv_msg);
    if (nxt_slow_path(req_impl->process == NULL)) {
        return NXT_UNIT_ERROR;
    }

    recv_msg->process = NULL;

    req_impl->stream = recv_msg->stream;

    req_impl->outgoing_buf = NULL;

    for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
        b->req = req;
    }

    /* "Move" incoming buffer list to req_impl. */
    req_impl->incoming_buf = recv_msg->incoming_buf;
    req_impl->incoming_buf->prev = &req_impl->incoming_buf;
    recv_msg->incoming_buf = NULL;

    /* Ownership of the (optional) request body descriptor moves too. */
    req->content_fd = recv_msg->fd;
    recv_msg->fd = -1;

    req->response_max_fields = 0;
    req_impl->state = NXT_UNIT_RS_START;
    req_impl->websocket = 0;

    nxt_unit_debug(ctx, "#%"PRIu32": %.*s %.*s (%d)", recv_msg->stream,
                   (int) r->method_length,
                   (char *) nxt_unit_sptr_get(&r->method),
                   (int) r->target_length,
                   (char *) nxt_unit_sptr_get(&r->target),
                   (int) r->content_length);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    lib->callbacks.request_handler(req);

    return NXT_UNIT_OK;
}


/*
 * Handle a WEBSOCKET message for an upgraded request: wrap the frame data
 * in a websocket frame object and call the websocket_handler; when the
 * message carries the 'last' flag, also end the websocket session via
 * close_handler (or request_done as a fallback).
 */
static int
nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    size_t                           hsize;
    nxt_unit_impl_t                  *lib;
    nxt_unit_mmap_buf_t              *b;
    nxt_unit_ctx_impl_t              *ctx_impl;
    nxt_unit_callbacks_t             *cb;
    nxt_unit_request_info_t          *req;
    nxt_unit_request_info_impl_t     *req_impl;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    /*
     * NOTE(review): the 'last' flag is passed as the hash-find delete
     * argument — presumably removing the request from the hash on the
     * final frame; confirm against nxt_unit_request_hash_find.
     */
    req_impl = nxt_unit_request_hash_find(&ctx_impl->requests, recv_msg->stream,
                                          recv_msg->last);
    if (req_impl == NULL) {
        return NXT_UNIT_OK;
    }

    req = &req_impl->req;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    cb = &lib->callbacks;

    /* A frame needs at least the 2-byte websocket header to be useful. */
    if (cb->websocket_handler && recv_msg->size >= 2) {
        ws_impl = nxt_unit_websocket_frame_get(ctx);
        if (nxt_slow_path(ws_impl == NULL)) {
            nxt_unit_warn(ctx, "#%"PRIu32": websocket frame allocation failed",
                          req_impl->stream);

            return NXT_UNIT_ERROR;
        }

        ws_impl->ws.req = req;

        ws_impl->buf = NULL;

        if (recv_msg->mmap) {
            for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
                b->req = req;
            }

            /* "Move" incoming buffer list to ws_impl. */
            ws_impl->buf = recv_msg->incoming_buf;
            ws_impl->buf->prev = &ws_impl->buf;
            recv_msg->incoming_buf = NULL;

            b = ws_impl->buf;

        } else {
            /* Plain payload: copy-free wrap of the in-message bytes. */
            b = nxt_unit_mmap_buf_get(ctx);
            if (nxt_slow_path(b == NULL)) {
                nxt_unit_alert(ctx, "#%"PRIu32": failed to allocate buf",
                               req_impl->stream);

                nxt_unit_websocket_frame_release(&ws_impl->ws);

                return NXT_UNIT_ERROR;
            }

            b->req = req;
            b->buf.start = recv_msg->start;
            b->buf.free = b->buf.start;
            b->buf.end = b->buf.start + recv_msg->size;

            nxt_unit_mmap_buf_insert(&ws_impl->buf, b);
        }

        ws_impl->ws.header = (void *) b->buf.start;
        ws_impl->ws.payload_len = nxt_websocket_frame_payload_len(
            ws_impl->ws.header);

        hsize = nxt_websocket_frame_header_size(ws_impl->ws.header);

        /* Masking key is the last 4 bytes of the frame header. */
        if (ws_impl->ws.header->mask) {
            ws_impl->ws.mask = (uint8_t *) b->buf.start + hsize - 4;

        } else {
            ws_impl->ws.mask = NULL;
        }

        b->buf.free += hsize;

        ws_impl->ws.content_buf = &b->buf;
        ws_impl->ws.content_length = ws_impl->ws.payload_len;

        nxt_unit_req_debug(req, "websocket_handler: opcode=%d, "
                           "payload_len=%"PRIu64,
                           ws_impl->ws.header->opcode,
                           ws_impl->ws.payload_len);

        cb->websocket_handler(&ws_impl->ws);
    }

    if (recv_msg->last) {
        req_impl->websocket = 0;

        if (cb->close_handler) {
            nxt_unit_req_debug(req, "close_handler");

            cb->close_handler(req);

        } else {
            nxt_unit_request_done(req, NXT_UNIT_ERROR);
        }
    }

    return NXT_UNIT_OK;
}


/*
 * Handle SHM_ACK: forward the shared-memory acknowledgement to the
 * application's shm_ack_handler, if one is registered.
 */
static int
nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx)
{
    nxt_unit_impl_t       *lib;
    nxt_unit_callbacks_t  *cb;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
    cb = &lib->callbacks;

    if (cb->shm_ack_handler != NULL) {
        cb->shm_ack_handler(ctx);
    }

    return NXT_UNIT_OK;
}


/*
 * Get a request info object: reuse one from the context free list or
 * allocate a new one (with the library-configured extra data area).
 * The object is placed on the active list before returning.
 */
static nxt_unit_request_info_impl_t *
nxt_unit_request_info_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_impl_t               *lib;
    nxt_queue_link_t              *lnk;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (nxt_queue_is_empty(&ctx_impl->free_req)) {
        /* Drop the lock across malloc, then re-acquire for list insert. */
        pthread_mutex_unlock(&ctx_impl->mutex);

        req_impl = malloc(sizeof(nxt_unit_request_info_impl_t)
                          + lib->request_data_size);
        if (nxt_slow_path(req_impl == NULL)) {
            return NULL;
        }

        req_impl->req.unit = ctx->unit;
        req_impl->req.ctx = ctx;

        pthread_mutex_lock(&ctx_impl->mutex);

    } else {
        lnk = nxt_queue_first(&ctx_impl->free_req);
        nxt_queue_remove(lnk);

        req_impl = nxt_container_of(lnk, nxt_unit_request_info_impl_t, link);
    }

    nxt_queue_insert_tail(&ctx_impl->active_req, &req_impl->link);

    pthread_mutex_unlock(&ctx_impl->mutex);

    req_impl->req.data = lib->request_data_size ?
req_impl->extra_data : NULL;

    return req_impl;
}


/*
 * Return a finished request info object to the context free list,
 * releasing everything it owns: websocket hash entry, outgoing and
 * incoming buffer lists, content descriptor, and the process reference.
 */
static void
nxt_unit_request_info_release(nxt_unit_request_info_t *req)
{
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    req->response = NULL;
    req->response_buf = NULL;

    if (req_impl->websocket) {
        /*
         * NOTE(review): hash_find is called with the delete flag set to
         * drop the websocket entry; the return value is intentionally
         * ignored here.
         */
        nxt_unit_request_hash_find(&ctx_impl->requests, req_impl->stream, 1);

        req_impl->websocket = 0;
    }

    /* mmap_buf_free unlinks each node, advancing the list head. */
    while (req_impl->outgoing_buf != NULL) {
        nxt_unit_mmap_buf_free(req_impl->outgoing_buf);
    }

    while (req_impl->incoming_buf != NULL) {
        nxt_unit_mmap_buf_free(req_impl->incoming_buf);
    }

    if (req->content_fd != -1) {
        close(req->content_fd);

        req->content_fd = -1;
    }

    /*
     * Process release should go after buffers release to guarantee mmap
     * existence.
     */
    if (req_impl->process != NULL) {
        nxt_unit_process_use(req->ctx, req_impl->process, -1);

        req_impl->process = NULL;
    }

    pthread_mutex_lock(&ctx_impl->mutex);

    nxt_queue_remove(&req_impl->link);

    nxt_queue_insert_tail(&ctx_impl->free_req, &req_impl->link);

    pthread_mutex_unlock(&ctx_impl->mutex);

    req_impl->state = NXT_UNIT_RS_RELEASED;
}


/*
 * Destroy a request info object; the statically embedded one
 * (ctx_impl->req) is never freed.
 */
static void
nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req_impl)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_queue_remove(&req_impl->link);

    if (req_impl != &ctx_impl->req) {
        free(req_impl);
    }
}


/*
 * Get a websocket frame object from the context free list, or allocate
 * a fresh one if the list is empty.
 */
static nxt_unit_websocket_frame_impl_t *
nxt_unit_websocket_frame_get(nxt_unit_ctx_t *ctx)
{
    nxt_queue_link_t                 *lnk;
    nxt_unit_ctx_impl_t              *ctx_impl;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (nxt_queue_is_empty(&ctx_impl->free_ws)) {
        pthread_mutex_unlock(&ctx_impl->mutex);

        ws_impl = malloc(sizeof(nxt_unit_websocket_frame_impl_t));
        if (nxt_slow_path(ws_impl == NULL)) {
            return NULL;
        }

    } else {
        lnk = nxt_queue_first(&ctx_impl->free_ws);
        nxt_queue_remove(lnk);

        pthread_mutex_unlock(&ctx_impl->mutex);

        ws_impl = nxt_container_of(lnk, nxt_unit_websocket_frame_impl_t, link);
    }

    ws_impl->ctx_impl = ctx_impl;

    return ws_impl;
}


/*
 * Release a websocket frame: free its buffer chain and return the frame
 * object to the owning context's free list.
 */
static void
nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws)
{
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);

    while (ws_impl->buf != NULL) {
        nxt_unit_mmap_buf_free(ws_impl->buf);
    }

    ws->req = NULL;

    pthread_mutex_lock(&ws_impl->ctx_impl->mutex);

    nxt_queue_insert_tail(&ws_impl->ctx_impl->free_ws, &ws_impl->link);

    pthread_mutex_unlock(&ws_impl->ctx_impl->mutex);
}


/* Destroy a websocket frame object for good (used on context shutdown). */
static void
nxt_unit_websocket_frame_free(nxt_unit_websocket_frame_impl_t *ws_impl)
{
    nxt_queue_remove(&ws_impl->link);

    free(ws_impl);
}


/*
 * Case-insensitive header name hash; must match the router's hashing so
 * precomputed NXT_UNIT_HASH_* constants compare equal.
 */
uint16_t
nxt_unit_field_hash(const char *name, size_t name_length)
{
    u_char      ch;
    uint32_t    hash;
    const char  *p, *end;

    hash = 159406; /* Magic value copied from nxt_http_parse.c */
    end = name + name_length;

    for (p = name; p < end; p++) {
        ch = *p;
        hash = (hash << 4) + hash + nxt_lowcase(ch);
    }

    /* Fold the 32-bit hash down to the 16-bit return type. */
    hash = (hash >> 16) ^ hash;

    return hash;
}


/*
 * Reorder the request fields in place so that fields with equal hashes
 * (duplicate headers) become adjacent, preserving relative order, while
 * recording the indexes of well-known fields.  Field sptr offsets are
 * adjusted to compensate for each move.
 */
void
nxt_unit_request_group_dup_fields(nxt_unit_request_info_t *req)
{
    uint32_t            i, j;
    nxt_unit_field_t    *fields, f;
    nxt_unit_request_t  *r;

    nxt_unit_req_debug(req, "group_dup_fields");

    r = req->request;
    fields = r->fields;

    for (i = 0; i < r->fields_count; i++) {

        switch (fields[i].hash) {
        case NXT_UNIT_HASH_CONTENT_LENGTH:
            r->content_length_field = i;
            break;

        case NXT_UNIT_HASH_CONTENT_TYPE:
            r->content_type_field = i;
            break;

        case NXT_UNIT_HASH_COOKIE:
            r->cookie_field = i;
            break;
        };

        for (j = i + 1; j < r->fields_count; j++) {
            if (fields[i].hash != fields[j].hash) {
                continue;
            }

            /* Already adjacent: nothing to move. */
            if (j == i + 1) {
                continue;
            }

            /*
             * Rotate fields[i+1 .. j] right by one so fields[j] lands at
             * i+1; sptr offsets are relative, so each shifted field's
             * offsets are fixed up by sizeof(f).
             */
            f = fields[j];
            f.name.offset += (j - (i + 1)) * sizeof(f);
            f.value.offset += (j - (i + 1)) * sizeof(f);

            while (j > i + 1) {
                fields[j] = fields[j - 1];
                fields[j].name.offset -= sizeof(f);
                fields[j].value.offset -= sizeof(f);
                j--;
            }

            fields[j] = f;

            i++;
        }
    }
}


int
nxt_unit_response_init(nxt_unit_request_info_t *req,
    uint16_t status, uint32_t max_fields_count, uint32_t max_fields_size)
{
    uint32_t                      buf_size;
    nxt_unit_buf_t                *buf;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "init: response already sent");

        return NXT_UNIT_ERROR;
    }

    nxt_unit_req_debug(req, "init: %d, max fields %d/%d", (int) status,
                       (int) max_fields_count, (int) max_fields_size);

    /* Re-initialization is allowed before the response is sent. */
    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_debug(req, "duplicate response init");
    }

    /*
     * Each field name and value 0-terminated by libunit,
     * this is the reason of '+ 2' below.
     */
    buf_size = sizeof(nxt_unit_response_t)
               + max_fields_count * (sizeof(nxt_unit_field_t) + 2)
               + max_fields_size;

    if (nxt_slow_path(req->response_buf != NULL)) {
        buf = req->response_buf;

        /* Reuse the existing buffer when it is already large enough. */
        if (nxt_fast_path(buf_size <= (uint32_t) (buf->end - buf->start))) {
            goto init_response;
        }

        nxt_unit_buf_free(buf);

        req->response_buf = NULL;
        req->response = NULL;
        req->response_max_fields = 0;

        req_impl->state = NXT_UNIT_RS_START;
    }

    buf = nxt_unit_response_buf_alloc(req, buf_size);
    if (nxt_slow_path(buf == NULL)) {
        return NXT_UNIT_ERROR;
    }

init_response:

    memset(buf->start, 0, sizeof(nxt_unit_response_t));

    req->response_buf = buf;

    req->response = (nxt_unit_response_t *) buf->start;
    req->response->status = status;

    /* Reserve room for the header struct plus the full fields array. */
    buf->free = buf->start + sizeof(nxt_unit_response_t)
                + max_fields_count * sizeof(nxt_unit_field_t);

    req->response_max_fields = max_fields_count;
    req_impl->state = NXT_UNIT_RS_RESPONSE_INIT;

    return NXT_UNIT_OK;
}


/*
 * Re-allocate the response buffer with new field count/size limits and
 * copy over all non-skipped fields and any piggyback content.  Valid only
 * between response init and send.  On failure the old response stays
 * intact.
 */
int
nxt_unit_response_realloc(nxt_unit_request_info_t *req,
    uint32_t max_fields_count, uint32_t max_fields_size)
{
    char                          *p;
    uint32_t                      i, buf_size;
    nxt_unit_buf_t                *buf;
    nxt_unit_field_t              *f, *src;
    nxt_unit_response_t           *resp;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "realloc: response not init");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "realloc: response already sent");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(max_fields_count < req->response->fields_count)) {
        nxt_unit_req_warn(req, "realloc: new max_fields_count is too small");

        return NXT_UNIT_ERROR;
    }

    /*
     * Each field name and value 0-terminated by libunit,
     * this is the reason of '+ 2' below.
     */
    buf_size = sizeof(nxt_unit_response_t)
               + max_fields_count * (sizeof(nxt_unit_field_t) + 2)
               + max_fields_size;

    nxt_unit_req_debug(req, "realloc %"PRIu32"", buf_size);

    buf = nxt_unit_response_buf_alloc(req, buf_size);
    if (nxt_slow_path(buf == NULL)) {
        nxt_unit_req_warn(req, "realloc: new buf allocation failed");
        return NXT_UNIT_ERROR;
    }

    resp = (nxt_unit_response_t *) buf->start;

    memset(resp, 0, sizeof(nxt_unit_response_t));

    resp->status = req->response->status;
    resp->content_length = req->response->content_length;

    /* String data starts after the (new-size) fields array. */
    p = buf->start + max_fields_count * sizeof(nxt_unit_field_t);
    f = resp->fields;

    for (i = 0; i < req->response->fields_count; i++) {
        src = req->response->fields + i;

        /* Skipped fields are dropped during the copy. */
        if (nxt_slow_path(src->skip != 0)) {
            continue;
        }

        if (nxt_slow_path(src->name_length + src->value_length + 2
                          > (uint32_t) (buf->end - p)))
        {
            nxt_unit_req_warn(req, "realloc: not enough space for field"
                " #%"PRIu32" (%p), (%"PRIu32" + %"PRIu32") required",
                i, src, src->name_length, src->value_length);

            goto fail;
        }

        nxt_unit_sptr_set(&f->name, p);
        p = nxt_cpymem(p, nxt_unit_sptr_get(&src->name), src->name_length);
        *p++ = '\0';

        nxt_unit_sptr_set(&f->value, p);
        p = nxt_cpymem(p, nxt_unit_sptr_get(&src->value), src->value_length);
        *p++ = '\0';

        f->hash = src->hash;
        f->skip = 0;
        f->name_length = src->name_length;
        f->value_length = src->value_length;

        resp->fields_count++;
        f++;
    }

    if (req->response->piggyback_content_length > 0) {
        if (nxt_slow_path(req->response->piggyback_content_length
                          > (uint32_t) (buf->end - p)))
        {
            nxt_unit_req_warn(req, "realloc: not enought space for content"
                " #%"PRIu32", %"PRIu32" required",
                i, req->response->piggyback_content_length);

            goto fail;
        }

        resp->piggyback_content_length =
                                       req->response->piggyback_content_length;

        nxt_unit_sptr_set(&resp->piggyback_content, p);
        p = nxt_cpymem(p, nxt_unit_sptr_get(&req->response->piggyback_content),
                       req->response->piggyback_content_length);
    }

    buf->free = p;

    /* Swap in the new buffer; only now is the old one released. */
    nxt_unit_buf_free(req->response_buf);

    req->response = resp;
    req->response_buf = buf;
    req->response_max_fields = max_fields_count;

    return NXT_UNIT_OK;

fail:

    nxt_unit_buf_free(buf);

    return NXT_UNIT_ERROR;
}


/* Return non-zero once nxt_unit_response_init() has been called. */
int
nxt_unit_response_is_init(nxt_unit_request_info_t *req)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    return req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT;
}


/*
 * Append one header field to the initialized-but-unsent response.
 * Name and value are copied into the response buffer and 0-terminated.
 * Fails when the field table or buffer space is exhausted.
 */
int
nxt_unit_response_add_field(nxt_unit_request_info_t *req,
    const char *name, uint8_t name_length,
    const char *value, uint32_t value_length)
{
    nxt_unit_buf_t                *buf;
    nxt_unit_field_t              *f;
    nxt_unit_response_t           *resp;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state != NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "add_field: response not initialized or "
                          "already sent");

        return NXT_UNIT_ERROR;
    }

    resp = req->response;

    if (nxt_slow_path(resp->fields_count >= req->response_max_fields)) {
        nxt_unit_req_warn(req, "add_field: too many response fields");

        return NXT_UNIT_ERROR;
    }

    buf = req->response_buf;

    /* '+ 2' accounts for the two 0-terminators written below. */
    if (nxt_slow_path(name_length + value_length + 2
                      > (uint32_t) (buf->end - buf->free)))
    {
        nxt_unit_req_warn(req, "add_field: response buffer overflow");

        return NXT_UNIT_ERROR;
    }

    nxt_unit_req_debug(req, "add_field #%"PRIu32": %.*s: %.*s",
                       resp->fields_count,
                       (int) name_length, name,
                       (int) value_length, value);

    f = resp->fields + resp->fields_count;

    nxt_unit_sptr_set(&f->name, buf->free);
    buf->free = nxt_cpymem(buf->free, name, name_length);
    *buf->free++ = '\0';

    nxt_unit_sptr_set(&f->value, buf->free);
    buf->free = nxt_cpymem(buf->free, value, value_length);
    *buf->free++ = '\0';

    f->hash = nxt_unit_field_hash(name, name_length);
    f->skip = 0;
    f->name_length = name_length;
    f->value_length = value_length;

    resp->fields_count++;

    return NXT_UNIT_OK;
}


/*
 * Append body bytes that will be sent piggybacked together with the
 * response headers.  Valid between response init and send; limited by
 * the remaining space in the response buffer.
 */
int
nxt_unit_response_add_content(nxt_unit_request_info_t *req,
    const void* src, uint32_t size)
{
    nxt_unit_buf_t                *buf;
    nxt_unit_response_t           *resp;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "add_content: response not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "add_content: response already sent");

        return NXT_UNIT_ERROR;
    }

    buf = req->response_buf;

    if (nxt_slow_path(size > (uint32_t) (buf->end - buf->free))) {
        nxt_unit_req_warn(req, "add_content: buffer overflow");

        return NXT_UNIT_ERROR;
    }

    resp = req->response;

    /* First chunk fixes the content start position within the buffer. */
    if (resp->piggyback_content_length == 0) {
        nxt_unit_sptr_set(&resp->piggyback_content, buf->free);
        req_impl->state = NXT_UNIT_RS_RESPONSE_HAS_CONTENT;
    }

    resp->piggyback_content_length += size;

    buf->free = nxt_cpymem(buf->free, src, size);

    return NXT_UNIT_OK;
}


/*
 * Send the response headers (and any piggyback content) to the router.
 * Automatically performs a websocket upgrade for 101 responses to
 * handshake requests.
 */
int
nxt_unit_response_send(nxt_unit_request_info_t *req)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
nxt_unit_request_info_impl_t *req_impl; 1706 1707 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1708 1709 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { 1710 nxt_unit_req_warn(req, "send: response is not initialized yet"); 1711 1712 return NXT_UNIT_ERROR; 1713 } 1714 1715 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) { 1716 nxt_unit_req_warn(req, "send: response already sent"); 1717 1718 return NXT_UNIT_ERROR; 1719 } 1720 1721 if (req->request->websocket_handshake && req->response->status == 101) { 1722 nxt_unit_response_upgrade(req); 1723 } 1724 1725 nxt_unit_req_debug(req, "send: %"PRIu32" fields, %d bytes", 1726 req->response->fields_count, 1727 (int) (req->response_buf->free 1728 - req->response_buf->start)); 1729 1730 mmap_buf = nxt_container_of(req->response_buf, nxt_unit_mmap_buf_t, buf); 1731 1732 rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 0); 1733 if (nxt_fast_path(rc == NXT_UNIT_OK)) { 1734 req->response = NULL; 1735 req->response_buf = NULL; 1736 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT; 1737 1738 nxt_unit_mmap_buf_free(mmap_buf); 1739 } 1740 1741 return rc; 1742} 1743 1744 1745int 1746nxt_unit_response_is_sent(nxt_unit_request_info_t *req) 1747{ 1748 nxt_unit_request_info_impl_t *req_impl; 1749 1750 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 1751 1752 return req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT; 1753} 1754 1755 1756nxt_unit_buf_t * 1757nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size) 1758{ 1759 int rc; 1760 nxt_unit_mmap_buf_t *mmap_buf; 1761 nxt_unit_request_info_impl_t *req_impl; 1762 1763 if (nxt_slow_path(size > PORT_MMAP_DATA_SIZE)) { 1764 nxt_unit_req_warn(req, "response_buf_alloc: " 1765 "requested buffer (%"PRIu32") too big", size); 1766 1767 return NULL; 1768 } 1769 1770 nxt_unit_req_debug(req, "response_buf_alloc: %"PRIu32, size); 1771 1772 req_impl = nxt_container_of(req, 
                              nxt_unit_request_info_impl_t, req);

    mmap_buf = nxt_unit_mmap_buf_get(req->ctx);
    if (nxt_slow_path(mmap_buf == NULL)) {
        nxt_unit_req_alert(req, "response_buf_alloc: failed to allocate buf");

        return NULL;
    }

    mmap_buf->req = req;

    /* Track the buffer on the request so it can be reclaimed with it. */
    nxt_unit_mmap_buf_insert_tail(&req_impl->outgoing_buf, mmap_buf);

    rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                   &req->response_port, size, size, mmap_buf,
                                   NULL);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_mmap_buf_release(mmap_buf);

        return NULL;
    }

    return &mmap_buf->buf;
}


/*
 * Resolve the process structure for a received message, caching the
 * result in recv_msg->process.  The library-wide mutex guards the
 * process lookup; a missing process is logged but not treated as
 * fatal here (NULL is returned and the caller decides).
 */
static nxt_unit_process_t *
nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    nxt_unit_impl_t  *lib;

    /* Fast path: already resolved for this message. */
    if (recv_msg->process != NULL) {
        return recv_msg->process;
    }

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->mutex);

    recv_msg->process = nxt_unit_process_find(ctx, recv_msg->pid, 0);

    pthread_mutex_unlock(&lib->mutex);

    if (recv_msg->process == NULL) {
        nxt_unit_warn(ctx, "#%"PRIu32": process %d not found",
                      recv_msg->stream, (int) recv_msg->pid);
    }

    return recv_msg->process;
}


/*
 * Get a buffer structure, either from the per-context free list or,
 * when the list is empty, freshly malloc()ed.  The context mutex is
 * held only while touching the free list.  Returns NULL on
 * allocation failure.  hdr and free_ptr are reset; other fields are
 * left for the caller to initialize.
 */
static nxt_unit_mmap_buf_t *
nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_mmap_buf_t  *mmap_buf;
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    if (ctx_impl->free_buf == NULL) {
        /* Unlock before malloc() to keep the critical section short. */
        pthread_mutex_unlock(&ctx_impl->mutex);

        mmap_buf = malloc(sizeof(nxt_unit_mmap_buf_t));
        if (nxt_slow_path(mmap_buf == NULL)) {
            return NULL;
        }

    } else {
        mmap_buf = ctx_impl->free_buf;

        nxt_unit_mmap_buf_unlink(mmap_buf);

        pthread_mutex_unlock(&ctx_impl->mutex);
    }

    mmap_buf->ctx_impl = ctx_impl;

    mmap_buf->hdr = NULL;
    mmap_buf->free_ptr = NULL;

    return mmap_buf;
}


/*
 * Return a buffer structure to its owning context's free list.
 * The buffer is unlinked from whatever list it is currently on
 * before being re-linked under the context mutex.
 */
static void
nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_mmap_buf_unlink(mmap_buf);

    pthread_mutex_lock(&mmap_buf->ctx_impl->mutex);

    nxt_unit_mmap_buf_insert(&mmap_buf->ctx_impl->free_buf, mmap_buf);

    pthread_mutex_unlock(&mmap_buf->ctx_impl->mutex);
}


/* Length-prefixed string literal helper. */
typedef struct {
    size_t      len;
    const char  *str;
} nxt_unit_str_t;


#define nxt_unit_str(str)  { nxt_length(str), str }


/* Returns nonzero if the request carries a WebSocket handshake. */
int
nxt_unit_request_is_websocket_handshake(nxt_unit_request_info_t *req)
{
    return req->request->websocket_handshake;
}


/*
 * Switch an initialized-but-unsent response into WebSocket upgrade
 * mode: register the request in the context hash (so incoming frames
 * can find it) and force status 101.  Idempotent: an already-upgraded
 * request returns NXT_UNIT_OK immediately.
 */
int
nxt_unit_response_upgrade(nxt_unit_request_info_t *req)
{
    int                           rc;
    nxt_unit_ctx_impl_t           *ctx_impl;
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    if (nxt_slow_path(req_impl->websocket != 0)) {
        nxt_unit_req_debug(req, "upgrade: already upgraded");

        return NXT_UNIT_OK;
    }

    /* Upgrade is only valid between response init and response send. */
    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "upgrade: response is not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "upgrade: response already sent");

        return NXT_UNIT_ERROR;
    }

    ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);

    rc = nxt_unit_request_hash_add(&ctx_impl->requests, req_impl);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        nxt_unit_req_warn(req, "upgrade: failed to add request to hash");

        return NXT_UNIT_ERROR;
    }

    req_impl->websocket = 1;

    req->response->status = 101;

    return NXT_UNIT_OK;
}


int
nxt_unit_response_is_websocket(nxt_unit_request_info_t *req)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    /* Nonzero after a successful nxt_unit_response_upgrade(). */
    return req_impl->websocket;
}


/*
 * Recover the request info from the application's extra_data area
 * (the inverse of handing out req_impl->extra_data to the app).
 */
nxt_unit_request_info_t *
nxt_unit_get_request_info_from_data(void *data)
{
    nxt_unit_request_info_impl_t  *req_impl;

    req_impl = nxt_container_of(data, nxt_unit_request_info_impl_t, extra_data);

    return &req_impl->req;
}


/*
 * Send a response body buffer (non-final).  Headers must already
 * have been sent.  The buffer is freed regardless of content; an
 * empty buffer is not transmitted at all.
 */
int
nxt_unit_buf_send(nxt_unit_buf_t *buf)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;
    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    nxt_unit_req_debug(req, "buf_send: %d bytes",
                       (int) (buf->free - buf->start));

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "buf_send: response not initialized yet");

        return NXT_UNIT_ERROR;
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
        nxt_unit_req_warn(req, "buf_send: headers not sent yet");

        return NXT_UNIT_ERROR;
    }

    /* Only transmit non-empty buffers; empty ones are just released. */
    if (nxt_fast_path(buf->free > buf->start)) {
        rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return rc;
        }
    }

    nxt_unit_mmap_buf_free(mmap_buf);

    return NXT_UNIT_OK;
}


/*
 * Send the final response buffer (last == 1) and release the request.
 * On send failure, fall back to nxt_unit_request_done() so the router
 * still receives a terminating message for the stream.
 */
static void
nxt_unit_buf_send_done(nxt_unit_buf_t *buf)
{
    int                           rc;
    nxt_unit_mmap_buf_t           *mmap_buf;
    nxt_unit_request_info_t       *req;
    nxt_unit_request_info_impl_t  *req_impl;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    req = mmap_buf->req;
    req_impl = nxt_container_of(req,
                                nxt_unit_request_info_impl_t, req);

    rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 1);
    if (nxt_slow_path(rc == NXT_UNIT_OK)) {
        nxt_unit_mmap_buf_free(mmap_buf);

        nxt_unit_request_info_release(req);

    } else {
        nxt_unit_request_done(req, rc);
    }
}


/*
 * Transmit one buffer to the router, either by reference (shared
 * memory: only a small mmap descriptor message is sent) or by value
 * (plain buffer: the port message header is prepended in the space
 * reserved before buf->start).  In the mmap case the unsent tail of
 * the chunk range is handed back to the buffer for reuse and the
 * per-process allocated_chunks counter is adjusted accordingly.
 * Always consumes the buffer's backing storage via
 * nxt_unit_free_outgoing_buf(), on both success and failure.
 */
static int
nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream,
    nxt_unit_mmap_buf_t *mmap_buf, int last)
{
    struct {
        nxt_port_msg_t       msg;
        nxt_port_mmap_msg_t  mmap_msg;
    } m;

    int                     rc;
    u_char                  *last_used, *first_free;
    ssize_t                 res;
    nxt_chunk_id_t          first_free_chunk;
    nxt_unit_buf_t          *buf;
    nxt_unit_impl_t         *lib;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    buf = &mmap_buf->buf;
    hdr = mmap_buf->hdr;

    m.mmap_msg.size = buf->free - buf->start;

    m.msg.stream = stream;
    m.msg.pid = lib->pid;
    m.msg.reply_port = 0;
    m.msg.type = _NXT_PORT_MSG_DATA;
    m.msg.last = last != 0;
    /* Shared-memory path only when data actually occupies a mapping. */
    m.msg.mmap = hdr != NULL && m.mmap_msg.size > 0;
    m.msg.nf = 0;
    m.msg.mf = 0;
    m.msg.tracking = 0;

    rc = NXT_UNIT_ERROR;

    if (m.msg.mmap) {
        m.mmap_msg.mmap_id = hdr->id;
        m.mmap_msg.chunk_id = nxt_port_mmap_chunk_id(hdr,
                                                     (u_char *) buf->start);

        nxt_unit_debug(ctx, "#%"PRIu32": send mmap: (%d,%d,%d)",
                       stream,
                       (int) m.mmap_msg.mmap_id,
                       (int) m.mmap_msg.chunk_id,
                       (int) m.mmap_msg.size);

        res = lib->callbacks.port_send(ctx, &mmap_buf->port_id, &m, sizeof(m),
                                       NULL, 0);
        if (nxt_slow_path(res != sizeof(m))) {
            goto free_buf;
        }

        /* First chunk after the data that was just handed to the router. */
        last_used = (u_char *) buf->free - 1;
        first_free_chunk = nxt_port_mmap_chunk_id(hdr, last_used) + 1;

        if (buf->end - buf->free >= PORT_MMAP_CHUNK_SIZE) {
            /* Keep the remaining whole chunks in the buffer for reuse. */
            first_free = nxt_port_mmap_chunk_start(hdr, first_free_chunk);

            buf->start = (char *) first_free;
            buf->free = buf->start;

            if (buf->end < buf->start) {
                buf->end = buf->start;
            }

        } else {
            /* Nothing reusable is left; detach from the mapping. */
            buf->start = NULL;
            buf->free = NULL;
            buf->end = NULL;

            mmap_buf->hdr = NULL;
        }

        /*
         * Chunks up to chunk_id were transferred to the router; subtract
         * them from this process's allocation accounting (the delta is
         * negative or zero).
         */
        nxt_atomic_fetch_add(&mmap_buf->process->outgoing.allocated_chunks,
                             (int) m.mmap_msg.chunk_id - (int) first_free_chunk);

        nxt_unit_debug(ctx, "process %d allocated_chunks %d",
                       mmap_buf->process->pid,
                       (int) mmap_buf->process->outgoing.allocated_chunks);

    } else {
        /*
         * Plain send: the port message header is written into the
         * sizeof(m.msg) bytes reserved immediately before buf->start
         * (see nxt_unit_get_outgoing_buf()); verify that reservation.
         */
        if (nxt_slow_path(mmap_buf->plain_ptr == NULL
                          || mmap_buf->plain_ptr > buf->start - sizeof(m.msg)))
        {
            nxt_unit_warn(ctx, "#%"PRIu32": failed to send plain memory buffer"
                          ": no space reserved for message header", stream);

            goto free_buf;
        }

        memcpy(buf->start - sizeof(m.msg), &m.msg, sizeof(m.msg));

        nxt_unit_debug(ctx, "#%"PRIu32": send plain: %d",
                       stream,
                       (int) (sizeof(m.msg) + m.mmap_msg.size));

        res = lib->callbacks.port_send(ctx, &mmap_buf->port_id,
                                       buf->start - sizeof(m.msg),
                                       m.mmap_msg.size + sizeof(m.msg),
                                       NULL, 0);
        if (nxt_slow_path(res != (ssize_t) (m.mmap_msg.size + sizeof(m.msg)))) {
            goto free_buf;
        }
    }

    rc = NXT_UNIT_OK;

free_buf:

    nxt_unit_free_outgoing_buf(mmap_buf);

    return rc;
}


/* Public wrapper: free a response buffer without sending it. */
void
nxt_unit_buf_free(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_free(nxt_container_of(buf, nxt_unit_mmap_buf_t, buf));
}


/* Release both the backing storage and the buffer structure itself. */
static void
nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf)
{
    nxt_unit_free_outgoing_buf(mmap_buf);

    nxt_unit_mmap_buf_release(mmap_buf);
}


/*
 * Release the storage behind a buffer: shared-memory chunks are
 * returned to the mapping, a heap-allocated plain buffer is freed.
 * Safe to call more than once (hdr/free_ptr are cleared).
 */
static void
nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf)
{
    if (mmap_buf->hdr != NULL) {
        nxt_unit_mmap_release(&mmap_buf->ctx_impl->ctx,
                              mmap_buf->process,
                              mmap_buf->hdr, mmap_buf->buf.start,
                              mmap_buf->buf.end -
                              mmap_buf->buf.start);

        mmap_buf->hdr = NULL;

        return;
    }

    if (mmap_buf->free_ptr != NULL) {
        free(mmap_buf->free_ptr);

        mmap_buf->free_ptr = NULL;
    }
}


/*
 * Get a read buffer from the context pool.  NOTE: this function
 * takes ctx_impl->mutex and nxt_unit_read_buf_get_impl() releases
 * it on every path — the lock never escapes the pair.
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    return nxt_unit_read_buf_get_impl(ctx_impl);
}


/*
 * Pop a read buffer from the free list or malloc() a new one.
 * Must be entered with ctx_impl->mutex held; always unlocks it.
 * Returns NULL only if malloc() fails.
 */
static nxt_unit_read_buf_t *
nxt_unit_read_buf_get_impl(nxt_unit_ctx_impl_t *ctx_impl)
{
    nxt_unit_read_buf_t  *rbuf;

    if (ctx_impl->free_read_buf != NULL) {
        rbuf = ctx_impl->free_read_buf;
        ctx_impl->free_read_buf = rbuf->next;

        pthread_mutex_unlock(&ctx_impl->mutex);

        return rbuf;
    }

    pthread_mutex_unlock(&ctx_impl->mutex);

    rbuf = malloc(sizeof(nxt_unit_read_buf_t));

    return rbuf;
}


/* Return a read buffer to the context free list. */
static void
nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
    nxt_unit_read_buf_t *rbuf)
{
    nxt_unit_ctx_impl_t  *ctx_impl;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    pthread_mutex_lock(&ctx_impl->mutex);

    rbuf->next = ctx_impl->free_read_buf;
    ctx_impl->free_read_buf = rbuf;

    pthread_mutex_unlock(&ctx_impl->mutex);
}


/* Next buffer in a chain, or NULL at the end. */
nxt_unit_buf_t *
nxt_unit_buf_next(nxt_unit_buf_t *buf)
{
    nxt_unit_mmap_buf_t  *mmap_buf;

    mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);

    if (mmap_buf->next == NULL) {
        return NULL;
    }

    return &mmap_buf->next->buf;
}


/* Largest single shared-memory buffer the library will allocate. */
uint32_t
nxt_unit_buf_max(void)
{
    return PORT_MMAP_DATA_SIZE;
}


/* Allocation granularity of shared-memory buffers. */
uint32_t
nxt_unit_buf_min(void)
{
    return PORT_MMAP_CHUNK_SIZE;
}


/*
 * Blocking write: delegates to the non-blocking variant with
 * min_size == size so it only returns once everything is sent.
 * Returns NXT_UNIT_OK or a positive error code.
 */
int
nxt_unit_response_write(nxt_unit_request_info_t *req, const void *start,
    size_t size)
{
    ssize_t  res;

    res = nxt_unit_response_write_nb(req, start, size, size);

    /* write_nb() reports errors as negated codes. */
    return res < 0 ? -res : NXT_UNIT_OK;
}


/*
 * Write up to "size" bytes of response body, blocking only until at
 * least "min_size" bytes are sent.  Any pending headers buffer is
 * flushed first, with as much body piggybacked into it as fits.
 * Returns the number of bytes sent, or a negated NXT_UNIT_* error.
 */
ssize_t
nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start,
    size_t size, size_t min_size)
{
    int                           rc;
    ssize_t                       sent;
    uint32_t                      part_size, min_part_size, buf_size;
    const char                    *part_start;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    part_start = start;
    sent = 0;

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
        nxt_unit_req_warn(req, "write: response not initialized yet");

        return -NXT_UNIT_ERROR;
    }

    /* Check if response is not send yet. */
    if (nxt_slow_path(req->response_buf != NULL)) {
        /* Piggyback as much body as fits into the headers buffer. */
        part_size = req->response_buf->end - req->response_buf->free;
        part_size = nxt_min(size, part_size);

        rc = nxt_unit_response_add_content(req, part_start, part_size);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        rc = nxt_unit_response_send(req);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        min_size -= nxt_min(min_size, part_size);
    }

    while (size > 0) {
        part_size = nxt_min(size, PORT_MMAP_DATA_SIZE);
        min_part_size = nxt_min(min_size, part_size);
        min_part_size = nxt_min(min_part_size, PORT_MMAP_CHUNK_SIZE);

        rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                       &req->response_port, part_size,
                                       min_part_size, &mmap_buf, local_buf);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        buf_size = mmap_buf.buf.end - mmap_buf.buf.free;
        if (nxt_slow_path(buf_size == 0)) {
            /* Out of shared memory in non-blocking mode; report progress. */
            return sent;
        }
        part_size = nxt_min(buf_size, part_size);

        mmap_buf.buf.free = nxt_cpymem(mmap_buf.buf.free,
                                       part_start, part_size);

        rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, &mmap_buf, 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            return -rc;
        }

        size -= part_size;
        part_start += part_size;
        sent += part_size;

        min_size -= nxt_min(min_size, part_size);
    }

    return sent;
}


/*
 * Stream the response body from a caller-supplied read callback,
 * looping until read_info->eof.  Pending headers are flushed first
 * with callback data piggybacked into them.
 */
int
nxt_unit_response_write_cb(nxt_unit_request_info_t *req,
    nxt_unit_read_info_t *read_info)
{
    int                           rc;
    ssize_t                       n;
    uint32_t                      buf_size;
    nxt_unit_buf_t                *buf;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    /* Check if response is not send yet. */
    if (nxt_slow_path(req->response_buf)) {

        /* Enable content in headers buf. */
        rc = nxt_unit_response_add_content(req, "", 0);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            nxt_unit_req_error(req, "Failed to add piggyback content");

            return rc;
        }

        buf = req->response_buf;

        while (buf->end - buf->free > 0) {
            n = read_info->read(read_info, buf->free, buf->end - buf->free);
            if (nxt_slow_path(n < 0)) {
                nxt_unit_req_error(req, "Read error");

                return NXT_UNIT_ERROR;
            }

            /* Manually increase sizes.
*/ 2385 buf->free += n; 2386 req->response->piggyback_content_length += n; 2387 2388 if (read_info->eof) { 2389 break; 2390 } 2391 } 2392 2393 rc = nxt_unit_response_send(req); 2394 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 2395 nxt_unit_req_error(req, "Failed to send headers with content"); 2396 2397 return rc; 2398 } 2399 2400 if (read_info->eof) { 2401 return NXT_UNIT_OK; 2402 } 2403 } 2404 2405 while (!read_info->eof) { 2406 nxt_unit_req_debug(req, "write_cb, alloc %"PRIu32"", 2407 read_info->buf_size); 2408 2409 buf_size = nxt_min(read_info->buf_size, PORT_MMAP_DATA_SIZE); 2410 2411 rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process, 2412 &req->response_port, 2413 buf_size, buf_size, 2414 &mmap_buf, local_buf); 2415 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 2416 return rc; 2417 } 2418 2419 buf = &mmap_buf.buf; 2420 2421 while (!read_info->eof && buf->end > buf->free) { 2422 n = read_info->read(read_info, buf->free, buf->end - buf->free); 2423 if (nxt_slow_path(n < 0)) { 2424 nxt_unit_req_error(req, "Read error"); 2425 2426 nxt_unit_free_outgoing_buf(&mmap_buf); 2427 2428 return NXT_UNIT_ERROR; 2429 } 2430 2431 buf->free += n; 2432 } 2433 2434 rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, &mmap_buf, 0); 2435 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 2436 nxt_unit_req_error(req, "Failed to send content"); 2437 2438 return rc; 2439 } 2440 } 2441 2442 return NXT_UNIT_OK; 2443} 2444 2445 2446ssize_t 2447nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size) 2448{ 2449 ssize_t buf_res, res; 2450 2451 buf_res = nxt_unit_buf_read(&req->content_buf, &req->content_length, 2452 dst, size); 2453 2454 if (buf_res < (ssize_t) size && req->content_fd != -1) { 2455 res = read(req->content_fd, dst, size); 2456 if (res < 0) { 2457 nxt_unit_req_alert(req, "failed to read content: %s (%d)", 2458 strerror(errno), errno); 2459 2460 return res; 2461 } 2462 2463 if (res < (ssize_t) size) { 2464 close(req->content_fd); 2465 2466 req->content_fd = -1; 
2467 } 2468 2469 req->content_length -= res; 2470 size -= res; 2471 2472 dst = nxt_pointer_to(dst, res); 2473 2474 } else { 2475 res = 0; 2476 } 2477 2478 return buf_res + res; 2479} 2480 2481 2482ssize_t 2483nxt_unit_request_readline_size(nxt_unit_request_info_t *req, size_t max_size) 2484{ 2485 char *p; 2486 size_t l_size, b_size; 2487 nxt_unit_buf_t *b; 2488 nxt_unit_mmap_buf_t *mmap_buf, *preread_buf; 2489 2490 if (req->content_length == 0) { 2491 return 0; 2492 } 2493 2494 l_size = 0; 2495 2496 b = req->content_buf; 2497 2498 while (b != NULL) { 2499 b_size = b->end - b->free; 2500 p = memchr(b->free, '\n', b_size); 2501 2502 if (p != NULL) { 2503 p++; 2504 l_size += p - b->free; 2505 break; 2506 } 2507 2508 l_size += b_size; 2509 2510 if (max_size <= l_size) { 2511 break; 2512 } 2513 2514 mmap_buf = nxt_container_of(b, nxt_unit_mmap_buf_t, buf); 2515 if (mmap_buf->next == NULL 2516 && req->content_fd != -1 2517 && l_size < req->content_length) 2518 { 2519 preread_buf = nxt_unit_request_preread(req, 16384); 2520 if (nxt_slow_path(preread_buf == NULL)) { 2521 return -1; 2522 } 2523 2524 nxt_unit_mmap_buf_insert(&mmap_buf->next, preread_buf); 2525 } 2526 2527 b = nxt_unit_buf_next(b); 2528 } 2529 2530 return nxt_min(max_size, l_size); 2531} 2532 2533 2534static nxt_unit_mmap_buf_t * 2535nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size) 2536{ 2537 ssize_t res; 2538 nxt_unit_mmap_buf_t *mmap_buf; 2539 2540 if (req->content_fd == -1) { 2541 nxt_unit_req_alert(req, "preread: content_fd == -1"); 2542 return NULL; 2543 } 2544 2545 mmap_buf = nxt_unit_mmap_buf_get(req->ctx); 2546 if (nxt_slow_path(mmap_buf == NULL)) { 2547 nxt_unit_req_alert(req, "preread: failed to allocate buf"); 2548 return NULL; 2549 } 2550 2551 mmap_buf->free_ptr = malloc(size); 2552 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) { 2553 nxt_unit_req_alert(req, "preread: failed to allocate buf memory"); 2554 nxt_unit_mmap_buf_release(mmap_buf); 2555 return NULL; 2556 } 2557 2558 
mmap_buf->plain_ptr = mmap_buf->free_ptr; 2559 2560 mmap_buf->hdr = NULL; 2561 mmap_buf->buf.start = mmap_buf->free_ptr; 2562 mmap_buf->buf.free = mmap_buf->buf.start; 2563 mmap_buf->buf.end = mmap_buf->buf.start + size; 2564 mmap_buf->process = NULL; 2565 2566 res = read(req->content_fd, mmap_buf->free_ptr, size); 2567 if (res < 0) { 2568 nxt_unit_req_alert(req, "failed to read content: %s (%d)", 2569 strerror(errno), errno); 2570 2571 nxt_unit_mmap_buf_free(mmap_buf); 2572 2573 return NULL; 2574 } 2575 2576 if (res < (ssize_t) size) { 2577 close(req->content_fd); 2578 2579 req->content_fd = -1; 2580 } 2581 2582 nxt_unit_req_debug(req, "preread: read %d", (int) res); 2583 2584 mmap_buf->buf.end = mmap_buf->buf.free + res; 2585 2586 return mmap_buf; 2587} 2588 2589 2590static ssize_t 2591nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, size_t size) 2592{ 2593 u_char *p; 2594 size_t rest, copy, read; 2595 nxt_unit_buf_t *buf, *last_buf; 2596 2597 p = dst; 2598 rest = size; 2599 2600 buf = *b; 2601 last_buf = buf; 2602 2603 while (buf != NULL) { 2604 last_buf = buf; 2605 2606 copy = buf->end - buf->free; 2607 copy = nxt_min(rest, copy); 2608 2609 p = nxt_cpymem(p, buf->free, copy); 2610 2611 buf->free += copy; 2612 rest -= copy; 2613 2614 if (rest == 0) { 2615 if (buf->end == buf->free) { 2616 buf = nxt_unit_buf_next(buf); 2617 } 2618 2619 break; 2620 } 2621 2622 buf = nxt_unit_buf_next(buf); 2623 } 2624 2625 *b = last_buf; 2626 2627 read = size - rest; 2628 2629 *len -= read; 2630 2631 return read; 2632} 2633 2634 2635void 2636nxt_unit_request_done(nxt_unit_request_info_t *req, int rc) 2637{ 2638 uint32_t size; 2639 nxt_port_msg_t msg; 2640 nxt_unit_impl_t *lib; 2641 nxt_unit_request_info_impl_t *req_impl; 2642 2643 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 2644 2645 nxt_unit_req_debug(req, "done: %d", rc); 2646 2647 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 2648 goto skip_response_send; 2649 } 2650 2651 if 
    (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {

        /* No response was initialized; synthesize a minimal 200. */
        size = nxt_length("Content-Type") + nxt_length("text/plain");

        rc = nxt_unit_response_init(req, 200, 1, size);
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            goto skip_response_send;
        }

        rc = nxt_unit_response_add_field(req, "Content-Type",
                                         nxt_length("Content-Type"),
                                         "text/plain", nxt_length("text/plain"));
        if (nxt_slow_path(rc != NXT_UNIT_OK)) {
            goto skip_response_send;
        }
    }

    if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {

        req_impl->state = NXT_UNIT_RS_RESPONSE_SENT;

        /* Sends the headers buffer as the final message and releases req. */
        nxt_unit_buf_send_done(req->response_buf);

        return;
    }

skip_response_send:

    lib = nxt_container_of(req->unit, nxt_unit_impl_t, unit);

    /* Terminate the stream with an empty last (or error) message. */
    msg.stream = req_impl->stream;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = (rc == NXT_UNIT_OK) ? _NXT_PORT_MSG_DATA
                                   : _NXT_PORT_MSG_RPC_ERROR;
    msg.last = 1;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    (void) lib->callbacks.port_send(req->ctx, &req->response_port,
                                    &msg, sizeof(msg), NULL, 0);

    nxt_unit_request_info_release(req);
}


/* Single-fragment convenience wrapper around nxt_unit_websocket_sendv(). */
int
nxt_unit_websocket_send(nxt_unit_request_info_t *req, uint8_t opcode,
    uint8_t last, const void *start, size_t size)
{
    const struct iovec  iov = { (void *) start, size };

    return nxt_unit_websocket_sendv(req, opcode, last, &iov, 1);
}


/*
 * Send one WebSocket frame gathered from an iovec array.  A frame
 * header (at most 10 bytes: 2 + extended 8-byte length) is written
 * first; the payload is copied into as many outgoing buffers as
 * needed, each flushed as it fills.
 */
int
nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode,
    uint8_t last, const struct iovec *iov, int iovcnt)
{
    int                           i, rc;
    size_t                        l, copy;
    uint32_t                      payload_len, buf_size, alloc_size;
    const uint8_t                 *b;
    nxt_unit_buf_t                *buf;
    nxt_unit_mmap_buf_t           mmap_buf;
    nxt_websocket_header_t        *wh;
    nxt_unit_request_info_impl_t  *req_impl;
    char                          local_buf[NXT_UNIT_LOCAL_BUF_SIZE];

    req_impl =
        nxt_container_of(req, nxt_unit_request_info_impl_t, req);

    payload_len = 0;

    for (i = 0; i < iovcnt; i++) {
        payload_len += iov[i].iov_len;
    }

    /* 10 == maximum WebSocket frame header size. */
    buf_size = 10 + payload_len;
    alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);

    rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                   &req->response_port,
                                   alloc_size, alloc_size,
                                   &mmap_buf, local_buf);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        return rc;
    }

    buf = &mmap_buf.buf;

    buf->start[0] = 0;
    buf->start[1] = 0;

    /* buf_size now tracks how much remains for follow-up buffers. */
    buf_size -= buf->end - buf->start;

    wh = (void *) buf->free;

    buf->free = nxt_websocket_frame_init(wh, payload_len);
    wh->fin = last;
    wh->opcode = opcode;

    for (i = 0; i < iovcnt; i++) {
        b = iov[i].iov_base;
        l = iov[i].iov_len;

        while (l > 0) {
            copy = buf->end - buf->free;
            copy = nxt_min(l, copy);

            buf->free = nxt_cpymem(buf->free, b, copy);
            b += copy;
            l -= copy;

            if (l > 0) {
                /* Current buffer is full; flush and get the next one. */
                if (nxt_fast_path(buf->free > buf->start)) {
                    rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream,
                                                &mmap_buf, 0);

                    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
                        return rc;
                    }
                }

                alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);

                rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process,
                                               &req->response_port,
                                               alloc_size, alloc_size,
                                               &mmap_buf, local_buf);
                if (nxt_slow_path(rc != NXT_UNIT_OK)) {
                    return rc;
                }

                buf_size -= buf->end - buf->start;
            }
        }
    }

    if (buf->free > buf->start) {
        rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream,
                                    &mmap_buf, 0);
    }

    return rc;
}


/*
 * Read frame payload into "dst" and unmask it in place when the
 * frame is masked.  "d" is the payload offset of the first byte
 * just read, used to keep the 4-byte mask phase across calls.
 */
ssize_t
nxt_unit_websocket_read(nxt_unit_websocket_frame_t *ws, void *dst,
    size_t size)
{
    ssize_t   res;
    uint8_t   *b;
    uint64_t  i, d;

    res = nxt_unit_buf_read(&ws->content_buf,
                            &ws->content_length,
                            dst, size);

    if (ws->mask == NULL) {
        return res;
    }

    b = dst;
    d = (ws->payload_len - ws->content_length - res) % 4;

    for (i = 0; i < (uint64_t) res; i++) {
        b[i] ^= ws->mask[ (i + d) % 4 ];
    }

    return res;
}


/*
 * Detach a frame from shared memory so it outlives the current
 * callback: copy its content into a private heap buffer.  A frame
 * already backed by heap memory (free_ptr) or still holding its
 * mapping (hdr) needs no copy.
 */
int
nxt_unit_websocket_retain(nxt_unit_websocket_frame_t *ws)
{
    char                             *b;
    size_t                           size;
    nxt_unit_websocket_frame_impl_t  *ws_impl;

    ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);

    if (ws_impl->buf->free_ptr != NULL || ws_impl->buf->hdr != NULL) {
        return NXT_UNIT_OK;
    }

    size = ws_impl->buf->buf.end - ws_impl->buf->buf.start;

    b = malloc(size);
    if (nxt_slow_path(b == NULL)) {
        return NXT_UNIT_ERROR;
    }

    memcpy(b, ws_impl->buf->buf.start, size);

    ws_impl->buf->buf.start = b;
    ws_impl->buf->buf.free = b;
    ws_impl->buf->buf.end = b + size;

    ws_impl->buf->free_ptr = b;

    return NXT_UNIT_OK;
}


/* Release a processed WebSocket frame. */
void
nxt_unit_websocket_done(nxt_unit_websocket_frame_t *ws)
{
    nxt_unit_websocket_frame_release(ws);
}


/*
 * Find (or create) an outgoing shared-memory segment with at least
 * min_n contiguous free chunks, preferring up to *n.  On success *c
 * is the first chunk id and *n the number of chunks actually taken.
 * Blocks waiting for a router SHM_ACK when min_n > 0 and all
 * segments are exhausted.  Returns NULL on failure (with *n == 0 in
 * non-blocking OOSM cases).
 */
static nxt_port_mmap_header_t *
nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process,
    nxt_unit_port_id_t *port_id, nxt_chunk_id_t *c, int *n, int min_n)
{
    int                     res, nchunks, i;
    uint32_t                outgoing_size;
    nxt_unit_mmap_t         *mm, *mm_end;
    nxt_unit_impl_t         *lib;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&process->outgoing.mutex);

retry:

    outgoing_size = process->outgoing.size;

    mm_end = process->outgoing.elts + outgoing_size;

    for (mm = process->outgoing.elts; mm < mm_end; mm++) {
        hdr = mm->hdr;

        /* Skip segments bound to a different port. */
        if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port_id->id) {
            continue;
        }

        *c = 0;

        while
              (nxt_port_mmap_get_free_chunk(hdr->free_map, c)) {
            nchunks = 1;

            /* Try to grow the run of busy-marked chunks up to *n. */
            while (nchunks < *n) {
                res = nxt_port_mmap_chk_set_chunk_busy(hdr->free_map,
                                                       *c + nchunks);

                if (res == 0) {
                    /* Run broken; accept it if it meets the minimum. */
                    if (nchunks >= min_n) {
                        *n = nchunks;

                        goto unlock;
                    }

                    /* Too short: free what was claimed and scan on. */
                    for (i = 0; i < nchunks; i++) {
                        nxt_port_mmap_set_chunk_free(hdr->free_map, *c + i);
                    }

                    *c += nchunks + 1;
                    nchunks = 0;
                    break;
                }

                nchunks++;
            }

            if (nchunks >= min_n) {
                *n = nchunks;

                goto unlock;
            }
        }

        /* Segment exhausted; ask the router to release chunks. */
        hdr->oosm = 1;
    }

    if (outgoing_size >= lib->shm_mmap_limit) {
        /* Cannot allocate more shared memory. */
        pthread_mutex_unlock(&process->outgoing.mutex);

        if (min_n == 0) {
            *n = 0;
        }

        if (nxt_slow_path(process->outgoing.allocated_chunks + min_n
                          >= lib->shm_mmap_limit * PORT_MMAP_CHUNK_COUNT))
        {
            /* Memory allocated by application, but not send to router. */
            return NULL;
        }

        /* Notify router about OOSM condition. */

        res = nxt_unit_send_oosm(ctx, port_id);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            return NULL;
        }

        /* Return if caller can handle OOSM condition. Non-blocking mode.
         */

        if (min_n == 0) {
            return NULL;
        }

        nxt_unit_debug(ctx, "oosm: waiting for ACK");

        /* Blocking mode: wait for the router to free chunks, then retry. */
        res = nxt_unit_wait_shm_ack(ctx);
        if (nxt_slow_path(res != NXT_UNIT_OK)) {
            return NULL;
        }

        nxt_unit_debug(ctx, "oosm: retry");

        pthread_mutex_lock(&process->outgoing.mutex);

        goto retry;
    }

    /* Under the segment limit: create a new mapping with *n chunks. */
    *c = 0;
    hdr = nxt_unit_new_mmap(ctx, process, port_id, *n);

unlock:

    nxt_atomic_fetch_add(&process->outgoing.allocated_chunks, *n);

    nxt_unit_debug(ctx, "process %d allocated_chunks %d",
                   process->pid,
                   (int) process->outgoing.allocated_chunks);

    pthread_mutex_unlock(&process->outgoing.mutex);

    return hdr;
}


/*
 * Tell the router this process is out of shared memory so it can
 * acknowledge once chunks are released.
 */
static int
nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id)
{
    ssize_t          res;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_OOSM;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Block reading port messages until an SHM_ACK arrives.  Unrelated
 * messages are queued on the context's pending-read list so the
 * regular processing loop can handle them later; QUIT aborts the
 * wait with an error.
 */
static int
nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx)
{
    nxt_port_msg_t       *port_msg;
    nxt_unit_ctx_impl_t  *ctx_impl;
    nxt_unit_read_buf_t  *rbuf;

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    while (1) {
        rbuf = nxt_unit_read_buf_get(ctx);
        if (nxt_slow_path(rbuf == NULL)) {
            return NXT_UNIT_ERROR;
        }

        nxt_unit_read_buf(ctx, rbuf);

        if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) {
            nxt_unit_read_buf_release(ctx, rbuf);

            return NXT_UNIT_ERROR;
        }

        port_msg =
(nxt_port_msg_t *) rbuf->buf; 3042 3043 if (port_msg->type == _NXT_PORT_MSG_SHM_ACK) { 3044 nxt_unit_read_buf_release(ctx, rbuf); 3045 3046 break; 3047 } 3048 3049 pthread_mutex_lock(&ctx_impl->mutex); 3050 3051 *ctx_impl->pending_read_tail = rbuf; 3052 ctx_impl->pending_read_tail = &rbuf->next; 3053 rbuf->next = NULL; 3054 3055 pthread_mutex_unlock(&ctx_impl->mutex); 3056 3057 if (port_msg->type == _NXT_PORT_MSG_QUIT) { 3058 nxt_unit_debug(ctx, "oosm: quit received"); 3059 3060 return NXT_UNIT_ERROR; 3061 } 3062 } 3063 3064 return NXT_UNIT_OK; 3065} 3066 3067 3068static nxt_unit_mmap_t * 3069nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i) 3070{ 3071 uint32_t cap; 3072 3073 cap = mmaps->cap; 3074 3075 if (cap == 0) { 3076 cap = i + 1; 3077 } 3078 3079 while (i + 1 > cap) { 3080 3081 if (cap < 16) { 3082 cap = cap * 2; 3083 3084 } else { 3085 cap = cap + cap / 2; 3086 } 3087 } 3088 3089 if (cap != mmaps->cap) { 3090 3091 mmaps->elts = realloc(mmaps->elts, cap * sizeof(*mmaps->elts)); 3092 if (nxt_slow_path(mmaps->elts == NULL)) { 3093 return NULL; 3094 } 3095 3096 memset(mmaps->elts + mmaps->cap, 0, 3097 sizeof(*mmaps->elts) * (cap - mmaps->cap)); 3098 3099 mmaps->cap = cap; 3100 } 3101 3102 if (i + 1 > mmaps->size) { 3103 mmaps->size = i + 1; 3104 } 3105 3106 return mmaps->elts + i; 3107} 3108 3109 3110static nxt_port_mmap_header_t * 3111nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, 3112 nxt_unit_port_id_t *port_id, int n) 3113{ 3114 int i, fd, rc; 3115 void *mem; 3116 char name[64]; 3117 nxt_unit_mmap_t *mm; 3118 nxt_unit_impl_t *lib; 3119 nxt_port_mmap_header_t *hdr; 3120 3121 lib = process->lib; 3122 3123 mm = nxt_unit_mmap_at(&process->outgoing, process->outgoing.size); 3124 if (nxt_slow_path(mm == NULL)) { 3125 nxt_unit_warn(ctx, "failed to add mmap to outgoing array"); 3126 3127 return NULL; 3128 } 3129 3130 snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p", 3131 lib->pid, (void *) pthread_self()); 3132 3133#if 
   (NXT_HAVE_MEMFD_CREATE)

    /* Anonymous Linux memfd: nothing to unlink on failure. */
    fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name,
                       strerror(errno), errno);

        goto remove_fail;
    }

    nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd);

#elif (NXT_HAVE_SHM_OPEN_ANON)

    fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)",
                       strerror(errno), errno);

        goto remove_fail;
    }

#elif (NXT_HAVE_SHM_OPEN)

    /* Just in case. */
    shm_unlink(name);

    fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
    if (nxt_slow_path(fd == -1)) {
        nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name,
                       strerror(errno), errno);

        goto remove_fail;
    }

    /* Unlink immediately; the open fd keeps the object alive. */
    if (nxt_slow_path(shm_unlink(name) == -1)) {
        nxt_unit_warn(ctx, "shm_unlink(%s) failed: %s (%d)", name,
                      strerror(errno), errno);
    }

#else

#error No working shared memory implementation.

#endif

    if (nxt_slow_path(ftruncate(fd, PORT_MMAP_SIZE) == -1)) {
        nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd,
                       strerror(errno), errno);

        goto remove_fail;
    }

    mem = mmap(NULL, PORT_MMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", fd,
                       strerror(errno), errno);

        goto remove_fail;
    }

    mm->hdr = mem;
    hdr = mem;

    /* All chunks start free (bitmap bits set). */
    memset(hdr->free_map, 0xFFU, sizeof(hdr->free_map));
    memset(hdr->free_tracking_map, 0xFFU, sizeof(hdr->free_tracking_map));

    hdr->id = process->outgoing.size - 1;
    hdr->src_pid = lib->pid;
    hdr->dst_pid = process->pid;
    hdr->sent_over = port_id->id;

    /* Mark first n chunk(s) as busy */
    for (i = 0; i < n; i++) {
        nxt_port_mmap_set_chunk_busy(hdr->free_map, i);
    }

    /* Mark as busy chunk followed the last available chunk. */
    nxt_port_mmap_set_chunk_busy(hdr->free_map, PORT_MMAP_CHUNK_COUNT);
    nxt_port_mmap_set_chunk_busy(hdr->free_tracking_map, PORT_MMAP_CHUNK_COUNT);

    /* Drop the lock around the (potentially blocking) fd send. */
    pthread_mutex_unlock(&process->outgoing.mutex);

    rc = nxt_unit_send_mmap(ctx, port_id, fd);
    if (nxt_slow_path(rc != NXT_UNIT_OK)) {
        munmap(mem, PORT_MMAP_SIZE);
        hdr = NULL;

    } else {
        nxt_unit_debug(ctx, "new mmap #%"PRIu32" created for %d -> %d",
                       hdr->id, (int) lib->pid, (int) process->pid);
    }

    /* The fd is no longer needed locally once mapped and sent. */
    close(fd);

    pthread_mutex_lock(&process->outgoing.mutex);

    if (nxt_fast_path(hdr != NULL)) {
        return hdr;
    }

remove_fail:

    /* Undo the size increment done by nxt_unit_mmap_at(). */
    process->outgoing.size--;

    return NULL;
}


/*
 * Pass the shared-memory file descriptor to the router via an
 * SCM_RIGHTS control message attached to an _NXT_PORT_MSG_MMAP
 * port message.
 */
static int
nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int fd)
{
    ssize_t          res;
    nxt_port_msg_t   msg;
    nxt_unit_impl_t  *lib;
    union {
        struct cmsghdr  cm;
        char            space[CMSG_SPACE(sizeof(int))];
    } cmsg;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_MMAP;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    /*
     * Fill all padding fields with 0.
     * Code in Go 1.11 validate cmsghdr using padding field as part of len.
     * See Cmsghdr definition and socketControlMessageHeaderAndData function.
     */
    memset(&cmsg, 0, sizeof(cmsg));

    cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
    cmsg.cm.cmsg_level = SOL_SOCKET;
    cmsg.cm.cmsg_type = SCM_RIGHTS;

    /*
     * memcpy() is used instead of simple
     * *(int *) CMSG_DATA(&cmsg.cm) = fd;
     * because GCC 4.4 with -O2/3/s optimization may issue a warning:
     * dereferencing type-punned pointer will break strict-aliasing rules
     *
     * Fortunately, GCC with -O1 compiles this nxt_memcpy()
     * in the same simple assignment as in the code above.
     */
    memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int));

    res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg),
                                   &cmsg, sizeof(cmsg));
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


/*
 * Prepare an outgoing buffer of "size" bytes (at least "min_size").
 * Small payloads (<= NXT_UNIT_MAX_PLAIN_SIZE) use a plain buffer —
 * the caller-provided local_buf when available, otherwise heap —
 * with sizeof(nxt_port_msg_t) bytes reserved in front for the port
 * message header.  Larger payloads are carved out of shared memory
 * via nxt_unit_mmap_get().
 */
static int
nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process,
    nxt_unit_port_id_t *port_id, uint32_t size, uint32_t min_size,
    nxt_unit_mmap_buf_t *mmap_buf, char *local_buf)
{
    int                     nchunks, min_nchunks;
    nxt_chunk_id_t          c;
    nxt_port_mmap_header_t  *hdr;

    if (size <= NXT_UNIT_MAX_PLAIN_SIZE) {
        if (local_buf != NULL) {
            mmap_buf->free_ptr = NULL;
            mmap_buf->plain_ptr = local_buf;

        } else {
            /* +header space so the message can be prepended in place. */
            mmap_buf->free_ptr = malloc(size + sizeof(nxt_port_msg_t));
            if (nxt_slow_path(mmap_buf->free_ptr == NULL)) {
                return NXT_UNIT_ERROR;
            }

            mmap_buf->plain_ptr = mmap_buf->free_ptr;
        }

        mmap_buf->hdr = NULL;
        mmap_buf->buf.start = mmap_buf->plain_ptr + sizeof(nxt_port_msg_t);
        mmap_buf->buf.free = mmap_buf->buf.start;
        mmap_buf->buf.end = mmap_buf->buf.start + size;
        mmap_buf->port_id = *port_id;
        mmap_buf->process = process;

        nxt_unit_debug(ctx, "outgoing plain buffer allocation: (%p, %d)",
                       mmap_buf->buf.start, (int) size);

        return NXT_UNIT_OK;
    }

    nchunks = (size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;
    min_nchunks = (min_size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;

    hdr = nxt_unit_mmap_get(ctx, process, port_id, &c, &nchunks, min_nchunks);
    if (nxt_slow_path(hdr == NULL)) {
        /* Non-blocking OOSM: report an empty buffer, not an error. */
        if (nxt_fast_path(min_nchunks == 0 && nchunks == 0)) {
            mmap_buf->hdr = NULL;
            mmap_buf->buf.start = NULL;
            mmap_buf->buf.free = NULL;
            mmap_buf->buf.end = NULL;
            mmap_buf->free_ptr = NULL;

            return NXT_UNIT_OK;
        }

        return NXT_UNIT_ERROR;
    }

    mmap_buf->hdr = hdr;
    mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c);
    mmap_buf->buf.free = mmap_buf->buf.start;
    mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE;
    mmap_buf->port_id = *port_id;
    mmap_buf->process = process;
    mmap_buf->free_ptr = NULL;
    mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    nxt_unit_debug(ctx, "outgoing mmap allocation: (%d,%d,%d)",
                   (int) hdr->id, (int) c,
                   (int) (nchunks * PORT_MMAP_CHUNK_SIZE));

    return NXT_UNIT_OK;
}


/*
 * Accept a shared memory fd sent to us by peer process "pid": map it and
 * register the mapping in the peer's incoming mmaps array at the slot given
 * by the header's id.  Returns NXT_UNIT_OK or NXT_UNIT_ERROR.
 */
static int
nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd)
{
    int                     rc;
    void                    *mem;
    struct stat             mmap_stat;
    nxt_unit_mmap_t         *mm;
    nxt_unit_impl_t         *lib;
    nxt_unit_process_t      *process;
    nxt_port_mmap_header_t  *hdr;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_unit_debug(ctx, "incoming_mmap: fd %d from process %d", fd, (int) pid);

    pthread_mutex_lock(&lib->mutex);

    /* Takes a reference on success; released via process_use(-1) below. */
    process = nxt_unit_process_find(ctx, pid, 0);

    pthread_mutex_unlock(&lib->mutex);

    if (nxt_slow_path(process == NULL)) {
        nxt_unit_warn(ctx, "incoming_mmap: process %d not found, fd %d",
                      (int) pid, fd);

        return NXT_UNIT_ERROR;
    }

    rc = NXT_UNIT_ERROR;

    if (fstat(fd, &mmap_stat) == -1) {
        nxt_unit_warn(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd,
                      strerror(errno), errno);

        goto fail;
    }

    mem = mmap(NULL, mmap_stat.st_size, PROT_READ | PROT_WRITE,
               MAP_SHARED, fd, 0);
    if (nxt_slow_path(mem == MAP_FAILED)) {
        nxt_unit_warn(ctx, "incoming_mmap: mmap() failed: %s (%d)",
                      strerror(errno), errno);

        goto fail;
    }

    hdr = mem;

    /* The header must name the sender and us; otherwise reject the segment. */
    if (nxt_slow_path(hdr->src_pid != pid || hdr->dst_pid != lib->pid)) {

        nxt_unit_warn(ctx, "incoming_mmap: unexpected pid in mmap header "
                      "detected: %d != %d or %d != %d", (int) hdr->src_pid,
                      (int) pid, (int) hdr->dst_pid, (int) lib->pid);

        munmap(mem, PORT_MMAP_SIZE);

        goto fail;
    }

    pthread_mutex_lock(&process->incoming.mutex);

    mm = nxt_unit_mmap_at(&process->incoming, hdr->id);
    if (nxt_slow_path(mm == NULL)) {
        nxt_unit_warn(ctx, "incoming_mmap: failed to add to incoming array");

        munmap(mem, PORT_MMAP_SIZE);

    } else {
        mm->hdr = hdr;

        hdr->sent_over = 0xFFFFu;

        rc = NXT_UNIT_OK;
    }

    pthread_mutex_unlock(&process->incoming.mutex);

fail:

    nxt_unit_process_use(ctx, process, -1);

    return rc;
}


/* Initialize an (empty) mmaps array and its mutex. */
static void
nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps)
{
    pthread_mutex_init(&mmaps->mutex, NULL);

    mmaps->size = 0;
    mmaps->cap = 0;
    mmaps->elts = NULL;
    mmaps->allocated_chunks = 0;
}


/*
 * Adjust the process reference count by "i" (positive or negative);
 * destroys the process object when the count drops to zero.
 */
static void
nxt_unit_process_use(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, int i)
{
    long  c;

    /* fetch_add returns the previous value, hence the "c == -i" test. */
    c = nxt_atomic_fetch_add(&process->use_count, i);

    if (i < 0 && c == -i) {
        nxt_unit_debug(ctx, "destroy process #%d", (int) process->pid);

        nxt_unit_mmaps_destroy(&process->incoming);
        nxt_unit_mmaps_destroy(&process->outgoing);

        free(process);
    }
}


/* Unmap every registered segment, free the array and destroy the mutex. */
static void
nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps)
{
    nxt_unit_mmap_t  *mm, *end;

    if (mmaps->elts != NULL) {
        end = mmaps->elts + mmaps->size;

        for (mm = mmaps->elts; mm < end; mm++) {
            munmap(mm->hdr, PORT_MMAP_SIZE);
        }

        free(mmaps->elts);
    }

    pthread_mutex_destroy(&mmaps->mutex);
}


/*
 * Look up an incoming mmap header by id.
 * Caller must hold process->incoming.mutex.  Returns NULL when out of range.
 */
static nxt_port_mmap_header_t *
nxt_unit_get_incoming_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process,
    uint32_t id)
{
    nxt_port_mmap_header_t  *hdr;

    if (nxt_fast_path(process->incoming.size > id)) {
        hdr = process->incoming.elts[id].hdr;

    } else {
        hdr = NULL;
    }

    return hdr;
}


/*
 * Consume the tracking sub-message that prefixes a request message and
 * check whether the stream is still wanted.  Returns non-zero when the
 * request should be processed, 0 when it was cancelled or is malformed.
 */
static int
nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    int                           rc;
    nxt_chunk_id_t                c;
    nxt_unit_process_t            *process;
    nxt_port_mmap_header_t        *hdr;
    nxt_port_mmap_tracking_msg_t  *tracking_msg;

    if (recv_msg->size < (int) sizeof(nxt_port_mmap_tracking_msg_t)) {
        nxt_unit_warn(ctx, "#%"PRIu32": tracking_read: too small message (%d)",
                      recv_msg->stream, (int) recv_msg->size);

        return 0;
    }

    tracking_msg = recv_msg->start;

    /* Advance past the tracking sub-message. */
    recv_msg->start = tracking_msg + 1;
    recv_msg->size -= sizeof(nxt_port_mmap_tracking_msg_t);

    process = nxt_unit_msg_get_process(ctx, recv_msg);
    if (nxt_slow_path(process == NULL)) {
        return 0;
    }

    pthread_mutex_lock(&process->incoming.mutex);

    hdr = nxt_unit_get_incoming_mmap(ctx, process, tracking_msg->mmap_id);
    if (nxt_slow_path(hdr == NULL)) {
        pthread_mutex_unlock(&process->incoming.mutex);

        nxt_unit_warn(ctx, "#%"PRIu32": tracking_read: "
                      "invalid mmap id %d,%"PRIu32,
                      recv_msg->stream, (int) process->pid,
                      tracking_msg->mmap_id);

        return 0;
    }

    /* Claim the tracking slot; fails (rc == 0) if the sender cancelled it. */
    c = tracking_msg->tracking_id;
    rc = nxt_atomic_cmp_set(hdr->tracking + c, recv_msg->stream, 0);

    if (rc == 0) {
        nxt_unit_debug(ctx, "#%"PRIu32": tracking cancelled",
                       recv_msg->stream);

        nxt_port_mmap_set_chunk_free(hdr->free_tracking_map, c);
    }

    pthread_mutex_unlock(&process->incoming.mutex);

    return rc;
}


/*
 * Translate an array of nxt_port_mmap_msg_t descriptors in the received
 * message into a linked list of incoming buffers pointing directly into the
 * shared memory segments.  The first descriptor also re-targets
 * recv_msg->start/size at the actual payload.
 */
static int
nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
{
    void                    *start;
    uint32_t                size;
    nxt_unit_process_t      *process;
    nxt_unit_mmap_buf_t     *b, **incoming_tail;
    nxt_port_mmap_msg_t     *mmap_msg, *end;
    nxt_port_mmap_header_t  *hdr;

    if (nxt_slow_path(recv_msg->size < sizeof(nxt_port_mmap_msg_t))) {
        nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: too small message (%d)",
                      recv_msg->stream, (int) recv_msg->size);

        return NXT_UNIT_ERROR;
    }

    process = nxt_unit_msg_get_process(ctx, recv_msg);
    if (nxt_slow_path(process == NULL)) {
        return NXT_UNIT_ERROR;
    }

    mmap_msg = recv_msg->start;
    end = nxt_pointer_to(recv_msg->start, recv_msg->size);

    incoming_tail = &recv_msg->incoming_buf;

    /* First pass: allocate one buffer per descriptor, outside the lock. */
    for (; mmap_msg < end; mmap_msg++) {
        b = nxt_unit_mmap_buf_get(ctx);
        if (nxt_slow_path(b == NULL)) {
            nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: failed to allocate buf",
                          recv_msg->stream);

            return NXT_UNIT_ERROR;
        }

        nxt_unit_mmap_buf_insert(incoming_tail, b);
        incoming_tail = &b->next;
    }

    b = recv_msg->incoming_buf;
    mmap_msg = recv_msg->start;

    pthread_mutex_lock(&process->incoming.mutex);

    /* Second pass: resolve each descriptor into a shared memory region. */
    for (; mmap_msg < end; mmap_msg++) {
        hdr = nxt_unit_get_incoming_mmap(ctx, process, mmap_msg->mmap_id);
        if (nxt_slow_path(hdr == NULL)) {
            pthread_mutex_unlock(&process->incoming.mutex);

            nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: "
                          "invalid mmap id %d,%"PRIu32,
                          recv_msg->stream, (int) process->pid,
                          mmap_msg->mmap_id);

            return NXT_UNIT_ERROR;
        }

        start = nxt_port_mmap_chunk_start(hdr, mmap_msg->chunk_id);
        size = mmap_msg->size;

        if (recv_msg->start == mmap_msg) {
            recv_msg->start = start;
            recv_msg->size = size;
        }

        b->buf.start = start;
        b->buf.free = start;
        b->buf.end = b->buf.start + size;
        b->hdr = hdr;
        b->process = process;

        b = b->next;

        nxt_unit_debug(ctx, "#%"PRIu32": mmap_read: [%p,%d] %d->%d,(%d,%d,%d)",
                       recv_msg->stream,
                       start, (int) size,
                       (int) hdr->src_pid, (int) hdr->dst_pid,
                       (int) hdr->id, (int) mmap_msg->chunk_id,
                       (int) mmap_msg->size);
    }

    pthread_mutex_unlock(&process->incoming.mutex);

    return NXT_UNIT_OK;
}


/*
 * Return the chunks covering [start, start + size) in "hdr" to the free map,
 * update the outgoing allocation counter, and wake a sender that reported
 * out-of-shared-memory (oosm) with a SHM_ACK.
 */
static void
nxt_unit_mmap_release(nxt_unit_ctx_t *ctx,
    nxt_unit_process_t *process, nxt_port_mmap_header_t *hdr,
    void *start, uint32_t size)
{
    int              freed_chunks;
    u_char           *p, *end;
    nxt_chunk_id_t   c;
    nxt_unit_impl_t  *lib;

    /* Poison released memory to catch use-after-release. */
    memset(start, 0xA5, size);

    p = start;
    end = p + size;
    c = nxt_port_mmap_chunk_id(hdr, p);
    freed_chunks = 0;

    while (p < end) {
        nxt_port_mmap_set_chunk_free(hdr->free_map, c);

        p += PORT_MMAP_CHUNK_SIZE;
        c++;
        freed_chunks++;
    }

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    if (hdr->src_pid == lib->pid && freed_chunks != 0) {
        nxt_atomic_fetch_add(&process->outgoing.allocated_chunks,
                             -freed_chunks);

        nxt_unit_debug(ctx, "process %d allocated_chunks %d",
                       process->pid,
                       (int) process->outgoing.allocated_chunks);
    }

    /* Only one ack per oosm flag: cmp_set clears it exactly once. */
    if (hdr->dst_pid == lib->pid
        && freed_chunks != 0
        && nxt_atomic_cmp_set(&hdr->oosm, 1, 0))
    {
        nxt_unit_send_shm_ack(ctx, hdr->src_pid);
    }
}


/*
 * Notify process "pid" (via its port 0) that shared memory was released,
 * so it can retry a previously failed allocation.
 */
static int
nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid)
{
    ssize_t             res;
    nxt_port_msg_t      msg;
    nxt_unit_impl_t     *lib;
    nxt_unit_port_id_t  port_id;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_unit_port_id_init(&port_id, pid, 0);

    msg.stream = 0;
    msg.pid = lib->pid;
    msg.reply_port = 0;
    msg.type = _NXT_PORT_MSG_SHM_ACK;
    msg.last = 0;
    msg.mmap = 0;
    msg.nf = 0;
    msg.mf = 0;
    msg.tracking = 0;

    res = lib->callbacks.port_send(ctx, &port_id, &msg, sizeof(msg), NULL, 0);
    if (nxt_slow_path(res != sizeof(msg))) {
        return NXT_UNIT_ERROR;
    }

    return NXT_UNIT_OK;
}


static nxt_int_t
3740nxt_unit_lvlhsh_pid_test(nxt_lvlhsh_query_t *lhq, void *data) 3741{ 3742 nxt_process_t *process; 3743 3744 process = data; 3745 3746 if (lhq->key.length == sizeof(pid_t) 3747 && *(pid_t *) lhq->key.start == process->pid) 3748 { 3749 return NXT_OK; 3750 } 3751 3752 return NXT_DECLINED; 3753} 3754 3755 3756static const nxt_lvlhsh_proto_t lvlhsh_processes_proto nxt_aligned(64) = { 3757 NXT_LVLHSH_DEFAULT, 3758 nxt_unit_lvlhsh_pid_test, 3759 nxt_lvlhsh_alloc, 3760 nxt_lvlhsh_free, 3761}; 3762 3763 3764static inline void 3765nxt_unit_process_lhq_pid(nxt_lvlhsh_query_t *lhq, pid_t *pid) 3766{ 3767 lhq->key_hash = nxt_murmur_hash2(pid, sizeof(*pid)); 3768 lhq->key.length = sizeof(*pid); 3769 lhq->key.start = (u_char *) pid; 3770 lhq->proto = &lvlhsh_processes_proto; 3771} 3772 3773 3774static nxt_unit_process_t * 3775nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid) 3776{ 3777 nxt_unit_impl_t *lib; 3778 nxt_unit_process_t *process; 3779 nxt_lvlhsh_query_t lhq; 3780 3781 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3782 3783 nxt_unit_process_lhq_pid(&lhq, &pid); 3784 3785 if (nxt_lvlhsh_find(&lib->processes, &lhq) == NXT_OK) { 3786 process = lhq.value; 3787 nxt_unit_process_use(ctx, process, 1); 3788 3789 return process; 3790 } 3791 3792 process = malloc(sizeof(nxt_unit_process_t)); 3793 if (nxt_slow_path(process == NULL)) { 3794 nxt_unit_warn(ctx, "failed to allocate process for #%d", (int) pid); 3795 3796 return NULL; 3797 } 3798 3799 process->pid = pid; 3800 process->use_count = 1; 3801 process->next_port_id = 0; 3802 process->lib = lib; 3803 3804 nxt_queue_init(&process->ports); 3805 3806 nxt_unit_mmaps_init(&process->incoming); 3807 nxt_unit_mmaps_init(&process->outgoing); 3808 3809 lhq.replace = 0; 3810 lhq.value = process; 3811 3812 switch (nxt_lvlhsh_insert(&lib->processes, &lhq)) { 3813 3814 case NXT_OK: 3815 break; 3816 3817 default: 3818 nxt_unit_warn(ctx, "process %d insert failed", (int) pid); 3819 3820 
pthread_mutex_destroy(&process->outgoing.mutex); 3821 pthread_mutex_destroy(&process->incoming.mutex); 3822 free(process); 3823 process = NULL; 3824 break; 3825 } 3826 3827 nxt_unit_process_use(ctx, process, 1); 3828 3829 return process; 3830} 3831 3832 3833static nxt_unit_process_t * 3834nxt_unit_process_find(nxt_unit_ctx_t *ctx, pid_t pid, int remove) 3835{ 3836 int rc; 3837 nxt_unit_impl_t *lib; 3838 nxt_unit_process_t *process; 3839 nxt_lvlhsh_query_t lhq; 3840 3841 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3842 3843 nxt_unit_process_lhq_pid(&lhq, &pid); 3844 3845 if (remove) { 3846 rc = nxt_lvlhsh_delete(&lib->processes, &lhq); 3847 3848 } else { 3849 rc = nxt_lvlhsh_find(&lib->processes, &lhq); 3850 } 3851 3852 if (rc == NXT_OK) { 3853 process = lhq.value; 3854 3855 if (!remove) { 3856 nxt_unit_process_use(ctx, process, 1); 3857 } 3858 3859 return process; 3860 } 3861 3862 return NULL; 3863} 3864 3865 3866static nxt_unit_process_t * 3867nxt_unit_process_pop_first(nxt_unit_impl_t *lib) 3868{ 3869 return nxt_lvlhsh_retrieve(&lib->processes, &lvlhsh_processes_proto, NULL); 3870} 3871 3872 3873int 3874nxt_unit_run(nxt_unit_ctx_t *ctx) 3875{ 3876 int rc; 3877 nxt_unit_impl_t *lib; 3878 3879 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3880 rc = NXT_UNIT_OK; 3881 3882 while (nxt_fast_path(lib->online)) { 3883 rc = nxt_unit_run_once(ctx); 3884 3885 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 3886 break; 3887 } 3888 } 3889 3890 return rc; 3891} 3892 3893 3894int 3895nxt_unit_run_once(nxt_unit_ctx_t *ctx) 3896{ 3897 int rc; 3898 nxt_unit_ctx_impl_t *ctx_impl; 3899 nxt_unit_read_buf_t *rbuf; 3900 3901 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 3902 3903 pthread_mutex_lock(&ctx_impl->mutex); 3904 3905 if (ctx_impl->pending_read_head != NULL) { 3906 rbuf = ctx_impl->pending_read_head; 3907 ctx_impl->pending_read_head = rbuf->next; 3908 3909 if (ctx_impl->pending_read_tail == &rbuf->next) { 3910 ctx_impl->pending_read_tail = 
&ctx_impl->pending_read_head; 3911 } 3912 3913 pthread_mutex_unlock(&ctx_impl->mutex); 3914 3915 } else { 3916 rbuf = nxt_unit_read_buf_get_impl(ctx_impl); 3917 if (nxt_slow_path(rbuf == NULL)) { 3918 return NXT_UNIT_ERROR; 3919 } 3920 3921 nxt_unit_read_buf(ctx, rbuf); 3922 } 3923 3924 if (nxt_fast_path(rbuf->size > 0)) { 3925 rc = nxt_unit_process_msg(ctx, &ctx_impl->read_port_id, 3926 rbuf->buf, rbuf->size, 3927 rbuf->oob, sizeof(rbuf->oob)); 3928 3929#if (NXT_DEBUG) 3930 memset(rbuf->buf, 0xAC, rbuf->size); 3931#endif 3932 3933 } else { 3934 rc = NXT_UNIT_ERROR; 3935 } 3936 3937 nxt_unit_read_buf_release(ctx, rbuf); 3938 3939 return rc; 3940} 3941 3942 3943static void 3944nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) 3945{ 3946 nxt_unit_impl_t *lib; 3947 nxt_unit_ctx_impl_t *ctx_impl; 3948 3949 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 3950 3951 memset(rbuf->oob, 0, sizeof(struct cmsghdr)); 3952 3953 if (ctx_impl->read_port_fd != -1) { 3954 rbuf->size = nxt_unit_port_recv(ctx, ctx_impl->read_port_fd, 3955 rbuf->buf, sizeof(rbuf->buf), 3956 rbuf->oob, sizeof(rbuf->oob)); 3957 3958 } else { 3959 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3960 3961 rbuf->size = lib->callbacks.port_recv(ctx, &ctx_impl->read_port_id, 3962 rbuf->buf, sizeof(rbuf->buf), 3963 rbuf->oob, sizeof(rbuf->oob)); 3964 } 3965} 3966 3967 3968void 3969nxt_unit_done(nxt_unit_ctx_t *ctx) 3970{ 3971 nxt_unit_impl_t *lib; 3972 nxt_unit_process_t *process; 3973 nxt_unit_ctx_impl_t *ctx_impl; 3974 3975 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 3976 3977 nxt_queue_each(ctx_impl, &lib->contexts, nxt_unit_ctx_impl_t, link) { 3978 3979 nxt_unit_ctx_free(&ctx_impl->ctx); 3980 3981 } nxt_queue_loop; 3982 3983 for ( ;; ) { 3984 pthread_mutex_lock(&lib->mutex); 3985 3986 process = nxt_unit_process_pop_first(lib); 3987 if (process == NULL) { 3988 pthread_mutex_unlock(&lib->mutex); 3989 3990 break; 3991 } 3992 3993 nxt_unit_remove_process(ctx, 
process); 3994 } 3995 3996 pthread_mutex_destroy(&lib->mutex); 3997 3998 free(lib); 3999} 4000 4001 4002nxt_unit_ctx_t * 4003nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) 4004{ 4005 int rc, fd; 4006 nxt_unit_impl_t *lib; 4007 nxt_unit_port_id_t new_port_id; 4008 nxt_unit_ctx_impl_t *new_ctx; 4009 4010 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4011 4012 new_ctx = malloc(sizeof(nxt_unit_ctx_impl_t) + lib->request_data_size); 4013 if (nxt_slow_path(new_ctx == NULL)) { 4014 nxt_unit_warn(ctx, "failed to allocate context"); 4015 4016 return NULL; 4017 } 4018 4019 rc = nxt_unit_create_port(ctx, &new_port_id, &fd); 4020 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4021 free(new_ctx); 4022 4023 return NULL; 4024 } 4025 4026 rc = nxt_unit_send_port(ctx, &lib->ready_port_id, &new_port_id, fd); 4027 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4028 lib->callbacks.remove_port(ctx, &new_port_id); 4029 4030 close(fd); 4031 4032 free(new_ctx); 4033 4034 return NULL; 4035 } 4036 4037 close(fd); 4038 4039 rc = nxt_unit_ctx_init(lib, new_ctx, data); 4040 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4041 lib->callbacks.remove_port(ctx, &new_port_id); 4042 4043 free(new_ctx); 4044 4045 return NULL; 4046 } 4047 4048 new_ctx->read_port_id = new_port_id; 4049 4050 return &new_ctx->ctx; 4051} 4052 4053 4054void 4055nxt_unit_ctx_free(nxt_unit_ctx_t *ctx) 4056{ 4057 nxt_unit_impl_t *lib; 4058 nxt_unit_ctx_impl_t *ctx_impl; 4059 nxt_unit_mmap_buf_t *mmap_buf; 4060 nxt_unit_request_info_impl_t *req_impl; 4061 nxt_unit_websocket_frame_impl_t *ws_impl; 4062 4063 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 4064 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4065 4066 nxt_queue_each(req_impl, &ctx_impl->active_req, 4067 nxt_unit_request_info_impl_t, link) 4068 { 4069 nxt_unit_req_warn(&req_impl->req, "active request on ctx free"); 4070 4071 nxt_unit_request_done(&req_impl->req, NXT_UNIT_ERROR); 4072 4073 } nxt_queue_loop; 4074 4075 
nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[0]); 4076 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[1]); 4077 4078 while (ctx_impl->free_buf != NULL) { 4079 mmap_buf = ctx_impl->free_buf; 4080 nxt_unit_mmap_buf_unlink(mmap_buf); 4081 free(mmap_buf); 4082 } 4083 4084 nxt_queue_each(req_impl, &ctx_impl->free_req, 4085 nxt_unit_request_info_impl_t, link) 4086 { 4087 nxt_unit_request_info_free(req_impl); 4088 4089 } nxt_queue_loop; 4090 4091 nxt_queue_each(ws_impl, &ctx_impl->free_ws, 4092 nxt_unit_websocket_frame_impl_t, link) 4093 { 4094 nxt_unit_websocket_frame_free(ws_impl); 4095 4096 } nxt_queue_loop; 4097 4098 pthread_mutex_destroy(&ctx_impl->mutex); 4099 4100 nxt_queue_remove(&ctx_impl->link); 4101 4102 if (ctx_impl != &lib->main_ctx) { 4103 free(ctx_impl); 4104 } 4105} 4106 4107 4108/* SOCK_SEQPACKET is disabled to test SOCK_DGRAM on all platforms. */ 4109#if (0 || NXT_HAVE_AF_UNIX_SOCK_SEQPACKET) 4110#define NXT_UNIX_SOCKET SOCK_SEQPACKET 4111#else 4112#define NXT_UNIX_SOCKET SOCK_DGRAM 4113#endif 4114 4115 4116void 4117nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id) 4118{ 4119 nxt_unit_port_hash_id_t port_hash_id; 4120 4121 port_hash_id.pid = pid; 4122 port_hash_id.id = id; 4123 4124 port_id->pid = pid; 4125 port_id->hash = nxt_murmur_hash2(&port_hash_id, sizeof(port_hash_id)); 4126 port_id->id = id; 4127} 4128 4129 4130int 4131nxt_unit_create_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, 4132 nxt_unit_port_id_t *port_id) 4133{ 4134 int rc, fd; 4135 nxt_unit_impl_t *lib; 4136 nxt_unit_port_id_t new_port_id; 4137 4138 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4139 4140 rc = nxt_unit_create_port(ctx, &new_port_id, &fd); 4141 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4142 return rc; 4143 } 4144 4145 rc = nxt_unit_send_port(ctx, dst, &new_port_id, fd); 4146 4147 if (nxt_fast_path(rc == NXT_UNIT_OK)) { 4148 *port_id = new_port_id; 4149 4150 } else { 4151 lib->callbacks.remove_port(ctx, &new_port_id); 4152 } 4153 
4154 close(fd); 4155 4156 return rc; 4157} 4158 4159 4160static int 4161nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) 4162{ 4163 int rc, port_sockets[2]; 4164 nxt_unit_impl_t *lib; 4165 nxt_unit_port_t new_port; 4166 nxt_unit_process_t *process; 4167 4168 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4169 4170 rc = socketpair(AF_UNIX, NXT_UNIX_SOCKET, 0, port_sockets); 4171 if (nxt_slow_path(rc != 0)) { 4172 nxt_unit_warn(ctx, "create_port: socketpair() failed: %s (%d)", 4173 strerror(errno), errno); 4174 4175 return NXT_UNIT_ERROR; 4176 } 4177 4178 nxt_unit_debug(ctx, "create_port: new socketpair: %d->%d", 4179 port_sockets[0], port_sockets[1]); 4180 4181 pthread_mutex_lock(&lib->mutex); 4182 4183 process = nxt_unit_process_get(ctx, lib->pid); 4184 if (nxt_slow_path(process == NULL)) { 4185 pthread_mutex_unlock(&lib->mutex); 4186 4187 close(port_sockets[0]); 4188 close(port_sockets[1]); 4189 4190 return NXT_UNIT_ERROR; 4191 } 4192 4193 nxt_unit_port_id_init(&new_port.id, lib->pid, process->next_port_id++); 4194 4195 new_port.in_fd = port_sockets[0]; 4196 new_port.out_fd = -1; 4197 new_port.data = NULL; 4198 4199 pthread_mutex_unlock(&lib->mutex); 4200 4201 nxt_unit_process_use(ctx, process, -1); 4202 4203 rc = lib->callbacks.add_port(ctx, &new_port); 4204 if (nxt_slow_path(rc != NXT_UNIT_OK)) { 4205 nxt_unit_warn(ctx, "create_port: add_port() failed"); 4206 4207 close(port_sockets[0]); 4208 close(port_sockets[1]); 4209 4210 return rc; 4211 } 4212 4213 *port_id = new_port.id; 4214 *fd = port_sockets[1]; 4215 4216 return rc; 4217} 4218 4219 4220static int 4221nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, 4222 nxt_unit_port_id_t *new_port, int fd) 4223{ 4224 ssize_t res; 4225 nxt_unit_impl_t *lib; 4226 4227 struct { 4228 nxt_port_msg_t msg; 4229 nxt_port_msg_new_port_t new_port; 4230 } m; 4231 4232 union { 4233 struct cmsghdr cm; 4234 char space[CMSG_SPACE(sizeof(int))]; 4235 } cmsg; 4236 4237 lib = 
nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4238 4239 m.msg.stream = 0; 4240 m.msg.pid = lib->pid; 4241 m.msg.reply_port = 0; 4242 m.msg.type = _NXT_PORT_MSG_NEW_PORT; 4243 m.msg.last = 0; 4244 m.msg.mmap = 0; 4245 m.msg.nf = 0; 4246 m.msg.mf = 0; 4247 m.msg.tracking = 0; 4248 4249 m.new_port.id = new_port->id; 4250 m.new_port.pid = new_port->pid; 4251 m.new_port.type = NXT_PROCESS_WORKER; 4252 m.new_port.max_size = 16 * 1024; 4253 m.new_port.max_share = 64 * 1024; 4254 4255 memset(&cmsg, 0, sizeof(cmsg)); 4256 4257 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); 4258 cmsg.cm.cmsg_level = SOL_SOCKET; 4259 cmsg.cm.cmsg_type = SCM_RIGHTS; 4260 4261 /* 4262 * memcpy() is used instead of simple 4263 * *(int *) CMSG_DATA(&cmsg.cm) = fd; 4264 * because GCC 4.4 with -O2/3/s optimization may issue a warning: 4265 * dereferencing type-punned pointer will break strict-aliasing rules 4266 * 4267 * Fortunately, GCC with -O1 compiles this nxt_memcpy() 4268 * in the same simple assignment as in the code above. 4269 */ 4270 memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int)); 4271 4272 res = lib->callbacks.port_send(ctx, dst, &m, sizeof(m), 4273 &cmsg, sizeof(cmsg)); 4274 4275 return res == sizeof(m) ? 
NXT_UNIT_OK : NXT_UNIT_ERROR; 4276} 4277 4278 4279int 4280nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) 4281{ 4282 int rc; 4283 nxt_unit_impl_t *lib; 4284 nxt_unit_process_t *process; 4285 nxt_unit_port_impl_t *new_port; 4286 4287 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4288 4289 nxt_unit_debug(ctx, "add_port: %d,%d in_fd %d out_fd %d", 4290 port->id.pid, port->id.id, 4291 port->in_fd, port->out_fd); 4292 4293 pthread_mutex_lock(&lib->mutex); 4294 4295 process = nxt_unit_process_get(ctx, port->id.pid); 4296 if (nxt_slow_path(process == NULL)) { 4297 rc = NXT_UNIT_ERROR; 4298 goto unlock; 4299 } 4300 4301 if (port->id.id >= process->next_port_id) { 4302 process->next_port_id = port->id.id + 1; 4303 } 4304 4305 new_port = malloc(sizeof(nxt_unit_port_impl_t)); 4306 if (nxt_slow_path(new_port == NULL)) { 4307 rc = NXT_UNIT_ERROR; 4308 goto unlock; 4309 } 4310 4311 new_port->port = *port; 4312 4313 rc = nxt_unit_port_hash_add(&lib->ports, &new_port->port); 4314 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
|
4315 goto unlock; 4316 } 4317 4318 nxt_queue_insert_tail(&process->ports, &new_port->link); 4319 4320 rc = NXT_UNIT_OK; 4321 4322 new_port->process = process; 4323 4324unlock: 4325 4326 pthread_mutex_unlock(&lib->mutex); 4327 4328 if (nxt_slow_path(process != NULL && rc != NXT_UNIT_OK)) { 4329 nxt_unit_process_use(ctx, process, -1); 4330 } 4331 4332 return rc; 4333} 4334 4335 4336void 4337nxt_unit_remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) 4338{ 4339 nxt_unit_find_remove_port(ctx, port_id, NULL); 4340} 4341 4342 4343void 4344nxt_unit_find_remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 4345 nxt_unit_port_t *r_port) 4346{ 4347 nxt_unit_impl_t *lib; 4348 nxt_unit_process_t *process; 4349 4350 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4351 4352 pthread_mutex_lock(&lib->mutex); 4353 4354 process = NULL; 4355 4356 nxt_unit_remove_port_unsafe(ctx, port_id, r_port, &process); 4357 4358 pthread_mutex_unlock(&lib->mutex); 4359 4360 if (nxt_slow_path(process != NULL)) { 4361 nxt_unit_process_use(ctx, process, -1); 4362 } 4363} 4364 4365 4366static void 4367nxt_unit_remove_port_unsafe(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 4368 nxt_unit_port_t *r_port, nxt_unit_process_t **process) 4369{ 4370 nxt_unit_impl_t *lib; 4371 nxt_unit_port_impl_t *port; 4372 4373 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4374 4375 port = nxt_unit_port_hash_find(&lib->ports, port_id, 1); 4376 if (nxt_slow_path(port == NULL)) { 4377 nxt_unit_debug(ctx, "remove_port: port %d,%d not found", 4378 (int) port_id->pid, (int) port_id->id); 4379 4380 return; 4381 } 4382 4383 nxt_unit_debug(ctx, "remove_port: port %d,%d, fds %d,%d, data %p", 4384 (int) port_id->pid, (int) port_id->id, 4385 port->port.in_fd, port->port.out_fd, port->port.data); 4386 4387 if (port->port.in_fd != -1) { 4388 close(port->port.in_fd); 4389 } 4390 4391 if (port->port.out_fd != -1) { 4392 close(port->port.out_fd); 4393 } 4394 4395 if (port->process != NULL) { 
        nxt_queue_remove(&port->link);
    }

    if (process != NULL) {
        *process = port->process;
    }

    if (r_port != NULL) {
        *r_port = port->port;
    }

    free(port);
}


/*
 * Forget everything about peer process "pid": detach it from the hash and
 * release all of its ports.  NOTE: on the success path lib->mutex is left
 * locked here and released inside nxt_unit_remove_process().
 */
void
nxt_unit_remove_pid(nxt_unit_ctx_t *ctx, pid_t pid)
{
    nxt_unit_impl_t     *lib;
    nxt_unit_process_t  *process;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->mutex);

    process = nxt_unit_process_find(ctx, pid, 1);
    if (nxt_slow_path(process == NULL)) {
        nxt_unit_debug(ctx, "remove_pid: process %d not found", (int) pid);

        pthread_mutex_unlock(&lib->mutex);

        return;
    }

    nxt_unit_remove_process(ctx, process);
}


/*
 * Remove all ports of "process" and drop the caller's reference.
 * Must be entered with lib->mutex locked; unlocks it before invoking the
 * remove_port callback for the remaining ports.
 */
static void
nxt_unit_remove_process(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process)
{
    nxt_queue_t           ports;
    nxt_unit_impl_t       *lib;
    nxt_unit_port_impl_t  *port;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    nxt_queue_init(&ports);

    /* Steal the whole ports list so it can be drained outside the lock. */
    nxt_queue_add(&ports, &process->ports);

    nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {

        nxt_unit_process_use(ctx, process, -1);
        port->process = NULL;

        /* Shortcut for default callback. */
        if (lib->callbacks.remove_port == nxt_unit_remove_port) {
            nxt_queue_remove(&port->link);

            nxt_unit_remove_port_unsafe(ctx, &port->port.id, NULL, NULL);
        }

    } nxt_queue_loop;

    pthread_mutex_unlock(&lib->mutex);

    nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {

        nxt_queue_remove(&port->link);

        lib->callbacks.remove_port(ctx, &port->port.id);

    } nxt_queue_loop;

    nxt_unit_process_use(ctx, process, -1);
}


/* Request event-loop shutdown: nxt_unit_run() exits after the iteration. */
void
nxt_unit_quit(nxt_unit_ctx_t *ctx)
{
    nxt_unit_impl_t  *lib;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    lib->online = 0;
}


/*
 * Default port_send callback: resolve the port id to its out_fd under the
 * lock, then send via sendmsg().  Returns bytes sent or -1.
 */
static ssize_t
nxt_unit_port_send_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id,
    const void *buf, size_t buf_size, const void *oob, size_t oob_size)
{
    int                   fd;
    nxt_unit_impl_t       *lib;
    nxt_unit_port_impl_t  *port;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->mutex);

    port = nxt_unit_port_hash_find(&lib->ports, port_id, 0);

    if (nxt_fast_path(port != NULL)) {
        fd = port->port.out_fd;

    } else {
        nxt_unit_warn(ctx, "port_send: port %d,%d not found",
                      (int) port_id->pid, (int) port_id->id);
        fd = -1;
    }

    pthread_mutex_unlock(&lib->mutex);

    if (nxt_slow_path(fd == -1)) {
        if (port != NULL) {
            nxt_unit_warn(ctx, "port_send: port %d,%d: fd == -1",
                          (int) port_id->pid, (int) port_id->id);
        }

        return -1;
    }

    nxt_unit_debug(ctx, "port_send: found port %d,%d fd %d",
                   (int) port_id->pid, (int) port_id->id, fd);

    return nxt_unit_port_send(ctx, fd, buf, buf_size, oob, oob_size);
}


/*
 * sendmsg() wrapper: sends buf plus optional ancillary data, retrying on
 * EINTR.  Returns the sendmsg() result.
 */
ssize_t
nxt_unit_port_send(nxt_unit_ctx_t *ctx, int fd,
    const void *buf, size_t buf_size, const void *oob, size_t oob_size)
{
    ssize_t        res;
    struct iovec   iov[1];
    struct msghdr  msg;

    iov[0].iov_base = (void *) buf;
    iov[0].iov_len = buf_size;

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = 1;
    msg.msg_flags = 0;
    msg.msg_control = (void *) oob;
    msg.msg_controllen = oob_size;

retry:

    res = sendmsg(fd, &msg, 0);

    if (nxt_slow_path(res == -1)) {
        if (errno == EINTR) {
            goto retry;
        }

        /*
         * FIXME: This should be "alert" after router graceful shutdown
         * implementation.
         */
        nxt_unit_warn(ctx, "sendmsg(%d, %d) failed: %s (%d)",
                      fd, (int) buf_size, strerror(errno), errno);

    } else {
        nxt_unit_debug(ctx, "sendmsg(%d, %d): %d", fd, (int) buf_size,
                       (int) res);
    }

    return res;
}


/*
 * Default port_recv callback: resolve the port id to its in_fd, cache the
 * fd for the context's read port, then receive via recvmsg().
 */
static ssize_t
nxt_unit_port_recv_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id,
    void *buf, size_t buf_size, void *oob, size_t oob_size)
{
    int                   fd;
    nxt_unit_impl_t       *lib;
    nxt_unit_ctx_impl_t   *ctx_impl;
    nxt_unit_port_impl_t  *port;

    lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);

    pthread_mutex_lock(&lib->mutex);

    port = nxt_unit_port_hash_find(&lib->ports, port_id, 0);

    if (nxt_fast_path(port != NULL)) {
        fd = port->port.in_fd;

    } else {
        nxt_unit_debug(ctx, "port_recv: port %d,%d not found",
                       (int) port_id->pid, (int) port_id->id);
        fd = -1;
    }

    pthread_mutex_unlock(&lib->mutex);

    if (nxt_slow_path(fd == -1)) {
        return -1;
    }

    nxt_unit_debug(ctx, "port_recv: found port %d,%d, fd %d",
                   (int) port_id->pid, (int) port_id->id, fd);

    ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);

    /* Cache the fd so future reads can bypass the hash lookup. */
    if (nxt_fast_path(port_id == &ctx_impl->read_port_id)) {
        ctx_impl->read_port_fd = fd;
    }

    return nxt_unit_port_recv(ctx, fd, buf, buf_size, oob, oob_size);
}

4614ssize_t 4615nxt_unit_port_recv(nxt_unit_ctx_t *ctx, int fd, void *buf, size_t buf_size, 4616 void *oob, size_t oob_size) 4617{ 4618 ssize_t res; 4619 struct iovec iov[1]; 4620 struct msghdr msg; 4621 4622 iov[0].iov_base = buf; 4623 iov[0].iov_len = buf_size; 4624 4625 msg.msg_name = NULL; 4626 msg.msg_namelen = 0; 4627 msg.msg_iov = iov; 4628 msg.msg_iovlen = 1; 4629 msg.msg_flags = 0; 4630 msg.msg_control = oob; 4631 msg.msg_controllen = oob_size; 4632 4633retry: 4634 4635 res = recvmsg(fd, &msg, 0); 4636 4637 if (nxt_slow_path(res == -1)) { 4638 if (errno == EINTR) { 4639 goto retry; 4640 } 4641 4642 nxt_unit_alert(ctx, "recvmsg(%d) failed: %s (%d)", 4643 fd, strerror(errno), errno); 4644 4645 } else { 4646 nxt_unit_debug(ctx, "recvmsg(%d): %d", fd, (int) res); 4647 } 4648 4649 return res; 4650} 4651 4652 4653static nxt_int_t 4654nxt_unit_port_hash_test(nxt_lvlhsh_query_t *lhq, void *data) 4655{ 4656 nxt_unit_port_t *port; 4657 nxt_unit_port_hash_id_t *port_id; 4658 4659 port = data; 4660 port_id = (nxt_unit_port_hash_id_t *) lhq->key.start; 4661 4662 if (lhq->key.length == sizeof(nxt_unit_port_hash_id_t) 4663 && port_id->pid == port->id.pid 4664 && port_id->id == port->id.id) 4665 { 4666 return NXT_OK; 4667 } 4668 4669 return NXT_DECLINED; 4670} 4671 4672 4673static const nxt_lvlhsh_proto_t lvlhsh_ports_proto nxt_aligned(64) = { 4674 NXT_LVLHSH_DEFAULT, 4675 nxt_unit_port_hash_test, 4676 nxt_lvlhsh_alloc, 4677 nxt_lvlhsh_free, 4678}; 4679 4680 4681static inline void 4682nxt_unit_port_hash_lhq(nxt_lvlhsh_query_t *lhq, 4683 nxt_unit_port_hash_id_t *port_hash_id, 4684 nxt_unit_port_id_t *port_id) 4685{ 4686 port_hash_id->pid = port_id->pid; 4687 port_hash_id->id = port_id->id; 4688 4689 if (nxt_fast_path(port_id->hash != 0)) { 4690 lhq->key_hash = port_id->hash; 4691 4692 } else { 4693 lhq->key_hash = nxt_murmur_hash2(port_hash_id, sizeof(*port_hash_id)); 4694 4695 port_id->hash = lhq->key_hash; 4696 4697 nxt_unit_debug(NULL, "calculate hash for port_id (%d, 
%d): %04X", 4698 (int) port_id->pid, (int) port_id->id, 4699 (int) port_id->hash); 4700 } 4701 4702 lhq->key.length = sizeof(nxt_unit_port_hash_id_t); 4703 lhq->key.start = (u_char *) port_hash_id; 4704 lhq->proto = &lvlhsh_ports_proto; 4705 lhq->pool = NULL; 4706} 4707 4708 4709static int 4710nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port) 4711{ 4712 nxt_int_t res; 4713 nxt_lvlhsh_query_t lhq; 4714 nxt_unit_port_hash_id_t port_hash_id; 4715 4716 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, &port->id); 4717 lhq.replace = 0; 4718 lhq.value = port; 4719 4720 res = nxt_lvlhsh_insert(port_hash, &lhq); 4721 4722 switch (res) { 4723 4724 case NXT_OK: 4725 return NXT_UNIT_OK; 4726 4727 default: 4728 return NXT_UNIT_ERROR; 4729 } 4730} 4731 4732 4733static nxt_unit_port_impl_t * 4734nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, 4735 int remove) 4736{ 4737 nxt_int_t res; 4738 nxt_lvlhsh_query_t lhq; 4739 nxt_unit_port_hash_id_t port_hash_id; 4740 4741 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, port_id); 4742 4743 if (remove) { 4744 res = nxt_lvlhsh_delete(port_hash, &lhq); 4745 4746 } else { 4747 res = nxt_lvlhsh_find(port_hash, &lhq); 4748 } 4749 4750 switch (res) { 4751 4752 case NXT_OK: 4753 return lhq.value; 4754 4755 default: 4756 return NULL; 4757 } 4758} 4759 4760 4761static nxt_int_t 4762nxt_unit_request_hash_test(nxt_lvlhsh_query_t *lhq, void *data) 4763{ 4764 return NXT_OK; 4765} 4766 4767 4768static const nxt_lvlhsh_proto_t lvlhsh_requests_proto nxt_aligned(64) = { 4769 NXT_LVLHSH_DEFAULT, 4770 nxt_unit_request_hash_test, 4771 nxt_lvlhsh_alloc, 4772 nxt_lvlhsh_free, 4773}; 4774 4775 4776static int 4777nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, 4778 nxt_unit_request_info_impl_t *req_impl) 4779{ 4780 uint32_t *stream; 4781 nxt_int_t res; 4782 nxt_lvlhsh_query_t lhq; 4783 4784 stream = &req_impl->stream; 4785 4786 lhq.key_hash = nxt_murmur_hash2(stream, sizeof(*stream)); 4787 lhq.key.length = 
sizeof(*stream); 4788 lhq.key.start = (u_char *) stream; 4789 lhq.proto = &lvlhsh_requests_proto; 4790 lhq.pool = NULL; 4791 lhq.replace = 0; 4792 lhq.value = req_impl; 4793 4794 res = nxt_lvlhsh_insert(request_hash, &lhq); 4795 4796 switch (res) { 4797 4798 case NXT_OK: 4799 return NXT_UNIT_OK; 4800 4801 default: 4802 return NXT_UNIT_ERROR; 4803 } 4804} 4805 4806 4807static nxt_unit_request_info_impl_t * 4808nxt_unit_request_hash_find(nxt_lvlhsh_t *request_hash, uint32_t stream, 4809 int remove) 4810{ 4811 nxt_int_t res; 4812 nxt_lvlhsh_query_t lhq; 4813 4814 lhq.key_hash = nxt_murmur_hash2(&stream, sizeof(stream)); 4815 lhq.key.length = sizeof(stream); 4816 lhq.key.start = (u_char *) &stream; 4817 lhq.proto = &lvlhsh_requests_proto; 4818 lhq.pool = NULL; 4819 4820 if (remove) { 4821 res = nxt_lvlhsh_delete(request_hash, &lhq); 4822 4823 } else { 4824 res = nxt_lvlhsh_find(request_hash, &lhq); 4825 } 4826 4827 switch (res) { 4828 4829 case NXT_OK: 4830 return lhq.value; 4831 4832 default: 4833 return NULL; 4834 } 4835} 4836 4837 4838void 4839nxt_unit_log(nxt_unit_ctx_t *ctx, int level, const char *fmt, ...) 
4840{ 4841 int log_fd, n; 4842 char msg[NXT_MAX_ERROR_STR], *p, *end; 4843 pid_t pid; 4844 va_list ap; 4845 nxt_unit_impl_t *lib; 4846 4847 if (nxt_fast_path(ctx != NULL)) { 4848 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4849 4850 pid = lib->pid; 4851 log_fd = lib->log_fd; 4852 4853 } else { 4854 pid = getpid(); 4855 log_fd = STDERR_FILENO; 4856 } 4857 4858 p = msg; 4859 end = p + sizeof(msg) - 1; 4860 4861 p = nxt_unit_snprint_prefix(p, end, pid, level); 4862 4863 va_start(ap, fmt); 4864 p += vsnprintf(p, end - p, fmt, ap); 4865 va_end(ap); 4866 4867 if (nxt_slow_path(p > end)) { 4868 memcpy(end - 5, "[...]", 5); 4869 p = end; 4870 } 4871 4872 *p++ = '\n'; 4873 4874 n = write(log_fd, msg, p - msg); 4875 if (nxt_slow_path(n < 0)) { 4876 fprintf(stderr, "Failed to write log: %.*s", (int) (p - msg), msg); 4877 } 4878} 4879 4880 4881void 4882nxt_unit_req_log(nxt_unit_request_info_t *req, int level, const char *fmt, ...) 4883{ 4884 int log_fd, n; 4885 char msg[NXT_MAX_ERROR_STR], *p, *end; 4886 pid_t pid; 4887 va_list ap; 4888 nxt_unit_impl_t *lib; 4889 nxt_unit_request_info_impl_t *req_impl; 4890 4891 if (nxt_fast_path(req != NULL)) { 4892 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit); 4893 4894 pid = lib->pid; 4895 log_fd = lib->log_fd; 4896 4897 } else { 4898 pid = getpid(); 4899 log_fd = STDERR_FILENO; 4900 } 4901 4902 p = msg; 4903 end = p + sizeof(msg) - 1; 4904 4905 p = nxt_unit_snprint_prefix(p, end, pid, level); 4906 4907 if (nxt_fast_path(req != NULL)) { 4908 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 4909 4910 p += snprintf(p, end - p, "#%"PRIu32": ", req_impl->stream); 4911 } 4912 4913 va_start(ap, fmt); 4914 p += vsnprintf(p, end - p, fmt, ap); 4915 va_end(ap); 4916 4917 if (nxt_slow_path(p > end)) { 4918 memcpy(end - 5, "[...]", 5); 4919 p = end; 4920 } 4921 4922 *p++ = '\n'; 4923 4924 n = write(log_fd, msg, p - msg); 4925 if (nxt_slow_path(n < 0)) { 4926 fprintf(stderr, "Failed to write log: 
%.*s", (int) (p - msg), msg); 4927 } 4928} 4929 4930 4931static const char * nxt_unit_log_levels[] = { 4932 "alert", 4933 "error", 4934 "warn", 4935 "notice", 4936 "info", 4937 "debug", 4938}; 4939 4940 4941static char * 4942nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level) 4943{ 4944 struct tm tm; 4945 struct timespec ts; 4946 4947 (void) clock_gettime(CLOCK_REALTIME, &ts); 4948 4949#if (NXT_HAVE_LOCALTIME_R) 4950 (void) localtime_r(&ts.tv_sec, &tm); 4951#else 4952 tm = *localtime(&ts.tv_sec); 4953#endif 4954 4955 p += snprintf(p, end - p, 4956 "%4d/%02d/%02d %02d:%02d:%02d.%03d ", 4957 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 4958 tm.tm_hour, tm.tm_min, tm.tm_sec, 4959 (int) ts.tv_nsec / 1000000); 4960 4961 p += snprintf(p, end - p, 4962 "[%s] %d#%"PRIu64" [unit] ", nxt_unit_log_levels[level], 4963 (int) pid, 4964 (uint64_t) (uintptr_t) nxt_thread_get_tid()); 4965 4966 return p; 4967} 4968 4969 4970/* The function required by nxt_lvlhsh_alloc() and nxt_lvlvhsh_free(). */ 4971 4972void * 4973nxt_memalign(size_t alignment, size_t size) 4974{ 4975 void *p; 4976 nxt_err_t err; 4977 4978 err = posix_memalign(&p, alignment, size); 4979 4980 if (nxt_fast_path(err == 0)) { 4981 return p; 4982 } 4983 4984 return NULL; 4985} 4986 4987#if (NXT_DEBUG) 4988 4989void 4990nxt_free(void *p) 4991{ 4992 free(p); 4993} 4994 4995#endif
| 4318 goto unlock; 4319 } 4320 4321 nxt_queue_insert_tail(&process->ports, &new_port->link); 4322 4323 rc = NXT_UNIT_OK; 4324 4325 new_port->process = process; 4326 4327unlock: 4328 4329 pthread_mutex_unlock(&lib->mutex); 4330 4331 if (nxt_slow_path(process != NULL && rc != NXT_UNIT_OK)) { 4332 nxt_unit_process_use(ctx, process, -1); 4333 } 4334 4335 return rc; 4336} 4337 4338 4339void 4340nxt_unit_remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) 4341{ 4342 nxt_unit_find_remove_port(ctx, port_id, NULL); 4343} 4344 4345 4346void 4347nxt_unit_find_remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 4348 nxt_unit_port_t *r_port) 4349{ 4350 nxt_unit_impl_t *lib; 4351 nxt_unit_process_t *process; 4352 4353 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4354 4355 pthread_mutex_lock(&lib->mutex); 4356 4357 process = NULL; 4358 4359 nxt_unit_remove_port_unsafe(ctx, port_id, r_port, &process); 4360 4361 pthread_mutex_unlock(&lib->mutex); 4362 4363 if (nxt_slow_path(process != NULL)) { 4364 nxt_unit_process_use(ctx, process, -1); 4365 } 4366} 4367 4368 4369static void 4370nxt_unit_remove_port_unsafe(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 4371 nxt_unit_port_t *r_port, nxt_unit_process_t **process) 4372{ 4373 nxt_unit_impl_t *lib; 4374 nxt_unit_port_impl_t *port; 4375 4376 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4377 4378 port = nxt_unit_port_hash_find(&lib->ports, port_id, 1); 4379 if (nxt_slow_path(port == NULL)) { 4380 nxt_unit_debug(ctx, "remove_port: port %d,%d not found", 4381 (int) port_id->pid, (int) port_id->id); 4382 4383 return; 4384 } 4385 4386 nxt_unit_debug(ctx, "remove_port: port %d,%d, fds %d,%d, data %p", 4387 (int) port_id->pid, (int) port_id->id, 4388 port->port.in_fd, port->port.out_fd, port->port.data); 4389 4390 if (port->port.in_fd != -1) { 4391 close(port->port.in_fd); 4392 } 4393 4394 if (port->port.out_fd != -1) { 4395 close(port->port.out_fd); 4396 } 4397 4398 if (port->process != NULL) 
{ 4399 nxt_queue_remove(&port->link); 4400 } 4401 4402 if (process != NULL) { 4403 *process = port->process; 4404 } 4405 4406 if (r_port != NULL) { 4407 *r_port = port->port; 4408 } 4409 4410 free(port); 4411} 4412 4413 4414void 4415nxt_unit_remove_pid(nxt_unit_ctx_t *ctx, pid_t pid) 4416{ 4417 nxt_unit_impl_t *lib; 4418 nxt_unit_process_t *process; 4419 4420 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4421 4422 pthread_mutex_lock(&lib->mutex); 4423 4424 process = nxt_unit_process_find(ctx, pid, 1); 4425 if (nxt_slow_path(process == NULL)) { 4426 nxt_unit_debug(ctx, "remove_pid: process %d not found", (int) pid); 4427 4428 pthread_mutex_unlock(&lib->mutex); 4429 4430 return; 4431 } 4432 4433 nxt_unit_remove_process(ctx, process); 4434} 4435 4436 4437static void 4438nxt_unit_remove_process(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process) 4439{ 4440 nxt_queue_t ports; 4441 nxt_unit_impl_t *lib; 4442 nxt_unit_port_impl_t *port; 4443 4444 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4445 4446 nxt_queue_init(&ports); 4447 4448 nxt_queue_add(&ports, &process->ports); 4449 4450 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) { 4451 4452 nxt_unit_process_use(ctx, process, -1); 4453 port->process = NULL; 4454 4455 /* Shortcut for default callback. 
*/ 4456 if (lib->callbacks.remove_port == nxt_unit_remove_port) { 4457 nxt_queue_remove(&port->link); 4458 4459 nxt_unit_remove_port_unsafe(ctx, &port->port.id, NULL, NULL); 4460 } 4461 4462 } nxt_queue_loop; 4463 4464 pthread_mutex_unlock(&lib->mutex); 4465 4466 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) { 4467 4468 nxt_queue_remove(&port->link); 4469 4470 lib->callbacks.remove_port(ctx, &port->port.id); 4471 4472 } nxt_queue_loop; 4473 4474 nxt_unit_process_use(ctx, process, -1); 4475} 4476 4477 4478void 4479nxt_unit_quit(nxt_unit_ctx_t *ctx) 4480{ 4481 nxt_unit_impl_t *lib; 4482 4483 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4484 4485 lib->online = 0; 4486} 4487 4488 4489static ssize_t 4490nxt_unit_port_send_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 4491 const void *buf, size_t buf_size, const void *oob, size_t oob_size) 4492{ 4493 int fd; 4494 nxt_unit_impl_t *lib; 4495 nxt_unit_port_impl_t *port; 4496 4497 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4498 4499 pthread_mutex_lock(&lib->mutex); 4500 4501 port = nxt_unit_port_hash_find(&lib->ports, port_id, 0); 4502 4503 if (nxt_fast_path(port != NULL)) { 4504 fd = port->port.out_fd; 4505 4506 } else { 4507 nxt_unit_warn(ctx, "port_send: port %d,%d not found", 4508 (int) port_id->pid, (int) port_id->id); 4509 fd = -1; 4510 } 4511 4512 pthread_mutex_unlock(&lib->mutex); 4513 4514 if (nxt_slow_path(fd == -1)) { 4515 if (port != NULL) { 4516 nxt_unit_warn(ctx, "port_send: port %d,%d: fd == -1", 4517 (int) port_id->pid, (int) port_id->id); 4518 } 4519 4520 return -1; 4521 } 4522 4523 nxt_unit_debug(ctx, "port_send: found port %d,%d fd %d", 4524 (int) port_id->pid, (int) port_id->id, fd); 4525 4526 return nxt_unit_port_send(ctx, fd, buf, buf_size, oob, oob_size); 4527} 4528 4529 4530ssize_t 4531nxt_unit_port_send(nxt_unit_ctx_t *ctx, int fd, 4532 const void *buf, size_t buf_size, const void *oob, size_t oob_size) 4533{ 4534 ssize_t res; 4535 struct iovec 
iov[1]; 4536 struct msghdr msg; 4537 4538 iov[0].iov_base = (void *) buf; 4539 iov[0].iov_len = buf_size; 4540 4541 msg.msg_name = NULL; 4542 msg.msg_namelen = 0; 4543 msg.msg_iov = iov; 4544 msg.msg_iovlen = 1; 4545 msg.msg_flags = 0; 4546 msg.msg_control = (void *) oob; 4547 msg.msg_controllen = oob_size; 4548 4549retry: 4550 4551 res = sendmsg(fd, &msg, 0); 4552 4553 if (nxt_slow_path(res == -1)) { 4554 if (errno == EINTR) { 4555 goto retry; 4556 } 4557 4558 /* 4559 * FIXME: This should be "alert" after router graceful shutdown 4560 * implementation. 4561 */ 4562 nxt_unit_warn(ctx, "sendmsg(%d, %d) failed: %s (%d)", 4563 fd, (int) buf_size, strerror(errno), errno); 4564 4565 } else { 4566 nxt_unit_debug(ctx, "sendmsg(%d, %d): %d", fd, (int) buf_size, 4567 (int) res); 4568 } 4569 4570 return res; 4571} 4572 4573 4574static ssize_t 4575nxt_unit_port_recv_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, 4576 void *buf, size_t buf_size, void *oob, size_t oob_size) 4577{ 4578 int fd; 4579 nxt_unit_impl_t *lib; 4580 nxt_unit_ctx_impl_t *ctx_impl; 4581 nxt_unit_port_impl_t *port; 4582 4583 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4584 4585 pthread_mutex_lock(&lib->mutex); 4586 4587 port = nxt_unit_port_hash_find(&lib->ports, port_id, 0); 4588 4589 if (nxt_fast_path(port != NULL)) { 4590 fd = port->port.in_fd; 4591 4592 } else { 4593 nxt_unit_debug(ctx, "port_recv: port %d,%d not found", 4594 (int) port_id->pid, (int) port_id->id); 4595 fd = -1; 4596 } 4597 4598 pthread_mutex_unlock(&lib->mutex); 4599 4600 if (nxt_slow_path(fd == -1)) { 4601 return -1; 4602 } 4603 4604 nxt_unit_debug(ctx, "port_recv: found port %d,%d, fd %d", 4605 (int) port_id->pid, (int) port_id->id, fd); 4606 4607 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); 4608 4609 if (nxt_fast_path(port_id == &ctx_impl->read_port_id)) { 4610 ctx_impl->read_port_fd = fd; 4611 } 4612 4613 return nxt_unit_port_recv(ctx, fd, buf, buf_size, oob, oob_size); 4614} 4615 4616 
4617ssize_t 4618nxt_unit_port_recv(nxt_unit_ctx_t *ctx, int fd, void *buf, size_t buf_size, 4619 void *oob, size_t oob_size) 4620{ 4621 ssize_t res; 4622 struct iovec iov[1]; 4623 struct msghdr msg; 4624 4625 iov[0].iov_base = buf; 4626 iov[0].iov_len = buf_size; 4627 4628 msg.msg_name = NULL; 4629 msg.msg_namelen = 0; 4630 msg.msg_iov = iov; 4631 msg.msg_iovlen = 1; 4632 msg.msg_flags = 0; 4633 msg.msg_control = oob; 4634 msg.msg_controllen = oob_size; 4635 4636retry: 4637 4638 res = recvmsg(fd, &msg, 0); 4639 4640 if (nxt_slow_path(res == -1)) { 4641 if (errno == EINTR) { 4642 goto retry; 4643 } 4644 4645 nxt_unit_alert(ctx, "recvmsg(%d) failed: %s (%d)", 4646 fd, strerror(errno), errno); 4647 4648 } else { 4649 nxt_unit_debug(ctx, "recvmsg(%d): %d", fd, (int) res); 4650 } 4651 4652 return res; 4653} 4654 4655 4656static nxt_int_t 4657nxt_unit_port_hash_test(nxt_lvlhsh_query_t *lhq, void *data) 4658{ 4659 nxt_unit_port_t *port; 4660 nxt_unit_port_hash_id_t *port_id; 4661 4662 port = data; 4663 port_id = (nxt_unit_port_hash_id_t *) lhq->key.start; 4664 4665 if (lhq->key.length == sizeof(nxt_unit_port_hash_id_t) 4666 && port_id->pid == port->id.pid 4667 && port_id->id == port->id.id) 4668 { 4669 return NXT_OK; 4670 } 4671 4672 return NXT_DECLINED; 4673} 4674 4675 4676static const nxt_lvlhsh_proto_t lvlhsh_ports_proto nxt_aligned(64) = { 4677 NXT_LVLHSH_DEFAULT, 4678 nxt_unit_port_hash_test, 4679 nxt_lvlhsh_alloc, 4680 nxt_lvlhsh_free, 4681}; 4682 4683 4684static inline void 4685nxt_unit_port_hash_lhq(nxt_lvlhsh_query_t *lhq, 4686 nxt_unit_port_hash_id_t *port_hash_id, 4687 nxt_unit_port_id_t *port_id) 4688{ 4689 port_hash_id->pid = port_id->pid; 4690 port_hash_id->id = port_id->id; 4691 4692 if (nxt_fast_path(port_id->hash != 0)) { 4693 lhq->key_hash = port_id->hash; 4694 4695 } else { 4696 lhq->key_hash = nxt_murmur_hash2(port_hash_id, sizeof(*port_hash_id)); 4697 4698 port_id->hash = lhq->key_hash; 4699 4700 nxt_unit_debug(NULL, "calculate hash for port_id (%d, 
%d): %04X", 4701 (int) port_id->pid, (int) port_id->id, 4702 (int) port_id->hash); 4703 } 4704 4705 lhq->key.length = sizeof(nxt_unit_port_hash_id_t); 4706 lhq->key.start = (u_char *) port_hash_id; 4707 lhq->proto = &lvlhsh_ports_proto; 4708 lhq->pool = NULL; 4709} 4710 4711 4712static int 4713nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port) 4714{ 4715 nxt_int_t res; 4716 nxt_lvlhsh_query_t lhq; 4717 nxt_unit_port_hash_id_t port_hash_id; 4718 4719 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, &port->id); 4720 lhq.replace = 0; 4721 lhq.value = port; 4722 4723 res = nxt_lvlhsh_insert(port_hash, &lhq); 4724 4725 switch (res) { 4726 4727 case NXT_OK: 4728 return NXT_UNIT_OK; 4729 4730 default: 4731 return NXT_UNIT_ERROR; 4732 } 4733} 4734 4735 4736static nxt_unit_port_impl_t * 4737nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, 4738 int remove) 4739{ 4740 nxt_int_t res; 4741 nxt_lvlhsh_query_t lhq; 4742 nxt_unit_port_hash_id_t port_hash_id; 4743 4744 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, port_id); 4745 4746 if (remove) { 4747 res = nxt_lvlhsh_delete(port_hash, &lhq); 4748 4749 } else { 4750 res = nxt_lvlhsh_find(port_hash, &lhq); 4751 } 4752 4753 switch (res) { 4754 4755 case NXT_OK: 4756 return lhq.value; 4757 4758 default: 4759 return NULL; 4760 } 4761} 4762 4763 4764static nxt_int_t 4765nxt_unit_request_hash_test(nxt_lvlhsh_query_t *lhq, void *data) 4766{ 4767 return NXT_OK; 4768} 4769 4770 4771static const nxt_lvlhsh_proto_t lvlhsh_requests_proto nxt_aligned(64) = { 4772 NXT_LVLHSH_DEFAULT, 4773 nxt_unit_request_hash_test, 4774 nxt_lvlhsh_alloc, 4775 nxt_lvlhsh_free, 4776}; 4777 4778 4779static int 4780nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, 4781 nxt_unit_request_info_impl_t *req_impl) 4782{ 4783 uint32_t *stream; 4784 nxt_int_t res; 4785 nxt_lvlhsh_query_t lhq; 4786 4787 stream = &req_impl->stream; 4788 4789 lhq.key_hash = nxt_murmur_hash2(stream, sizeof(*stream)); 4790 lhq.key.length = 
sizeof(*stream); 4791 lhq.key.start = (u_char *) stream; 4792 lhq.proto = &lvlhsh_requests_proto; 4793 lhq.pool = NULL; 4794 lhq.replace = 0; 4795 lhq.value = req_impl; 4796 4797 res = nxt_lvlhsh_insert(request_hash, &lhq); 4798 4799 switch (res) { 4800 4801 case NXT_OK: 4802 return NXT_UNIT_OK; 4803 4804 default: 4805 return NXT_UNIT_ERROR; 4806 } 4807} 4808 4809 4810static nxt_unit_request_info_impl_t * 4811nxt_unit_request_hash_find(nxt_lvlhsh_t *request_hash, uint32_t stream, 4812 int remove) 4813{ 4814 nxt_int_t res; 4815 nxt_lvlhsh_query_t lhq; 4816 4817 lhq.key_hash = nxt_murmur_hash2(&stream, sizeof(stream)); 4818 lhq.key.length = sizeof(stream); 4819 lhq.key.start = (u_char *) &stream; 4820 lhq.proto = &lvlhsh_requests_proto; 4821 lhq.pool = NULL; 4822 4823 if (remove) { 4824 res = nxt_lvlhsh_delete(request_hash, &lhq); 4825 4826 } else { 4827 res = nxt_lvlhsh_find(request_hash, &lhq); 4828 } 4829 4830 switch (res) { 4831 4832 case NXT_OK: 4833 return lhq.value; 4834 4835 default: 4836 return NULL; 4837 } 4838} 4839 4840 4841void 4842nxt_unit_log(nxt_unit_ctx_t *ctx, int level, const char *fmt, ...) 
4843{ 4844 int log_fd, n; 4845 char msg[NXT_MAX_ERROR_STR], *p, *end; 4846 pid_t pid; 4847 va_list ap; 4848 nxt_unit_impl_t *lib; 4849 4850 if (nxt_fast_path(ctx != NULL)) { 4851 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); 4852 4853 pid = lib->pid; 4854 log_fd = lib->log_fd; 4855 4856 } else { 4857 pid = getpid(); 4858 log_fd = STDERR_FILENO; 4859 } 4860 4861 p = msg; 4862 end = p + sizeof(msg) - 1; 4863 4864 p = nxt_unit_snprint_prefix(p, end, pid, level); 4865 4866 va_start(ap, fmt); 4867 p += vsnprintf(p, end - p, fmt, ap); 4868 va_end(ap); 4869 4870 if (nxt_slow_path(p > end)) { 4871 memcpy(end - 5, "[...]", 5); 4872 p = end; 4873 } 4874 4875 *p++ = '\n'; 4876 4877 n = write(log_fd, msg, p - msg); 4878 if (nxt_slow_path(n < 0)) { 4879 fprintf(stderr, "Failed to write log: %.*s", (int) (p - msg), msg); 4880 } 4881} 4882 4883 4884void 4885nxt_unit_req_log(nxt_unit_request_info_t *req, int level, const char *fmt, ...) 4886{ 4887 int log_fd, n; 4888 char msg[NXT_MAX_ERROR_STR], *p, *end; 4889 pid_t pid; 4890 va_list ap; 4891 nxt_unit_impl_t *lib; 4892 nxt_unit_request_info_impl_t *req_impl; 4893 4894 if (nxt_fast_path(req != NULL)) { 4895 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit); 4896 4897 pid = lib->pid; 4898 log_fd = lib->log_fd; 4899 4900 } else { 4901 pid = getpid(); 4902 log_fd = STDERR_FILENO; 4903 } 4904 4905 p = msg; 4906 end = p + sizeof(msg) - 1; 4907 4908 p = nxt_unit_snprint_prefix(p, end, pid, level); 4909 4910 if (nxt_fast_path(req != NULL)) { 4911 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); 4912 4913 p += snprintf(p, end - p, "#%"PRIu32": ", req_impl->stream); 4914 } 4915 4916 va_start(ap, fmt); 4917 p += vsnprintf(p, end - p, fmt, ap); 4918 va_end(ap); 4919 4920 if (nxt_slow_path(p > end)) { 4921 memcpy(end - 5, "[...]", 5); 4922 p = end; 4923 } 4924 4925 *p++ = '\n'; 4926 4927 n = write(log_fd, msg, p - msg); 4928 if (nxt_slow_path(n < 0)) { 4929 fprintf(stderr, "Failed to write log: 
%.*s", (int) (p - msg), msg); 4930 } 4931} 4932 4933 4934static const char * nxt_unit_log_levels[] = { 4935 "alert", 4936 "error", 4937 "warn", 4938 "notice", 4939 "info", 4940 "debug", 4941}; 4942 4943 4944static char * 4945nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level) 4946{ 4947 struct tm tm; 4948 struct timespec ts; 4949 4950 (void) clock_gettime(CLOCK_REALTIME, &ts); 4951 4952#if (NXT_HAVE_LOCALTIME_R) 4953 (void) localtime_r(&ts.tv_sec, &tm); 4954#else 4955 tm = *localtime(&ts.tv_sec); 4956#endif 4957 4958 p += snprintf(p, end - p, 4959 "%4d/%02d/%02d %02d:%02d:%02d.%03d ", 4960 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 4961 tm.tm_hour, tm.tm_min, tm.tm_sec, 4962 (int) ts.tv_nsec / 1000000); 4963 4964 p += snprintf(p, end - p, 4965 "[%s] %d#%"PRIu64" [unit] ", nxt_unit_log_levels[level], 4966 (int) pid, 4967 (uint64_t) (uintptr_t) nxt_thread_get_tid()); 4968 4969 return p; 4970} 4971 4972 4973/* The function required by nxt_lvlhsh_alloc() and nxt_lvlvhsh_free(). */ 4974 4975void * 4976nxt_memalign(size_t alignment, size_t size) 4977{ 4978 void *p; 4979 nxt_err_t err; 4980 4981 err = posix_memalign(&p, alignment, size); 4982 4983 if (nxt_fast_path(err == 0)) { 4984 return p; 4985 } 4986 4987 return NULL; 4988} 4989 4990#if (NXT_DEBUG) 4991 4992void 4993nxt_free(void *p) 4994{ 4995 free(p); 4996} 4997 4998#endif
|