nxt_unit.c
1
2/*
3 * Copyright (C) NGINX, Inc.
4 */
5
6#include <stdlib.h>
7
8#include "nxt_main.h"
9#include "nxt_port_memory_int.h"
10#include "nxt_port_queue.h"
11#include "nxt_app_queue.h"
12
13#include "nxt_unit.h"
14#include "nxt_unit_request.h"
15#include "nxt_unit_response.h"
16#include "nxt_unit_websocket.h"
17
18#include "nxt_websocket.h"
19
20#if (NXT_HAVE_MEMFD_CREATE)
21#include <linux/memfd.h>
22#endif
23
24#define NXT_UNIT_MAX_PLAIN_SIZE 1024
25#define NXT_UNIT_LOCAL_BUF_SIZE \
26 (NXT_UNIT_MAX_PLAIN_SIZE + sizeof(nxt_port_msg_t))
27
28typedef struct nxt_unit_impl_s nxt_unit_impl_t;
29typedef struct nxt_unit_mmap_s nxt_unit_mmap_t;
30typedef struct nxt_unit_mmaps_s nxt_unit_mmaps_t;
31typedef struct nxt_unit_process_s nxt_unit_process_t;
32typedef struct nxt_unit_mmap_buf_s nxt_unit_mmap_buf_t;
33typedef struct nxt_unit_recv_msg_s nxt_unit_recv_msg_t;
34typedef struct nxt_unit_read_buf_s nxt_unit_read_buf_t;
35typedef struct nxt_unit_ctx_impl_s nxt_unit_ctx_impl_t;
36typedef struct nxt_unit_port_impl_s nxt_unit_port_impl_t;
37typedef struct nxt_unit_request_info_impl_s nxt_unit_request_info_impl_t;
38typedef struct nxt_unit_websocket_frame_impl_s nxt_unit_websocket_frame_impl_t;
39
40static nxt_unit_impl_t *nxt_unit_create(nxt_unit_init_t *init);
41static int nxt_unit_ctx_init(nxt_unit_impl_t *lib,
42 nxt_unit_ctx_impl_t *ctx_impl, void *data);
43nxt_inline void nxt_unit_ctx_use(nxt_unit_ctx_t *ctx);
44nxt_inline void nxt_unit_ctx_release(nxt_unit_ctx_t *ctx);
45nxt_inline void nxt_unit_lib_use(nxt_unit_impl_t *lib);
46nxt_inline void nxt_unit_lib_release(nxt_unit_impl_t *lib);
47nxt_inline void nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head,
48 nxt_unit_mmap_buf_t *mmap_buf);
49nxt_inline void nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev,
50 nxt_unit_mmap_buf_t *mmap_buf);
51nxt_inline void nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf);
52static int nxt_unit_read_env(nxt_unit_port_t *ready_port,
53 nxt_unit_port_t *router_port, nxt_unit_port_t *read_port,
54 int *log_fd, uint32_t *stream, uint32_t *shm_limit);
55static int nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream,
56 int queue_fd);
57static int nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf);
58static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx,
59 nxt_unit_recv_msg_t *recv_msg);
60static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx,
61 nxt_unit_recv_msg_t *recv_msg);
62static int nxt_unit_process_req_body(nxt_unit_ctx_t *ctx,
63 nxt_unit_recv_msg_t *recv_msg);
64static int nxt_unit_request_check_response_port(nxt_unit_request_info_t *req,
65 nxt_unit_port_id_t *port_id);
66static int nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req);
67static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx,
68 nxt_unit_recv_msg_t *recv_msg);
69static int nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx);
70static nxt_unit_request_info_impl_t *nxt_unit_request_info_get(
71 nxt_unit_ctx_t *ctx);
72static void nxt_unit_request_info_release(nxt_unit_request_info_t *req);
73static void nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req);
74static nxt_unit_websocket_frame_impl_t *nxt_unit_websocket_frame_get(
75 nxt_unit_ctx_t *ctx);
76static void nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws);
77static void nxt_unit_websocket_frame_free(nxt_unit_ctx_t *ctx,
78 nxt_unit_websocket_frame_impl_t *ws);
79static nxt_unit_mmap_buf_t *nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx);
80static void nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf);
81static int nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req,
82 nxt_unit_mmap_buf_t *mmap_buf, int last);
83static void nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf);
84static void nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf);
85static nxt_unit_read_buf_t *nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx);
86static nxt_unit_read_buf_t *nxt_unit_read_buf_get_impl(
87 nxt_unit_ctx_impl_t *ctx_impl);
88static void nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
89 nxt_unit_read_buf_t *rbuf);
90static nxt_unit_mmap_buf_t *nxt_unit_request_preread(
91 nxt_unit_request_info_t *req, size_t size);
92static ssize_t nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst,
93 size_t size);
94static nxt_port_mmap_header_t *nxt_unit_mmap_get(nxt_unit_ctx_t *ctx,
95 nxt_unit_port_t *port, nxt_chunk_id_t *c, int *n, int min_n);
96static int nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port);
97static int nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx);
98static nxt_unit_mmap_t *nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i);
99static nxt_port_mmap_header_t *nxt_unit_new_mmap(nxt_unit_ctx_t *ctx,
100 nxt_unit_port_t *port, int n);
101static int nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size);
102static int nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
103 int fd);
104static int nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx,
105 nxt_unit_port_t *port, uint32_t size,
106 uint32_t min_size, nxt_unit_mmap_buf_t *mmap_buf, char *local_buf);
107static int nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd);
108
109static void nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps);
110nxt_inline void nxt_unit_process_use(nxt_unit_process_t *process);
111nxt_inline void nxt_unit_process_release(nxt_unit_process_t *process);
112static void nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps);
113static int nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx,
114 nxt_unit_mmaps_t *mmaps, pid_t pid, uint32_t id,
115 nxt_port_mmap_header_t **hdr, nxt_unit_read_buf_t *rbuf);
116static int nxt_unit_mmap_read(nxt_unit_ctx_t *ctx,
117 nxt_unit_recv_msg_t *recv_msg, nxt_unit_read_buf_t *rbuf);
118static int nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id);
119static void nxt_unit_mmap_release(nxt_unit_ctx_t *ctx,
120 nxt_port_mmap_header_t *hdr, void *start, uint32_t size);
121static int nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid);
122
123static nxt_unit_process_t *nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid);
124static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_impl_t *lib,
125 pid_t pid, int remove);
126static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib);
127static int nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx);
128static int nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf);
129static int nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx);
130static void nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx);
131nxt_inline int nxt_unit_is_read_queue(nxt_unit_read_buf_t *rbuf);
132nxt_inline int nxt_unit_is_read_socket(nxt_unit_read_buf_t *rbuf);
133nxt_inline int nxt_unit_is_shm_ack(nxt_unit_read_buf_t *rbuf);
134nxt_inline int nxt_unit_is_quit(nxt_unit_read_buf_t *rbuf);
135static int nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx,
136 nxt_unit_port_t *port);
137static void nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl);
138static nxt_unit_port_t *nxt_unit_create_port(nxt_unit_ctx_t *ctx);
139
140static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst,
141 nxt_unit_port_t *port, int queue_fd);
142
143nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port);
144nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port);
145static nxt_unit_port_t *nxt_unit_add_port(nxt_unit_ctx_t *ctx,
146 nxt_unit_port_t *port, void *queue);
147static void nxt_unit_remove_port(nxt_unit_impl_t *lib,
148 nxt_unit_port_id_t *port_id);
149static nxt_unit_port_t *nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib,
150 nxt_unit_port_id_t *port_id);
151static void nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid);
152static void nxt_unit_remove_process(nxt_unit_impl_t *lib,
153 nxt_unit_process_t *process);
154static void nxt_unit_quit(nxt_unit_ctx_t *ctx);
155static int nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id);
156static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx,
157 nxt_unit_port_t *port, const void *buf, size_t buf_size,
158 const void *oob, size_t oob_size);
159static ssize_t nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd,
160 const void *buf, size_t buf_size, const void *oob, size_t oob_size);
161static int nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
162 nxt_unit_read_buf_t *rbuf);
163nxt_inline void nxt_unit_rbuf_cpy(nxt_unit_read_buf_t *dst,
164 nxt_unit_read_buf_t *src);
165static int nxt_unit_shared_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
166 nxt_unit_read_buf_t *rbuf);
167static int nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
168 nxt_unit_read_buf_t *rbuf);
169static int nxt_unit_port_queue_recv(nxt_unit_port_t *port,
170 nxt_unit_read_buf_t *rbuf);
171static int nxt_unit_app_queue_recv(nxt_unit_port_t *port,
172 nxt_unit_read_buf_t *rbuf);
173nxt_inline int nxt_unit_close(int fd);
174static int nxt_unit_fd_blocking(int fd);
175
176static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash,
177 nxt_unit_port_t *port);
178static nxt_unit_port_t *nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash,
179 nxt_unit_port_id_t *port_id, int remove);
180
181static int nxt_unit_request_hash_add(nxt_unit_ctx_t *ctx,
182 nxt_unit_request_info_t *req);
183static nxt_unit_request_info_t *nxt_unit_request_hash_find(
184 nxt_unit_ctx_t *ctx, uint32_t stream, int remove);
185
186static char * nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level);
187static void *nxt_unit_lvlhsh_alloc(void *data, size_t size);
188static void nxt_unit_lvlhsh_free(void *data, void *p);
189static int nxt_unit_memcasecmp(const void *p1, const void *p2, size_t length);
190
191
192struct nxt_unit_mmap_buf_s {
193 nxt_unit_buf_t buf;
194
195 nxt_unit_mmap_buf_t *next;
196 nxt_unit_mmap_buf_t **prev;
197
198 nxt_port_mmap_header_t *hdr;
199 nxt_unit_request_info_t *req;
200 nxt_unit_ctx_impl_t *ctx_impl;
201 char *free_ptr;
202 char *plain_ptr;
203};
204
205
206struct nxt_unit_recv_msg_s {
207 uint32_t stream;
208 nxt_pid_t pid;
209 nxt_port_id_t reply_port;
210
211 uint8_t last; /* 1 bit */
212 uint8_t mmap; /* 1 bit */
213
214 void *start;
215 uint32_t size;
216
217 int fd[2];
218
219 nxt_unit_mmap_buf_t *incoming_buf;
220};
221
222
223typedef enum {
224 NXT_UNIT_RS_START = 0,
225 NXT_UNIT_RS_RESPONSE_INIT,
226 NXT_UNIT_RS_RESPONSE_HAS_CONTENT,
227 NXT_UNIT_RS_RESPONSE_SENT,
228 NXT_UNIT_RS_RELEASED,
229} nxt_unit_req_state_t;
230
231
232struct nxt_unit_request_info_impl_s {
233 nxt_unit_request_info_t req;
234
235 uint32_t stream;
236
237 nxt_unit_mmap_buf_t *outgoing_buf;
238 nxt_unit_mmap_buf_t *incoming_buf;
239
240 nxt_unit_req_state_t state;
241 uint8_t websocket;
242 uint8_t in_hash;
243
244 /* for nxt_unit_ctx_impl_t.free_req or active_req */
245 nxt_queue_link_t link;
246 /* for nxt_unit_port_impl_t.awaiting_req */
247 nxt_queue_link_t port_wait_link;
248
249 char extra_data[];
250};
251
252
253struct nxt_unit_websocket_frame_impl_s {
254 nxt_unit_websocket_frame_t ws;
255
256 nxt_unit_mmap_buf_t *buf;
257
258 nxt_queue_link_t link;
259
260 nxt_unit_ctx_impl_t *ctx_impl;
261};
262
263
264struct nxt_unit_read_buf_s {
265 nxt_queue_link_t link;
266 nxt_unit_ctx_impl_t *ctx_impl;
267 ssize_t size;
268 char buf[16384];
269 char oob[256];
270};
271
272
273struct nxt_unit_ctx_impl_s {
274 nxt_unit_ctx_t ctx;
275
276 nxt_atomic_t use_count;
277 nxt_atomic_t wait_items;
278
279 pthread_mutex_t mutex;
280
281 nxt_unit_port_t *read_port;
282
283 nxt_queue_link_t link;
284
285 nxt_unit_mmap_buf_t *free_buf;
286
287 /* of nxt_unit_request_info_impl_t */
288 nxt_queue_t free_req;
289
290 /* of nxt_unit_websocket_frame_impl_t */
291 nxt_queue_t free_ws;
292
293 /* of nxt_unit_request_info_impl_t */
294 nxt_queue_t active_req;
295
296 /* of nxt_unit_request_info_impl_t */
297 nxt_lvlhsh_t requests;
298
299 /* of nxt_unit_request_info_impl_t */
300 nxt_queue_t ready_req;
301
302 /* of nxt_unit_read_buf_t */
303 nxt_queue_t pending_rbuf;
304
305 /* of nxt_unit_read_buf_t */
306 nxt_queue_t free_rbuf;
307
308 nxt_unit_mmap_buf_t ctx_buf[2];
309 nxt_unit_read_buf_t ctx_read_buf;
310
311 nxt_unit_request_info_impl_t req;
312};
313
314
315struct nxt_unit_mmap_s {
316 nxt_port_mmap_header_t *hdr;
317
318 /* of nxt_unit_read_buf_t */
319 nxt_queue_t awaiting_rbuf;
320};
321
322
323struct nxt_unit_mmaps_s {
324 pthread_mutex_t mutex;
325 uint32_t size;
326 uint32_t cap;
327 nxt_atomic_t allocated_chunks;
328 nxt_unit_mmap_t *elts;
329};
330
331
332struct nxt_unit_impl_s {
333 nxt_unit_t unit;
334 nxt_unit_callbacks_t callbacks;
335
336 nxt_atomic_t use_count;
337
338 uint32_t request_data_size;
339 uint32_t shm_mmap_limit;
340
341 pthread_mutex_t mutex;
342
343 nxt_lvlhsh_t processes; /* of nxt_unit_process_t */
344 nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */
345
346 nxt_unit_port_t *router_port;
347 nxt_unit_port_t *shared_port;
348
349 nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */
350
351 nxt_unit_mmaps_t incoming;
352 nxt_unit_mmaps_t outgoing;
353
354 pid_t pid;
355 int log_fd;
356 int online;
357
358 nxt_unit_ctx_impl_t main_ctx;
359};
360
361
362struct nxt_unit_port_impl_s {
363 nxt_unit_port_t port;
364
365 nxt_atomic_t use_count;
366
367 /* for nxt_unit_process_t.ports */
368 nxt_queue_link_t link;
369 nxt_unit_process_t *process;
370
371 /* of nxt_unit_request_info_impl_t */
372 nxt_queue_t awaiting_req;
373
374 int ready;
375
376 void *queue;
377
378 int from_socket;
379 nxt_unit_read_buf_t *socket_rbuf;
380};
381
382
383struct nxt_unit_process_s {
384 pid_t pid;
385
386 nxt_queue_t ports; /* of nxt_unit_port_impl_t */
387
388 nxt_unit_impl_t *lib;
389
390 nxt_atomic_t use_count;
391
392 uint32_t next_port_id;
393};
394
395
396/* Explicitly use 32-bit types to avoid possible alignment issues. */
397typedef struct {
398 int32_t pid;
399 uint32_t id;
400} nxt_unit_port_hash_id_t;
401
402
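/*
 * Library entry point.  Creates the library instance, obtains the ready,
 * router and read ports either from the init structure or from the
 * NXT_UNIT_INIT environment variable, registers the router and read ports,
 * creates and maps the shared port queue, and sends the READY message over
 * the ready port.  Returns the main context, or NULL on failure.
 */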
403nxt_unit_ctx_t *
404nxt_unit_init(nxt_unit_init_t *init)
405{
406 int rc, queue_fd;
407 void *mem;
408 uint32_t ready_stream, shm_limit;
409 nxt_unit_ctx_t *ctx;
410 nxt_unit_impl_t *lib;
411 nxt_unit_port_t ready_port, router_port, read_port;
412
413 lib = nxt_unit_create(init);
414 if (nxt_slow_path(lib == NULL)) {
415 return NULL;
416 }
417
418 queue_fd = -1;
419 mem = MAP_FAILED;
420
421 if (init->ready_port.id.pid != 0
422 && init->ready_stream != 0
423 && init->read_port.id.pid != 0)
424 {
425 ready_port = init->ready_port;
426 ready_stream = init->ready_stream;
427 router_port = init->router_port;
428 read_port = init->read_port;
429 lib->log_fd = init->log_fd;
430
431 nxt_unit_port_id_init(&ready_port.id, ready_port.id.pid,
432 ready_port.id.id);
433 nxt_unit_port_id_init(&router_port.id, router_port.id.pid,
434 router_port.id.id);
435 nxt_unit_port_id_init(&read_port.id, read_port.id.pid,
436 read_port.id.id);
437
438 } else {
439 rc = nxt_unit_read_env(&ready_port, &router_port, &read_port,
440 &lib->log_fd, &ready_stream, &shm_limit);
441 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
442 goto fail;
443 }
444
445 lib->shm_mmap_limit = (shm_limit + PORT_MMAP_DATA_SIZE - 1)
446 / PORT_MMAP_DATA_SIZE;
447 }
448
449 if (nxt_slow_path(lib->shm_mmap_limit < 1)) {
450 lib->shm_mmap_limit = 1;
451 }
452
453 lib->pid = read_port.id.pid;
454
455 ctx = &lib->main_ctx.ctx;
456
457 rc = nxt_unit_fd_blocking(router_port.out_fd);
458 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
459 goto fail;
460 }
461
462 lib->router_port = nxt_unit_add_port(ctx, &router_port, NULL);
463 if (nxt_slow_path(lib->router_port == NULL)) {
464 nxt_unit_alert(NULL, "failed to add router_port");
465
466 goto fail;
467 }
468
469 queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t));
470 if (nxt_slow_path(queue_fd == -1)) {
471 goto fail;
472 }
473
474 mem = mmap(NULL, sizeof(nxt_port_queue_t),
475 PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0);
476 if (nxt_slow_path(mem == MAP_FAILED)) {
477 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd,
478 strerror(errno), errno);
479
480 goto fail;
481 }
482
483 nxt_port_queue_init(mem);
484
485 rc = nxt_unit_fd_blocking(read_port.in_fd);
486 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
487 goto fail;
488 }
489
490 lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port, mem);
491 if (nxt_slow_path(lib->main_ctx.read_port == NULL)) {
492 nxt_unit_alert(NULL, "failed to add read_port");
493
494 goto fail;
495 }
496
497 rc = nxt_unit_fd_blocking(ready_port.out_fd);
498 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
499 goto fail;
500 }
501
502 rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream, queue_fd);
503 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
504 nxt_unit_alert(NULL, "failed to send READY message");
505
506 goto fail;
507 }
508
509 nxt_unit_close(ready_port.out_fd);
510 nxt_unit_close(queue_fd);
511
512 return ctx;
513
514fail:
515
516 if (mem != MAP_FAILED) {
517 munmap(mem, sizeof(nxt_port_queue_t));
518 }
519
520 if (queue_fd != -1) {
521 nxt_unit_close(queue_fd);
522 }
523
524 nxt_unit_ctx_release(&lib->main_ctx.ctx);
525
526 return NULL;
527}
528
529
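/*
 * Allocates and initializes the library object: callbacks, request data
 * size, shared memory mmap limit, the main context and the incoming and
 * outgoing mmap arrays.  The request_handler callback is required.
 */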
530static nxt_unit_impl_t *
531nxt_unit_create(nxt_unit_init_t *init)
532{
533 int rc;
534 nxt_unit_impl_t *lib;
535 nxt_unit_callbacks_t *cb;
536
537 lib = nxt_unit_malloc(NULL,
538 sizeof(nxt_unit_impl_t) + init->request_data_size);
539 if (nxt_slow_path(lib == NULL)) {
540 nxt_unit_alert(NULL, "failed to allocate unit struct");
541
542 return NULL;
543 }
544
545 rc = pthread_mutex_init(&lib->mutex, NULL);
546 if (nxt_slow_path(rc != 0)) {
547 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc);
548
549 goto fail;
550 }
551
552 lib->unit.data = init->data;
553 lib->callbacks = init->callbacks;
554
555 lib->request_data_size = init->request_data_size;
556 lib->shm_mmap_limit = (init->shm_limit + PORT_MMAP_DATA_SIZE - 1)
557 / PORT_MMAP_DATA_SIZE;
558
559 lib->processes.slot = NULL;
560 lib->ports.slot = NULL;
561
562 lib->log_fd = STDERR_FILENO;
563 lib->online = 1;
564
565 nxt_queue_init(&lib->contexts);
566
567 lib->use_count = 0;
568 lib->router_port = NULL;
569 lib->shared_port = NULL;
570
571 rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data);
572 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
573 pthread_mutex_destroy(&lib->mutex);
574 goto fail;
575 }
576
577 cb = &lib->callbacks;
578
579 if (cb->request_handler == NULL) {
580 nxt_unit_alert(NULL, "request_handler is NULL");
581
582 pthread_mutex_destroy(&lib->mutex);
583 goto fail;
584 }
585
586 nxt_unit_mmaps_init(&lib->incoming);
587 nxt_unit_mmaps_init(&lib->outgoing);
588
589 return lib;
590
591fail:
592
593 nxt_unit_free(NULL, lib);
594
595 return NULL;
596}
597
598
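/*
 * Initializes a context: its mutex, the free/active request queues, the
 * websocket frame and read buffer pools, and links the context into the
 * library's context list, taking a reference on the library.
 */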
599static int
600nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl,
601 void *data)
602{
603 int rc;
604
605 ctx_impl->ctx.data = data;
606 ctx_impl->ctx.unit = &lib->unit;
607
608 rc = pthread_mutex_init(&ctx_impl->mutex, NULL);
609 if (nxt_slow_path(rc != 0)) {
610 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc);
611
612 return NXT_UNIT_ERROR;
613 }
614
615 nxt_unit_lib_use(lib);
616
617 nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link);
618
619 ctx_impl->use_count = 1;
620 ctx_impl->wait_items = 0;
621
622 nxt_queue_init(&ctx_impl->free_req);
623 nxt_queue_init(&ctx_impl->free_ws);
624 nxt_queue_init(&ctx_impl->active_req);
625 nxt_queue_init(&ctx_impl->ready_req);
626 nxt_queue_init(&ctx_impl->pending_rbuf);
627 nxt_queue_init(&ctx_impl->free_rbuf);
628
629 ctx_impl->free_buf = NULL;
630 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]);
631 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[0]);
632
633 nxt_queue_insert_tail(&ctx_impl->free_req, &ctx_impl->req.link);
634 nxt_queue_insert_tail(&ctx_impl->free_rbuf, &ctx_impl->ctx_read_buf.link);
635
636 ctx_impl->ctx_read_buf.ctx_impl = ctx_impl;
637
638 ctx_impl->req.req.ctx = &ctx_impl->ctx;
639 ctx_impl->req.req.unit = &lib->unit;
640
641 ctx_impl->read_port = NULL;
642 ctx_impl->requests.slot = 0;
643
644 return NXT_UNIT_OK;
645}
646
647
648nxt_inline void
649nxt_unit_ctx_use(nxt_unit_ctx_t *ctx)
650{
651 nxt_unit_ctx_impl_t *ctx_impl;
652
653 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
654
655 nxt_atomic_fetch_add(&ctx_impl->use_count, 1);
656}
657
658
659nxt_inline void
660nxt_unit_ctx_release(nxt_unit_ctx_t *ctx)
661{
662 long c;
663 nxt_unit_ctx_impl_t *ctx_impl;
664
665 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
666
667 c = nxt_atomic_fetch_add(&ctx_impl->use_count, -1);
668
669 if (c == 1) {
670 nxt_unit_ctx_free(ctx_impl);
671 }
672}
673
674
675nxt_inline void
676nxt_unit_lib_use(nxt_unit_impl_t *lib)
677{
678 nxt_atomic_fetch_add(&lib->use_count, 1);
679}
680
681
682nxt_inline void
683nxt_unit_lib_release(nxt_unit_impl_t *lib)
684{
685 long c;
686 nxt_unit_process_t *process;
687
688 c = nxt_atomic_fetch_add(&lib->use_count, -1);
689
690 if (c == 1) {
691 for ( ;; ) {
692 pthread_mutex_lock(&lib->mutex);
693
694 process = nxt_unit_process_pop_first(lib);
695 if (process == NULL) {
696 pthread_mutex_unlock(&lib->mutex);
697
698 break;
699 }
700
701 nxt_unit_remove_process(lib, process);
702 }
703
704 pthread_mutex_destroy(&lib->mutex);
705
706 if (nxt_fast_path(lib->router_port != NULL)) {
707 nxt_unit_port_release(lib->router_port);
708 }
709
710 if (nxt_fast_path(lib->shared_port != NULL)) {
711 nxt_unit_port_release(lib->shared_port);
712 }
713
714 nxt_unit_mmaps_destroy(&lib->incoming);
715 nxt_unit_mmaps_destroy(&lib->outgoing);
716
717 nxt_unit_free(NULL, lib);
718 }
719}
720
721
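/*
 * Buffer list helpers.  Buffers are kept in a singly linked list where
 * "prev" points to the previous element's "next" field (or to the list
 * head), which allows unlinking a buffer in O(1) without knowing the head.
 */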
722nxt_inline void
723nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head,
724 nxt_unit_mmap_buf_t *mmap_buf)
725{
726 mmap_buf->next = *head;
727
728 if (mmap_buf->next != NULL) {
729 mmap_buf->next->prev = &mmap_buf->next;
730 }
731
732 *head = mmap_buf;
733 mmap_buf->prev = head;
734}
735
736
737nxt_inline void
738nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev,
739 nxt_unit_mmap_buf_t *mmap_buf)
740{
741 while (*prev != NULL) {
742 prev = &(*prev)->next;
743 }
744
745 nxt_unit_mmap_buf_insert(prev, mmap_buf);
746}
747
748
749nxt_inline void
750nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf)
751{
752 nxt_unit_mmap_buf_t **prev;
753
754 prev = mmap_buf->prev;
755
756 if (mmap_buf->next != NULL) {
757 mmap_buf->next->prev = prev;
758 }
759
760 if (prev != NULL) {
761 *prev = mmap_buf->next;
762 }
763}
764
765
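/*
 * Parses the NXT_UNIT_INIT environment variable: after a version check it
 * scans the ready stream id, the ready/router/read port triplets
 * (pid, id, fd), the log file descriptor and the shared memory limit.
 */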
766static int
767nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port,
768 nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream,
769 uint32_t *shm_limit)
770{
771 int rc;
772 int ready_fd, router_fd, read_fd;
773 char *unit_init, *version_end;
774 long version_length;
775 int64_t ready_pid, router_pid, read_pid;
776 uint32_t ready_stream, router_id, ready_id, read_id;
777
778 unit_init = getenv(NXT_UNIT_INIT_ENV);
779 if (nxt_slow_path(unit_init == NULL)) {
780 nxt_unit_alert(NULL, "%s is not in the current environment",
781 NXT_UNIT_INIT_ENV);
782
783 return NXT_UNIT_ERROR;
784 }
785
786 nxt_unit_debug(NULL, "%s='%s'", NXT_UNIT_INIT_ENV, unit_init);
787
788 version_length = nxt_length(NXT_VERSION);
789
790 version_end = strchr(unit_init, ';');
791 if (version_end == NULL
792 || version_end - unit_init != version_length
793 || memcmp(unit_init, NXT_VERSION, version_length) != 0)
794 {
795 nxt_unit_alert(NULL, "version check error");
796
797 return NXT_UNIT_ERROR;
798 }
799
800 rc = sscanf(version_end + 1,
801 "%"PRIu32";"
802 "%"PRId64",%"PRIu32",%d;"
803 "%"PRId64",%"PRIu32",%d;"
804 "%"PRId64",%"PRIu32",%d;"
805 "%d,%"PRIu32,
806 &ready_stream,
807 &ready_pid, &ready_id, &ready_fd,
808 &router_pid, &router_id, &router_fd,
809 &read_pid, &read_id, &read_fd,
810 log_fd, shm_limit);
811
812 if (nxt_slow_path(rc != 12)) {
813 nxt_unit_alert(NULL, "failed to scan variables: %d", rc);
814
815 return NXT_UNIT_ERROR;
816 }
817
818 nxt_unit_port_id_init(&ready_port->id, (pid_t) ready_pid, ready_id);
819
820 ready_port->in_fd = -1;
821 ready_port->out_fd = ready_fd;
822 ready_port->data = NULL;
823
824 nxt_unit_port_id_init(&router_port->id, (pid_t) router_pid, router_id);
825
826 router_port->in_fd = -1;
827 router_port->out_fd = router_fd;
828 router_port->data = NULL;
829
830 nxt_unit_port_id_init(&read_port->id, (pid_t) read_pid, read_id);
831
832 read_port->in_fd = read_fd;
833 read_port->out_fd = -1;
834 read_port->data = NULL;
835
836 *stream = ready_stream;
837
838 return NXT_UNIT_OK;
839}
840
841
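/*
 * Sends the _NXT_PORT_MSG_PROCESS_READY message over the ready port,
 * passing the port queue file descriptor as SCM_RIGHTS ancillary data.
 */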
842static int
843nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream, int queue_fd)
844{
845 ssize_t res;
846 nxt_port_msg_t msg;
847 nxt_unit_impl_t *lib;
848
849 union {
850 struct cmsghdr cm;
851 char space[CMSG_SPACE(sizeof(int))];
852 } cmsg;
853
854 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
855
856 msg.stream = stream;
857 msg.pid = lib->pid;
858 msg.reply_port = 0;
859 msg.type = _NXT_PORT_MSG_PROCESS_READY;
860 msg.last = 1;
861 msg.mmap = 0;
862 msg.nf = 0;
863 msg.mf = 0;
864 msg.tracking = 0;
865
866 memset(&cmsg, 0, sizeof(cmsg));
867
868 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
869 cmsg.cm.cmsg_level = SOL_SOCKET;
870 cmsg.cm.cmsg_type = SCM_RIGHTS;
871
872 /*
873 * memcpy() is used instead of simple
874 * *(int *) CMSG_DATA(&cmsg.cm) = fd;
875 * because GCC 4.4 with -O2/3/s optimization may issue a warning:
876 * dereferencing type-punned pointer will break strict-aliasing rules
877 *
878     * Fortunately, GCC with -O1 compiles this memcpy()
879     * into the same simple assignment as the code above.
880 */
881 memcpy(CMSG_DATA(&cmsg.cm), &queue_fd, sizeof(int));
882
883 res = nxt_unit_sendmsg(ctx, ready_fd, &msg, sizeof(msg),
884 &cmsg, sizeof(cmsg));
885 if (res != sizeof(msg)) {
886 return NXT_UNIT_ERROR;
887 }
888
889 return NXT_UNIT_OK;
890}
891
892
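/*
 * Parses and dispatches a single port message from the read buffer:
 * extracts any file descriptors passed via SCM_RIGHTS, validates the
 * message header, reads the shared memory payload if needed, and handles
 * the message according to its type (new port, request headers/body,
 * websocket frame, mmap, remove pid, shm ack, quit).  Descriptors and
 * buffers not consumed by a handler are released before returning.
 */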
893static int
894nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf)
895{
896 int rc;
897 pid_t pid;
898 struct cmsghdr *cm;
899 nxt_port_msg_t *port_msg;
900 nxt_unit_impl_t *lib;
901 nxt_unit_recv_msg_t recv_msg;
902
903 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
904
905 recv_msg.fd[0] = -1;
906 recv_msg.fd[1] = -1;
907 port_msg = (nxt_port_msg_t *) rbuf->buf;
908 cm = (struct cmsghdr *) rbuf->oob;
909
910 if (cm->cmsg_level == SOL_SOCKET
911 && cm->cmsg_type == SCM_RIGHTS)
912 {
913 if (cm->cmsg_len == CMSG_LEN(sizeof(int))) {
914 memcpy(recv_msg.fd, CMSG_DATA(cm), sizeof(int));
915 }
916
917 if (cm->cmsg_len == CMSG_LEN(sizeof(int) * 2)) {
918 memcpy(recv_msg.fd, CMSG_DATA(cm), sizeof(int) * 2);
919 }
920 }
921
922 recv_msg.incoming_buf = NULL;
923
924 if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) {
925 if (nxt_slow_path(rbuf->size == 0)) {
926 nxt_unit_debug(ctx, "read port closed");
927
928 nxt_unit_quit(ctx);
929 rc = NXT_UNIT_OK;
930 goto done;
931 }
932
933 nxt_unit_alert(ctx, "message too small (%d bytes)", (int) rbuf->size);
934
935 rc = NXT_UNIT_ERROR;
936 goto done;
937 }
938
939 nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd[0] %d fd[1] %d",
940 port_msg->stream, (int) port_msg->type,
941 recv_msg.fd[0], recv_msg.fd[1]);
942
943 recv_msg.stream = port_msg->stream;
944 recv_msg.pid = port_msg->pid;
945 recv_msg.reply_port = port_msg->reply_port;
946 recv_msg.last = port_msg->last;
947 recv_msg.mmap = port_msg->mmap;
948
949 recv_msg.start = port_msg + 1;
950 recv_msg.size = rbuf->size - sizeof(nxt_port_msg_t);
951
952 if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) {
953 nxt_unit_alert(ctx, "#%"PRIu32": unknown message type (%d)",
954 port_msg->stream, (int) port_msg->type);
955 rc = NXT_UNIT_ERROR;
956 goto done;
957 }
958
959 /* Fragmentation is unsupported. */
960 if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) {
961 nxt_unit_alert(ctx, "#%"PRIu32": fragmented message type (%d)",
962 port_msg->stream, (int) port_msg->type);
963 rc = NXT_UNIT_ERROR;
964 goto done;
965 }
966
967 if (port_msg->mmap) {
968 rc = nxt_unit_mmap_read(ctx, &recv_msg, rbuf);
969
970 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
971 if (rc == NXT_UNIT_AGAIN) {
972 recv_msg.fd[0] = -1;
973 recv_msg.fd[1] = -1;
974 }
975
976 goto done;
977 }
978 }
979
980 switch (port_msg->type) {
981
982 case _NXT_PORT_MSG_QUIT:
983 nxt_unit_debug(ctx, "#%"PRIu32": quit", port_msg->stream);
984
985 nxt_unit_quit(ctx);
986 rc = NXT_UNIT_OK;
987 break;
988
989 case _NXT_PORT_MSG_NEW_PORT:
990 rc = nxt_unit_process_new_port(ctx, &recv_msg);
991 break;
992
993 case _NXT_PORT_MSG_CHANGE_FILE:
994 nxt_unit_debug(ctx, "#%"PRIu32": change_file: fd %d",
995 port_msg->stream, recv_msg.fd[0]);
996
997 if (dup2(recv_msg.fd[0], lib->log_fd) == -1) {
998 nxt_unit_alert(ctx, "#%"PRIu32": dup2(%d, %d) failed: %s (%d)",
999 port_msg->stream, recv_msg.fd[0], lib->log_fd,
1000 strerror(errno), errno);
1001
1002 rc = NXT_UNIT_ERROR;
1003 goto done;
1004 }
1005
1006 rc = NXT_UNIT_OK;
1007 break;
1008
1009 case _NXT_PORT_MSG_MMAP:
1010 if (nxt_slow_path(recv_msg.fd[0] < 0)) {
1011 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap",
1012 port_msg->stream, recv_msg.fd[0]);
1013
1014 rc = NXT_UNIT_ERROR;
1015 goto done;
1016 }
1017
1018 rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd[0]);
1019 break;
1020
1021 case _NXT_PORT_MSG_REQ_HEADERS:
1022 rc = nxt_unit_process_req_headers(ctx, &recv_msg);
1023 break;
1024
1025 case _NXT_PORT_MSG_REQ_BODY:
1026 rc = nxt_unit_process_req_body(ctx, &recv_msg);
1027 break;
1028
1029 case _NXT_PORT_MSG_WEBSOCKET:
1030 rc = nxt_unit_process_websocket(ctx, &recv_msg);
1031 break;
1032
1033 case _NXT_PORT_MSG_REMOVE_PID:
1034 if (nxt_slow_path(recv_msg.size != sizeof(pid))) {
1035 nxt_unit_alert(ctx, "#%"PRIu32": remove_pid: invalid message size "
1036 "(%d != %d)", port_msg->stream, (int) recv_msg.size,
1037 (int) sizeof(pid));
1038
1039 rc = NXT_UNIT_ERROR;
1040 goto done;
1041 }
1042
1043 memcpy(&pid, recv_msg.start, sizeof(pid));
1044
1045 nxt_unit_debug(ctx, "#%"PRIu32": remove_pid: %d",
1046 port_msg->stream, (int) pid);
1047
1048 nxt_unit_remove_pid(lib, pid);
1049
1050 rc = NXT_UNIT_OK;
1051 break;
1052
1053 case _NXT_PORT_MSG_SHM_ACK:
1054 rc = nxt_unit_process_shm_ack(ctx);
1055 break;
1056
1057 default:
1058 nxt_unit_debug(ctx, "#%"PRIu32": ignore message type: %d",
1059 port_msg->stream, (int) port_msg->type);
1060
1061 rc = NXT_UNIT_ERROR;
1062 goto done;
1063 }
1064
1065done:
1066
1067 if (recv_msg.fd[0] != -1) {
1068 nxt_unit_close(recv_msg.fd[0]);
1069 }
1070
1071 if (recv_msg.fd[1] != -1) {
1072 nxt_unit_close(recv_msg.fd[1]);
1073 }
1074
1075 while (recv_msg.incoming_buf != NULL) {
1076 nxt_unit_mmap_buf_free(recv_msg.incoming_buf);
1077 }
1078
1079 if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) {
1080#if (NXT_DEBUG)
1081 memset(rbuf->buf, 0xAC, rbuf->size);
1082#endif
1083 nxt_unit_read_buf_release(ctx, rbuf);
1084 }
1085
1086 return rc;
1087}
1088
1089
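/*
 * Handles a NEW_PORT message: fd[0] is the port socket and fd[1] the shared
 * queue.  A port id of -1 denotes the shared application port (app queue);
 * otherwise a regular outgoing port (port queue) is registered.
 */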
1090static int
1091nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1092{
1093 void *mem;
1094 nxt_unit_impl_t *lib;
1095 nxt_unit_port_t new_port, *port;
1096 nxt_port_msg_new_port_t *new_port_msg;
1097
1098 if (nxt_slow_path(recv_msg->size != sizeof(nxt_port_msg_new_port_t))) {
1099 nxt_unit_warn(ctx, "#%"PRIu32": new_port: "
1100 "invalid message size (%d)",
1101 recv_msg->stream, (int) recv_msg->size);
1102
1103 return NXT_UNIT_ERROR;
1104 }
1105
1106 if (nxt_slow_path(recv_msg->fd[0] < 0)) {
1107 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for new port",
1108 recv_msg->stream, recv_msg->fd[0]);
1109
1110 return NXT_UNIT_ERROR;
1111 }
1112
1113 new_port_msg = recv_msg->start;
1114
1115 nxt_unit_debug(ctx, "#%"PRIu32": new_port: port{%d,%d} fd[0] %d fd[1] %d",
1116 recv_msg->stream, (int) new_port_msg->pid,
1117 (int) new_port_msg->id, recv_msg->fd[0], recv_msg->fd[1]);
1118
1119 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1120
1121 if (new_port_msg->id == (nxt_port_id_t) -1) {
1122 nxt_unit_port_id_init(&new_port.id, lib->pid, new_port_msg->id);
1123
1124 new_port.in_fd = recv_msg->fd[0];
1125 new_port.out_fd = -1;
1126
1127 mem = mmap(NULL, sizeof(nxt_app_queue_t), PROT_READ | PROT_WRITE,
1128 MAP_SHARED, recv_msg->fd[1], 0);
1129
1130 } else {
1131 if (nxt_slow_path(nxt_unit_fd_blocking(recv_msg->fd[0])
1132 != NXT_UNIT_OK))
1133 {
1134 return NXT_UNIT_ERROR;
1135 }
1136
1137 nxt_unit_port_id_init(&new_port.id, new_port_msg->pid,
1138 new_port_msg->id);
1139
1140 new_port.in_fd = -1;
1141 new_port.out_fd = recv_msg->fd[0];
1142
1143 mem = mmap(NULL, sizeof(nxt_port_queue_t), PROT_READ | PROT_WRITE,
1144 MAP_SHARED, recv_msg->fd[1], 0);
1145 }
1146
1147 if (nxt_slow_path(mem == MAP_FAILED)) {
1148 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", recv_msg->fd[1],
1149 strerror(errno), errno);
1150
1151 return NXT_UNIT_ERROR;
1152 }
1153
1154 new_port.data = NULL;
1155
1156 recv_msg->fd[0] = -1;
1157
1158 port = nxt_unit_add_port(ctx, &new_port, mem);
1159 if (nxt_slow_path(port == NULL)) {
1160 return NXT_UNIT_ERROR;
1161 }
1162
1163 if (new_port_msg->id == (nxt_port_id_t) -1) {
1164 lib->shared_port = port;
1165
1166 } else {
1167 nxt_unit_port_release(port);
1168 }
1169
1170 return NXT_UNIT_OK;
1171}
1172
1173
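/*
 * Handles a REQ_HEADERS message: allocates a request info structure,
 * attaches the incoming shared memory buffers, resolves the response port
 * (parking the request until the port is available, if necessary), sends
 * the headers ACK, registers the request in the hash when more body data
 * is expected, and finally invokes the request_handler callback.
 */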
1174static int
1175nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1176{
1177 int res;
1178 nxt_unit_impl_t *lib;
1179 nxt_unit_port_id_t port_id;
1180 nxt_unit_request_t *r;
1181 nxt_unit_mmap_buf_t *b;
1182 nxt_unit_request_info_t *req;
1183 nxt_unit_request_info_impl_t *req_impl;
1184
1185 if (nxt_slow_path(recv_msg->mmap == 0)) {
1186 nxt_unit_warn(ctx, "#%"PRIu32": data is not in shared memory",
1187 recv_msg->stream);
1188
1189 return NXT_UNIT_ERROR;
1190 }
1191
1192 if (nxt_slow_path(recv_msg->size < sizeof(nxt_unit_request_t))) {
1193 nxt_unit_warn(ctx, "#%"PRIu32": data too short: %d while at least "
1194 "%d expected", recv_msg->stream, (int) recv_msg->size,
1195 (int) sizeof(nxt_unit_request_t));
1196
1197 return NXT_UNIT_ERROR;
1198 }
1199
1200 req_impl = nxt_unit_request_info_get(ctx);
1201 if (nxt_slow_path(req_impl == NULL)) {
1202 nxt_unit_warn(ctx, "#%"PRIu32": request info allocation failed",
1203 recv_msg->stream);
1204
1205 return NXT_UNIT_ERROR;
1206 }
1207
1208 req = &req_impl->req;
1209
1210 req->request = recv_msg->start;
1211
1212 b = recv_msg->incoming_buf;
1213
1214 req->request_buf = &b->buf;
1215 req->response = NULL;
1216 req->response_buf = NULL;
1217
1218 r = req->request;
1219
1220 req->content_length = r->content_length;
1221
1222 req->content_buf = req->request_buf;
1223 req->content_buf->free = nxt_unit_sptr_get(&r->preread_content);
1224
1225 req_impl->stream = recv_msg->stream;
1226
1227 req_impl->outgoing_buf = NULL;
1228
1229 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
1230 b->req = req;
1231 }
1232
1233 /* "Move" incoming buffer list to req_impl. */
1234 req_impl->incoming_buf = recv_msg->incoming_buf;
1235 req_impl->incoming_buf->prev = &req_impl->incoming_buf;
1236 recv_msg->incoming_buf = NULL;
1237
1238 req->content_fd = recv_msg->fd[0];
1239 recv_msg->fd[0] = -1;
1240
1241 req->response_max_fields = 0;
1242 req_impl->state = NXT_UNIT_RS_START;
1243 req_impl->websocket = 0;
1244 req_impl->in_hash = 0;
1245
1246 nxt_unit_debug(ctx, "#%"PRIu32": %.*s %.*s (%d)", recv_msg->stream,
1247 (int) r->method_length,
1248 (char *) nxt_unit_sptr_get(&r->method),
1249 (int) r->target_length,
1250 (char *) nxt_unit_sptr_get(&r->target),
1251 (int) r->content_length);
1252
1253 nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port);
1254
1255 res = nxt_unit_request_check_response_port(req, &port_id);
1256 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
1257 return NXT_UNIT_ERROR;
1258 }
1259
1260 if (nxt_fast_path(res == NXT_UNIT_OK)) {
1261 res = nxt_unit_send_req_headers_ack(req);
1262 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
1263 nxt_unit_request_done(req, NXT_UNIT_ERROR);
1264
1265 return NXT_UNIT_ERROR;
1266 }
1267
1268 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1269
1270 if (req->content_length
1271 > (uint64_t) (req->content_buf->end - req->content_buf->free))
1272 {
1273 res = nxt_unit_request_hash_add(ctx, req);
1274 if (nxt_slow_path(res != NXT_UNIT_OK)) {
1275 nxt_unit_req_warn(req, "failed to add request to hash");
1276
1277 nxt_unit_request_done(req, NXT_UNIT_ERROR);
1278
1279 return NXT_UNIT_ERROR;
1280 }
1281
1282 /*
1283         * If the application has a separate data handler, we can start
1284         * processing the request now and handle the data when it arrives.
1285 */
1286 if (lib->callbacks.data_handler == NULL) {
1287 return NXT_UNIT_OK;
1288 }
1289 }
1290
1291 lib->callbacks.request_handler(req);
1292 }
1293
1294 return NXT_UNIT_OK;
1295}
1296
1297
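/*
 * Handles a REQ_BODY message: looks up the request by stream id, appends
 * the received buffers to its content list and either calls the
 * data_handler callback or, once the whole body (or a content fd) has
 * arrived, the request_handler callback.
 */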
1298static int
1299nxt_unit_process_req_body(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1300{
1301 uint64_t l;
1302 nxt_unit_impl_t *lib;
1303 nxt_unit_mmap_buf_t *b;
1304 nxt_unit_request_info_t *req;
1305
1306 req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last);
1307 if (req == NULL) {
1308 return NXT_UNIT_OK;
1309 }
1310
1311 l = req->content_buf->end - req->content_buf->free;
1312
1313 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
1314 b->req = req;
1315 l += b->buf.end - b->buf.free;
1316 }
1317
1318 if (recv_msg->incoming_buf != NULL) {
1319 b = nxt_container_of(req->content_buf, nxt_unit_mmap_buf_t, buf);
1320
1321 /* "Move" incoming buffer list to req_impl. */
1322 nxt_unit_mmap_buf_insert_tail(&b->next, recv_msg->incoming_buf);
1323 recv_msg->incoming_buf = NULL;
1324 }
1325
1326 req->content_fd = recv_msg->fd[0];
1327 recv_msg->fd[0] = -1;
1328
1329 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1330
1331 if (lib->callbacks.data_handler != NULL) {
1332 lib->callbacks.data_handler(req);
1333
1334 return NXT_UNIT_OK;
1335 }
1336
1337 if (req->content_fd != -1 || l == req->content_length) {
1338 lib->callbacks.request_handler(req);
1339 }
1340
1341 return NXT_UNIT_OK;
1342}
1343
1344
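/*
 * Resolves the port to which the response should be sent.  Returns
 * NXT_UNIT_OK when the port is known and ready.  Otherwise the request is
 * queued on the port's awaiting list (creating a placeholder port entry
 * and requesting the port via nxt_unit_get_port() if it is unknown) and
 * NXT_UNIT_AGAIN is returned.
 */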
1345static int
1346nxt_unit_request_check_response_port(nxt_unit_request_info_t *req,
1347 nxt_unit_port_id_t *port_id)
1348{
1349 int res;
1350 nxt_unit_ctx_t *ctx;
1351 nxt_unit_impl_t *lib;
1352 nxt_unit_port_t *port;
1353 nxt_unit_process_t *process;
1354 nxt_unit_ctx_impl_t *ctx_impl;
1355 nxt_unit_port_impl_t *port_impl;
1356 nxt_unit_request_info_impl_t *req_impl;
1357
1358 ctx = req->ctx;
1359 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1360 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
1361
1362 pthread_mutex_lock(&lib->mutex);
1363
1364 port = nxt_unit_port_hash_find(&lib->ports, port_id, 0);
1365 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
1366
1367 if (nxt_fast_path(port != NULL)) {
1368 req->response_port = port;
1369
1370 if (nxt_fast_path(port_impl->ready)) {
1371 pthread_mutex_unlock(&lib->mutex);
1372
1373 nxt_unit_debug(ctx, "check_response_port: found port{%d,%d}",
1374 (int) port->id.pid, (int) port->id.id);
1375
1376 return NXT_UNIT_OK;
1377 }
1378
1379 nxt_unit_debug(ctx, "check_response_port: "
1380 "port{%d,%d} already requested",
1381 (int) port->id.pid, (int) port->id.id);
1382
1383 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1384
1385 nxt_queue_insert_tail(&port_impl->awaiting_req,
1386 &req_impl->port_wait_link);
1387
1388 pthread_mutex_unlock(&lib->mutex);
1389
1390 nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);
1391
1392 return NXT_UNIT_AGAIN;
1393 }
1394
1395 port_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
1396 if (nxt_slow_path(port_impl == NULL)) {
1397 nxt_unit_alert(ctx, "check_response_port: malloc(%d) failed",
1398 (int) sizeof(nxt_unit_port_impl_t));
1399
1400 pthread_mutex_unlock(&lib->mutex);
1401
1402 return NXT_UNIT_ERROR;
1403 }
1404
1405 port = &port_impl->port;
1406
1407 port->id = *port_id;
1408 port->in_fd = -1;
1409 port->out_fd = -1;
1410 port->data = NULL;
1411
1412 res = nxt_unit_port_hash_add(&lib->ports, port);
1413 if (nxt_slow_path(res != NXT_UNIT_OK)) {
1414 nxt_unit_alert(ctx, "check_response_port: %d,%d hash_add failed",
1415 port->id.pid, port->id.id);
1416
1417 pthread_mutex_unlock(&lib->mutex);
1418
1419 nxt_unit_free(ctx, port);
1420
1421 return NXT_UNIT_ERROR;
1422 }
1423
1424 process = nxt_unit_process_find(lib, port_id->pid, 0);
1425 if (nxt_slow_path(process == NULL)) {
1426 nxt_unit_alert(ctx, "check_response_port: process %d not found",
1427 port->id.pid);
1428
1429 nxt_unit_port_hash_find(&lib->ports, port_id, 1);
1430
1431 pthread_mutex_unlock(&lib->mutex);
1432
1433 nxt_unit_free(ctx, port);
1434
1435 return NXT_UNIT_ERROR;
1436 }
1437
1438 nxt_queue_insert_tail(&process->ports, &port_impl->link);
1439
1440 port_impl->process = process;
1441 port_impl->queue = NULL;
1442 port_impl->from_socket = 0;
1443 port_impl->socket_rbuf = NULL;
1444
1445 nxt_queue_init(&port_impl->awaiting_req);
1446
1447 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1448
1449 nxt_queue_insert_tail(&port_impl->awaiting_req, &req_impl->port_wait_link);
1450
1451 port_impl->use_count = 2;
1452 port_impl->ready = 0;
1453
1454 req->response_port = port;
1455
1456 pthread_mutex_unlock(&lib->mutex);
1457
1458 res = nxt_unit_get_port(ctx, port_id);
1459 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
1460 return NXT_UNIT_ERROR;
1461 }
1462
1463 nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);
1464
1465 return NXT_UNIT_AGAIN;
1466}
1467
1468
1469static int
1470nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req)
1471{
1472 ssize_t res;
1473 nxt_port_msg_t msg;
1474 nxt_unit_impl_t *lib;
1475 nxt_unit_ctx_impl_t *ctx_impl;
1476 nxt_unit_request_info_impl_t *req_impl;
1477
1478 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
1479 ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);
1480 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1481
1482 memset(&msg, 0, sizeof(nxt_port_msg_t));
1483
1484 msg.stream = req_impl->stream;
1485 msg.pid = lib->pid;
1486 msg.reply_port = ctx_impl->read_port->id.id;
1487 msg.type = _NXT_PORT_MSG_REQ_HEADERS_ACK;
1488
1489 res = nxt_unit_port_send(req->ctx, req->response_port,
1490 &msg, sizeof(msg), NULL, 0);
1491 if (nxt_slow_path(res != sizeof(msg))) {
1492 return NXT_UNIT_ERROR;
1493 }
1494
1495 return NXT_UNIT_OK;
1496}
1497
1498
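/*
 * Handles a WEBSOCKET message: wraps the received data in a websocket
 * frame structure, parses the frame header, payload length and mask, and
 * calls the websocket_handler callback.  On the last frame the websocket
 * flag is cleared and the close_handler is called (or the request is
 * finished with an error if no close_handler is set).
 */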
1499static int
1500nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1501{
1502 size_t hsize;
1503 nxt_unit_impl_t *lib;
1504 nxt_unit_mmap_buf_t *b;
1505 nxt_unit_callbacks_t *cb;
1506 nxt_unit_request_info_t *req;
1507 nxt_unit_request_info_impl_t *req_impl;
1508 nxt_unit_websocket_frame_impl_t *ws_impl;
1509
1510 req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last);
1511 if (nxt_slow_path(req == NULL)) {
1512 return NXT_UNIT_OK;
1513 }
1514
1515 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1516
1517 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1518 cb = &lib->callbacks;
1519
1520 if (cb->websocket_handler && recv_msg->size >= 2) {
1521 ws_impl = nxt_unit_websocket_frame_get(ctx);
1522 if (nxt_slow_path(ws_impl == NULL)) {
1523 nxt_unit_warn(ctx, "#%"PRIu32": websocket frame allocation failed",
1524 req_impl->stream);
1525
1526 return NXT_UNIT_ERROR;
1527 }
1528
1529 ws_impl->ws.req = req;
1530
1531 ws_impl->buf = NULL;
1532
1533 if (recv_msg->mmap) {
1534 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
1535 b->req = req;
1536 }
1537
1538 /* "Move" incoming buffer list to ws_impl. */
1539 ws_impl->buf = recv_msg->incoming_buf;
1540 ws_impl->buf->prev = &ws_impl->buf;
1541 recv_msg->incoming_buf = NULL;
1542
1543 b = ws_impl->buf;
1544
1545 } else {
1546 b = nxt_unit_mmap_buf_get(ctx);
1547 if (nxt_slow_path(b == NULL)) {
1548 nxt_unit_alert(ctx, "#%"PRIu32": failed to allocate buf",
1549 req_impl->stream);
1550
1551 nxt_unit_websocket_frame_release(&ws_impl->ws);
1552
1553 return NXT_UNIT_ERROR;
1554 }
1555
1556 b->req = req;
1557 b->buf.start = recv_msg->start;
1558 b->buf.free = b->buf.start;
1559 b->buf.end = b->buf.start + recv_msg->size;
1560
1561 nxt_unit_mmap_buf_insert(&ws_impl->buf, b);
1562 }
1563
1564 ws_impl->ws.header = (void *) b->buf.start;
1565 ws_impl->ws.payload_len = nxt_websocket_frame_payload_len(
1566 ws_impl->ws.header);
1567
1568 hsize = nxt_websocket_frame_header_size(ws_impl->ws.header);
1569
1570 if (ws_impl->ws.header->mask) {
1571 ws_impl->ws.mask = (uint8_t *) b->buf.start + hsize - 4;
1572
1573 } else {
1574 ws_impl->ws.mask = NULL;
1575 }
1576
1577 b->buf.free += hsize;
1578
1579 ws_impl->ws.content_buf = &b->buf;
1580 ws_impl->ws.content_length = ws_impl->ws.payload_len;
1581
1582 nxt_unit_req_debug(req, "websocket_handler: opcode=%d, "
1583 "payload_len=%"PRIu64,
1584 ws_impl->ws.header->opcode,
1585 ws_impl->ws.payload_len);
1586
1587 cb->websocket_handler(&ws_impl->ws);
1588 }
1589
1590 if (recv_msg->last) {
1591 req_impl->websocket = 0;
1592
1593 if (cb->close_handler) {
1594 nxt_unit_req_debug(req, "close_handler");
1595
1596 cb->close_handler(req);
1597
1598 } else {
1599 nxt_unit_request_done(req, NXT_UNIT_ERROR);
1600 }
1601 }
1602
1603 return NXT_UNIT_OK;
1604}
1605
1606
1607static int
1608nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx)
1609{
1610 nxt_unit_impl_t *lib;
1611 nxt_unit_callbacks_t *cb;
1612
1613 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1614 cb = &lib->callbacks;
1615
1616 if (cb->shm_ack_handler != NULL) {
1617 cb->shm_ack_handler(ctx);
1618 }
1619
1620 return NXT_UNIT_OK;
1621}
1622
1623
1624static nxt_unit_request_info_impl_t *
1625nxt_unit_request_info_get(nxt_unit_ctx_t *ctx)
1626{
1627 nxt_unit_impl_t *lib;
1628 nxt_queue_link_t *lnk;
1629 nxt_unit_ctx_impl_t *ctx_impl;
1630 nxt_unit_request_info_impl_t *req_impl;
1631
1632 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
1633
1634 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1635
1636 pthread_mutex_lock(&ctx_impl->mutex);
1637
1638 if (nxt_queue_is_empty(&ctx_impl->free_req)) {
1639 pthread_mutex_unlock(&ctx_impl->mutex);
1640
1641 req_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_request_info_impl_t)
1642 + lib->request_data_size);
1643 if (nxt_slow_path(req_impl == NULL)) {
1644 return NULL;
1645 }
1646
1647 req_impl->req.unit = ctx->unit;
1648 req_impl->req.ctx = ctx;
1649
1650 pthread_mutex_lock(&ctx_impl->mutex);
1651
1652 } else {
1653 lnk = nxt_queue_first(&ctx_impl->free_req);
1654 nxt_queue_remove(lnk);
1655
1656 req_impl = nxt_container_of(lnk, nxt_unit_request_info_impl_t, link);
1657 }
1658
1659 nxt_queue_insert_tail(&ctx_impl->active_req, &req_impl->link);
1660
1661 pthread_mutex_unlock(&ctx_impl->mutex);
1662
1663 req_impl->req.data = lib->request_data_size ? req_impl->extra_data : NULL;
1664
1665 return req_impl;
1666}
1667
1668
1669static void
1670nxt_unit_request_info_release(nxt_unit_request_info_t *req)
1671{
1672 nxt_unit_ctx_impl_t *ctx_impl;
1673 nxt_unit_request_info_impl_t *req_impl;
1674
1675 ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);
1676 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1677
1678 req->response = NULL;
1679 req->response_buf = NULL;
1680
1681 if (req_impl->in_hash) {
1682 nxt_unit_request_hash_find(req->ctx, req_impl->stream, 1);
1683 }
1684
1685 req_impl->websocket = 0;
1686
1687 while (req_impl->outgoing_buf != NULL) {
1688 nxt_unit_mmap_buf_free(req_impl->outgoing_buf);
1689 }
1690
1691 while (req_impl->incoming_buf != NULL) {
1692 nxt_unit_mmap_buf_free(req_impl->incoming_buf);
1693 }
1694
1695 if (req->content_fd != -1) {
1696 nxt_unit_close(req->content_fd);
1697
1698 req->content_fd = -1;
1699 }
1700
1701 if (req->response_port != NULL) {
1702 nxt_unit_port_release(req->response_port);
1703
1704 req->response_port = NULL;
1705 }
1706
1707 pthread_mutex_lock(&ctx_impl->mutex);
1708
1709 nxt_queue_remove(&req_impl->link);
1710
1711 nxt_queue_insert_tail(&ctx_impl->free_req, &req_impl->link);
1712
1713 pthread_mutex_unlock(&ctx_impl->mutex);
1714
1715 req_impl->state = NXT_UNIT_RS_RELEASED;
1716}
1717
1718
1719static void
1720nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req_impl)
1721{
1722 nxt_unit_ctx_impl_t *ctx_impl;
1723
1724 ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t, ctx);
1725
1726 nxt_queue_remove(&req_impl->link);
1727
1728 if (req_impl != &ctx_impl->req) {
1729 nxt_unit_free(&ctx_impl->ctx, req_impl);
1730 }
1731}
1732
1733
1734static nxt_unit_websocket_frame_impl_t *
1735nxt_unit_websocket_frame_get(nxt_unit_ctx_t *ctx)
1736{
1737 nxt_queue_link_t *lnk;
1738 nxt_unit_ctx_impl_t *ctx_impl;
1739 nxt_unit_websocket_frame_impl_t *ws_impl;
1740
1741 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
1742
1743 pthread_mutex_lock(&ctx_impl->mutex);
1744
1745 if (nxt_queue_is_empty(&ctx_impl->free_ws)) {
1746 pthread_mutex_unlock(&ctx_impl->mutex);
1747
1748 ws_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_websocket_frame_impl_t));
1749 if (nxt_slow_path(ws_impl == NULL)) {
1750 return NULL;
1751 }
1752
1753 } else {
1754 lnk = nxt_queue_first(&ctx_impl->free_ws);
1755 nxt_queue_remove(lnk);
1756
1757 pthread_mutex_unlock(&ctx_impl->mutex);
1758
1759 ws_impl = nxt_container_of(lnk, nxt_unit_websocket_frame_impl_t, link);
1760 }
1761
1762 ws_impl->ctx_impl = ctx_impl;
1763
1764 return ws_impl;
1765}
1766
1767
1768static void
1769nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws)
1770{
1771 nxt_unit_websocket_frame_impl_t *ws_impl;
1772
1773 ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);
1774
1775 while (ws_impl->buf != NULL) {
1776 nxt_unit_mmap_buf_free(ws_impl->buf);
1777 }
1778
1779 ws->req = NULL;
1780
1781 pthread_mutex_lock(&ws_impl->ctx_impl->mutex);
1782
1783 nxt_queue_insert_tail(&ws_impl->ctx_impl->free_ws, &ws_impl->link);
1784
1785 pthread_mutex_unlock(&ws_impl->ctx_impl->mutex);
1786}
1787
1788
1789static void
1790nxt_unit_websocket_frame_free(nxt_unit_ctx_t *ctx,
1791 nxt_unit_websocket_frame_impl_t *ws_impl)
1792{
1793 nxt_queue_remove(&ws_impl->link);
1794
1795 nxt_unit_free(ctx, ws_impl);
1796}
1797
1798
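/*
 * Case-insensitive hash of a header field name; the algorithm and the
 * initial magic value match the field hash used in nxt_http_parse.c
 * (hash = hash * 17 + lowercase(ch), folded to 16 bits).
 */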
1799uint16_t
1800nxt_unit_field_hash(const char *name, size_t name_length)
1801{
1802 u_char ch;
1803 uint32_t hash;
1804 const char *p, *end;
1805
1806 hash = 159406; /* Magic value copied from nxt_http_parse.c */
1807 end = name + name_length;
1808
1809 for (p = name; p < end; p++) {
1810 ch = *p;
1811 hash = (hash << 4) + hash + nxt_lowcase(ch);
1812 }
1813
1814 hash = (hash >> 16) ^ hash;
1815
1816 return hash;
1817}
1818
1819
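/*
 * Reorders request fields so that fields with the same name become
 * adjacent, and records the indexes of the Content-Length, Content-Type
 * and Cookie fields in the request structure.
 */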
1820void
1821nxt_unit_request_group_dup_fields(nxt_unit_request_info_t *req)
1822{
1823 char *name;
1824 uint32_t i, j;
1825 nxt_unit_field_t *fields, f;
1826 nxt_unit_request_t *r;
1827
1828 static nxt_str_t content_length = nxt_string("content-length");
1829 static nxt_str_t content_type = nxt_string("content-type");
1830 static nxt_str_t cookie = nxt_string("cookie");
1831
1832 nxt_unit_req_debug(req, "group_dup_fields");
1833
1834 r = req->request;
1835 fields = r->fields;
1836
1837 for (i = 0; i < r->fields_count; i++) {
1838 name = nxt_unit_sptr_get(&fields[i].name);
1839
1840 switch (fields[i].hash) {
1841 case NXT_UNIT_HASH_CONTENT_LENGTH:
1842 if (fields[i].name_length == content_length.length
1843 && nxt_unit_memcasecmp(name, content_length.start,
1844 content_length.length) == 0)
1845 {
1846 r->content_length_field = i;
1847 }
1848
1849 break;
1850
1851 case NXT_UNIT_HASH_CONTENT_TYPE:
1852 if (fields[i].name_length == content_type.length
1853 && nxt_unit_memcasecmp(name, content_type.start,
1854 content_type.length) == 0)
1855 {
1856 r->content_type_field = i;
1857 }
1858
1859 break;
1860
1861 case NXT_UNIT_HASH_COOKIE:
1862 if (fields[i].name_length == cookie.length
1863 && nxt_unit_memcasecmp(name, cookie.start,
1864 cookie.length) == 0)
1865 {
1866 r->cookie_field = i;
1867 }
1868
1869 break;
1870 }
1871
1872 for (j = i + 1; j < r->fields_count; j++) {
1873 if (fields[i].hash != fields[j].hash
1874 || fields[i].name_length != fields[j].name_length
1875 || nxt_unit_memcasecmp(name,
1876 nxt_unit_sptr_get(&fields[j].name),
1877 fields[j].name_length) != 0)
1878 {
1879 continue;
1880 }
1881
1882 f = fields[j];
1883 f.value.offset += (j - (i + 1)) * sizeof(f);
1884
1885 while (j > i + 1) {
1886 fields[j] = fields[j - 1];
1887 fields[j].name.offset -= sizeof(f);
1888 fields[j].value.offset -= sizeof(f);
1889 j--;
1890 }
1891
1892 fields[j] = f;
1893
1894            /* Reuse the same name pointer to simplify further grouping. */
1895 nxt_unit_sptr_set(&fields[j].name, name);
1896
1897 i++;
1898 }
1899 }
1900}
1901
1902
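/*
 * Starts a response: allocates (or reuses, when large enough) a response
 * buffer sized for the response header, max_fields_count field entries and
 * max_fields_size bytes of null-terminated names and values, and stores
 * the status code.
 */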
1903int
1904nxt_unit_response_init(nxt_unit_request_info_t *req,
1905 uint16_t status, uint32_t max_fields_count, uint32_t max_fields_size)
1906{
1907 uint32_t buf_size;
1908 nxt_unit_buf_t *buf;
1909 nxt_unit_request_info_impl_t *req_impl;
1910
1911 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1912
1913 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
1914 nxt_unit_req_warn(req, "init: response already sent");
1915
1916 return NXT_UNIT_ERROR;
1917 }
1918
1919 nxt_unit_req_debug(req, "init: %d, max fields %d/%d", (int) status,
1920 (int) max_fields_count, (int) max_fields_size);
1921
1922 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT)) {
1923 nxt_unit_req_debug(req, "duplicate response init");
1924 }
1925
1926 /*
1927     * Each field name and value is null-terminated by libunit;
1928     * this is the reason for the '+ 2' below.
1929 */
1930 buf_size = sizeof(nxt_unit_response_t)
1931 + max_fields_count * (sizeof(nxt_unit_field_t) + 2)
1932 + max_fields_size;
1933
1934 if (nxt_slow_path(req->response_buf != NULL)) {
1935 buf = req->response_buf;
1936
1937 if (nxt_fast_path(buf_size <= (uint32_t) (buf->end - buf->start))) {
1938 goto init_response;
1939 }
1940
1941 nxt_unit_buf_free(buf);
1942
1943 req->response_buf = NULL;
1944 req->response = NULL;
1945 req->response_max_fields = 0;
1946
1947 req_impl->state = NXT_UNIT_RS_START;
1948 }
1949
1950 buf = nxt_unit_response_buf_alloc(req, buf_size);
1951 if (nxt_slow_path(buf == NULL)) {
1952 return NXT_UNIT_ERROR;
1953 }
1954
1955init_response:
1956
1957 memset(buf->start, 0, sizeof(nxt_unit_response_t));
1958
1959 req->response_buf = buf;
1960
1961 req->response = (nxt_unit_response_t *) buf->start;
1962 req->response->status = status;
1963
1964 buf->free = buf->start + sizeof(nxt_unit_response_t)
1965 + max_fields_count * sizeof(nxt_unit_field_t);
1966
1967 req->response_max_fields = max_fields_count;
1968 req_impl->state = NXT_UNIT_RS_RESPONSE_INIT;
1969
1970 return NXT_UNIT_OK;
1971}
1972
1973
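/*
 * Re-allocates the response buffer with new field count/size limits and
 * copies the status, the already added (non-skipped) fields and any
 * piggyback content into it, then frees the old buffer.
 */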
1974int
1975nxt_unit_response_realloc(nxt_unit_request_info_t *req,
1976 uint32_t max_fields_count, uint32_t max_fields_size)
1977{
1978 char *p;
1979 uint32_t i, buf_size;
1980 nxt_unit_buf_t *buf;
1981 nxt_unit_field_t *f, *src;
1982 nxt_unit_response_t *resp;
1983 nxt_unit_request_info_impl_t *req_impl;
1984
1985 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1986
1987 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
1988 nxt_unit_req_warn(req, "realloc: response not init");
1989
1990 return NXT_UNIT_ERROR;
1991 }
1992
1993 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
1994 nxt_unit_req_warn(req, "realloc: response already sent");
1995
1996 return NXT_UNIT_ERROR;
1997 }
1998
1999 if (nxt_slow_path(max_fields_count < req->response->fields_count)) {
2000 nxt_unit_req_warn(req, "realloc: new max_fields_count is too small");
2001
2002 return NXT_UNIT_ERROR;
2003 }
2004
2005 /*
2006     * Each field name and value is null-terminated by libunit;
2007     * this is the reason for the '+ 2' below.
2008 */
2009 buf_size = sizeof(nxt_unit_response_t)
2010 + max_fields_count * (sizeof(nxt_unit_field_t) + 2)
2011 + max_fields_size;
2012
2013 nxt_unit_req_debug(req, "realloc %"PRIu32"", buf_size);
2014
2015 buf = nxt_unit_response_buf_alloc(req, buf_size);
2016 if (nxt_slow_path(buf == NULL)) {
2017 nxt_unit_req_warn(req, "realloc: new buf allocation failed");
2018 return NXT_UNIT_ERROR;
2019 }
2020
2021 resp = (nxt_unit_response_t *) buf->start;
2022
2023 memset(resp, 0, sizeof(nxt_unit_response_t));
2024
2025 resp->status = req->response->status;
2026 resp->content_length = req->response->content_length;
2027
2028 p = buf->start + max_fields_count * sizeof(nxt_unit_field_t);
2029 f = resp->fields;
2030
2031 for (i = 0; i < req->response->fields_count; i++) {
2032 src = req->response->fields + i;
2033
2034 if (nxt_slow_path(src->skip != 0)) {
2035 continue;
2036 }
2037
2038 if (nxt_slow_path(src->name_length + src->value_length + 2
2039 > (uint32_t) (buf->end - p)))
2040 {
2041 nxt_unit_req_warn(req, "realloc: not enough space for field"
2042 " #%"PRIu32" (%p), (%"PRIu32" + %"PRIu32") required",
2043 i, src, src->name_length, src->value_length);
2044
2045 goto fail;
2046 }
2047
2048 nxt_unit_sptr_set(&f->name, p);
2049 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->name), src->name_length);
2050 *p++ = '\0';
2051
2052 nxt_unit_sptr_set(&f->value, p);
2053 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->value), src->value_length);
2054 *p++ = '\0';
2055
2056 f->hash = src->hash;
2057 f->skip = 0;
2058 f->name_length = src->name_length;
2059 f->value_length = src->value_length;
2060
2061 resp->fields_count++;
2062 f++;
2063 }
2064
2065 if (req->response->piggyback_content_length > 0) {
2066 if (nxt_slow_path(req->response->piggyback_content_length
2067 > (uint32_t) (buf->end - p)))
2068 {
2069             nxt_unit_req_warn(req, "realloc: not enough space for content"
2070 " #%"PRIu32", %"PRIu32" required",
2071 i, req->response->piggyback_content_length);
2072
2073 goto fail;
2074 }
2075
2076 resp->piggyback_content_length =
2077 req->response->piggyback_content_length;
2078
2079 nxt_unit_sptr_set(&resp->piggyback_content, p);
2080 p = nxt_cpymem(p, nxt_unit_sptr_get(&req->response->piggyback_content),
2081 req->response->piggyback_content_length);
2082 }
2083
2084 buf->free = p;
2085
2086 nxt_unit_buf_free(req->response_buf);
2087
2088 req->response = resp;
2089 req->response_buf = buf;
2090 req->response_max_fields = max_fields_count;
2091
2092 return NXT_UNIT_OK;
2093
2094fail:
2095
2096 nxt_unit_buf_free(buf);
2097
2098 return NXT_UNIT_ERROR;
2099}
2100
2101
2102int
2103nxt_unit_response_is_init(nxt_unit_request_info_t *req)
2104{
2105 nxt_unit_request_info_impl_t *req_impl;
2106
2107 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2108
2109 return req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT;
2110}
2111
2112
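/*
 * Appends one header field to the response being built.  The response
 * must be in the RESPONSE_INIT state, there must be a free slot among
 * the "max_fields_count" slots requested at init time, and the buffer
 * must have room for the name, the value, and their two terminating
 * null bytes.
 *
 * Illustrative usage sketch (error handling omitted):
 *
 *     nxt_unit_response_init(req, 200, 1,
 *                            nxt_length("Content-Type")
 *                            + nxt_length("text/plain"));
 *     nxt_unit_response_add_field(req, "Content-Type",
 *                                 nxt_length("Content-Type"),
 *                                 "text/plain", nxt_length("text/plain"));
 *     nxt_unit_response_send(req);
 */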
2113int
2114nxt_unit_response_add_field(nxt_unit_request_info_t *req,
2115 const char *name, uint8_t name_length,
2116 const char *value, uint32_t value_length)
2117{
2118 nxt_unit_buf_t *buf;
2119 nxt_unit_field_t *f;
2120 nxt_unit_response_t *resp;
2121 nxt_unit_request_info_impl_t *req_impl;
2122
2123 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2124
2125 if (nxt_slow_path(req_impl->state != NXT_UNIT_RS_RESPONSE_INIT)) {
2126 nxt_unit_req_warn(req, "add_field: response not initialized or "
2127 "already sent");
2128
2129 return NXT_UNIT_ERROR;
2130 }
2131
2132 resp = req->response;
2133
2134 if (nxt_slow_path(resp->fields_count >= req->response_max_fields)) {
2135 nxt_unit_req_warn(req, "add_field: too many response fields");
2136
2137 return NXT_UNIT_ERROR;
2138 }
2139
2140 buf = req->response_buf;
2141
2142 if (nxt_slow_path(name_length + value_length + 2
2143 > (uint32_t) (buf->end - buf->free)))
2144 {
2145 nxt_unit_req_warn(req, "add_field: response buffer overflow");
2146
2147 return NXT_UNIT_ERROR;
2148 }
2149
2150 nxt_unit_req_debug(req, "add_field #%"PRIu32": %.*s: %.*s",
2151 resp->fields_count,
2152 (int) name_length, name,
2153 (int) value_length, value);
2154
2155 f = resp->fields + resp->fields_count;
2156
2157 nxt_unit_sptr_set(&f->name, buf->free);
2158 buf->free = nxt_cpymem(buf->free, name, name_length);
2159 *buf->free++ = '\0';
2160
2161 nxt_unit_sptr_set(&f->value, buf->free);
2162 buf->free = nxt_cpymem(buf->free, value, value_length);
2163 *buf->free++ = '\0';
2164
2165 f->hash = nxt_unit_field_hash(name, name_length);
2166 f->skip = 0;
2167 f->name_length = name_length;
2168 f->value_length = value_length;
2169
2170 resp->fields_count++;
2171
2172 return NXT_UNIT_OK;
2173}
2174
2175
2176int
2177nxt_unit_response_add_content(nxt_unit_request_info_t *req,
2178 const void* src, uint32_t size)
2179     const void *src, uint32_t size)
2180 nxt_unit_buf_t *buf;
2181 nxt_unit_response_t *resp;
2182 nxt_unit_request_info_impl_t *req_impl;
2183
2184 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2185
2186 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2187 nxt_unit_req_warn(req, "add_content: response not initialized yet");
2188
2189 return NXT_UNIT_ERROR;
2190 }
2191
2192 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
2193 nxt_unit_req_warn(req, "add_content: response already sent");
2194
2195 return NXT_UNIT_ERROR;
2196 }
2197
2198 buf = req->response_buf;
2199
2200 if (nxt_slow_path(size > (uint32_t) (buf->end - buf->free))) {
2201 nxt_unit_req_warn(req, "add_content: buffer overflow");
2202
2203 return NXT_UNIT_ERROR;
2204 }
2205
2206 resp = req->response;
2207
2208 if (resp->piggyback_content_length == 0) {
2209 nxt_unit_sptr_set(&resp->piggyback_content, buf->free);
2210 req_impl->state = NXT_UNIT_RS_RESPONSE_HAS_CONTENT;
2211 }
2212
2213 resp->piggyback_content_length += size;
2214
2215 buf->free = nxt_cpymem(buf->free, src, size);
2216
2217 return NXT_UNIT_OK;
2218}
2219
2220
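/*
 * Sends the prepared response header buffer (and any piggyback content)
 * to the router.  For a 101 response to a WebSocket handshake request
 * the connection is upgraded first.  On success the response buffer is
 * released and the request state advances to RESPONSE_SENT.
 */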
2221int
2222nxt_unit_response_send(nxt_unit_request_info_t *req)
2223{
2224 int rc;
2225 nxt_unit_mmap_buf_t *mmap_buf;
2226 nxt_unit_request_info_impl_t *req_impl;
2227
2228 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2229
2230 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2231 nxt_unit_req_warn(req, "send: response is not initialized yet");
2232
2233 return NXT_UNIT_ERROR;
2234 }
2235
2236 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
2237 nxt_unit_req_warn(req, "send: response already sent");
2238
2239 return NXT_UNIT_ERROR;
2240 }
2241
2242 if (req->request->websocket_handshake && req->response->status == 101) {
2243 nxt_unit_response_upgrade(req);
2244 }
2245
2246 nxt_unit_req_debug(req, "send: %"PRIu32" fields, %d bytes",
2247 req->response->fields_count,
2248 (int) (req->response_buf->free
2249 - req->response_buf->start));
2250
2251 mmap_buf = nxt_container_of(req->response_buf, nxt_unit_mmap_buf_t, buf);
2252
2253 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0);
2254 if (nxt_fast_path(rc == NXT_UNIT_OK)) {
2255 req->response = NULL;
2256 req->response_buf = NULL;
2257 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT;
2258
2259 nxt_unit_mmap_buf_free(mmap_buf);
2260 }
2261
2262 return rc;
2263}
2264
2265
2266int
2267nxt_unit_response_is_sent(nxt_unit_request_info_t *req)
2268{
2269 nxt_unit_request_info_impl_t *req_impl;
2270
2271 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2272
2273 return req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT;
2274}
2275
2276
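/*
 * Allocates an outgoing buffer of "size" bytes (at most
 * PORT_MMAP_DATA_SIZE) tied to the request.  The buffer is linked into
 * the request's outgoing list and is backed either by shared memory or,
 * for small sizes, by a plain malloc()ed area.
 */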
2277nxt_unit_buf_t *
2278nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size)
2279{
2280 int rc;
2281 nxt_unit_mmap_buf_t *mmap_buf;
2282 nxt_unit_request_info_impl_t *req_impl;
2283
2284 if (nxt_slow_path(size > PORT_MMAP_DATA_SIZE)) {
2285 nxt_unit_req_warn(req, "response_buf_alloc: "
2286 "requested buffer (%"PRIu32") too big", size);
2287
2288 return NULL;
2289 }
2290
2291 nxt_unit_req_debug(req, "response_buf_alloc: %"PRIu32, size);
2292
2293 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2294
2295 mmap_buf = nxt_unit_mmap_buf_get(req->ctx);
2296 if (nxt_slow_path(mmap_buf == NULL)) {
2297 nxt_unit_req_alert(req, "response_buf_alloc: failed to allocate buf");
2298
2299 return NULL;
2300 }
2301
2302 mmap_buf->req = req;
2303
2304 nxt_unit_mmap_buf_insert_tail(&req_impl->outgoing_buf, mmap_buf);
2305
2306 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
2307 size, size, mmap_buf,
2308 NULL);
2309 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2310 nxt_unit_mmap_buf_release(mmap_buf);
2311
2312 return NULL;
2313 }
2314
2315 return &mmap_buf->buf;
2316}
2317
2318
2319static nxt_unit_mmap_buf_t *
2320nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx)
2321{
2322 nxt_unit_mmap_buf_t *mmap_buf;
2323 nxt_unit_ctx_impl_t *ctx_impl;
2324
2325 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
2326
2327 pthread_mutex_lock(&ctx_impl->mutex);
2328
2329 if (ctx_impl->free_buf == NULL) {
2330 pthread_mutex_unlock(&ctx_impl->mutex);
2331
2332 mmap_buf = nxt_unit_malloc(ctx, sizeof(nxt_unit_mmap_buf_t));
2333 if (nxt_slow_path(mmap_buf == NULL)) {
2334 return NULL;
2335 }
2336
2337 } else {
2338 mmap_buf = ctx_impl->free_buf;
2339
2340 nxt_unit_mmap_buf_unlink(mmap_buf);
2341
2342 pthread_mutex_unlock(&ctx_impl->mutex);
2343 }
2344
2345 mmap_buf->ctx_impl = ctx_impl;
2346
2347 mmap_buf->hdr = NULL;
2348 mmap_buf->free_ptr = NULL;
2349
2350 return mmap_buf;
2351}
2352
2353
2354static void
2355nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf)
2356{
2357 nxt_unit_mmap_buf_unlink(mmap_buf);
2358
2359 pthread_mutex_lock(&mmap_buf->ctx_impl->mutex);
2360
2361 nxt_unit_mmap_buf_insert(&mmap_buf->ctx_impl->free_buf, mmap_buf);
2362
2363 pthread_mutex_unlock(&mmap_buf->ctx_impl->mutex);
2364}
2365
2366
2367int
2368nxt_unit_request_is_websocket_handshake(nxt_unit_request_info_t *req)
2369{
2370 return req->request->websocket_handshake;
2371}
2372
2373
2374int
2375nxt_unit_response_upgrade(nxt_unit_request_info_t *req)
2376{
2377 int rc;
2378 nxt_unit_request_info_impl_t *req_impl;
2379
2380 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2381
2382 if (nxt_slow_path(req_impl->websocket != 0)) {
2383 nxt_unit_req_debug(req, "upgrade: already upgraded");
2384
2385 return NXT_UNIT_OK;
2386 }
2387
2388 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2389 nxt_unit_req_warn(req, "upgrade: response is not initialized yet");
2390
2391 return NXT_UNIT_ERROR;
2392 }
2393
2394 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
2395 nxt_unit_req_warn(req, "upgrade: response already sent");
2396
2397 return NXT_UNIT_ERROR;
2398 }
2399
2400 rc = nxt_unit_request_hash_add(req->ctx, req);
2401 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2402 nxt_unit_req_warn(req, "upgrade: failed to add request to hash");
2403
2404 return NXT_UNIT_ERROR;
2405 }
2406
2407 req_impl->websocket = 1;
2408
2409 req->response->status = 101;
2410
2411 return NXT_UNIT_OK;
2412}
2413
2414
2415int
2416nxt_unit_response_is_websocket(nxt_unit_request_info_t *req)
2417{
2418 nxt_unit_request_info_impl_t *req_impl;
2419
2420 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2421
2422 return req_impl->websocket;
2423}
2424
2425
2426nxt_unit_request_info_t *
2427nxt_unit_get_request_info_from_data(void *data)
2428{
2429 nxt_unit_request_info_impl_t *req_impl;
2430
2431 req_impl = nxt_container_of(data, nxt_unit_request_info_impl_t, extra_data);
2432
2433 return &req_impl->req;
2434}
2435
2436
2437int
2438nxt_unit_buf_send(nxt_unit_buf_t *buf)
2439{
2440 int rc;
2441 nxt_unit_mmap_buf_t *mmap_buf;
2442 nxt_unit_request_info_t *req;
2443 nxt_unit_request_info_impl_t *req_impl;
2444
2445 mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);
2446
2447 req = mmap_buf->req;
2448 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2449
2450 nxt_unit_req_debug(req, "buf_send: %d bytes",
2451 (int) (buf->free - buf->start));
2452
2453 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2454 nxt_unit_req_warn(req, "buf_send: response not initialized yet");
2455
2456 return NXT_UNIT_ERROR;
2457 }
2458
2459 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
2460 nxt_unit_req_warn(req, "buf_send: headers not sent yet");
2461
2462 return NXT_UNIT_ERROR;
2463 }
2464
2465 if (nxt_fast_path(buf->free > buf->start)) {
2466 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0);
2467 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2468 return rc;
2469 }
2470 }
2471
2472 nxt_unit_mmap_buf_free(mmap_buf);
2473
2474 return NXT_UNIT_OK;
2475}
2476
2477
2478static void
2479nxt_unit_buf_send_done(nxt_unit_buf_t *buf)
2480{
2481 int rc;
2482 nxt_unit_mmap_buf_t *mmap_buf;
2483 nxt_unit_request_info_t *req;
2484
2485 mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);
2486
2487 req = mmap_buf->req;
2488
2489 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 1);
2490 if (nxt_slow_path(rc == NXT_UNIT_OK)) {
2491 nxt_unit_mmap_buf_free(mmap_buf);
2492
2493 nxt_unit_request_info_release(req);
2494
2495 } else {
2496 nxt_unit_request_done(req, rc);
2497 }
2498}
2499
2500
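/*
 * Sends the buffer content to the request's response port.  Data placed
 * in shared memory is announced with a small "mmap" message referencing
 * the mmap id, chunk id and size; small plain buffers are sent inline,
 * with the port message header copied into the space reserved right
 * before buf->start.  The outgoing buffer is released in both cases.
 */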
2501static int
2502nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req,
2503 nxt_unit_mmap_buf_t *mmap_buf, int last)
2504{
2505 struct {
2506 nxt_port_msg_t msg;
2507 nxt_port_mmap_msg_t mmap_msg;
2508 } m;
2509
2510 int rc;
2511 u_char *last_used, *first_free;
2512 ssize_t res;
2513 nxt_chunk_id_t first_free_chunk;
2514 nxt_unit_buf_t *buf;
2515 nxt_unit_impl_t *lib;
2516 nxt_port_mmap_header_t *hdr;
2517 nxt_unit_request_info_impl_t *req_impl;
2518
2519 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
2520 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2521
2522 buf = &mmap_buf->buf;
2523 hdr = mmap_buf->hdr;
2524
2525 m.mmap_msg.size = buf->free - buf->start;
2526
2527 m.msg.stream = req_impl->stream;
2528 m.msg.pid = lib->pid;
2529 m.msg.reply_port = 0;
2530 m.msg.type = _NXT_PORT_MSG_DATA;
2531 m.msg.last = last != 0;
2532 m.msg.mmap = hdr != NULL && m.mmap_msg.size > 0;
2533 m.msg.nf = 0;
2534 m.msg.mf = 0;
2535 m.msg.tracking = 0;
2536
2537 rc = NXT_UNIT_ERROR;
2538
2539 if (m.msg.mmap) {
2540 m.mmap_msg.mmap_id = hdr->id;
2541 m.mmap_msg.chunk_id = nxt_port_mmap_chunk_id(hdr,
2542 (u_char *) buf->start);
2543
2544 nxt_unit_debug(req->ctx, "#%"PRIu32": send mmap: (%d,%d,%d)",
2545 req_impl->stream,
2546 (int) m.mmap_msg.mmap_id,
2547 (int) m.mmap_msg.chunk_id,
2548 (int) m.mmap_msg.size);
2549
2550 res = nxt_unit_port_send(req->ctx, req->response_port, &m, sizeof(m),
2551 NULL, 0);
2552 if (nxt_slow_path(res != sizeof(m))) {
2553 goto free_buf;
2554 }
2555
2556 last_used = (u_char *) buf->free - 1;
2557 first_free_chunk = nxt_port_mmap_chunk_id(hdr, last_used) + 1;
2558
2559 if (buf->end - buf->free >= PORT_MMAP_CHUNK_SIZE) {
2560 first_free = nxt_port_mmap_chunk_start(hdr, first_free_chunk);
2561
2562 buf->start = (char *) first_free;
2563 buf->free = buf->start;
2564
2565 if (buf->end < buf->start) {
2566 buf->end = buf->start;
2567 }
2568
2569 } else {
2570 buf->start = NULL;
2571 buf->free = NULL;
2572 buf->end = NULL;
2573
2574 mmap_buf->hdr = NULL;
2575 }
2576
2577 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks,
2578 (int) m.mmap_msg.chunk_id - (int) first_free_chunk);
2579
2580 nxt_unit_debug(req->ctx, "allocated_chunks %d",
2581 (int) lib->outgoing.allocated_chunks);
2582
2583 } else {
2584 if (nxt_slow_path(mmap_buf->plain_ptr == NULL
2585 || mmap_buf->plain_ptr > buf->start - sizeof(m.msg)))
2586 {
2587 nxt_unit_alert(req->ctx,
2588 "#%"PRIu32": failed to send plain memory buffer"
2589 ": no space reserved for message header",
2590 req_impl->stream);
2591
2592 goto free_buf;
2593 }
2594
2595 memcpy(buf->start - sizeof(m.msg), &m.msg, sizeof(m.msg));
2596
2597 nxt_unit_debug(req->ctx, "#%"PRIu32": send plain: %d",
2598 req_impl->stream,
2599 (int) (sizeof(m.msg) + m.mmap_msg.size));
2600
2601 res = nxt_unit_port_send(req->ctx, req->response_port,
2602 buf->start - sizeof(m.msg),
2603 m.mmap_msg.size + sizeof(m.msg),
2604 NULL, 0);
2605 if (nxt_slow_path(res != (ssize_t) (m.mmap_msg.size + sizeof(m.msg)))) {
2606 goto free_buf;
2607 }
2608 }
2609
2610 rc = NXT_UNIT_OK;
2611
2612free_buf:
2613
2614 nxt_unit_free_outgoing_buf(mmap_buf);
2615
2616 return rc;
2617}
2618
2619
2620void
2621nxt_unit_buf_free(nxt_unit_buf_t *buf)
2622{
2623 nxt_unit_mmap_buf_free(nxt_container_of(buf, nxt_unit_mmap_buf_t, buf));
2624}
2625
2626
2627static void
2628nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf)
2629{
2630 nxt_unit_free_outgoing_buf(mmap_buf);
2631
2632 nxt_unit_mmap_buf_release(mmap_buf);
2633}
2634
2635
2636static void
2637nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf)
2638{
2639 if (mmap_buf->hdr != NULL) {
2640 nxt_unit_mmap_release(&mmap_buf->ctx_impl->ctx,
2641 mmap_buf->hdr, mmap_buf->buf.start,
2642 mmap_buf->buf.end - mmap_buf->buf.start);
2643
2644 mmap_buf->hdr = NULL;
2645
2646 return;
2647 }
2648
2649 if (mmap_buf->free_ptr != NULL) {
2650 nxt_unit_free(&mmap_buf->ctx_impl->ctx, mmap_buf->free_ptr);
2651
2652 mmap_buf->free_ptr = NULL;
2653 }
2654}
2655
2656
2657static nxt_unit_read_buf_t *
2658nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx)
2659{
2660 nxt_unit_ctx_impl_t *ctx_impl;
2661 nxt_unit_read_buf_t *rbuf;
2662
2663 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
2664
2665 pthread_mutex_lock(&ctx_impl->mutex);
2666
2667 rbuf = nxt_unit_read_buf_get_impl(ctx_impl);
2668
2669 pthread_mutex_unlock(&ctx_impl->mutex);
2670
2671 memset(rbuf->oob, 0, sizeof(struct cmsghdr));
2672
2673 return rbuf;
2674}
2675
2676
2677static nxt_unit_read_buf_t *
2678nxt_unit_read_buf_get_impl(nxt_unit_ctx_impl_t *ctx_impl)
2679{
2680 nxt_queue_link_t *link;
2681 nxt_unit_read_buf_t *rbuf;
2682
2683 if (!nxt_queue_is_empty(&ctx_impl->free_rbuf)) {
2684 link = nxt_queue_first(&ctx_impl->free_rbuf);
2685 nxt_queue_remove(link);
2686
2687 rbuf = nxt_container_of(link, nxt_unit_read_buf_t, link);
2688
2689 return rbuf;
2690 }
2691
2692 rbuf = nxt_unit_malloc(&ctx_impl->ctx, sizeof(nxt_unit_read_buf_t));
2693
2694 if (nxt_fast_path(rbuf != NULL)) {
2695 rbuf->ctx_impl = ctx_impl;
2696 }
2697
2698 return rbuf;
2699}
2700
2701
2702static void
2703nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
2704 nxt_unit_read_buf_t *rbuf)
2705{
2706 nxt_unit_ctx_impl_t *ctx_impl;
2707
2708 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
2709
2710 pthread_mutex_lock(&ctx_impl->mutex);
2711
2712 nxt_queue_insert_head(&ctx_impl->free_rbuf, &rbuf->link);
2713
2714 pthread_mutex_unlock(&ctx_impl->mutex);
2715}
2716
2717
2718nxt_unit_buf_t *
2719nxt_unit_buf_next(nxt_unit_buf_t *buf)
2720{
2721 nxt_unit_mmap_buf_t *mmap_buf;
2722
2723 mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);
2724
2725 if (mmap_buf->next == NULL) {
2726 return NULL;
2727 }
2728
2729 return &mmap_buf->next->buf;
2730}
2731
2732
2733uint32_t
2734nxt_unit_buf_max(void)
2735{
2736 return PORT_MMAP_DATA_SIZE;
2737}
2738
2739
2740uint32_t
2741nxt_unit_buf_min(void)
2742{
2743 return PORT_MMAP_CHUNK_SIZE;
2744}
2745
2746
2747int
2748nxt_unit_response_write(nxt_unit_request_info_t *req, const void *start,
2749 size_t size)
2750{
2751 ssize_t res;
2752
2753 res = nxt_unit_response_write_nb(req, start, size, size);
2754
2755 return res < 0 ? -res : NXT_UNIT_OK;
2756}
2757
2758
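/*
 * Non-blocking body write: first completes and sends the response
 * header buffer if it has not been sent yet, then streams the rest of
 * the data in chunks of up to PORT_MMAP_DATA_SIZE.  Returns the number
 * of bytes consumed, or a negated NXT_UNIT_* error code on failure.
 */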
2759ssize_t
2760nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start,
2761 size_t size, size_t min_size)
2762{
2763 int rc;
2764 ssize_t sent;
2765 uint32_t part_size, min_part_size, buf_size;
2766 const char *part_start;
2767 nxt_unit_mmap_buf_t mmap_buf;
2768 nxt_unit_request_info_impl_t *req_impl;
2769 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE];
2770
2771 nxt_unit_req_debug(req, "write: %d", (int) size);
2772
2773 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2774
2775 part_start = start;
2776 sent = 0;
2777
2778 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2779 nxt_unit_req_alert(req, "write: response not initialized yet");
2780
2781 return -NXT_UNIT_ERROR;
2782 }
2783
2784     /* Check if the response is not sent yet. */
2785 if (nxt_slow_path(req->response_buf != NULL)) {
2786 part_size = req->response_buf->end - req->response_buf->free;
2787 part_size = nxt_min(size, part_size);
2788
2789 rc = nxt_unit_response_add_content(req, part_start, part_size);
2790 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2791 return -rc;
2792 }
2793
2794 rc = nxt_unit_response_send(req);
2795 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2796 return -rc;
2797 }
2798
2799 size -= part_size;
2800 part_start += part_size;
2801 sent += part_size;
2802
2803 min_size -= nxt_min(min_size, part_size);
2804 }
2805
2806 while (size > 0) {
2807 part_size = nxt_min(size, PORT_MMAP_DATA_SIZE);
2808 min_part_size = nxt_min(min_size, part_size);
2809 min_part_size = nxt_min(min_part_size, PORT_MMAP_CHUNK_SIZE);
2810
2811 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, part_size,
2812 min_part_size, &mmap_buf, local_buf);
2813 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2814 return -rc;
2815 }
2816
2817 buf_size = mmap_buf.buf.end - mmap_buf.buf.free;
2818 if (nxt_slow_path(buf_size == 0)) {
2819 return sent;
2820 }
2821 part_size = nxt_min(buf_size, part_size);
2822
2823 mmap_buf.buf.free = nxt_cpymem(mmap_buf.buf.free,
2824 part_start, part_size);
2825
2826 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
2827 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2828 return -rc;
2829 }
2830
2831 size -= part_size;
2832 part_start += part_size;
2833 sent += part_size;
2834
2835 min_size -= nxt_min(min_size, part_size);
2836 }
2837
2838 return sent;
2839}
2840
2841
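/*
 * Streams response content pulled from the caller-provided read_info
 * callback: the remaining space in the header buffer is filled first
 * (and the headers sent), then fresh outgoing buffers are allocated
 * and sent until read_info->eof is set or an error occurs.
 */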
2842int
2843nxt_unit_response_write_cb(nxt_unit_request_info_t *req,
2844 nxt_unit_read_info_t *read_info)
2845{
2846 int rc;
2847 ssize_t n;
2848 uint32_t buf_size;
2849 nxt_unit_buf_t *buf;
2850 nxt_unit_mmap_buf_t mmap_buf;
2851 nxt_unit_request_info_impl_t *req_impl;
2852 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE];
2853
2854 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2855
2856 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2857 nxt_unit_req_alert(req, "write: response not initialized yet");
2858
2859 return NXT_UNIT_ERROR;
2860 }
2861
2862     /* Check if the response is not sent yet. */
2863 if (nxt_slow_path(req->response_buf != NULL)) {
2864
2865 /* Enable content in headers buf. */
2866 rc = nxt_unit_response_add_content(req, "", 0);
2867 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2868 nxt_unit_req_error(req, "Failed to add piggyback content");
2869
2870 return rc;
2871 }
2872
2873 buf = req->response_buf;
2874
2875 while (buf->end - buf->free > 0) {
2876 n = read_info->read(read_info, buf->free, buf->end - buf->free);
2877 if (nxt_slow_path(n < 0)) {
2878 nxt_unit_req_error(req, "Read error");
2879
2880 return NXT_UNIT_ERROR;
2881 }
2882
2883 /* Manually increase sizes. */
2884 buf->free += n;
2885 req->response->piggyback_content_length += n;
2886
2887 if (read_info->eof) {
2888 break;
2889 }
2890 }
2891
2892 rc = nxt_unit_response_send(req);
2893 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2894 nxt_unit_req_error(req, "Failed to send headers with content");
2895
2896 return rc;
2897 }
2898
2899 if (read_info->eof) {
2900 return NXT_UNIT_OK;
2901 }
2902 }
2903
2904 while (!read_info->eof) {
2905 nxt_unit_req_debug(req, "write_cb, alloc %"PRIu32"",
2906 read_info->buf_size);
2907
2908 buf_size = nxt_min(read_info->buf_size, PORT_MMAP_DATA_SIZE);
2909
2910 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
2911 buf_size, buf_size,
2912 &mmap_buf, local_buf);
2913 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2914 return rc;
2915 }
2916
2917 buf = &mmap_buf.buf;
2918
2919 while (!read_info->eof && buf->end > buf->free) {
2920 n = read_info->read(read_info, buf->free, buf->end - buf->free);
2921 if (nxt_slow_path(n < 0)) {
2922 nxt_unit_req_error(req, "Read error");
2923
2924 nxt_unit_free_outgoing_buf(&mmap_buf);
2925
2926 return NXT_UNIT_ERROR;
2927 }
2928
2929 buf->free += n;
2930 }
2931
2932 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
2933 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2934 nxt_unit_req_error(req, "Failed to send content");
2935
2936 return rc;
2937 }
2938 }
2939
2940 return NXT_UNIT_OK;
2941}
2942
2943
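/*
 * Reads up to "size" bytes of the request body into "dst": buffered
 * content is consumed first; if part of the body was passed via the
 * content_fd file descriptor, the remainder is read from there.
 * Returns the number of bytes read, or a negative value if read(2)
 * fails.
 */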
2944ssize_t
2945nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size)
2946{
2947 ssize_t buf_res, res;
2948
2949 buf_res = nxt_unit_buf_read(&req->content_buf, &req->content_length,
2950 dst, size);
2951
2952 nxt_unit_req_debug(req, "read: %d", (int) buf_res);
2953
2954 if (buf_res < (ssize_t) size && req->content_fd != -1) {
2955 res = read(req->content_fd, dst, size);
2956 if (nxt_slow_path(res < 0)) {
2957 nxt_unit_req_alert(req, "failed to read content: %s (%d)",
2958 strerror(errno), errno);
2959
2960 return res;
2961 }
2962
2963 if (res < (ssize_t) size) {
2964 nxt_unit_close(req->content_fd);
2965
2966 req->content_fd = -1;
2967 }
2968
2969 req->content_length -= res;
2970 size -= res;
2971
2972 dst = nxt_pointer_to(dst, res);
2973
2974 } else {
2975 res = 0;
2976 }
2977
2978 return buf_res + res;
2979}
2980
2981
2982ssize_t
2983nxt_unit_request_readline_size(nxt_unit_request_info_t *req, size_t max_size)
2984{
2985 char *p;
2986 size_t l_size, b_size;
2987 nxt_unit_buf_t *b;
2988 nxt_unit_mmap_buf_t *mmap_buf, *preread_buf;
2989
2990 if (req->content_length == 0) {
2991 return 0;
2992 }
2993
2994 l_size = 0;
2995
2996 b = req->content_buf;
2997
2998 while (b != NULL) {
2999 b_size = b->end - b->free;
3000 p = memchr(b->free, '\n', b_size);
3001
3002 if (p != NULL) {
3003 p++;
3004 l_size += p - b->free;
3005 break;
3006 }
3007
3008 l_size += b_size;
3009
3010 if (max_size <= l_size) {
3011 break;
3012 }
3013
3014 mmap_buf = nxt_container_of(b, nxt_unit_mmap_buf_t, buf);
3015 if (mmap_buf->next == NULL
3016 && req->content_fd != -1
3017 && l_size < req->content_length)
3018 {
3019 preread_buf = nxt_unit_request_preread(req, 16384);
3020 if (nxt_slow_path(preread_buf == NULL)) {
3021 return -1;
3022 }
3023
3024 nxt_unit_mmap_buf_insert(&mmap_buf->next, preread_buf);
3025 }
3026
3027 b = nxt_unit_buf_next(b);
3028 }
3029
3030 return nxt_min(max_size, l_size);
3031}
3032
3033
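/*
 * Reads up to "size" bytes of request body from content_fd into a
 * freshly allocated plain buffer so that callers such as
 * nxt_unit_request_readline_size() can scan body data that was not
 * delivered through shared memory.  Closes content_fd once it is
 * exhausted.
 */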
3034static nxt_unit_mmap_buf_t *
3035nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size)
3036{
3037 ssize_t res;
3038 nxt_unit_mmap_buf_t *mmap_buf;
3039
3040 if (req->content_fd == -1) {
3041 nxt_unit_req_alert(req, "preread: content_fd == -1");
3042 return NULL;
3043 }
3044
3045 mmap_buf = nxt_unit_mmap_buf_get(req->ctx);
3046 if (nxt_slow_path(mmap_buf == NULL)) {
3047 nxt_unit_req_alert(req, "preread: failed to allocate buf");
3048 return NULL;
3049 }
3050
3051 mmap_buf->free_ptr = nxt_unit_malloc(req->ctx, size);
3052 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) {
3053 nxt_unit_req_alert(req, "preread: failed to allocate buf memory");
3054 nxt_unit_mmap_buf_release(mmap_buf);
3055 return NULL;
3056 }
3057
3058 mmap_buf->plain_ptr = mmap_buf->free_ptr;
3059
3060 mmap_buf->hdr = NULL;
3061 mmap_buf->buf.start = mmap_buf->free_ptr;
3062 mmap_buf->buf.free = mmap_buf->buf.start;
3063 mmap_buf->buf.end = mmap_buf->buf.start + size;
3064
3065 res = read(req->content_fd, mmap_buf->free_ptr, size);
3066 if (res < 0) {
3067 nxt_unit_req_alert(req, "failed to read content: %s (%d)",
3068 strerror(errno), errno);
3069
3070 nxt_unit_mmap_buf_free(mmap_buf);
3071
3072 return NULL;
3073 }
3074
3075 if (res < (ssize_t) size) {
3076 nxt_unit_close(req->content_fd);
3077
3078 req->content_fd = -1;
3079 }
3080
3081 nxt_unit_req_debug(req, "preread: read %d", (int) res);
3082
3083 mmap_buf->buf.end = mmap_buf->buf.free + res;
3084
3085 return mmap_buf;
3086}
3087
3088
3089static ssize_t
3090nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, size_t size)
3091{
3092 u_char *p;
3093 size_t rest, copy, read;
3094 nxt_unit_buf_t *buf, *last_buf;
3095
3096 p = dst;
3097 rest = size;
3098
3099 buf = *b;
3100 last_buf = buf;
3101
3102 while (buf != NULL) {
3103 last_buf = buf;
3104
3105 copy = buf->end - buf->free;
3106 copy = nxt_min(rest, copy);
3107
3108 p = nxt_cpymem(p, buf->free, copy);
3109
3110 buf->free += copy;
3111 rest -= copy;
3112
3113 if (rest == 0) {
3114 if (buf->end == buf->free) {
3115 buf = nxt_unit_buf_next(buf);
3116 }
3117
3118 break;
3119 }
3120
3121 buf = nxt_unit_buf_next(buf);
3122 }
3123
3124 *b = last_buf;
3125
3126 read = size - rest;
3127
3128 *len -= read;
3129
3130 return read;
3131}
3132
3133
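/*
 * Finishes request processing.  If the application has not produced a
 * response, a default "200, Content-Type: text/plain" header is built;
 * if headers were initialized but not sent, they are flushed with the
 * "last" flag.  Otherwise an empty last DATA (or RPC_ERROR on failure)
 * message is sent to the router and the request is released.
 */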
3134void
3135nxt_unit_request_done(nxt_unit_request_info_t *req, int rc)
3136{
3137 uint32_t size;
3138 nxt_port_msg_t msg;
3139 nxt_unit_impl_t *lib;
3140 nxt_unit_request_info_impl_t *req_impl;
3141
3142 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
3143
3144 nxt_unit_req_debug(req, "done: %d", rc);
3145
3146 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3147 goto skip_response_send;
3148 }
3149
3150 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
3151
3152 size = nxt_length("Content-Type") + nxt_length("text/plain");
3153
3154 rc = nxt_unit_response_init(req, 200, 1, size);
3155 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3156 goto skip_response_send;
3157 }
3158
3159 rc = nxt_unit_response_add_field(req, "Content-Type",
3160 nxt_length("Content-Type"),
3161 "text/plain", nxt_length("text/plain"));
3162 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3163 goto skip_response_send;
3164 }
3165 }
3166
3167 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
3168
3169 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT;
3170
3171 nxt_unit_buf_send_done(req->response_buf);
3172
3173 return;
3174 }
3175
3176skip_response_send:
3177
3178 lib = nxt_container_of(req->unit, nxt_unit_impl_t, unit);
3179
3180 msg.stream = req_impl->stream;
3181 msg.pid = lib->pid;
3182 msg.reply_port = 0;
3183 msg.type = (rc == NXT_UNIT_OK) ? _NXT_PORT_MSG_DATA
3184 : _NXT_PORT_MSG_RPC_ERROR;
3185 msg.last = 1;
3186 msg.mmap = 0;
3187 msg.nf = 0;
3188 msg.mf = 0;
3189 msg.tracking = 0;
3190
3191 (void) nxt_unit_port_send(req->ctx, req->response_port,
3192 &msg, sizeof(msg), NULL, 0);
3193
3194 nxt_unit_request_info_release(req);
3195}
3196
3197
3198int
3199nxt_unit_websocket_send(nxt_unit_request_info_t *req, uint8_t opcode,
3200 uint8_t last, const void *start, size_t size)
3201{
3202 const struct iovec iov = { (void *) start, size };
3203
3204 return nxt_unit_websocket_sendv(req, opcode, last, &iov, 1);
3205}
3206
3207
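/*
 * Sends one WebSocket frame whose payload is gathered from "iovcnt"
 * iovec entries.  The frame header is written into the first outgoing
 * buffer; the payload is copied and flushed buffer by buffer, each at
 * most PORT_MMAP_DATA_SIZE bytes.
 */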
3208int
3209nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode,
3210 uint8_t last, const struct iovec *iov, int iovcnt)
3211{
3212 int i, rc;
3213 size_t l, copy;
3214 uint32_t payload_len, buf_size, alloc_size;
3215 const uint8_t *b;
3216 nxt_unit_buf_t *buf;
3217 nxt_unit_mmap_buf_t mmap_buf;
3218 nxt_websocket_header_t *wh;
3219 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE];
3220
3221 payload_len = 0;
3222
3223 for (i = 0; i < iovcnt; i++) {
3224 payload_len += iov[i].iov_len;
3225 }
3226
3227 buf_size = 10 + payload_len;
3228 alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);
3229
3230 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
3231 alloc_size, alloc_size,
3232 &mmap_buf, local_buf);
3233 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3234 return rc;
3235 }
3236
3237 buf = &mmap_buf.buf;
3238
3239 buf->start[0] = 0;
3240 buf->start[1] = 0;
3241
3242 buf_size -= buf->end - buf->start;
3243
3244 wh = (void *) buf->free;
3245
3246 buf->free = nxt_websocket_frame_init(wh, payload_len);
3247 wh->fin = last;
3248 wh->opcode = opcode;
3249
3250 for (i = 0; i < iovcnt; i++) {
3251 b = iov[i].iov_base;
3252 l = iov[i].iov_len;
3253
3254 while (l > 0) {
3255 copy = buf->end - buf->free;
3256 copy = nxt_min(l, copy);
3257
3258 buf->free = nxt_cpymem(buf->free, b, copy);
3259 b += copy;
3260 l -= copy;
3261
3262 if (l > 0) {
3263 if (nxt_fast_path(buf->free > buf->start)) {
3264 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
3265
3266 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3267 return rc;
3268 }
3269 }
3270
3271 alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);
3272
3273 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
3274 alloc_size, alloc_size,
3275 &mmap_buf, local_buf);
3276 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3277 return rc;
3278 }
3279
3280 buf_size -= buf->end - buf->start;
3281 }
3282 }
3283 }
3284
3285 if (buf->free > buf->start) {
3286 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
3287 }
3288
3289 return rc;
3290}
3291
3292
3293ssize_t
3294nxt_unit_websocket_read(nxt_unit_websocket_frame_t *ws, void *dst,
3295 size_t size)
3296{
3297 ssize_t res;
3298 uint8_t *b;
3299 uint64_t i, d;
3300
3301 res = nxt_unit_buf_read(&ws->content_buf, &ws->content_length,
3302 dst, size);
3303
3304 if (ws->mask == NULL) {
3305 return res;
3306 }
3307
3308 b = dst;
3309 d = (ws->payload_len - ws->content_length - res) % 4;
3310
3311 for (i = 0; i < (uint64_t) res; i++) {
3312         b[i] ^= ws->mask[(i + d) % 4];
3313 }
3314
3315 return res;
3316}
3317
3318
3319int
3320nxt_unit_websocket_retain(nxt_unit_websocket_frame_t *ws)
3321{
3322 char *b;
3323 size_t size, hsize;
3324 nxt_unit_websocket_frame_impl_t *ws_impl;
3325
3326 ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);
3327
3328 if (ws_impl->buf->free_ptr != NULL || ws_impl->buf->hdr != NULL) {
3329 return NXT_UNIT_OK;
3330 }
3331
3332 size = ws_impl->buf->buf.end - ws_impl->buf->buf.start;
3333
3334 b = nxt_unit_malloc(ws->req->ctx, size);
3335 if (nxt_slow_path(b == NULL)) {
3336 return NXT_UNIT_ERROR;
3337 }
3338
3339 memcpy(b, ws_impl->buf->buf.start, size);
3340
3341 hsize = nxt_websocket_frame_header_size(b);
3342
3343 ws_impl->buf->buf.start = b;
3344 ws_impl->buf->buf.free = b + hsize;
3345 ws_impl->buf->buf.end = b + size;
3346
3347 ws_impl->buf->free_ptr = b;
3348
3349 ws_impl->ws.header = (nxt_websocket_header_t *) b;
3350
3351 if (ws_impl->ws.header->mask) {
3352 ws_impl->ws.mask = (uint8_t *) b + hsize - 4;
3353
3354 } else {
3355 ws_impl->ws.mask = NULL;
3356 }
3357
3358 return NXT_UNIT_OK;
3359}
3360
3361
3362void
3363nxt_unit_websocket_done(nxt_unit_websocket_frame_t *ws)
3364{
3365 nxt_unit_websocket_frame_release(ws);
3366}
3367
3368
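/*
 * Finds "*n" consecutive free chunks (at least "min_n") in one of the
 * outgoing shared memory segments usable with "port", creating a new
 * segment if needed.  When the per-process mmap limit is reached, an
 * OOSM message is sent to the router and, unless min_n is 0, the call
 * blocks until a SHM_ACK arrives and then retries.
 */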
3369static nxt_port_mmap_header_t *
3370nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
3371 nxt_chunk_id_t *c, int *n, int min_n)
3372{
3373 int res, nchunks, i;
3374 uint32_t outgoing_size;
3375 nxt_unit_mmap_t *mm, *mm_end;
3376 nxt_unit_impl_t *lib;
3377 nxt_port_mmap_header_t *hdr;
3378
3379 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3380
3381 pthread_mutex_lock(&lib->outgoing.mutex);
3382
3383retry:
3384
3385 outgoing_size = lib->outgoing.size;
3386
3387 mm_end = lib->outgoing.elts + outgoing_size;
3388
3389 for (mm = lib->outgoing.elts; mm < mm_end; mm++) {
3390 hdr = mm->hdr;
3391
3392 if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port->id.id) {
3393 continue;
3394 }
3395
3396 *c = 0;
3397
3398 while (nxt_port_mmap_get_free_chunk(hdr->free_map, c)) {
3399 nchunks = 1;
3400
3401 while (nchunks < *n) {
3402 res = nxt_port_mmap_chk_set_chunk_busy(hdr->free_map,
3403 *c + nchunks);
3404
3405 if (res == 0) {
3406 if (nchunks >= min_n) {
3407 *n = nchunks;
3408
3409 goto unlock;
3410 }
3411
3412 for (i = 0; i < nchunks; i++) {
3413 nxt_port_mmap_set_chunk_free(hdr->free_map, *c + i);
3414 }
3415
3416 *c += nchunks + 1;
3417 nchunks = 0;
3418 break;
3419 }
3420
3421 nchunks++;
3422 }
3423
3424 if (nchunks >= min_n) {
3425 *n = nchunks;
3426
3427 goto unlock;
3428 }
3429 }
3430
3431 hdr->oosm = 1;
3432 }
3433
3434 if (outgoing_size >= lib->shm_mmap_limit) {
3435 /* Cannot allocate more shared memory. */
3436 pthread_mutex_unlock(&lib->outgoing.mutex);
3437
3438 if (min_n == 0) {
3439 *n = 0;
3440 }
3441
3442 if (nxt_slow_path(lib->outgoing.allocated_chunks + min_n
3443 >= lib->shm_mmap_limit * PORT_MMAP_CHUNK_COUNT))
3444 {
3445             /* Memory allocated by the application, but not sent to router. */
3446 return NULL;
3447 }
3448
3449 /* Notify router about OOSM condition. */
3450
3451 res = nxt_unit_send_oosm(ctx, port);
3452 if (nxt_slow_path(res != NXT_UNIT_OK)) {
3453 return NULL;
3454 }
3455
3456 /* Return if caller can handle OOSM condition. Non-blocking mode. */
3457
3458 if (min_n == 0) {
3459 return NULL;
3460 }
3461
3462 nxt_unit_debug(ctx, "oosm: waiting for ACK");
3463
3464 res = nxt_unit_wait_shm_ack(ctx);
3465 if (nxt_slow_path(res != NXT_UNIT_OK)) {
3466 return NULL;
3467 }
3468
3469 nxt_unit_debug(ctx, "oosm: retry");
3470
3471 pthread_mutex_lock(&lib->outgoing.mutex);
3472
3473 goto retry;
3474 }
3475
3476 *c = 0;
3477 hdr = nxt_unit_new_mmap(ctx, port, *n);
3478
3479unlock:
3480
3481 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, *n);
3482
3483 nxt_unit_debug(ctx, "allocated_chunks %d",
3484 (int) lib->outgoing.allocated_chunks);
3485
3486 pthread_mutex_unlock(&lib->outgoing.mutex);
3487
3488 return hdr;
3489}
3490
3491
3492static int
3493nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
3494{
3495 ssize_t res;
3496 nxt_port_msg_t msg;
3497 nxt_unit_impl_t *lib;
3498
3499 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3500
3501 msg.stream = 0;
3502 msg.pid = lib->pid;
3503 msg.reply_port = 0;
3504 msg.type = _NXT_PORT_MSG_OOSM;
3505 msg.last = 0;
3506 msg.mmap = 0;
3507 msg.nf = 0;
3508 msg.mf = 0;
3509 msg.tracking = 0;
3510
3511 res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL, 0);
3512 if (nxt_slow_path(res != sizeof(msg))) {
3513 return NXT_UNIT_ERROR;
3514 }
3515
3516 return NXT_UNIT_OK;
3517}
3518
3519
3520static int
3521nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx)
3522{
3523 int res;
3524 nxt_unit_ctx_impl_t *ctx_impl;
3525 nxt_unit_read_buf_t *rbuf;
3526
3527 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
3528
3529 while (1) {
3530 rbuf = nxt_unit_read_buf_get(ctx);
3531 if (nxt_slow_path(rbuf == NULL)) {
3532 return NXT_UNIT_ERROR;
3533 }
3534
3535 res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
3536 if (res == NXT_UNIT_ERROR) {
3537 nxt_unit_read_buf_release(ctx, rbuf);
3538
3539 return NXT_UNIT_ERROR;
3540 }
3541
3542 if (nxt_unit_is_shm_ack(rbuf)) {
3543 nxt_unit_read_buf_release(ctx, rbuf);
3544 break;
3545 }
3546
3547 pthread_mutex_lock(&ctx_impl->mutex);
3548
3549 nxt_queue_insert_tail(&ctx_impl->pending_rbuf, &rbuf->link);
3550
3551 pthread_mutex_unlock(&ctx_impl->mutex);
3552
3553 if (nxt_unit_is_quit(rbuf)) {
3554 nxt_unit_debug(ctx, "oosm: quit received");
3555
3556 return NXT_UNIT_ERROR;
3557 }
3558 }
3559
3560 return NXT_UNIT_OK;
3561}
3562
3563
3564static nxt_unit_mmap_t *
3565nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i)
3566{
3567 uint32_t cap, n;
3568 nxt_unit_mmap_t *e;
3569
3570 if (nxt_fast_path(mmaps->size > i)) {
3571 return mmaps->elts + i;
3572 }
3573
3574 cap = mmaps->cap;
3575
3576 if (cap == 0) {
3577 cap = i + 1;
3578 }
3579
3580 while (i + 1 > cap) {
3581
3582 if (cap < 16) {
3583 cap = cap * 2;
3584
3585 } else {
3586 cap = cap + cap / 2;
3587 }
3588 }
3589
3590 if (cap != mmaps->cap) {
3591
3592 e = realloc(mmaps->elts, cap * sizeof(nxt_unit_mmap_t));
3593 if (nxt_slow_path(e == NULL)) {
3594 return NULL;
3595 }
3596
3597 mmaps->elts = e;
3598
3599 for (n = mmaps->cap; n < cap; n++) {
3600 e = mmaps->elts + n;
3601
3602 e->hdr = NULL;
3603 nxt_queue_init(&e->awaiting_rbuf);
3604 }
3605
3606 mmaps->cap = cap;
3607 }
3608
3609 if (i + 1 > mmaps->size) {
3610 mmaps->size = i + 1;
3611 }
3612
3613 return mmaps->elts + i;
3614}
3615
3616
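/*
 * Creates a new outgoing shared memory segment: opens an anonymous
 * shared memory object, maps it, initializes the chunk free maps,
 * marks the first "n" chunks as busy, and passes the file descriptor
 * to the router with an _NXT_PORT_MSG_MMAP message.  Called with
 * lib->outgoing.mutex held; the lock is dropped around the send.
 */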
3617static nxt_port_mmap_header_t *
3618nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n)
3619{
3620 int i, fd, rc;
3621 void *mem;
3622 nxt_unit_mmap_t *mm;
3623 nxt_unit_impl_t *lib;
3624 nxt_port_mmap_header_t *hdr;
3625
3626 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3627
3628 mm = nxt_unit_mmap_at(&lib->outgoing, lib->outgoing.size);
3629 if (nxt_slow_path(mm == NULL)) {
3630 nxt_unit_alert(ctx, "failed to add mmap to outgoing array");
3631
3632 return NULL;
3633 }
3634
3635 fd = nxt_unit_shm_open(ctx, PORT_MMAP_SIZE);
3636 if (nxt_slow_path(fd == -1)) {
3637 goto remove_fail;
3638 }
3639
3640 mem = mmap(NULL, PORT_MMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
3641 if (nxt_slow_path(mem == MAP_FAILED)) {
3642 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", fd,
3643 strerror(errno), errno);
3644
3645 nxt_unit_close(fd);
3646
3647 goto remove_fail;
3648 }
3649
3650 mm->hdr = mem;
3651 hdr = mem;
3652
3653 memset(hdr->free_map, 0xFFU, sizeof(hdr->free_map));
3654 memset(hdr->free_tracking_map, 0xFFU, sizeof(hdr->free_tracking_map));
3655
3656 hdr->id = lib->outgoing.size - 1;
3657 hdr->src_pid = lib->pid;
3658 hdr->dst_pid = port->id.pid;
3659 hdr->sent_over = port->id.id;
3660
3661 /* Mark first n chunk(s) as busy */
3662 for (i = 0; i < n; i++) {
3663 nxt_port_mmap_set_chunk_busy(hdr->free_map, i);
3664 }
3665
3666     /* Mark the chunk following the last available chunk as busy. */
3667 nxt_port_mmap_set_chunk_busy(hdr->free_map, PORT_MMAP_CHUNK_COUNT);
3668 nxt_port_mmap_set_chunk_busy(hdr->free_tracking_map, PORT_MMAP_CHUNK_COUNT);
3669
3670 pthread_mutex_unlock(&lib->outgoing.mutex);
3671
3672 rc = nxt_unit_send_mmap(ctx, port, fd);
3673 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3674 munmap(mem, PORT_MMAP_SIZE);
3675 hdr = NULL;
3676
3677 } else {
3678 nxt_unit_debug(ctx, "new mmap #%"PRIu32" created for %d -> %d",
3679 hdr->id, (int) lib->pid, (int) port->id.pid);
3680 }
3681
3682 nxt_unit_close(fd);
3683
3684 pthread_mutex_lock(&lib->outgoing.mutex);
3685
3686 if (nxt_fast_path(hdr != NULL)) {
3687 return hdr;
3688 }
3689
3690remove_fail:
3691
3692 lib->outgoing.size--;
3693
3694 return NULL;
3695}
3696
3697
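/*
 * Opens an anonymous shared memory object of the given size using
 * memfd_create(), shm_open(SHM_ANON) or a shm_open()/shm_unlink()
 * pair, depending on what the platform provides, and returns its
 * file descriptor, or -1 on failure.
 */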
3698static int
3699nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size)
3700{
3701 int fd;
3702 nxt_unit_impl_t *lib;
3703
3704 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3705
3706#if (NXT_HAVE_MEMFD_CREATE || NXT_HAVE_SHM_OPEN)
3707 char name[64];
3708
3709 snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p",
3710 lib->pid, (void *) pthread_self());
3711#endif
3712
3713#if (NXT_HAVE_MEMFD_CREATE)
3714
3715 fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC);
3716 if (nxt_slow_path(fd == -1)) {
3717 nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name,
3718 strerror(errno), errno);
3719
3720 return -1;
3721 }
3722
3723 nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd);
3724
3725#elif (NXT_HAVE_SHM_OPEN_ANON)
3726
3727 fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR);
3728 if (nxt_slow_path(fd == -1)) {
3729 nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)",
3730 strerror(errno), errno);
3731
3732 return -1;
3733 }
3734
3735#elif (NXT_HAVE_SHM_OPEN)
3736
3737 /* Just in case. */
3738 shm_unlink(name);
3739
3740 fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
3741 if (nxt_slow_path(fd == -1)) {
3742 nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name,
3743 strerror(errno), errno);
3744
3745 return -1;
3746 }
3747
3748 if (nxt_slow_path(shm_unlink(name) == -1)) {
3749 nxt_unit_alert(ctx, "shm_unlink(%s) failed: %s (%d)", name,
3750 strerror(errno), errno);
3751 }
3752
3753#else
3754
3755#error No working shared memory implementation.
3756
3757#endif
3758
3759 if (nxt_slow_path(ftruncate(fd, size) == -1)) {
3760 nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd,
3761 strerror(errno), errno);
3762
3763 nxt_unit_close(fd);
3764
3765 return -1;
3766 }
3767
3768 return fd;
3769}
3770
3771
3772static int
3773nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int fd)
3774{
3775 ssize_t res;
3776 nxt_port_msg_t msg;
3777 nxt_unit_impl_t *lib;
3778 union {
3779 struct cmsghdr cm;
3780 char space[CMSG_SPACE(sizeof(int))];
3781 } cmsg;
3782
3783 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3784
3785 msg.stream = 0;
3786 msg.pid = lib->pid;
3787 msg.reply_port = 0;
3788 msg.type = _NXT_PORT_MSG_MMAP;
3789 msg.last = 0;
3790 msg.mmap = 0;
3791 msg.nf = 0;
3792 msg.mf = 0;
3793 msg.tracking = 0;
3794
3795 /*
3796 * Fill all padding fields with 0.
3797      * Code in Go 1.11 validates cmsghdr using padding field as part of len.
3798 * See Cmsghdr definition and socketControlMessageHeaderAndData function.
3799 */
3800 memset(&cmsg, 0, sizeof(cmsg));
3801
3802 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
3803 cmsg.cm.cmsg_level = SOL_SOCKET;
3804 cmsg.cm.cmsg_type = SCM_RIGHTS;
3805
3806 /*
3807 * memcpy() is used instead of simple
3808 * *(int *) CMSG_DATA(&cmsg.cm) = fd;
3809 * because GCC 4.4 with -O2/3/s optimization may issue a warning:
3810 * dereferencing type-punned pointer will break strict-aliasing rules
3811 *
3812 * Fortunately, GCC with -O1 compiles this nxt_memcpy()
3813      * Fortunately, GCC with -O1 compiles this memcpy()
3814 */
3815 memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int));
3816
3817 res = nxt_unit_port_send(ctx, port, &msg, sizeof(msg),
3818 &cmsg, sizeof(cmsg));
3819 if (nxt_slow_path(res != sizeof(msg))) {
3820 return NXT_UNIT_ERROR;
3821 }
3822
3823 return NXT_UNIT_OK;
3824}
3825
3826
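/*
 * Prepares an outgoing buffer of up to "size" bytes.  Requests not
 * larger than NXT_UNIT_MAX_PLAIN_SIZE are served from "local_buf" (or
 * malloc() when it is NULL) with room reserved in front for the port
 * message header; larger requests are carved out of shared memory
 * chunks obtained from nxt_unit_mmap_get().
 */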
3827static int
3828nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
3829 uint32_t size, uint32_t min_size,
3830 nxt_unit_mmap_buf_t *mmap_buf, char *local_buf)
3831{
3832 int nchunks, min_nchunks;
3833 nxt_chunk_id_t c;
3834 nxt_port_mmap_header_t *hdr;
3835
3836 if (size <= NXT_UNIT_MAX_PLAIN_SIZE) {
3837 if (local_buf != NULL) {
3838 mmap_buf->free_ptr = NULL;
3839 mmap_buf->plain_ptr = local_buf;
3840
3841 } else {
3842 mmap_buf->free_ptr = nxt_unit_malloc(ctx,
3843 size + sizeof(nxt_port_msg_t));
3844 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) {
3845 return NXT_UNIT_ERROR;
3846 }
3847
3848 mmap_buf->plain_ptr = mmap_buf->free_ptr;
3849 }
3850
3851 mmap_buf->hdr = NULL;
3852 mmap_buf->buf.start = mmap_buf->plain_ptr + sizeof(nxt_port_msg_t);
3853 mmap_buf->buf.free = mmap_buf->buf.start;
3854 mmap_buf->buf.end = mmap_buf->buf.start + size;
3855
3856 nxt_unit_debug(ctx, "outgoing plain buffer allocation: (%p, %d)",
3857 mmap_buf->buf.start, (int) size);
3858
3859 return NXT_UNIT_OK;
3860 }
3861
3862 nchunks = (size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;
3863 min_nchunks = (min_size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;
3864
3865 hdr = nxt_unit_mmap_get(ctx, port, &c, &nchunks, min_nchunks);
3866 if (nxt_slow_path(hdr == NULL)) {
3867 if (nxt_fast_path(min_nchunks == 0 && nchunks == 0)) {
3868 mmap_buf->hdr = NULL;
3869 mmap_buf->buf.start = NULL;
3870 mmap_buf->buf.free = NULL;
3871 mmap_buf->buf.end = NULL;
3872 mmap_buf->free_ptr = NULL;
3873
3874 return NXT_UNIT_OK;
3875 }
3876
3877 return NXT_UNIT_ERROR;
3878 }
3879
3880 mmap_buf->hdr = hdr;
3881 mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c);
3882 mmap_buf->buf.free = mmap_buf->buf.start;
3883 mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE;
3884 mmap_buf->free_ptr = NULL;
3885 mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
3886
3887 nxt_unit_debug(ctx, "outgoing mmap allocation: (%d,%d,%d)",
3888 (int) hdr->id, (int) c,
3889 (int) (nchunks * PORT_MMAP_CHUNK_SIZE));
3890
3891 return NXT_UNIT_OK;
3892}
3893
3894
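/*
 * Registers a shared memory segment received from process "pid": maps
 * the descriptor, stores the header in the incoming array slot given
 * by hdr->id, and requeues any read buffers that were waiting for this
 * segment so their contexts can finish processing the stalled messages.
 */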
3895static int
3896nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd)
3897{
3898 int rc;
3899 void *mem;
3900 nxt_queue_t awaiting_rbuf;
3901 struct stat mmap_stat;
3902 nxt_unit_mmap_t *mm;
3903 nxt_unit_impl_t *lib;
3904 nxt_unit_ctx_impl_t *ctx_impl;
3905 nxt_unit_read_buf_t *rbuf;
3906 nxt_port_mmap_header_t *hdr;
3907
3908 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3909
3910 nxt_unit_debug(ctx, "incoming_mmap: fd %d from process %d", fd, (int) pid);
3911
3912 if (fstat(fd, &mmap_stat) == -1) {
3913 nxt_unit_alert(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd,
3914 strerror(errno), errno);
3915
3916 return NXT_UNIT_ERROR;
3917 }
3918
3919 mem = mmap(NULL, mmap_stat.st_size, PROT_READ | PROT_WRITE,
3920 MAP_SHARED, fd, 0);
3921 if (nxt_slow_path(mem == MAP_FAILED)) {
3922 nxt_unit_alert(ctx, "incoming_mmap: mmap() failed: %s (%d)",
3923 strerror(errno), errno);
3924
3925 return NXT_UNIT_ERROR;
3926 }
3927
3928 hdr = mem;
3929
3930     if (nxt_slow_path(hdr->src_pid != pid || hdr->dst_pid != lib->pid)) {
3931
3932 nxt_unit_alert(ctx, "incoming_mmap: unexpected pid in mmap header "
3933 "detected: %d != %d or %d != %d", (int) hdr->src_pid,
3934 (int) pid, (int) hdr->dst_pid, (int) lib->pid);
3935
3936 munmap(mem, PORT_MMAP_SIZE);
3937
3938 return NXT_UNIT_ERROR;
3939 }
3940
3941 nxt_queue_init(&awaiting_rbuf);
3942
3943 pthread_mutex_lock(&lib->incoming.mutex);
3944
3945 mm = nxt_unit_mmap_at(&lib->incoming, hdr->id);
3946 if (nxt_slow_path(mm == NULL)) {
3947 nxt_unit_alert(ctx, "incoming_mmap: failed to add to incoming array");
3948
3949 munmap(mem, PORT_MMAP_SIZE);
3950
3951 rc = NXT_UNIT_ERROR;
3952
3953 } else {
3954 mm->hdr = hdr;
3955
3956 hdr->sent_over = 0xFFFFu;
3957
3958 nxt_queue_add(&awaiting_rbuf, &mm->awaiting_rbuf);
3959 nxt_queue_init(&mm->awaiting_rbuf);
3960
3961 rc = NXT_UNIT_OK;
3962 }
3963
3964 pthread_mutex_unlock(&lib->incoming.mutex);
3965
3966 nxt_queue_each(rbuf, &awaiting_rbuf, nxt_unit_read_buf_t, link) {
3967
3968 ctx_impl = rbuf->ctx_impl;
3969
3970 pthread_mutex_lock(&ctx_impl->mutex);
3971
3972 nxt_queue_insert_head(&ctx_impl->pending_rbuf, &rbuf->link);
3973
3974 pthread_mutex_unlock(&ctx_impl->mutex);
3975
3976 nxt_atomic_fetch_add(&ctx_impl->wait_items, -1);
3977
3978 } nxt_queue_loop;
3979
3980 return rc;
3981}
3982
3983
3984static void
3985nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps)
3986{
3987 pthread_mutex_init(&mmaps->mutex, NULL);
3988
3989 mmaps->size = 0;
3990 mmaps->cap = 0;
3991 mmaps->elts = NULL;
3992 mmaps->allocated_chunks = 0;
3993}
3994
3995
3996nxt_inline void
3997nxt_unit_process_use(nxt_unit_process_t *process)
3998{
3999 nxt_atomic_fetch_add(&process->use_count, 1);
4000}
4001
4002
4003nxt_inline void
4004nxt_unit_process_release(nxt_unit_process_t *process)
4005{
4006 long c;
4007
4008 c = nxt_atomic_fetch_add(&process->use_count, -1);
4009
4010 if (c == 1) {
4011 nxt_unit_debug(NULL, "destroy process #%d", (int) process->pid);
4012
4013 nxt_unit_free(NULL, process);
4014 }
4015}
4016
4017
4018static void
4019nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps)
4020{
4021 nxt_unit_mmap_t *mm, *end;
4022
4023 if (mmaps->elts != NULL) {
4024 end = mmaps->elts + mmaps->size;
4025
4026 for (mm = mmaps->elts; mm < end; mm++) {
4027 munmap(mm->hdr, PORT_MMAP_SIZE);
4028 }
4029
4030 nxt_unit_free(NULL, mmaps->elts);
4031 }
4032
4033 pthread_mutex_destroy(&mmaps->mutex);
4034}
4035
4036
4037static int
4038nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps,
4039 pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr,
4040 nxt_unit_read_buf_t *rbuf)
4041{
4042 int res, need_rbuf;
4043 nxt_unit_mmap_t *mm;
4044 nxt_unit_ctx_impl_t *ctx_impl;
4045
4046 mm = nxt_unit_mmap_at(mmaps, id);
4047 if (nxt_slow_path(mm == NULL)) {
4048 nxt_unit_alert(ctx, "failed to allocate mmap");
4049
4050 pthread_mutex_unlock(&mmaps->mutex);
4051
4052 *hdr = NULL;
4053
4054 return NXT_UNIT_ERROR;
4055 }
4056
4057 *hdr = mm->hdr;
4058
4059 if (nxt_fast_path(*hdr != NULL)) {
4060 return NXT_UNIT_OK;
4061 }
4062
4063 need_rbuf = nxt_queue_is_empty(&mm->awaiting_rbuf);
4064
4065 nxt_queue_insert_tail(&mm->awaiting_rbuf, &rbuf->link);
4066
4067 pthread_mutex_unlock(&mmaps->mutex);
4068
4069 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4070
4071 nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);
4072
4073 if (need_rbuf) {
4074 res = nxt_unit_get_mmap(ctx, pid, id);
4075 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
4076 return NXT_UNIT_ERROR;
4077 }
4078 }
4079
4080 return NXT_UNIT_AGAIN;
4081}
4082
4083
4084static int
4085nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg,
4086 nxt_unit_read_buf_t *rbuf)
4087{
4088 int res;
4089 void *start;
4090 uint32_t size;
4091 nxt_unit_impl_t *lib;
4092 nxt_unit_mmaps_t *mmaps;
4093 nxt_unit_mmap_buf_t *b, **incoming_tail;
4094 nxt_port_mmap_msg_t *mmap_msg, *end;
4095 nxt_port_mmap_header_t *hdr;
4096
4097 if (nxt_slow_path(recv_msg->size < sizeof(nxt_port_mmap_msg_t))) {
4098 nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: too small message (%d)",
4099 recv_msg->stream, (int) recv_msg->size);
4100
4101 return NXT_UNIT_ERROR;
4102 }
4103
4104 mmap_msg = recv_msg->start;
4105 end = nxt_pointer_to(recv_msg->start, recv_msg->size);
4106
4107 incoming_tail = &recv_msg->incoming_buf;
4108
4109 /* Allocating buffer structures. */
4110 for (; mmap_msg < end; mmap_msg++) {
4111 b = nxt_unit_mmap_buf_get(ctx);
4112 if (nxt_slow_path(b == NULL)) {
4113 nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: failed to allocate buf",
4114 recv_msg->stream);
4115
4116 while (recv_msg->incoming_buf != NULL) {
4117 nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
4118 }
4119
4120 return NXT_UNIT_ERROR;
4121 }
4122
4123 nxt_unit_mmap_buf_insert(incoming_tail, b);
4124 incoming_tail = &b->next;
4125 }
4126
4127 b = recv_msg->incoming_buf;
4128 mmap_msg = recv_msg->start;
4129
4130 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4131
4132 mmaps = &lib->incoming;
4133
4134 pthread_mutex_lock(&mmaps->mutex);
4135
4136 for (; mmap_msg < end; mmap_msg++) {
4137 res = nxt_unit_check_rbuf_mmap(ctx, mmaps,
4138 recv_msg->pid, mmap_msg->mmap_id,
4139 &hdr, rbuf);
4140
4141 if (nxt_slow_path(res != NXT_UNIT_OK)) {
4142 while (recv_msg->incoming_buf != NULL) {
4143 nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
4144 }
4145
4146 return res;
4147 }
4148
4149 start = nxt_port_mmap_chunk_start(hdr, mmap_msg->chunk_id);
4150 size = mmap_msg->size;
4151
4152 if (recv_msg->start == mmap_msg) {
4153 recv_msg->start = start;
4154 recv_msg->size = size;
4155 }
4156
4157 b->buf.start = start;
4158 b->buf.free = start;
4159 b->buf.end = b->buf.start + size;
4160 b->hdr = hdr;
4161
4162 b = b->next;
4163
4164 nxt_unit_debug(ctx, "#%"PRIu32": mmap_read: [%p,%d] %d->%d,(%d,%d,%d)",
4165 recv_msg->stream,
4166 start, (int) size,
4167 (int) hdr->src_pid, (int) hdr->dst_pid,
4168 (int) hdr->id, (int) mmap_msg->chunk_id,
4169 (int) mmap_msg->size);
4170 }
4171
4172 pthread_mutex_unlock(&mmaps->mutex);
4173
4174 return NXT_UNIT_OK;
4175}
4176
4177
4178static int
4179nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id)
4180{
4181 ssize_t res;
4182 nxt_unit_impl_t *lib;
4183 nxt_unit_ctx_impl_t *ctx_impl;
4184
4185 struct {
4186 nxt_port_msg_t msg;
4187 nxt_port_msg_get_mmap_t get_mmap;
4188 } m;
4189
4190 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4191 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4192
4193 memset(&m.msg, 0, sizeof(nxt_port_msg_t));
4194
4195 m.msg.pid = lib->pid;
4196 m.msg.reply_port = ctx_impl->read_port->id.id;
4197 m.msg.type = _NXT_PORT_MSG_GET_MMAP;
4198
4199 m.get_mmap.id = id;
4200
4201 nxt_unit_debug(ctx, "get_mmap: %d %d", (int) pid, (int) id);
4202
4203 res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL, 0);
4204 if (nxt_slow_path(res != sizeof(m))) {
4205 return NXT_UNIT_ERROR;
4206 }
4207
4208 return NXT_UNIT_OK;
4209}
4210
4211
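/*
 * Returns the chunks covering [start, start + size) to the segment's
 * free map.  For outgoing segments the allocated chunk counter is
 * decreased; for incoming segments a SHM_ACK is sent to the source
 * process if it had reported an out-of-shared-memory condition.
 */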
4212static void
4213nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, nxt_port_mmap_header_t *hdr,
4214 void *start, uint32_t size)
4215{
4216 int freed_chunks;
4217 u_char *p, *end;
4218 nxt_chunk_id_t c;
4219 nxt_unit_impl_t *lib;
4220
4221 memset(start, 0xA5, size);
4222
4223 p = start;
4224 end = p + size;
4225 c = nxt_port_mmap_chunk_id(hdr, p);
4226 freed_chunks = 0;
4227
4228 while (p < end) {
4229 nxt_port_mmap_set_chunk_free(hdr->free_map, c);
4230
4231 p += PORT_MMAP_CHUNK_SIZE;
4232 c++;
4233 freed_chunks++;
4234 }
4235
4236 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4237
4238 if (hdr->src_pid == lib->pid && freed_chunks != 0) {
4239 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, -freed_chunks);
4240
4241 nxt_unit_debug(ctx, "allocated_chunks %d",
4242 (int) lib->outgoing.allocated_chunks);
4243 }
4244
4245 if (hdr->dst_pid == lib->pid
4246 && freed_chunks != 0
4247 && nxt_atomic_cmp_set(&hdr->oosm, 1, 0))
4248 {
4249 nxt_unit_send_shm_ack(ctx, hdr->src_pid);
4250 }
4251}
4252
4253
4254static int
4255nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid)
4256{
4257 ssize_t res;
4258 nxt_port_msg_t msg;
4259 nxt_unit_impl_t *lib;
4260
4261 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4262
4263 msg.stream = 0;
4264 msg.pid = lib->pid;
4265 msg.reply_port = 0;
4266 msg.type = _NXT_PORT_MSG_SHM_ACK;
4267 msg.last = 0;
4268 msg.mmap = 0;
4269 msg.nf = 0;
4270 msg.mf = 0;
4271 msg.tracking = 0;
4272
4273 res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL, 0);
4274 if (nxt_slow_path(res != sizeof(msg))) {
4275 return NXT_UNIT_ERROR;
4276 }
4277
4278 return NXT_UNIT_OK;
4279}
4280
4281
4282static nxt_int_t
4283nxt_unit_lvlhsh_pid_test(nxt_lvlhsh_query_t *lhq, void *data)
4284{
4285 nxt_process_t *process;
4286
4287 process = data;
4288
4289 if (lhq->key.length == sizeof(pid_t)
4290 && *(pid_t *) lhq->key.start == process->pid)
4291 {
4292 return NXT_OK;
4293 }
4294
4295 return NXT_DECLINED;
4296}
4297
4298
4299static const nxt_lvlhsh_proto_t lvlhsh_processes_proto nxt_aligned(64) = {
4300 NXT_LVLHSH_DEFAULT,
4301 nxt_unit_lvlhsh_pid_test,
4302 nxt_unit_lvlhsh_alloc,
4303 nxt_unit_lvlhsh_free,
4304};
4305
4306
4307static inline void
4308nxt_unit_process_lhq_pid(nxt_lvlhsh_query_t *lhq, pid_t *pid)
4309{
4310 lhq->key_hash = nxt_murmur_hash2(pid, sizeof(*pid));
4311 lhq->key.length = sizeof(*pid);
4312 lhq->key.start = (u_char *) pid;
4313 lhq->proto = &lvlhsh_processes_proto;
4314}
4315
4316
4317static nxt_unit_process_t *
4318nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid)
4319{
4320 nxt_unit_impl_t *lib;
4321 nxt_unit_process_t *process;
4322 nxt_lvlhsh_query_t lhq;
4323
4324 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4325
4326 nxt_unit_process_lhq_pid(&lhq, &pid);
4327
4328 if (nxt_lvlhsh_find(&lib->processes, &lhq) == NXT_OK) {
4329 process = lhq.value;
4330 nxt_unit_process_use(process);
4331
4332 return process;
4333 }
4334
4335 process = nxt_unit_malloc(ctx, sizeof(nxt_unit_process_t));
4336 if (nxt_slow_path(process == NULL)) {
4337 nxt_unit_alert(ctx, "failed to allocate process for #%d", (int) pid);
4338
4339 return NULL;
4340 }
4341
4342 process->pid = pid;
4343 process->use_count = 2;
4344 process->next_port_id = 0;
4345 process->lib = lib;
4346
4347 nxt_queue_init(&process->ports);
4348
4349 lhq.replace = 0;
4350 lhq.value = process;
4351
4352 switch (nxt_lvlhsh_insert(&lib->processes, &lhq)) {
4353
4354 case NXT_OK:
4355 break;
4356
4357 default:
4358 nxt_unit_alert(ctx, "process %d insert failed", (int) pid);
4359
4360 nxt_unit_free(ctx, process);
4361 process = NULL;
4362 break;
4363 }
4364
4365 return process;
4366}
4367
4368
4369static nxt_unit_process_t *
4370nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove)
4371{
4372 int rc;
4373 nxt_lvlhsh_query_t lhq;
4374
4375 nxt_unit_process_lhq_pid(&lhq, &pid);
4376
4377 if (remove) {
4378 rc = nxt_lvlhsh_delete(&lib->processes, &lhq);
4379
4380 } else {
4381 rc = nxt_lvlhsh_find(&lib->processes, &lhq);
4382 }
4383
4384 if (rc == NXT_OK) {
4385 if (!remove) {
4386 nxt_unit_process_use(lhq.value);
4387 }
4388
4389 return lhq.value;
4390 }
4391
4392 return NULL;
4393}
4394
4395
4396static nxt_unit_process_t *
4397nxt_unit_process_pop_first(nxt_unit_impl_t *lib)
4398{
4399 return nxt_lvlhsh_retrieve(&lib->processes, &lvlhsh_processes_proto, NULL);
4400}
4401
4402
4403int
4404nxt_unit_run(nxt_unit_ctx_t *ctx)
4405{
4406 int rc;
4407 nxt_unit_impl_t *lib;
4408
4409 nxt_unit_ctx_use(ctx);
4410
4411 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4412 rc = NXT_UNIT_OK;
4413
4414 while (nxt_fast_path(lib->online)) {
4415 rc = nxt_unit_run_once_impl(ctx);
4416
4417 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4418 break;
4419 }
4420 }
4421
4422 nxt_unit_ctx_release(ctx);
4423
4424 return rc;
4425}
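
#if 0
/*
 * Illustrative sketch, not part of this file: a minimal application built
 * on the event loop above.  It assumes only the public libunit API
 * (nxt_unit_init(), nxt_unit_run(), nxt_unit_done() and the
 * request_handler callback); the handler here merely completes each
 * request, a real one would build a response first.
 */
static void
app_request_handler(nxt_unit_request_info_t *req)
{
    nxt_unit_request_done(req, NXT_UNIT_OK);
}

int
main(void)
{
    int              rc;
    nxt_unit_ctx_t   *ctx;
    nxt_unit_init_t  init;

    memset(&init, 0, sizeof(nxt_unit_init_t));
    init.callbacks.request_handler = app_request_handler;

    ctx = nxt_unit_init(&init);
    if (ctx == NULL) {
        return 1;
    }

    rc = nxt_unit_run(ctx);      /* loops until the router sends QUIT */

    nxt_unit_done(ctx);

    return rc == NXT_UNIT_OK ? 0 : 1;
}
#endif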
4426
4427
4428int
4429nxt_unit_run_once(nxt_unit_ctx_t *ctx)
4430{
4431 int rc;
4432
4433 nxt_unit_ctx_use(ctx);
4434
4435 rc = nxt_unit_run_once_impl(ctx);
4436
4437 nxt_unit_ctx_release(ctx);
4438
4439 return rc;
4440}
4441
4442
4443static int
4444nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx)
4445{
4446 int rc;
4447 nxt_unit_read_buf_t *rbuf;
4448
4449 rbuf = nxt_unit_read_buf_get(ctx);
4450 if (nxt_slow_path(rbuf == NULL)) {
4451 return NXT_UNIT_ERROR;
4452 }
4453
4454 rc = nxt_unit_read_buf(ctx, rbuf);
4455 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
4456 nxt_unit_read_buf_release(ctx, rbuf);
4457
4458 return rc;
4459 }
4460
4461 rc = nxt_unit_process_msg(ctx, rbuf);
4462 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4463 return NXT_UNIT_ERROR;
4464 }
4465
4466 rc = nxt_unit_process_pending_rbuf(ctx);
4467 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4468 return NXT_UNIT_ERROR;
4469 }
4470
4471 nxt_unit_process_ready_req(ctx);
4472
4473 return rc;
4474}
4475
4476
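/*
 * Wait for the next message for this context.  When the context already
 * has wait items or there is no shared port, read the context port
 * directly; otherwise drain the context port queue (a READ_SOCKET marker
 * redirects the read to the socket), try the shared application queue,
 * and finally poll() both descriptors until one becomes readable.
 */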
4477static int
4478nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf)
4479{
4480 int nevents, res, err;
4481 nxt_unit_impl_t *lib;
4482 nxt_unit_ctx_impl_t *ctx_impl;
4483 nxt_unit_port_impl_t *port_impl;
4484 struct pollfd fds[2];
4485
4486 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4487 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4488
4489 if (ctx_impl->wait_items > 0 || lib->shared_port == NULL) {
4490
4491 return nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
4492 }
4493
4494 port_impl = nxt_container_of(ctx_impl->read_port, nxt_unit_port_impl_t,
4495 port);
4496
4497retry:
4498
4499 if (port_impl->from_socket == 0) {
4500 res = nxt_unit_port_queue_recv(ctx_impl->read_port, rbuf);
4501 if (res == NXT_UNIT_OK) {
4502 if (nxt_unit_is_read_socket(rbuf)) {
4503 port_impl->from_socket++;
4504
4505 nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d",
4506 (int) ctx_impl->read_port->id.pid,
4507 (int) ctx_impl->read_port->id.id,
4508 port_impl->from_socket);
4509
4510 } else {
4511 nxt_unit_debug(ctx, "port{%d,%d} dequeue %d",
4512 (int) ctx_impl->read_port->id.pid,
4513 (int) ctx_impl->read_port->id.id,
4514 (int) rbuf->size);
4515
4516 return NXT_UNIT_OK;
4517 }
4518 }
4519 }
4520
4521 res = nxt_unit_app_queue_recv(lib->shared_port, rbuf);
4522 if (res == NXT_UNIT_OK) {
4523 return NXT_UNIT_OK;
4524 }
4525
4526 fds[0].fd = ctx_impl->read_port->in_fd;
4527 fds[0].events = POLLIN;
4528 fds[0].revents = 0;
4529
4530 fds[1].fd = lib->shared_port->in_fd;
4531 fds[1].events = POLLIN;
4532 fds[1].revents = 0;
4533
4534 nevents = poll(fds, 2, -1);
4535 if (nxt_slow_path(nevents == -1)) {
4536 err = errno;
4537
4538 if (err == EINTR) {
4539 goto retry;
4540 }
4541
4542 nxt_unit_alert(ctx, "poll(%d,%d) failed: %s (%d)",
4543 fds[0].fd, fds[1].fd, strerror(err), err);
4544
4545 rbuf->size = -1;
4546
4547 return (err == EAGAIN) ? NXT_UNIT_AGAIN : NXT_UNIT_ERROR;
4548 }
4549
4550 nxt_unit_debug(ctx, "poll(%d,%d): %d, revents [%04uXi, %04uXi]",
4551 fds[0].fd, fds[1].fd, nevents, fds[0].revents,
4552 fds[1].revents);
4553
4554 if ((fds[0].revents & POLLIN) != 0) {
4555 res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
4556 if (res == NXT_UNIT_AGAIN) {
4557 goto retry;
4558 }
4559
4560 return res;
4561 }
4562
4563 if ((fds[1].revents & POLLIN) != 0) {
4564 res = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
4565 if (res == NXT_UNIT_AGAIN) {
4566 goto retry;
4567 }
4568
4569 return res;
4570 }
4571
4572 nxt_unit_alert(ctx, "poll(%d,%d): %d unexpected revents [%04uXi, %04uXi]",
4573 fds[0].fd, fds[1].fd, nevents, fds[0].revents,
4574 fds[1].revents);
4575
4576 return NXT_UNIT_ERROR;
4577}
4578
4579
4580static int
4581nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx)
4582{
4583 int rc;
4584 nxt_queue_t pending_rbuf;
4585 nxt_unit_ctx_impl_t *ctx_impl;
4586 nxt_unit_read_buf_t *rbuf;
4587
4588 nxt_queue_init(&pending_rbuf);
4589
4590 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4591
4592 pthread_mutex_lock(&ctx_impl->mutex);
4593
4594 if (nxt_queue_is_empty(&ctx_impl->pending_rbuf)) {
4595 pthread_mutex_unlock(&ctx_impl->mutex);
4596
4597 return NXT_UNIT_OK;
4598 }
4599
4600 nxt_queue_add(&pending_rbuf, &ctx_impl->pending_rbuf);
4601 nxt_queue_init(&ctx_impl->pending_rbuf);
4602
4603 pthread_mutex_unlock(&ctx_impl->mutex);
4604
4605 rc = NXT_UNIT_OK;
4606
4607 nxt_queue_each(rbuf, &pending_rbuf, nxt_unit_read_buf_t, link) {
4608
4609 if (nxt_fast_path(rc != NXT_UNIT_ERROR)) {
4610 rc = nxt_unit_process_msg(&ctx_impl->ctx, rbuf);
4611
4612 } else {
4613 nxt_unit_read_buf_release(ctx, rbuf);
4614 }
4615
4616 } nxt_queue_loop;
4617
4618 return rc;
4619}
4620
4621
4622static void
4623nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx)
4624{
4625 int res;
4626 nxt_queue_t ready_req;
4627 nxt_unit_impl_t *lib;
4628 nxt_unit_ctx_impl_t *ctx_impl;
4629 nxt_unit_request_info_t *req;
4630 nxt_unit_request_info_impl_t *req_impl;
4631
4632 nxt_queue_init(&ready_req);
4633
4634 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4635
4636 pthread_mutex_lock(&ctx_impl->mutex);
4637
4638 if (nxt_queue_is_empty(&ctx_impl->ready_req)) {
4639 pthread_mutex_unlock(&ctx_impl->mutex);
4640
4641 return;
4642 }
4643
4644 nxt_queue_add(&ready_req, &ctx_impl->ready_req);
4645 nxt_queue_init(&ctx_impl->ready_req);
4646
4647 pthread_mutex_unlock(&ctx_impl->mutex);
4648
4649 nxt_queue_each(req_impl, &ready_req,
4650 nxt_unit_request_info_impl_t, port_wait_link)
4651 {
4652 lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit);
4653
4654 req = &req_impl->req;
4655
4656 res = nxt_unit_send_req_headers_ack(req);
4657 if (nxt_slow_path(res != NXT_UNIT_OK)) {
4658 nxt_unit_request_done(req, NXT_UNIT_ERROR);
4659
4660 continue;
4661 }
4662
4663 if (req->content_length
4664 > (uint64_t) (req->content_buf->end - req->content_buf->free))
4665 {
4666 res = nxt_unit_request_hash_add(ctx, req);
4667 if (nxt_slow_path(res != NXT_UNIT_OK)) {
4668 nxt_unit_req_warn(req, "failed to add request to hash");
4669
4670 nxt_unit_request_done(req, NXT_UNIT_ERROR);
4671
4672 continue;
4673 }
4674
4675            /*
4676             * If the application has a separate data handler, we may start
4677             * processing the request now and handle the body data when it
4678             * arrives; a sketch of such a handler follows this function.  */
4679 if (lib->callbacks.data_handler == NULL) {
4680 continue;
4681 }
4682 }
4683
4684 lib->callbacks.request_handler(&req_impl->req);
4685
4686 } nxt_queue_loop;
4687}
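
#if 0
/*
 * Illustrative sketch, not part of this file: a body handler of the kind
 * referenced above.  It assumes the data_handler callback has the same
 * signature as request_handler and that nxt_unit_request_read() from the
 * public API is used to consume whatever body bytes have arrived so far.
 */
static void
app_data_handler(nxt_unit_request_info_t *req)
{
    char     buf[4096];
    ssize_t  n;

    for ( ;; ) {
        n = nxt_unit_request_read(req, buf, sizeof(buf));
        if (n <= 0) {
            break;
        }

        /* consume n bytes of request body here */
    }
}
#endif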
4688
4689
4690int
4691nxt_unit_run_ctx(nxt_unit_ctx_t *ctx)
4692{
4693 int rc;
4694 nxt_unit_impl_t *lib;
4695 nxt_unit_read_buf_t *rbuf;
4696 nxt_unit_ctx_impl_t *ctx_impl;
4697
4698 nxt_unit_ctx_use(ctx);
4699
4700 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4701 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4702
4703 rc = NXT_UNIT_OK;
4704
4705 while (nxt_fast_path(lib->online)) {
4706 rbuf = nxt_unit_read_buf_get(ctx);
4707 if (nxt_slow_path(rbuf == NULL)) {
4708 rc = NXT_UNIT_ERROR;
4709 break;
4710 }
4711
4712 retry:
4713
4714 rc = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
4715 if (rc == NXT_UNIT_AGAIN) {
4716 goto retry;
4717 }
4718
4719 rc = nxt_unit_process_msg(ctx, rbuf);
4720 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4721 break;
4722 }
4723
4724 rc = nxt_unit_process_pending_rbuf(ctx);
4725 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4726 break;
4727 }
4728
4729 nxt_unit_process_ready_req(ctx);
4730 }
4731
4732 nxt_unit_ctx_release(ctx);
4733
4734 return rc;
4735}
4736
4737
4738nxt_inline int
4739nxt_unit_is_read_queue(nxt_unit_read_buf_t *rbuf)
4740{
4741 nxt_port_msg_t *port_msg;
4742
4743 if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
4744 port_msg = (nxt_port_msg_t *) rbuf->buf;
4745
4746 return port_msg->type == _NXT_PORT_MSG_READ_QUEUE;
4747 }
4748
4749 return 0;
4750}
4751
4752
4753nxt_inline int
4754nxt_unit_is_read_socket(nxt_unit_read_buf_t *rbuf)
4755{
4756 if (nxt_fast_path(rbuf->size == 1)) {
4757 return rbuf->buf[0] == _NXT_PORT_MSG_READ_SOCKET;
4758 }
4759
4760 return 0;
4761}
4762
4763
4764nxt_inline int
4765nxt_unit_is_shm_ack(nxt_unit_read_buf_t *rbuf)
4766{
4767 nxt_port_msg_t *port_msg;
4768
4769 if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
4770 port_msg = (nxt_port_msg_t *) rbuf->buf;
4771
4772 return port_msg->type == _NXT_PORT_MSG_SHM_ACK;
4773 }
4774
4775 return 0;
4776}
4777
4778
4779nxt_inline int
4780nxt_unit_is_quit(nxt_unit_read_buf_t *rbuf)
4781{
4782 nxt_port_msg_t *port_msg;
4783
4784 if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
4785 port_msg = (nxt_port_msg_t *) rbuf->buf;
4786
4787 return port_msg->type == _NXT_PORT_MSG_QUIT;
4788 }
4789
4790 return 0;
4791}
4792
4793
4794int
4795nxt_unit_run_shared(nxt_unit_ctx_t *ctx)
4796{
4797 int rc;
4798 nxt_unit_impl_t *lib;
4799 nxt_unit_read_buf_t *rbuf;
4800
4801 nxt_unit_ctx_use(ctx);
4802
4803 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4804 rc = NXT_UNIT_OK;
4805
4806 while (nxt_fast_path(lib->online)) {
4807 rbuf = nxt_unit_read_buf_get(ctx);
4808 if (nxt_slow_path(rbuf == NULL)) {
4809 rc = NXT_UNIT_ERROR;
4810 break;
4811 }
4812
4813 retry:
4814
4815 rc = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
4816 if (rc == NXT_UNIT_AGAIN) {
4817 goto retry;
4818 }
4819
4820 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4821 nxt_unit_read_buf_release(ctx, rbuf);
4822 break;
4823 }
4824
4825 rc = nxt_unit_process_msg(ctx, rbuf);
4826 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4827 break;
4828 }
4829
4830 rc = nxt_unit_process_pending_rbuf(ctx);
4831 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4832 break;
4833 }
4834
4835 nxt_unit_process_ready_req(ctx);
4836 }
4837
4838 nxt_unit_ctx_release(ctx);
4839
4840 return rc;
4841}
4842
4843
4844int
4845nxt_unit_process_port_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
4846{
4847 int rc;
4848
4849 nxt_unit_ctx_use(ctx);
4850
4851 rc = nxt_unit_process_port_msg_impl(ctx, port);
4852
4853 nxt_unit_ctx_release(ctx);
4854
4855 return rc;
4856}
4857
4858
4859static int
4860nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
4861{
4862 int rc;
4863 nxt_unit_impl_t *lib;
4864 nxt_unit_read_buf_t *rbuf;
4865
4866 rbuf = nxt_unit_read_buf_get(ctx);
4867 if (nxt_slow_path(rbuf == NULL)) {
4868 return NXT_UNIT_ERROR;
4869 }
4870
4871 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4872
4873retry:
4874
4875 if (port == lib->shared_port) {
4876 rc = nxt_unit_shared_port_recv(ctx, port, rbuf);
4877
4878 } else {
4879 rc = nxt_unit_ctx_port_recv(ctx, port, rbuf);
4880 }
4881
4882 if (rc != NXT_UNIT_OK) {
4883 nxt_unit_read_buf_release(ctx, rbuf);
4884 return rc;
4885 }
4886
4887 rc = nxt_unit_process_msg(ctx, rbuf);
4888 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4889 return NXT_UNIT_ERROR;
4890 }
4891
4892 rc = nxt_unit_process_pending_rbuf(ctx);
4893 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4894 return NXT_UNIT_ERROR;
4895 }
4896
4897 nxt_unit_process_ready_req(ctx);
4898
4899 rbuf = nxt_unit_read_buf_get(ctx);
4900 if (nxt_slow_path(rbuf == NULL)) {
4901 return NXT_UNIT_ERROR;
4902 }
4903
4904 if (lib->online) {
4905 goto retry;
4906 }
4907
4908 return rc;
4909}
4910
4911
4912void
4913nxt_unit_done(nxt_unit_ctx_t *ctx)
4914{
4915 nxt_unit_ctx_release(ctx);
4916}
4917
4918
4919nxt_unit_ctx_t *
4920nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data)
4921{
4922 int rc, queue_fd;
4923 void *mem;
4924 nxt_unit_impl_t *lib;
4925 nxt_unit_port_t *port;
4926 nxt_unit_ctx_impl_t *new_ctx;
4927 nxt_unit_port_impl_t *port_impl;
4928
4929 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4930
4931 new_ctx = nxt_unit_malloc(ctx, sizeof(nxt_unit_ctx_impl_t)
4932 + lib->request_data_size);
4933 if (nxt_slow_path(new_ctx == NULL)) {
4934 nxt_unit_alert(ctx, "failed to allocate context");
4935
4936 return NULL;
4937 }
4938
4939 rc = nxt_unit_ctx_init(lib, new_ctx, data);
4940 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
4941 nxt_unit_free(ctx, new_ctx);
4942
4943 return NULL;
4944 }
4945
4946 queue_fd = -1;
4947
4948 port = nxt_unit_create_port(ctx);
4949 if (nxt_slow_path(port == NULL)) {
4950 goto fail;
4951 }
4952
4953 new_ctx->read_port = port;
4954
4955 queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t));
4956 if (nxt_slow_path(queue_fd == -1)) {
4957 goto fail;
4958 }
4959
4960 mem = mmap(NULL, sizeof(nxt_port_queue_t),
4961 PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0);
4962 if (nxt_slow_path(mem == MAP_FAILED)) {
4963 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd,
4964 strerror(errno), errno);
4965
4966 goto fail;
4967 }
4968
4969 nxt_port_queue_init(mem);
4970
4971 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
4972 port_impl->queue = mem;
4973
4974 rc = nxt_unit_send_port(ctx, lib->router_port, port, queue_fd);
4975 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
4976 goto fail;
4977 }
4978
4979 nxt_unit_close(queue_fd);
4980
4981 return &new_ctx->ctx;
4982
4983fail:
4984
4985 if (queue_fd != -1) {
4986 nxt_unit_close(queue_fd);
4987 }
4988
4989 nxt_unit_ctx_release(&new_ctx->ctx);
4990
4991 return NULL;
4992}
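
#if 0
/*
 * Illustrative sketch, not part of this file: a worker thread that serves
 * requests on its own context, as created by the function above.  The
 * main thread is assumed to pass its nxt_unit_ctx_t pointer as the thread
 * argument.
 */
static void *
app_worker_thread(void *main_ctx)
{
    nxt_unit_ctx_t  *ctx;

    ctx = nxt_unit_ctx_alloc(main_ctx, NULL);
    if (ctx == NULL) {
        return NULL;
    }

    (void) nxt_unit_run(ctx);

    nxt_unit_done(ctx);

    return NULL;
}
#endif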
4993
4994
4995static void
4996nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl)
4997{
4998 nxt_unit_impl_t *lib;
4999 nxt_unit_mmap_buf_t *mmap_buf;
5000 nxt_unit_request_info_impl_t *req_impl;
5001 nxt_unit_websocket_frame_impl_t *ws_impl;
5002
5003 lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit);
5004
5005 nxt_queue_each(req_impl, &ctx_impl->active_req,
5006 nxt_unit_request_info_impl_t, link)
5007 {
5008 nxt_unit_req_warn(&req_impl->req, "active request on ctx free");
5009
5010 nxt_unit_request_done(&req_impl->req, NXT_UNIT_ERROR);
5011
5012 } nxt_queue_loop;
5013
5014 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[0]);
5015 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[1]);
5016
5017 while (ctx_impl->free_buf != NULL) {
5018 mmap_buf = ctx_impl->free_buf;
5019 nxt_unit_mmap_buf_unlink(mmap_buf);
5020 nxt_unit_free(&ctx_impl->ctx, mmap_buf);
5021 }
5022
5023 nxt_queue_each(req_impl, &ctx_impl->free_req,
5024 nxt_unit_request_info_impl_t, link)
5025 {
5026 nxt_unit_request_info_free(req_impl);
5027
5028 } nxt_queue_loop;
5029
5030 nxt_queue_each(ws_impl, &ctx_impl->free_ws,
5031 nxt_unit_websocket_frame_impl_t, link)
5032 {
5033 nxt_unit_websocket_frame_free(&ctx_impl->ctx, ws_impl);
5034
5035 } nxt_queue_loop;
5036
5037 pthread_mutex_destroy(&ctx_impl->mutex);
5038
5039 nxt_queue_remove(&ctx_impl->link);
5040
5041 if (nxt_fast_path(ctx_impl->read_port != NULL)) {
5042 nxt_unit_remove_port(lib, &ctx_impl->read_port->id);
5043 nxt_unit_port_release(ctx_impl->read_port);
5044 }
5045
5046 if (ctx_impl != &lib->main_ctx) {
5047 nxt_unit_free(&lib->main_ctx.ctx, ctx_impl);
5048 }
5049
5050 nxt_unit_lib_release(lib);
5051}
5052
5053
5054/* Change "0 ||" to "0 &&" below to test SOCK_DGRAM on all platforms. */
5055#if (0 || NXT_HAVE_AF_UNIX_SOCK_SEQPACKET)
5056#define NXT_UNIX_SOCKET SOCK_SEQPACKET
5057#else
5058#define NXT_UNIX_SOCKET SOCK_DGRAM
5059#endif
5060
5061
5062void
5063nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id)
5064{
5065 nxt_unit_port_hash_id_t port_hash_id;
5066
5067 port_hash_id.pid = pid;
5068 port_hash_id.id = id;
5069
5070 port_id->pid = pid;
5071 port_id->hash = nxt_murmur_hash2(&port_hash_id, sizeof(port_hash_id));
5072 port_id->id = id;
5073}
5074
5075
5076static nxt_unit_port_t *
5077nxt_unit_create_port(nxt_unit_ctx_t *ctx)
5078{
5079 int rc, port_sockets[2];
5080 nxt_unit_impl_t *lib;
5081 nxt_unit_port_t new_port, *port;
5082 nxt_unit_process_t *process;
5083
5084 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5085
5086 rc = socketpair(AF_UNIX, NXT_UNIX_SOCKET, 0, port_sockets);
5087 if (nxt_slow_path(rc != 0)) {
5088 nxt_unit_warn(ctx, "create_port: socketpair() failed: %s (%d)",
5089 strerror(errno), errno);
5090
5091 return NULL;
5092 }
5093
5094 nxt_unit_debug(ctx, "create_port: new socketpair: %d->%d",
5095 port_sockets[0], port_sockets[1]);
5096
5097 pthread_mutex_lock(&lib->mutex);
5098
5099 process = nxt_unit_process_get(ctx, lib->pid);
5100 if (nxt_slow_path(process == NULL)) {
5101 pthread_mutex_unlock(&lib->mutex);
5102
5103 nxt_unit_close(port_sockets[0]);
5104 nxt_unit_close(port_sockets[1]);
5105
5106 return NULL;
5107 }
5108
5109 nxt_unit_port_id_init(&new_port.id, lib->pid, process->next_port_id++);
5110
5111 new_port.in_fd = port_sockets[0];
5112 new_port.out_fd = port_sockets[1];
5113 new_port.data = NULL;
5114
5115 pthread_mutex_unlock(&lib->mutex);
5116
5117 nxt_unit_process_release(process);
5118
5119 port = nxt_unit_add_port(ctx, &new_port, NULL);
5120 if (nxt_slow_path(port == NULL)) {
5121 nxt_unit_close(port_sockets[0]);
5122 nxt_unit_close(port_sockets[1]);
5123 }
5124
5125 return port;
5126}
5127
5128
5129static int
5130nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst,
5131 nxt_unit_port_t *port, int queue_fd)
5132{
5133 ssize_t res;
5134 nxt_unit_impl_t *lib;
5135 int fds[2] = { port->out_fd, queue_fd };
5136
5137 struct {
5138 nxt_port_msg_t msg;
5139 nxt_port_msg_new_port_t new_port;
5140 } m;
5141
5142 union {
5143 struct cmsghdr cm;
5144 char space[CMSG_SPACE(sizeof(int) * 2)];
5145 } cmsg;
5146
5147 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5148
5149 m.msg.stream = 0;
5150 m.msg.pid = lib->pid;
5151 m.msg.reply_port = 0;
5152 m.msg.type = _NXT_PORT_MSG_NEW_PORT;
5153 m.msg.last = 0;
5154 m.msg.mmap = 0;
5155 m.msg.nf = 0;
5156 m.msg.mf = 0;
5157 m.msg.tracking = 0;
5158
5159 m.new_port.id = port->id.id;
5160 m.new_port.pid = port->id.pid;
5161 m.new_port.type = NXT_PROCESS_APP;
5162 m.new_port.max_size = 16 * 1024;
5163 m.new_port.max_share = 64 * 1024;
5164
5165 memset(&cmsg, 0, sizeof(cmsg));
5166
5167 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int) * 2);
5168 cmsg.cm.cmsg_level = SOL_SOCKET;
5169 cmsg.cm.cmsg_type = SCM_RIGHTS;
5170
5171 /*
5172 * memcpy() is used instead of simple
5173 * *(int *) CMSG_DATA(&cmsg.cm) = fd;
5174 * because GCC 4.4 with -O2/3/s optimization may issue a warning:
5175 * dereferencing type-punned pointer will break strict-aliasing rules
5176 *
5177     * Fortunately, GCC with -O1 compiles this memcpy() into the same
5178     * simple assignment as in the code above.
5179 */
5180 memcpy(CMSG_DATA(&cmsg.cm), fds, sizeof(int) * 2);
5181
5182 res = nxt_unit_port_send(ctx, dst, &m, sizeof(m), &cmsg, sizeof(cmsg));
5183
5184 return (res == sizeof(m)) ? NXT_UNIT_OK : NXT_UNIT_ERROR;
5185}
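
#if 0
/*
 * Illustrative sketch, not part of this file: the receiving side of the
 * two-descriptor SCM_RIGHTS transfer performed above, using only standard
 * POSIX calls.  Error handling is reduced to the minimum.
 */
static int
example_recv_two_fds(int sock, void *buf, size_t size, int fds[2])
{
    ssize_t         res;
    struct iovec    iov;
    struct msghdr   msg;
    struct cmsghdr  *cm;

    union {
        struct cmsghdr  cm;
        char            space[CMSG_SPACE(sizeof(int) * 2)];
    } cmsg;

    iov.iov_base = buf;
    iov.iov_len = size;

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = &cmsg;
    msg.msg_controllen = sizeof(cmsg);

    res = recvmsg(sock, &msg, 0);
    if (res == -1) {
        return -1;
    }

    cm = CMSG_FIRSTHDR(&msg);

    if (cm != NULL
        && cm->cmsg_level == SOL_SOCKET
        && cm->cmsg_type == SCM_RIGHTS
        && cm->cmsg_len == CMSG_LEN(sizeof(int) * 2))
    {
        memcpy(fds, CMSG_DATA(cm), sizeof(int) * 2);
    }

    return (int) res;
}
#endif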
5186
5187
5188nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port)
5189{
5190 nxt_unit_port_impl_t *port_impl;
5191
5192 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5193
5194 nxt_atomic_fetch_add(&port_impl->use_count, 1);
5195}
5196
5197
5198nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port)
5199{
5200 long c;
5201 nxt_unit_port_impl_t *port_impl;
5202
5203 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5204
5205 c = nxt_atomic_fetch_add(&port_impl->use_count, -1);
5206
5207 if (c == 1) {
5208 nxt_unit_debug(NULL, "destroy port{%d,%d} in_fd %d out_fd %d",
5209 (int) port->id.pid, (int) port->id.id,
5210 port->in_fd, port->out_fd);
5211
5212 nxt_unit_process_release(port_impl->process);
5213
5214 if (port->in_fd != -1) {
5215 nxt_unit_close(port->in_fd);
5216
5217 port->in_fd = -1;
5218 }
5219
5220 if (port->out_fd != -1) {
5221 nxt_unit_close(port->out_fd);
5222
5223 port->out_fd = -1;
5224 }
5225
5226 if (port_impl->queue != NULL) {
5227 munmap(port_impl->queue, (port->id.id == (nxt_port_id_t) -1)
5228 ? sizeof(nxt_app_queue_t)
5229 : sizeof(nxt_port_queue_t));
5230 }
5231
5232 nxt_unit_free(NULL, port_impl);
5233 }
5234}
5235
5236
5237static nxt_unit_port_t *
5238nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *queue)
5239{
5240 int rc;
5241 nxt_queue_t awaiting_req;
5242 nxt_unit_impl_t *lib;
5243 nxt_unit_port_t *old_port;
5244 nxt_unit_process_t *process;
5245 nxt_unit_ctx_impl_t *ctx_impl;
5246 nxt_unit_port_impl_t *new_port, *old_port_impl;
5247 nxt_unit_request_info_impl_t *req_impl;
5248
5249 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5250
5251 pthread_mutex_lock(&lib->mutex);
5252
5253 old_port = nxt_unit_port_hash_find(&lib->ports, &port->id, 0);
5254
5255 if (nxt_slow_path(old_port != NULL)) {
5256 nxt_unit_debug(ctx, "add_port: duplicate port{%d,%d} "
5257 "in_fd %d out_fd %d queue %p",
5258 port->id.pid, port->id.id,
5259 port->in_fd, port->out_fd, queue);
5260
5261 if (old_port->data == NULL) {
5262 old_port->data = port->data;
5263 port->data = NULL;
5264 }
5265
5266 if (old_port->in_fd == -1) {
5267 old_port->in_fd = port->in_fd;
5268 port->in_fd = -1;
5269 }
5270
5271 if (port->in_fd != -1) {
5272 nxt_unit_close(port->in_fd);
5273 port->in_fd = -1;
5274 }
5275
5276 if (old_port->out_fd == -1) {
5277 old_port->out_fd = port->out_fd;
5278 port->out_fd = -1;
5279 }
5280
5281 if (port->out_fd != -1) {
5282 nxt_unit_close(port->out_fd);
5283 port->out_fd = -1;
5284 }
5285
5286 *port = *old_port;
5287
5288 nxt_queue_init(&awaiting_req);
5289
5290 old_port_impl = nxt_container_of(old_port, nxt_unit_port_impl_t, port);
5291
5292 if (old_port_impl->queue == NULL) {
5293 old_port_impl->queue = queue;
5294 }
5295
5296 if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) {
5297 nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req);
5298 nxt_queue_init(&old_port_impl->awaiting_req);
5299 }
5300
5301 old_port_impl->ready = (port->in_fd != -1 || port->out_fd != -1);
5302
5303 pthread_mutex_unlock(&lib->mutex);
5304
5305 if (lib->callbacks.add_port != NULL
5306 && (port->in_fd != -1 || port->out_fd != -1))
5307 {
5308 lib->callbacks.add_port(ctx, old_port);
5309 }
5310
5311 nxt_queue_each(req_impl, &awaiting_req,
5312 nxt_unit_request_info_impl_t, port_wait_link)
5313 {
5314 nxt_queue_remove(&req_impl->port_wait_link);
5315
5316 ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t,
5317 ctx);
5318
5319 pthread_mutex_lock(&ctx_impl->mutex);
5320
5321 nxt_queue_insert_tail(&ctx_impl->ready_req,
5322 &req_impl->port_wait_link);
5323
5324 pthread_mutex_unlock(&ctx_impl->mutex);
5325
5326 nxt_atomic_fetch_add(&ctx_impl->wait_items, -1);
5327
5328 } nxt_queue_loop;
5329
5330 return old_port;
5331 }
5332
5333 new_port = NULL;
5334
5335 nxt_unit_debug(ctx, "add_port: port{%d,%d} in_fd %d out_fd %d queue %p",
5336 port->id.pid, port->id.id,
5337 port->in_fd, port->out_fd, queue);
5338
5339 process = nxt_unit_process_get(ctx, port->id.pid);
5340 if (nxt_slow_path(process == NULL)) {
5341 goto unlock;
5342 }
5343
5344 if (port->id.id >= process->next_port_id) {
5345 process->next_port_id = port->id.id + 1;
5346 }
5347
5348 new_port = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
5349 if (nxt_slow_path(new_port == NULL)) {
5350 nxt_unit_alert(ctx, "add_port: %d,%d malloc() failed",
5351 port->id.pid, port->id.id);
5352
5353 goto unlock;
5354 }
5355
5356 new_port->port = *port;
5357
5358 rc = nxt_unit_port_hash_add(&lib->ports, &new_port->port);
5359 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
5360 nxt_unit_alert(ctx, "add_port: %d,%d hash_add failed",
5361 port->id.pid, port->id.id);
5362
5363 nxt_unit_free(ctx, new_port);
5364
5365 new_port = NULL;
5366
5367 goto unlock;
5368 }
5369
5370 nxt_queue_insert_tail(&process->ports, &new_port->link);
5371
5372 new_port->use_count = 2;
5373 new_port->process = process;
5374 new_port->ready = (port->in_fd != -1 || port->out_fd != -1);
5375 new_port->queue = queue;
5376 new_port->from_socket = 0;
5377 new_port->socket_rbuf = NULL;
5378
5379 nxt_queue_init(&new_port->awaiting_req);
5380
5381 process = NULL;
5382
5383unlock:
5384
5385 pthread_mutex_unlock(&lib->mutex);
5386
5387 if (nxt_slow_path(process != NULL)) {
5388 nxt_unit_process_release(process);
5389 }
5390
5391 if (lib->callbacks.add_port != NULL
5392 && new_port != NULL
5393 && (port->in_fd != -1 || port->out_fd != -1))
5394 {
5395 lib->callbacks.add_port(ctx, &new_port->port);
5396 }
5397
5398 return &new_port->port;
5399}
5400
5401
5402static void
5403nxt_unit_remove_port(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id)
5404{
5405 nxt_unit_port_t *port;
5406 nxt_unit_port_impl_t *port_impl;
5407
5408 pthread_mutex_lock(&lib->mutex);
5409
5410 port = nxt_unit_remove_port_unsafe(lib, port_id);
5411
5412 if (nxt_fast_path(port != NULL)) {
5413 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5414
5415 nxt_queue_remove(&port_impl->link);
5416 }
5417
5418 pthread_mutex_unlock(&lib->mutex);
5419
5420 if (lib->callbacks.remove_port != NULL && port != NULL) {
5421 lib->callbacks.remove_port(&lib->unit, port);
5422 }
5423
5424 if (nxt_fast_path(port != NULL)) {
5425 nxt_unit_port_release(port);
5426 }
5427}
5428
5429
5430static nxt_unit_port_t *
5431nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id)
5432{
5433 nxt_unit_port_t *port;
5434
5435 port = nxt_unit_port_hash_find(&lib->ports, port_id, 1);
5436 if (nxt_slow_path(port == NULL)) {
5437 nxt_unit_debug(NULL, "remove_port: port{%d,%d} not found",
5438 (int) port_id->pid, (int) port_id->id);
5439
5440 return NULL;
5441 }
5442
5443 nxt_unit_debug(NULL, "remove_port: port{%d,%d}, fds %d,%d, data %p",
5444 (int) port_id->pid, (int) port_id->id,
5445 port->in_fd, port->out_fd, port->data);
5446
5447 return port;
5448}
5449
5450
5451static void
5452nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid)
5453{
5454 nxt_unit_process_t *process;
5455
5456 pthread_mutex_lock(&lib->mutex);
5457
5458 process = nxt_unit_process_find(lib, pid, 1);
5459 if (nxt_slow_path(process == NULL)) {
5460 nxt_unit_debug(NULL, "remove_pid: process %d not found", (int) pid);
5461
5462 pthread_mutex_unlock(&lib->mutex);
5463
5464 return;
5465 }
5466
5467 nxt_unit_remove_process(lib, process);
5468
5469 if (lib->callbacks.remove_pid != NULL) {
5470 lib->callbacks.remove_pid(&lib->unit, pid);
5471 }
5472}
5473
5474
5475static void
5476nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process)
5477{
5478 nxt_queue_t ports;
5479 nxt_unit_port_impl_t *port;
5480
5481 nxt_queue_init(&ports);
5482
5483 nxt_queue_add(&ports, &process->ports);
5484
5485 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {
5486
5487 nxt_unit_remove_port_unsafe(lib, &port->port.id);
5488
5489 } nxt_queue_loop;
5490
5491 pthread_mutex_unlock(&lib->mutex);
5492
5493 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {
5494
5495 nxt_queue_remove(&port->link);
5496
5497 if (lib->callbacks.remove_port != NULL) {
5498 lib->callbacks.remove_port(&lib->unit, &port->port);
5499 }
5500
5501 nxt_unit_port_release(&port->port);
5502
5503 } nxt_queue_loop;
5504
5505 nxt_unit_process_release(process);
5506}
5507
5508
5509static void
5510nxt_unit_quit(nxt_unit_ctx_t *ctx)
5511{
5512 nxt_unit_impl_t *lib;
5513
5514 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5515
5516 if (lib->online) {
5517 lib->online = 0;
5518
5519 if (lib->callbacks.quit != NULL) {
5520 lib->callbacks.quit(ctx);
5521 }
5522 }
5523}
5524
5525
5526static int
5527nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id)
5528{
5529 ssize_t res;
5530 nxt_unit_impl_t *lib;
5531 nxt_unit_ctx_impl_t *ctx_impl;
5532
5533 struct {
5534 nxt_port_msg_t msg;
5535 nxt_port_msg_get_port_t get_port;
5536 } m;
5537
5538 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5539 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
5540
5541 memset(&m.msg, 0, sizeof(nxt_port_msg_t));
5542
5543 m.msg.pid = lib->pid;
5544 m.msg.reply_port = ctx_impl->read_port->id.id;
5545 m.msg.type = _NXT_PORT_MSG_GET_PORT;
5546
5547 m.get_port.id = port_id->id;
5548 m.get_port.pid = port_id->pid;
5549
5550 nxt_unit_debug(ctx, "get_port: %d %d", (int) port_id->pid,
5551 (int) port_id->id);
5552
5553 res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL, 0);
5554 if (nxt_slow_path(res != sizeof(m))) {
5555 return NXT_UNIT_ERROR;
5556 }
5557
5558 return NXT_UNIT_OK;
5559}
5560
5561
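/*
 * Outgoing messages take one of three paths: small messages without
 * ancillary data go through the port's shared memory queue (with a
 * READ_QUEUE stub sent over the socket when the peer needs waking);
 * larger or descriptor-carrying messages go through the socket, preceded
 * by a one-byte READ_SOCKET marker in the queue to preserve ordering;
 * ports without a queue use the socket (or the port_send callback)
 * directly.
 */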
5562static ssize_t
5563nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
5564 const void *buf, size_t buf_size, const void *oob, size_t oob_size)
5565{
5566 int notify;
5567 ssize_t ret;
5568 nxt_int_t rc;
5569 nxt_port_msg_t msg;
5570 nxt_unit_impl_t *lib;
5571 nxt_unit_port_impl_t *port_impl;
5572
5573 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5574
5575 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5576 if (port_impl->queue != NULL && oob_size == 0
5577 && buf_size <= NXT_PORT_QUEUE_MSG_SIZE)
5578 {
5579 rc = nxt_port_queue_send(port_impl->queue, buf, buf_size, &notify);
5580 if (nxt_slow_path(rc != NXT_OK)) {
5581 nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow",
5582 (int) port->id.pid, (int) port->id.id);
5583
5584 return -1;
5585 }
5586
5587 nxt_unit_debug(ctx, "port{%d,%d} enqueue %d notify %d",
5588 (int) port->id.pid, (int) port->id.id,
5589 (int) buf_size, notify);
5590
5591 if (notify) {
5592 memcpy(&msg, buf, sizeof(nxt_port_msg_t));
5593
5594 msg.type = _NXT_PORT_MSG_READ_QUEUE;
5595
5596 if (lib->callbacks.port_send == NULL) {
5597 ret = nxt_unit_sendmsg(ctx, port->out_fd, &msg,
5598 sizeof(nxt_port_msg_t), NULL, 0);
5599
5600 nxt_unit_debug(ctx, "port{%d,%d} send %d read_queue",
5601 (int) port->id.pid, (int) port->id.id,
5602 (int) ret);
5603
5604 } else {
5605 ret = lib->callbacks.port_send(ctx, port, &msg,
5606 sizeof(nxt_port_msg_t), NULL, 0);
5607
5608 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d read_queue",
5609 (int) port->id.pid, (int) port->id.id,
5610 (int) ret);
5611 }
5612
5613 }
5614
5615 return buf_size;
5616 }
5617
5618 if (port_impl->queue != NULL) {
5619 msg.type = _NXT_PORT_MSG_READ_SOCKET;
5620
5621 rc = nxt_port_queue_send(port_impl->queue, &msg.type, 1, &notify);
5622 if (nxt_slow_path(rc != NXT_OK)) {
5623 nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow",
5624 (int) port->id.pid, (int) port->id.id);
5625
5626 return -1;
5627 }
5628
5629 nxt_unit_debug(ctx, "port{%d,%d} enqueue 1 read_socket notify %d",
5630 (int) port->id.pid, (int) port->id.id, notify);
5631 }
5632
5633 if (lib->callbacks.port_send != NULL) {
5634 ret = lib->callbacks.port_send(ctx, port, buf, buf_size,
5635 oob, oob_size);
5636
5637 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d",
5638 (int) port->id.pid, (int) port->id.id,
5639 (int) ret);
5640
5641 } else {
5642 ret = nxt_unit_sendmsg(ctx, port->out_fd, buf, buf_size,
5643 oob, oob_size);
5644
5645 nxt_unit_debug(ctx, "port{%d,%d} sendmsg %d",
5646 (int) port->id.pid, (int) port->id.id,
5647 (int) ret);
5648 }
5649
5650 return ret;
5651}
5652
5653
5654static ssize_t
5655nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd,
5656 const void *buf, size_t buf_size, const void *oob, size_t oob_size)
5657{
5658 int err;
5659 ssize_t res;
5660 struct iovec iov[1];
5661 struct msghdr msg;
5662
5663 iov[0].iov_base = (void *) buf;
5664 iov[0].iov_len = buf_size;
5665
5666 msg.msg_name = NULL;
5667 msg.msg_namelen = 0;
5668 msg.msg_iov = iov;
5669 msg.msg_iovlen = 1;
5670 msg.msg_flags = 0;
5671 msg.msg_control = (void *) oob;
5672 msg.msg_controllen = oob_size;
5673
5674retry:
5675
5676 res = sendmsg(fd, &msg, 0);
5677
5678 if (nxt_slow_path(res == -1)) {
5679 err = errno;
5680
5681 if (err == EINTR) {
5682 goto retry;
5683 }
5684
5685 /*
5686         * FIXME: This should become "alert" once router graceful
5687         * shutdown is implemented.
5688 */
5689 nxt_unit_warn(ctx, "sendmsg(%d, %d) failed: %s (%d)",
5690 fd, (int) buf_size, strerror(err), err);
5691
5692 } else {
5693 nxt_unit_debug(ctx, "sendmsg(%d, %d): %d", fd, (int) buf_size,
5694 (int) res);
5695 }
5696
5697 return res;
5698}
5699
5700
5701static int
5702nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
5703 nxt_unit_read_buf_t *rbuf)
5704{
5705 int res, read;
5706 nxt_unit_port_impl_t *port_impl;
5707
5708 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5709
5710 read = 0;
5711
5712retry:
5713
5714 if (port_impl->from_socket > 0) {
5715 if (port_impl->socket_rbuf != NULL
5716 && port_impl->socket_rbuf->size > 0)
5717 {
5718 port_impl->from_socket--;
5719
5720 nxt_unit_rbuf_cpy(rbuf, port_impl->socket_rbuf);
5721 port_impl->socket_rbuf->size = 0;
5722
5723 nxt_unit_debug(ctx, "port{%d,%d} use suspended message %d",
5724 (int) port->id.pid, (int) port->id.id,
5725 (int) rbuf->size);
5726
5727 return NXT_UNIT_OK;
5728 }
5729
5730 } else {
5731 res = nxt_unit_port_queue_recv(port, rbuf);
5732
5733 if (res == NXT_UNIT_OK) {
5734 if (nxt_unit_is_read_socket(rbuf)) {
5735 port_impl->from_socket++;
5736
5737 nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d",
5738 (int) port->id.pid, (int) port->id.id,
5739 port_impl->from_socket);
5740
5741 goto retry;
5742 }
5743
5744 nxt_unit_debug(ctx, "port{%d,%d} dequeue %d",
5745 (int) port->id.pid, (int) port->id.id,
5746 (int) rbuf->size);
5747
5748 return NXT_UNIT_OK;
5749 }
5750 }
5751
5752 if (read) {
5753 return NXT_UNIT_AGAIN;
5754 }
5755
5756 res = nxt_unit_port_recv(ctx, port, rbuf);
5757 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
5758 return NXT_UNIT_ERROR;
5759 }
5760
5761 read = 1;
5762
5763 if (nxt_unit_is_read_queue(rbuf)) {
5764 nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue",
5765 (int) port->id.pid, (int) port->id.id, (int) rbuf->size);
5766
1
2/*
3 * Copyright (C) NGINX, Inc.
4 */
5
6#include <stdlib.h>
7
8#include "nxt_main.h"
9#include "nxt_port_memory_int.h"
10#include "nxt_port_queue.h"
11#include "nxt_app_queue.h"
12
13#include "nxt_unit.h"
14#include "nxt_unit_request.h"
15#include "nxt_unit_response.h"
16#include "nxt_unit_websocket.h"
17
18#include "nxt_websocket.h"
19
20#if (NXT_HAVE_MEMFD_CREATE)
21#include <linux/memfd.h>
22#endif
23
24#define NXT_UNIT_MAX_PLAIN_SIZE 1024
25#define NXT_UNIT_LOCAL_BUF_SIZE \
26 (NXT_UNIT_MAX_PLAIN_SIZE + sizeof(nxt_port_msg_t))
27
28typedef struct nxt_unit_impl_s nxt_unit_impl_t;
29typedef struct nxt_unit_mmap_s nxt_unit_mmap_t;
30typedef struct nxt_unit_mmaps_s nxt_unit_mmaps_t;
31typedef struct nxt_unit_process_s nxt_unit_process_t;
32typedef struct nxt_unit_mmap_buf_s nxt_unit_mmap_buf_t;
33typedef struct nxt_unit_recv_msg_s nxt_unit_recv_msg_t;
34typedef struct nxt_unit_read_buf_s nxt_unit_read_buf_t;
35typedef struct nxt_unit_ctx_impl_s nxt_unit_ctx_impl_t;
36typedef struct nxt_unit_port_impl_s nxt_unit_port_impl_t;
37typedef struct nxt_unit_request_info_impl_s nxt_unit_request_info_impl_t;
38typedef struct nxt_unit_websocket_frame_impl_s nxt_unit_websocket_frame_impl_t;
39
40static nxt_unit_impl_t *nxt_unit_create(nxt_unit_init_t *init);
41static int nxt_unit_ctx_init(nxt_unit_impl_t *lib,
42 nxt_unit_ctx_impl_t *ctx_impl, void *data);
43nxt_inline void nxt_unit_ctx_use(nxt_unit_ctx_t *ctx);
44nxt_inline void nxt_unit_ctx_release(nxt_unit_ctx_t *ctx);
45nxt_inline void nxt_unit_lib_use(nxt_unit_impl_t *lib);
46nxt_inline void nxt_unit_lib_release(nxt_unit_impl_t *lib);
47nxt_inline void nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head,
48 nxt_unit_mmap_buf_t *mmap_buf);
49nxt_inline void nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev,
50 nxt_unit_mmap_buf_t *mmap_buf);
51nxt_inline void nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf);
52static int nxt_unit_read_env(nxt_unit_port_t *ready_port,
53 nxt_unit_port_t *router_port, nxt_unit_port_t *read_port,
54 int *log_fd, uint32_t *stream, uint32_t *shm_limit);
55static int nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream,
56 int queue_fd);
57static int nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf);
58static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx,
59 nxt_unit_recv_msg_t *recv_msg);
60static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx,
61 nxt_unit_recv_msg_t *recv_msg);
62static int nxt_unit_process_req_body(nxt_unit_ctx_t *ctx,
63 nxt_unit_recv_msg_t *recv_msg);
64static int nxt_unit_request_check_response_port(nxt_unit_request_info_t *req,
65 nxt_unit_port_id_t *port_id);
66static int nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req);
67static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx,
68 nxt_unit_recv_msg_t *recv_msg);
69static int nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx);
70static nxt_unit_request_info_impl_t *nxt_unit_request_info_get(
71 nxt_unit_ctx_t *ctx);
72static void nxt_unit_request_info_release(nxt_unit_request_info_t *req);
73static void nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req);
74static nxt_unit_websocket_frame_impl_t *nxt_unit_websocket_frame_get(
75 nxt_unit_ctx_t *ctx);
76static void nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws);
77static void nxt_unit_websocket_frame_free(nxt_unit_ctx_t *ctx,
78 nxt_unit_websocket_frame_impl_t *ws);
79static nxt_unit_mmap_buf_t *nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx);
80static void nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf);
81static int nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req,
82 nxt_unit_mmap_buf_t *mmap_buf, int last);
83static void nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf);
84static void nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf);
85static nxt_unit_read_buf_t *nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx);
86static nxt_unit_read_buf_t *nxt_unit_read_buf_get_impl(
87 nxt_unit_ctx_impl_t *ctx_impl);
88static void nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
89 nxt_unit_read_buf_t *rbuf);
90static nxt_unit_mmap_buf_t *nxt_unit_request_preread(
91 nxt_unit_request_info_t *req, size_t size);
92static ssize_t nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst,
93 size_t size);
94static nxt_port_mmap_header_t *nxt_unit_mmap_get(nxt_unit_ctx_t *ctx,
95 nxt_unit_port_t *port, nxt_chunk_id_t *c, int *n, int min_n);
96static int nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port);
97static int nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx);
98static nxt_unit_mmap_t *nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i);
99static nxt_port_mmap_header_t *nxt_unit_new_mmap(nxt_unit_ctx_t *ctx,
100 nxt_unit_port_t *port, int n);
101static int nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size);
102static int nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
103 int fd);
104static int nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx,
105 nxt_unit_port_t *port, uint32_t size,
106 uint32_t min_size, nxt_unit_mmap_buf_t *mmap_buf, char *local_buf);
107static int nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd);
108
109static void nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps);
110nxt_inline void nxt_unit_process_use(nxt_unit_process_t *process);
111nxt_inline void nxt_unit_process_release(nxt_unit_process_t *process);
112static void nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps);
113static int nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx,
114 nxt_unit_mmaps_t *mmaps, pid_t pid, uint32_t id,
115 nxt_port_mmap_header_t **hdr, nxt_unit_read_buf_t *rbuf);
116static int nxt_unit_mmap_read(nxt_unit_ctx_t *ctx,
117 nxt_unit_recv_msg_t *recv_msg, nxt_unit_read_buf_t *rbuf);
118static int nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id);
119static void nxt_unit_mmap_release(nxt_unit_ctx_t *ctx,
120 nxt_port_mmap_header_t *hdr, void *start, uint32_t size);
121static int nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid);
122
123static nxt_unit_process_t *nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid);
124static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_impl_t *lib,
125 pid_t pid, int remove);
126static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib);
127static int nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx);
128static int nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf);
129static int nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx);
130static void nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx);
131nxt_inline int nxt_unit_is_read_queue(nxt_unit_read_buf_t *rbuf);
132nxt_inline int nxt_unit_is_read_socket(nxt_unit_read_buf_t *rbuf);
133nxt_inline int nxt_unit_is_shm_ack(nxt_unit_read_buf_t *rbuf);
134nxt_inline int nxt_unit_is_quit(nxt_unit_read_buf_t *rbuf);
135static int nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx,
136 nxt_unit_port_t *port);
137static void nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl);
138static nxt_unit_port_t *nxt_unit_create_port(nxt_unit_ctx_t *ctx);
139
140static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst,
141 nxt_unit_port_t *port, int queue_fd);
142
143nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port);
144nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port);
145static nxt_unit_port_t *nxt_unit_add_port(nxt_unit_ctx_t *ctx,
146 nxt_unit_port_t *port, void *queue);
147static void nxt_unit_remove_port(nxt_unit_impl_t *lib,
148 nxt_unit_port_id_t *port_id);
149static nxt_unit_port_t *nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib,
150 nxt_unit_port_id_t *port_id);
151static void nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid);
152static void nxt_unit_remove_process(nxt_unit_impl_t *lib,
153 nxt_unit_process_t *process);
154static void nxt_unit_quit(nxt_unit_ctx_t *ctx);
155static int nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id);
156static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx,
157 nxt_unit_port_t *port, const void *buf, size_t buf_size,
158 const void *oob, size_t oob_size);
159static ssize_t nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd,
160 const void *buf, size_t buf_size, const void *oob, size_t oob_size);
161static int nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
162 nxt_unit_read_buf_t *rbuf);
163nxt_inline void nxt_unit_rbuf_cpy(nxt_unit_read_buf_t *dst,
164 nxt_unit_read_buf_t *src);
165static int nxt_unit_shared_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
166 nxt_unit_read_buf_t *rbuf);
167static int nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
168 nxt_unit_read_buf_t *rbuf);
169static int nxt_unit_port_queue_recv(nxt_unit_port_t *port,
170 nxt_unit_read_buf_t *rbuf);
171static int nxt_unit_app_queue_recv(nxt_unit_port_t *port,
172 nxt_unit_read_buf_t *rbuf);
173nxt_inline int nxt_unit_close(int fd);
174static int nxt_unit_fd_blocking(int fd);
175
176static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash,
177 nxt_unit_port_t *port);
178static nxt_unit_port_t *nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash,
179 nxt_unit_port_id_t *port_id, int remove);
180
181static int nxt_unit_request_hash_add(nxt_unit_ctx_t *ctx,
182 nxt_unit_request_info_t *req);
183static nxt_unit_request_info_t *nxt_unit_request_hash_find(
184 nxt_unit_ctx_t *ctx, uint32_t stream, int remove);
185
186static char * nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level);
187static void *nxt_unit_lvlhsh_alloc(void *data, size_t size);
188static void nxt_unit_lvlhsh_free(void *data, void *p);
189static int nxt_unit_memcasecmp(const void *p1, const void *p2, size_t length);
190
191
192struct nxt_unit_mmap_buf_s {
193 nxt_unit_buf_t buf;
194
195 nxt_unit_mmap_buf_t *next;
196 nxt_unit_mmap_buf_t **prev;
197
198 nxt_port_mmap_header_t *hdr;
199 nxt_unit_request_info_t *req;
200 nxt_unit_ctx_impl_t *ctx_impl;
201 char *free_ptr;
202 char *plain_ptr;
203};
204
205
206struct nxt_unit_recv_msg_s {
207 uint32_t stream;
208 nxt_pid_t pid;
209 nxt_port_id_t reply_port;
210
211 uint8_t last; /* 1 bit */
212 uint8_t mmap; /* 1 bit */
213
214 void *start;
215 uint32_t size;
216
217 int fd[2];
218
219 nxt_unit_mmap_buf_t *incoming_buf;
220};
221
222
223typedef enum {
224 NXT_UNIT_RS_START = 0,
225 NXT_UNIT_RS_RESPONSE_INIT,
226 NXT_UNIT_RS_RESPONSE_HAS_CONTENT,
227 NXT_UNIT_RS_RESPONSE_SENT,
228 NXT_UNIT_RS_RELEASED,
229} nxt_unit_req_state_t;
230
231
232struct nxt_unit_request_info_impl_s {
233 nxt_unit_request_info_t req;
234
235 uint32_t stream;
236
237 nxt_unit_mmap_buf_t *outgoing_buf;
238 nxt_unit_mmap_buf_t *incoming_buf;
239
240 nxt_unit_req_state_t state;
241 uint8_t websocket;
242 uint8_t in_hash;
243
244 /* for nxt_unit_ctx_impl_t.free_req or active_req */
245 nxt_queue_link_t link;
246 /* for nxt_unit_port_impl_t.awaiting_req */
247 nxt_queue_link_t port_wait_link;
248
249 char extra_data[];
250};
251
252
253struct nxt_unit_websocket_frame_impl_s {
254 nxt_unit_websocket_frame_t ws;
255
256 nxt_unit_mmap_buf_t *buf;
257
258 nxt_queue_link_t link;
259
260 nxt_unit_ctx_impl_t *ctx_impl;
261};
262
263
264struct nxt_unit_read_buf_s {
265 nxt_queue_link_t link;
266 nxt_unit_ctx_impl_t *ctx_impl;
267 ssize_t size;
268 char buf[16384];
269 char oob[256];
270};
271
272
273struct nxt_unit_ctx_impl_s {
274 nxt_unit_ctx_t ctx;
275
276 nxt_atomic_t use_count;
277 nxt_atomic_t wait_items;
278
279 pthread_mutex_t mutex;
280
281 nxt_unit_port_t *read_port;
282
283 nxt_queue_link_t link;
284
285 nxt_unit_mmap_buf_t *free_buf;
286
287 /* of nxt_unit_request_info_impl_t */
288 nxt_queue_t free_req;
289
290 /* of nxt_unit_websocket_frame_impl_t */
291 nxt_queue_t free_ws;
292
293 /* of nxt_unit_request_info_impl_t */
294 nxt_queue_t active_req;
295
296 /* of nxt_unit_request_info_impl_t */
297 nxt_lvlhsh_t requests;
298
299 /* of nxt_unit_request_info_impl_t */
300 nxt_queue_t ready_req;
301
302 /* of nxt_unit_read_buf_t */
303 nxt_queue_t pending_rbuf;
304
305 /* of nxt_unit_read_buf_t */
306 nxt_queue_t free_rbuf;
307
308 nxt_unit_mmap_buf_t ctx_buf[2];
309 nxt_unit_read_buf_t ctx_read_buf;
310
311 nxt_unit_request_info_impl_t req;
312};
313
314
315struct nxt_unit_mmap_s {
316 nxt_port_mmap_header_t *hdr;
317
318 /* of nxt_unit_read_buf_t */
319 nxt_queue_t awaiting_rbuf;
320};
321
322
323struct nxt_unit_mmaps_s {
324 pthread_mutex_t mutex;
325 uint32_t size;
326 uint32_t cap;
327 nxt_atomic_t allocated_chunks;
328 nxt_unit_mmap_t *elts;
329};
330
331
332struct nxt_unit_impl_s {
333 nxt_unit_t unit;
334 nxt_unit_callbacks_t callbacks;
335
336 nxt_atomic_t use_count;
337
338 uint32_t request_data_size;
339 uint32_t shm_mmap_limit;
340
341 pthread_mutex_t mutex;
342
343 nxt_lvlhsh_t processes; /* of nxt_unit_process_t */
344 nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */
345
346 nxt_unit_port_t *router_port;
347 nxt_unit_port_t *shared_port;
348
349 nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */
350
351 nxt_unit_mmaps_t incoming;
352 nxt_unit_mmaps_t outgoing;
353
354 pid_t pid;
355 int log_fd;
356 int online;
357
358 nxt_unit_ctx_impl_t main_ctx;
359};
360
361
362struct nxt_unit_port_impl_s {
363 nxt_unit_port_t port;
364
365 nxt_atomic_t use_count;
366
367 /* for nxt_unit_process_t.ports */
368 nxt_queue_link_t link;
369 nxt_unit_process_t *process;
370
371 /* of nxt_unit_request_info_impl_t */
372 nxt_queue_t awaiting_req;
373
374 int ready;
375
376 void *queue;
377
378 int from_socket;
379 nxt_unit_read_buf_t *socket_rbuf;
380};
381
382
383struct nxt_unit_process_s {
384 pid_t pid;
385
386 nxt_queue_t ports; /* of nxt_unit_port_impl_t */
387
388 nxt_unit_impl_t *lib;
389
390 nxt_atomic_t use_count;
391
392 uint32_t next_port_id;
393};
394
395
396/* Explicitly use 32-bit types to avoid possible alignment padding. */
397typedef struct {
398 int32_t pid;
399 uint32_t id;
400} nxt_unit_port_hash_id_t;
401
402
403nxt_unit_ctx_t *
404nxt_unit_init(nxt_unit_init_t *init)
405{
406 int rc, queue_fd;
407 void *mem;
408 uint32_t ready_stream, shm_limit;
409 nxt_unit_ctx_t *ctx;
410 nxt_unit_impl_t *lib;
411 nxt_unit_port_t ready_port, router_port, read_port;
412
413 lib = nxt_unit_create(init);
414 if (nxt_slow_path(lib == NULL)) {
415 return NULL;
416 }
417
418 queue_fd = -1;
419 mem = MAP_FAILED;
420
421 if (init->ready_port.id.pid != 0
422 && init->ready_stream != 0
423 && init->read_port.id.pid != 0)
424 {
425 ready_port = init->ready_port;
426 ready_stream = init->ready_stream;
427 router_port = init->router_port;
428 read_port = init->read_port;
429 lib->log_fd = init->log_fd;
430
431 nxt_unit_port_id_init(&ready_port.id, ready_port.id.pid,
432 ready_port.id.id);
433 nxt_unit_port_id_init(&router_port.id, router_port.id.pid,
434 router_port.id.id);
435 nxt_unit_port_id_init(&read_port.id, read_port.id.pid,
436 read_port.id.id);
437
438 } else {
439 rc = nxt_unit_read_env(&ready_port, &router_port, &read_port,
440 &lib->log_fd, &ready_stream, &shm_limit);
441 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
442 goto fail;
443 }
444
445 lib->shm_mmap_limit = (shm_limit + PORT_MMAP_DATA_SIZE - 1)
446 / PORT_MMAP_DATA_SIZE;
447 }
448
449 if (nxt_slow_path(lib->shm_mmap_limit < 1)) {
450 lib->shm_mmap_limit = 1;
451 }
452
453 lib->pid = read_port.id.pid;
454
455 ctx = &lib->main_ctx.ctx;
456
457 rc = nxt_unit_fd_blocking(router_port.out_fd);
458 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
459 goto fail;
460 }
461
462 lib->router_port = nxt_unit_add_port(ctx, &router_port, NULL);
463 if (nxt_slow_path(lib->router_port == NULL)) {
464 nxt_unit_alert(NULL, "failed to add router_port");
465
466 goto fail;
467 }
468
469 queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t));
470 if (nxt_slow_path(queue_fd == -1)) {
471 goto fail;
472 }
473
474 mem = mmap(NULL, sizeof(nxt_port_queue_t),
475 PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0);
476 if (nxt_slow_path(mem == MAP_FAILED)) {
477 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd,
478 strerror(errno), errno);
479
480 goto fail;
481 }
482
483 nxt_port_queue_init(mem);
484
485 rc = nxt_unit_fd_blocking(read_port.in_fd);
486 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
487 goto fail;
488 }
489
490 lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port, mem);
491 if (nxt_slow_path(lib->main_ctx.read_port == NULL)) {
492 nxt_unit_alert(NULL, "failed to add read_port");
493
494 goto fail;
495 }
496
497 rc = nxt_unit_fd_blocking(ready_port.out_fd);
498 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
499 goto fail;
500 }
501
502 rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream, queue_fd);
503 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
504 nxt_unit_alert(NULL, "failed to send READY message");
505
506 goto fail;
507 }
508
509 nxt_unit_close(ready_port.out_fd);
510 nxt_unit_close(queue_fd);
511
512 return ctx;
513
514fail:
515
516 if (mem != MAP_FAILED) {
517 munmap(mem, sizeof(nxt_port_queue_t));
518 }
519
520 if (queue_fd != -1) {
521 nxt_unit_close(queue_fd);
522 }
523
524 nxt_unit_ctx_release(&lib->main_ctx.ctx);
525
526 return NULL;
527}
528
529
530static nxt_unit_impl_t *
531nxt_unit_create(nxt_unit_init_t *init)
532{
533 int rc;
534 nxt_unit_impl_t *lib;
535 nxt_unit_callbacks_t *cb;
536
537 lib = nxt_unit_malloc(NULL,
538 sizeof(nxt_unit_impl_t) + init->request_data_size);
539 if (nxt_slow_path(lib == NULL)) {
540 nxt_unit_alert(NULL, "failed to allocate unit struct");
541
542 return NULL;
543 }
544
545 rc = pthread_mutex_init(&lib->mutex, NULL);
546 if (nxt_slow_path(rc != 0)) {
547 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc);
548
549 goto fail;
550 }
551
552 lib->unit.data = init->data;
553 lib->callbacks = init->callbacks;
554
555 lib->request_data_size = init->request_data_size;
556 lib->shm_mmap_limit = (init->shm_limit + PORT_MMAP_DATA_SIZE - 1)
557 / PORT_MMAP_DATA_SIZE;
558
559 lib->processes.slot = NULL;
560 lib->ports.slot = NULL;
561
562 lib->log_fd = STDERR_FILENO;
563 lib->online = 1;
564
565 nxt_queue_init(&lib->contexts);
566
567 lib->use_count = 0;
568 lib->router_port = NULL;
569 lib->shared_port = NULL;
570
571 rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data);
572 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
573 pthread_mutex_destroy(&lib->mutex);
574 goto fail;
575 }
576
577 cb = &lib->callbacks;
578
579 if (cb->request_handler == NULL) {
580 nxt_unit_alert(NULL, "request_handler is NULL");
581
582 pthread_mutex_destroy(&lib->mutex);
583 goto fail;
584 }
585
586 nxt_unit_mmaps_init(&lib->incoming);
587 nxt_unit_mmaps_init(&lib->outgoing);
588
589 return lib;
590
591fail:
592
593 nxt_unit_free(NULL, lib);
594
595 return NULL;
596}
597
598
599static int
600nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl,
601 void *data)
602{
603 int rc;
604
605 ctx_impl->ctx.data = data;
606 ctx_impl->ctx.unit = &lib->unit;
607
608 rc = pthread_mutex_init(&ctx_impl->mutex, NULL);
609 if (nxt_slow_path(rc != 0)) {
610 nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc);
611
612 return NXT_UNIT_ERROR;
613 }
614
615 nxt_unit_lib_use(lib);
616
617 nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link);
618
619 ctx_impl->use_count = 1;
620 ctx_impl->wait_items = 0;
621
622 nxt_queue_init(&ctx_impl->free_req);
623 nxt_queue_init(&ctx_impl->free_ws);
624 nxt_queue_init(&ctx_impl->active_req);
625 nxt_queue_init(&ctx_impl->ready_req);
626 nxt_queue_init(&ctx_impl->pending_rbuf);
627 nxt_queue_init(&ctx_impl->free_rbuf);
628
629 ctx_impl->free_buf = NULL;
630 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]);
631 nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[0]);
632
633 nxt_queue_insert_tail(&ctx_impl->free_req, &ctx_impl->req.link);
634 nxt_queue_insert_tail(&ctx_impl->free_rbuf, &ctx_impl->ctx_read_buf.link);
635
636 ctx_impl->ctx_read_buf.ctx_impl = ctx_impl;
637
638 ctx_impl->req.req.ctx = &ctx_impl->ctx;
639 ctx_impl->req.req.unit = &lib->unit;
640
641 ctx_impl->read_port = NULL;
642 ctx_impl->requests.slot = 0;
643
644 return NXT_UNIT_OK;
645}
646
647
648nxt_inline void
649nxt_unit_ctx_use(nxt_unit_ctx_t *ctx)
650{
651 nxt_unit_ctx_impl_t *ctx_impl;
652
653 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
654
655 nxt_atomic_fetch_add(&ctx_impl->use_count, 1);
656}
657
658
659nxt_inline void
660nxt_unit_ctx_release(nxt_unit_ctx_t *ctx)
661{
662 long c;
663 nxt_unit_ctx_impl_t *ctx_impl;
664
665 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
666
667 c = nxt_atomic_fetch_add(&ctx_impl->use_count, -1);
668
669 if (c == 1) {
670 nxt_unit_ctx_free(ctx_impl);
671 }
672}
673
674
675nxt_inline void
676nxt_unit_lib_use(nxt_unit_impl_t *lib)
677{
678 nxt_atomic_fetch_add(&lib->use_count, 1);
679}
680
681
682nxt_inline void
683nxt_unit_lib_release(nxt_unit_impl_t *lib)
684{
685 long c;
686 nxt_unit_process_t *process;
687
688 c = nxt_atomic_fetch_add(&lib->use_count, -1);
689
690 if (c == 1) {
691 for ( ;; ) {
692 pthread_mutex_lock(&lib->mutex);
693
694 process = nxt_unit_process_pop_first(lib);
695 if (process == NULL) {
696 pthread_mutex_unlock(&lib->mutex);
697
698 break;
699 }
700
701 nxt_unit_remove_process(lib, process);
702 }
703
704 pthread_mutex_destroy(&lib->mutex);
705
706 if (nxt_fast_path(lib->router_port != NULL)) {
707 nxt_unit_port_release(lib->router_port);
708 }
709
710 if (nxt_fast_path(lib->shared_port != NULL)) {
711 nxt_unit_port_release(lib->shared_port);
712 }
713
714 nxt_unit_mmaps_destroy(&lib->incoming);
715 nxt_unit_mmaps_destroy(&lib->outgoing);
716
717 nxt_unit_free(NULL, lib);
718 }
719}
720
721
722nxt_inline void
723nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head,
724 nxt_unit_mmap_buf_t *mmap_buf)
725{
726 mmap_buf->next = *head;
727
728 if (mmap_buf->next != NULL) {
729 mmap_buf->next->prev = &mmap_buf->next;
730 }
731
732 *head = mmap_buf;
733 mmap_buf->prev = head;
734}
735
736
737nxt_inline void
738nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev,
739 nxt_unit_mmap_buf_t *mmap_buf)
740{
741 while (*prev != NULL) {
742 prev = &(*prev)->next;
743 }
744
745 nxt_unit_mmap_buf_insert(prev, mmap_buf);
746}
747
748
749nxt_inline void
750nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf)
751{
752 nxt_unit_mmap_buf_t **prev;
753
754 prev = mmap_buf->prev;
755
756 if (mmap_buf->next != NULL) {
757 mmap_buf->next->prev = prev;
758 }
759
760 if (prev != NULL) {
761 *prev = mmap_buf->next;
762 }
763}
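
#if 0
/*
 * Illustrative sketch, not part of this file: because "prev" points at
 * the previous link's "next" field (or at the list head itself), a buffer
 * can unlink itself without knowing which list it is on.
 */
static void
example_requeue_buf(nxt_unit_mmap_buf_t **from_head,
    nxt_unit_mmap_buf_t **to_head)
{
    nxt_unit_mmap_buf_t  *b;

    b = *from_head;

    if (b != NULL) {
        nxt_unit_mmap_buf_unlink(b);
        nxt_unit_mmap_buf_insert(to_head, b);
    }
}
#endif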
764
765
766static int
767nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port,
768 nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream,
769 uint32_t *shm_limit)
770{
771 int rc;
772 int ready_fd, router_fd, read_fd;
773 char *unit_init, *version_end;
774 long version_length;
775 int64_t ready_pid, router_pid, read_pid;
776 uint32_t ready_stream, router_id, ready_id, read_id;
777
778 unit_init = getenv(NXT_UNIT_INIT_ENV);
779 if (nxt_slow_path(unit_init == NULL)) {
780 nxt_unit_alert(NULL, "%s is not in the current environment",
781 NXT_UNIT_INIT_ENV);
782
783 return NXT_UNIT_ERROR;
784 }
785
786 nxt_unit_debug(NULL, "%s='%s'", NXT_UNIT_INIT_ENV, unit_init);
787
788 version_length = nxt_length(NXT_VERSION);
789
790 version_end = strchr(unit_init, ';');
791 if (version_end == NULL
792 || version_end - unit_init != version_length
793 || memcmp(unit_init, NXT_VERSION, version_length) != 0)
794 {
795 nxt_unit_alert(NULL, "version check error");
796
797 return NXT_UNIT_ERROR;
798 }
799
800 rc = sscanf(version_end + 1,
801 "%"PRIu32";"
802 "%"PRId64",%"PRIu32",%d;"
803 "%"PRId64",%"PRIu32",%d;"
804 "%"PRId64",%"PRIu32",%d;"
805 "%d,%"PRIu32,
806 &ready_stream,
807 &ready_pid, &ready_id, &ready_fd,
808 &router_pid, &router_id, &router_fd,
809 &read_pid, &read_id, &read_fd,
810 log_fd, shm_limit);
811
812 if (nxt_slow_path(rc != 12)) {
813 nxt_unit_alert(NULL, "failed to scan variables: %d", rc);
814
815 return NXT_UNIT_ERROR;
816 }
817
818 nxt_unit_port_id_init(&ready_port->id, (pid_t) ready_pid, ready_id);
819
820 ready_port->in_fd = -1;
821 ready_port->out_fd = ready_fd;
822 ready_port->data = NULL;
823
824 nxt_unit_port_id_init(&router_port->id, (pid_t) router_pid, router_id);
825
826 router_port->in_fd = -1;
827 router_port->out_fd = router_fd;
828 router_port->data = NULL;
829
830 nxt_unit_port_id_init(&read_port->id, (pid_t) read_pid, read_id);
831
832 read_port->in_fd = read_fd;
833 read_port->out_fd = -1;
834 read_port->data = NULL;
835
836 *stream = ready_stream;
837
838 return NXT_UNIT_OK;
839}
840
841
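/*
 * Reports the application process as ready: sends a
 * _NXT_PORT_MSG_PROCESS_READY message for the given stream over
 * ready_fd and passes queue_fd along as SCM_RIGHTS ancillary data.
 */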
842static int
843nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream, int queue_fd)
844{
845 ssize_t res;
846 nxt_port_msg_t msg;
847 nxt_unit_impl_t *lib;
848
849 union {
850 struct cmsghdr cm;
851 char space[CMSG_SPACE(sizeof(int))];
852 } cmsg;
853
854 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
855
856 msg.stream = stream;
857 msg.pid = lib->pid;
858 msg.reply_port = 0;
859 msg.type = _NXT_PORT_MSG_PROCESS_READY;
860 msg.last = 1;
861 msg.mmap = 0;
862 msg.nf = 0;
863 msg.mf = 0;
864 msg.tracking = 0;
865
866 memset(&cmsg, 0, sizeof(cmsg));
867
868 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
869 cmsg.cm.cmsg_level = SOL_SOCKET;
870 cmsg.cm.cmsg_type = SCM_RIGHTS;
871
872    /*
873     * memcpy() is used instead of the simple
874     * *(int *) CMSG_DATA(&cmsg.cm) = queue_fd;
875     * because GCC 4.4 with -O2/3/s optimization may issue a warning:
876     * dereferencing type-punned pointer will break strict-aliasing rules
877     *
878     * Fortunately, GCC with -O1 compiles this memcpy()
879     * into the same simple assignment as in the code above.
880     */
881 memcpy(CMSG_DATA(&cmsg.cm), &queue_fd, sizeof(int));
882
883 res = nxt_unit_sendmsg(ctx, ready_fd, &msg, sizeof(msg),
884 &cmsg, sizeof(cmsg));
885 if (res != sizeof(msg)) {
886 return NXT_UNIT_ERROR;
887 }
888
889 return NXT_UNIT_OK;
890}
891
892
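/*
 * Processes one message read from a port: extracts up to two descriptors
 * from the SCM_RIGHTS control data, validates the nxt_port_msg_t header
 * (size, known type, no fragmentation), pulls the payload out of shared
 * memory when "mmap" is set, and dispatches on the message type.  Any
 * descriptors or incoming buffers left unconsumed are closed and freed
 * at "done", and the read buffer is released unless NXT_UNIT_AGAIN is
 * returned.
 */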
893static int
894nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf)
895{
896 int rc;
897 pid_t pid;
898 struct cmsghdr *cm;
899 nxt_port_msg_t *port_msg;
900 nxt_unit_impl_t *lib;
901 nxt_unit_recv_msg_t recv_msg;
902
903 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
904
905 recv_msg.fd[0] = -1;
906 recv_msg.fd[1] = -1;
907 port_msg = (nxt_port_msg_t *) rbuf->buf;
908 cm = (struct cmsghdr *) rbuf->oob;
909
910 if (cm->cmsg_level == SOL_SOCKET
911 && cm->cmsg_type == SCM_RIGHTS)
912 {
913 if (cm->cmsg_len == CMSG_LEN(sizeof(int))) {
914 memcpy(recv_msg.fd, CMSG_DATA(cm), sizeof(int));
915 }
916
917 if (cm->cmsg_len == CMSG_LEN(sizeof(int) * 2)) {
918 memcpy(recv_msg.fd, CMSG_DATA(cm), sizeof(int) * 2);
919 }
920 }
921
922 recv_msg.incoming_buf = NULL;
923
924 if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) {
925 if (nxt_slow_path(rbuf->size == 0)) {
926 nxt_unit_debug(ctx, "read port closed");
927
928 nxt_unit_quit(ctx);
929 rc = NXT_UNIT_OK;
930 goto done;
931 }
932
933 nxt_unit_alert(ctx, "message too small (%d bytes)", (int) rbuf->size);
934
935 rc = NXT_UNIT_ERROR;
936 goto done;
937 }
938
939 nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd[0] %d fd[1] %d",
940 port_msg->stream, (int) port_msg->type,
941 recv_msg.fd[0], recv_msg.fd[1]);
942
943 recv_msg.stream = port_msg->stream;
944 recv_msg.pid = port_msg->pid;
945 recv_msg.reply_port = port_msg->reply_port;
946 recv_msg.last = port_msg->last;
947 recv_msg.mmap = port_msg->mmap;
948
949 recv_msg.start = port_msg + 1;
950 recv_msg.size = rbuf->size - sizeof(nxt_port_msg_t);
951
952 if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) {
953 nxt_unit_alert(ctx, "#%"PRIu32": unknown message type (%d)",
954 port_msg->stream, (int) port_msg->type);
955 rc = NXT_UNIT_ERROR;
956 goto done;
957 }
958
959 /* Fragmentation is unsupported. */
960 if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) {
961 nxt_unit_alert(ctx, "#%"PRIu32": fragmented message type (%d)",
962 port_msg->stream, (int) port_msg->type);
963 rc = NXT_UNIT_ERROR;
964 goto done;
965 }
966
967 if (port_msg->mmap) {
968 rc = nxt_unit_mmap_read(ctx, &recv_msg, rbuf);
969
970 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
971 if (rc == NXT_UNIT_AGAIN) {
972 recv_msg.fd[0] = -1;
973 recv_msg.fd[1] = -1;
974 }
975
976 goto done;
977 }
978 }
979
980 switch (port_msg->type) {
981
982 case _NXT_PORT_MSG_QUIT:
983 nxt_unit_debug(ctx, "#%"PRIu32": quit", port_msg->stream);
984
985 nxt_unit_quit(ctx);
986 rc = NXT_UNIT_OK;
987 break;
988
989 case _NXT_PORT_MSG_NEW_PORT:
990 rc = nxt_unit_process_new_port(ctx, &recv_msg);
991 break;
992
993 case _NXT_PORT_MSG_CHANGE_FILE:
994 nxt_unit_debug(ctx, "#%"PRIu32": change_file: fd %d",
995 port_msg->stream, recv_msg.fd[0]);
996
997 if (dup2(recv_msg.fd[0], lib->log_fd) == -1) {
998 nxt_unit_alert(ctx, "#%"PRIu32": dup2(%d, %d) failed: %s (%d)",
999 port_msg->stream, recv_msg.fd[0], lib->log_fd,
1000 strerror(errno), errno);
1001
1002 rc = NXT_UNIT_ERROR;
1003 goto done;
1004 }
1005
1006 rc = NXT_UNIT_OK;
1007 break;
1008
1009 case _NXT_PORT_MSG_MMAP:
1010 if (nxt_slow_path(recv_msg.fd[0] < 0)) {
1011 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap",
1012 port_msg->stream, recv_msg.fd[0]);
1013
1014 rc = NXT_UNIT_ERROR;
1015 goto done;
1016 }
1017
1018 rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd[0]);
1019 break;
1020
1021 case _NXT_PORT_MSG_REQ_HEADERS:
1022 rc = nxt_unit_process_req_headers(ctx, &recv_msg);
1023 break;
1024
1025 case _NXT_PORT_MSG_REQ_BODY:
1026 rc = nxt_unit_process_req_body(ctx, &recv_msg);
1027 break;
1028
1029 case _NXT_PORT_MSG_WEBSOCKET:
1030 rc = nxt_unit_process_websocket(ctx, &recv_msg);
1031 break;
1032
1033 case _NXT_PORT_MSG_REMOVE_PID:
1034 if (nxt_slow_path(recv_msg.size != sizeof(pid))) {
1035 nxt_unit_alert(ctx, "#%"PRIu32": remove_pid: invalid message size "
1036 "(%d != %d)", port_msg->stream, (int) recv_msg.size,
1037 (int) sizeof(pid));
1038
1039 rc = NXT_UNIT_ERROR;
1040 goto done;
1041 }
1042
1043 memcpy(&pid, recv_msg.start, sizeof(pid));
1044
1045 nxt_unit_debug(ctx, "#%"PRIu32": remove_pid: %d",
1046 port_msg->stream, (int) pid);
1047
1048 nxt_unit_remove_pid(lib, pid);
1049
1050 rc = NXT_UNIT_OK;
1051 break;
1052
1053 case _NXT_PORT_MSG_SHM_ACK:
1054 rc = nxt_unit_process_shm_ack(ctx);
1055 break;
1056
1057 default:
1058 nxt_unit_debug(ctx, "#%"PRIu32": ignore message type: %d",
1059 port_msg->stream, (int) port_msg->type);
1060
1061 rc = NXT_UNIT_ERROR;
1062 goto done;
1063 }
1064
1065done:
1066
1067 if (recv_msg.fd[0] != -1) {
1068 nxt_unit_close(recv_msg.fd[0]);
1069 }
1070
1071 if (recv_msg.fd[1] != -1) {
1072 nxt_unit_close(recv_msg.fd[1]);
1073 }
1074
1075 while (recv_msg.incoming_buf != NULL) {
1076 nxt_unit_mmap_buf_free(recv_msg.incoming_buf);
1077 }
1078
1079 if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) {
1080#if (NXT_DEBUG)
1081 memset(rbuf->buf, 0xAC, rbuf->size);
1082#endif
1083 nxt_unit_read_buf_release(ctx, rbuf);
1084 }
1085
1086 return rc;
1087}
1088
1089
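/*
 * Registers a port announced by the daemon.  A port id of -1 denotes the
 * shared application port: its descriptor is used for reading and its
 * queue (fd[1]) is mapped as nxt_app_queue_t.  Any other id is a peer
 * port used for sending: the descriptor is switched to blocking mode and
 * the queue is mapped as nxt_port_queue_t.
 */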
1090static int
1091nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1092{
1093 void *mem;
1094 nxt_unit_impl_t *lib;
1095 nxt_unit_port_t new_port, *port;
1096 nxt_port_msg_new_port_t *new_port_msg;
1097
1098 if (nxt_slow_path(recv_msg->size != sizeof(nxt_port_msg_new_port_t))) {
1099 nxt_unit_warn(ctx, "#%"PRIu32": new_port: "
1100 "invalid message size (%d)",
1101 recv_msg->stream, (int) recv_msg->size);
1102
1103 return NXT_UNIT_ERROR;
1104 }
1105
1106 if (nxt_slow_path(recv_msg->fd[0] < 0)) {
1107 nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for new port",
1108 recv_msg->stream, recv_msg->fd[0]);
1109
1110 return NXT_UNIT_ERROR;
1111 }
1112
1113 new_port_msg = recv_msg->start;
1114
1115 nxt_unit_debug(ctx, "#%"PRIu32": new_port: port{%d,%d} fd[0] %d fd[1] %d",
1116 recv_msg->stream, (int) new_port_msg->pid,
1117 (int) new_port_msg->id, recv_msg->fd[0], recv_msg->fd[1]);
1118
1119 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1120
1121 if (new_port_msg->id == (nxt_port_id_t) -1) {
1122 nxt_unit_port_id_init(&new_port.id, lib->pid, new_port_msg->id);
1123
1124 new_port.in_fd = recv_msg->fd[0];
1125 new_port.out_fd = -1;
1126
1127 mem = mmap(NULL, sizeof(nxt_app_queue_t), PROT_READ | PROT_WRITE,
1128 MAP_SHARED, recv_msg->fd[1], 0);
1129
1130 } else {
1131 if (nxt_slow_path(nxt_unit_fd_blocking(recv_msg->fd[0])
1132 != NXT_UNIT_OK))
1133 {
1134 return NXT_UNIT_ERROR;
1135 }
1136
1137 nxt_unit_port_id_init(&new_port.id, new_port_msg->pid,
1138 new_port_msg->id);
1139
1140 new_port.in_fd = -1;
1141 new_port.out_fd = recv_msg->fd[0];
1142
1143 mem = mmap(NULL, sizeof(nxt_port_queue_t), PROT_READ | PROT_WRITE,
1144 MAP_SHARED, recv_msg->fd[1], 0);
1145 }
1146
1147 if (nxt_slow_path(mem == MAP_FAILED)) {
1148 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", recv_msg->fd[1],
1149 strerror(errno), errno);
1150
1151 return NXT_UNIT_ERROR;
1152 }
1153
1154 new_port.data = NULL;
1155
1156 recv_msg->fd[0] = -1;
1157
1158 port = nxt_unit_add_port(ctx, &new_port, mem);
1159 if (nxt_slow_path(port == NULL)) {
1160 return NXT_UNIT_ERROR;
1161 }
1162
1163 if (new_port_msg->id == (nxt_port_id_t) -1) {
1164 lib->shared_port = port;
1165
1166 } else {
1167 nxt_unit_port_release(port);
1168 }
1169
1170 return NXT_UNIT_OK;
1171}
1172
1173
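/*
 * Builds a request from a REQ_HEADERS message.  The request data must
 * reside in shared memory; ownership of the incoming buffers and of the
 * optional spooled-body descriptor is transferred to the request.  After
 * the headers are acknowledged, the request handler is invoked, except
 * when the body is still incomplete and no data_handler is registered,
 * or when the response port is not ready yet.
 */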
1174static int
1175nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1176{
1177 int res;
1178 nxt_unit_impl_t *lib;
1179 nxt_unit_port_id_t port_id;
1180 nxt_unit_request_t *r;
1181 nxt_unit_mmap_buf_t *b;
1182 nxt_unit_request_info_t *req;
1183 nxt_unit_request_info_impl_t *req_impl;
1184
1185 if (nxt_slow_path(recv_msg->mmap == 0)) {
1186 nxt_unit_warn(ctx, "#%"PRIu32": data is not in shared memory",
1187 recv_msg->stream);
1188
1189 return NXT_UNIT_ERROR;
1190 }
1191
1192 if (nxt_slow_path(recv_msg->size < sizeof(nxt_unit_request_t))) {
1193 nxt_unit_warn(ctx, "#%"PRIu32": data too short: %d while at least "
1194 "%d expected", recv_msg->stream, (int) recv_msg->size,
1195 (int) sizeof(nxt_unit_request_t));
1196
1197 return NXT_UNIT_ERROR;
1198 }
1199
1200 req_impl = nxt_unit_request_info_get(ctx);
1201 if (nxt_slow_path(req_impl == NULL)) {
1202 nxt_unit_warn(ctx, "#%"PRIu32": request info allocation failed",
1203 recv_msg->stream);
1204
1205 return NXT_UNIT_ERROR;
1206 }
1207
1208 req = &req_impl->req;
1209
1210 req->request = recv_msg->start;
1211
1212 b = recv_msg->incoming_buf;
1213
1214 req->request_buf = &b->buf;
1215 req->response = NULL;
1216 req->response_buf = NULL;
1217
1218 r = req->request;
1219
1220 req->content_length = r->content_length;
1221
1222 req->content_buf = req->request_buf;
1223 req->content_buf->free = nxt_unit_sptr_get(&r->preread_content);
1224
1225 req_impl->stream = recv_msg->stream;
1226
1227 req_impl->outgoing_buf = NULL;
1228
1229 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
1230 b->req = req;
1231 }
1232
1233 /* "Move" incoming buffer list to req_impl. */
1234 req_impl->incoming_buf = recv_msg->incoming_buf;
1235 req_impl->incoming_buf->prev = &req_impl->incoming_buf;
1236 recv_msg->incoming_buf = NULL;
1237
1238 req->content_fd = recv_msg->fd[0];
1239 recv_msg->fd[0] = -1;
1240
1241 req->response_max_fields = 0;
1242 req_impl->state = NXT_UNIT_RS_START;
1243 req_impl->websocket = 0;
1244 req_impl->in_hash = 0;
1245
1246 nxt_unit_debug(ctx, "#%"PRIu32": %.*s %.*s (%d)", recv_msg->stream,
1247 (int) r->method_length,
1248 (char *) nxt_unit_sptr_get(&r->method),
1249 (int) r->target_length,
1250 (char *) nxt_unit_sptr_get(&r->target),
1251 (int) r->content_length);
1252
1253 nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port);
1254
1255 res = nxt_unit_request_check_response_port(req, &port_id);
1256 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
1257 return NXT_UNIT_ERROR;
1258 }
1259
1260 if (nxt_fast_path(res == NXT_UNIT_OK)) {
1261 res = nxt_unit_send_req_headers_ack(req);
1262 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
1263 nxt_unit_request_done(req, NXT_UNIT_ERROR);
1264
1265 return NXT_UNIT_ERROR;
1266 }
1267
1268 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1269
1270 if (req->content_length
1271 > (uint64_t) (req->content_buf->end - req->content_buf->free))
1272 {
1273 res = nxt_unit_request_hash_add(ctx, req);
1274 if (nxt_slow_path(res != NXT_UNIT_OK)) {
1275 nxt_unit_req_warn(req, "failed to add request to hash");
1276
1277 nxt_unit_request_done(req, NXT_UNIT_ERROR);
1278
1279 return NXT_UNIT_ERROR;
1280 }
1281
1282        /*
1283         * If the application has a separate data handler, we may start
1284         * request processing and handle the data when it arrives.
1285         */
1286 if (lib->callbacks.data_handler == NULL) {
1287 return NXT_UNIT_OK;
1288 }
1289 }
1290
1291 lib->callbacks.request_handler(req);
1292 }
1293
1294 return NXT_UNIT_OK;
1295}
1296
1297
1298static int
1299nxt_unit_process_req_body(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1300{
1301 uint64_t l;
1302 nxt_unit_impl_t *lib;
1303 nxt_unit_mmap_buf_t *b;
1304 nxt_unit_request_info_t *req;
1305
1306 req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last);
1307 if (req == NULL) {
1308 return NXT_UNIT_OK;
1309 }
1310
1311 l = req->content_buf->end - req->content_buf->free;
1312
1313 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
1314 b->req = req;
1315 l += b->buf.end - b->buf.free;
1316 }
1317
1318 if (recv_msg->incoming_buf != NULL) {
1319 b = nxt_container_of(req->content_buf, nxt_unit_mmap_buf_t, buf);
1320
1321 /* "Move" incoming buffer list to req_impl. */
1322 nxt_unit_mmap_buf_insert_tail(&b->next, recv_msg->incoming_buf);
1323 recv_msg->incoming_buf = NULL;
1324 }
1325
1326 req->content_fd = recv_msg->fd[0];
1327 recv_msg->fd[0] = -1;
1328
1329 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1330
1331 if (lib->callbacks.data_handler != NULL) {
1332 lib->callbacks.data_handler(req);
1333
1334 return NXT_UNIT_OK;
1335 }
1336
1337 if (req->content_fd != -1 || l == req->content_length) {
1338 lib->callbacks.request_handler(req);
1339 }
1340
1341 return NXT_UNIT_OK;
1342}
1343
1344
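/*
 * Resolves the port the response should be sent to.  Returns NXT_UNIT_OK
 * when the port is known and ready, NXT_UNIT_ERROR on failure, or
 * NXT_UNIT_AGAIN when the request has been queued on the port's
 * awaiting_req list: either the port exists but is not ready yet, or a
 * placeholder entry is created here and its descriptor is requested via
 * nxt_unit_get_port().
 */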
1345static int
1346nxt_unit_request_check_response_port(nxt_unit_request_info_t *req,
1347 nxt_unit_port_id_t *port_id)
1348{
1349 int res;
1350 nxt_unit_ctx_t *ctx;
1351 nxt_unit_impl_t *lib;
1352 nxt_unit_port_t *port;
1353 nxt_unit_process_t *process;
1354 nxt_unit_ctx_impl_t *ctx_impl;
1355 nxt_unit_port_impl_t *port_impl;
1356 nxt_unit_request_info_impl_t *req_impl;
1357
1358 ctx = req->ctx;
1359 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1360 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
1361
1362 pthread_mutex_lock(&lib->mutex);
1363
1364 port = nxt_unit_port_hash_find(&lib->ports, port_id, 0);
1365 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
1366
1367 if (nxt_fast_path(port != NULL)) {
1368 req->response_port = port;
1369
1370 if (nxt_fast_path(port_impl->ready)) {
1371 pthread_mutex_unlock(&lib->mutex);
1372
1373 nxt_unit_debug(ctx, "check_response_port: found port{%d,%d}",
1374 (int) port->id.pid, (int) port->id.id);
1375
1376 return NXT_UNIT_OK;
1377 }
1378
1379 nxt_unit_debug(ctx, "check_response_port: "
1380 "port{%d,%d} already requested",
1381 (int) port->id.pid, (int) port->id.id);
1382
1383 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1384
1385 nxt_queue_insert_tail(&port_impl->awaiting_req,
1386 &req_impl->port_wait_link);
1387
1388 pthread_mutex_unlock(&lib->mutex);
1389
1390 nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);
1391
1392 return NXT_UNIT_AGAIN;
1393 }
1394
1395 port_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
1396 if (nxt_slow_path(port_impl == NULL)) {
1397 nxt_unit_alert(ctx, "check_response_port: malloc(%d) failed",
1398 (int) sizeof(nxt_unit_port_impl_t));
1399
1400 pthread_mutex_unlock(&lib->mutex);
1401
1402 return NXT_UNIT_ERROR;
1403 }
1404
1405 port = &port_impl->port;
1406
1407 port->id = *port_id;
1408 port->in_fd = -1;
1409 port->out_fd = -1;
1410 port->data = NULL;
1411
1412 res = nxt_unit_port_hash_add(&lib->ports, port);
1413 if (nxt_slow_path(res != NXT_UNIT_OK)) {
1414 nxt_unit_alert(ctx, "check_response_port: %d,%d hash_add failed",
1415 port->id.pid, port->id.id);
1416
1417 pthread_mutex_unlock(&lib->mutex);
1418
1419 nxt_unit_free(ctx, port);
1420
1421 return NXT_UNIT_ERROR;
1422 }
1423
1424 process = nxt_unit_process_find(lib, port_id->pid, 0);
1425 if (nxt_slow_path(process == NULL)) {
1426 nxt_unit_alert(ctx, "check_response_port: process %d not found",
1427 port->id.pid);
1428
1429 nxt_unit_port_hash_find(&lib->ports, port_id, 1);
1430
1431 pthread_mutex_unlock(&lib->mutex);
1432
1433 nxt_unit_free(ctx, port);
1434
1435 return NXT_UNIT_ERROR;
1436 }
1437
1438 nxt_queue_insert_tail(&process->ports, &port_impl->link);
1439
1440 port_impl->process = process;
1441 port_impl->queue = NULL;
1442 port_impl->from_socket = 0;
1443 port_impl->socket_rbuf = NULL;
1444
1445 nxt_queue_init(&port_impl->awaiting_req);
1446
1447 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1448
1449 nxt_queue_insert_tail(&port_impl->awaiting_req, &req_impl->port_wait_link);
1450
1451 port_impl->use_count = 2;
1452 port_impl->ready = 0;
1453
1454 req->response_port = port;
1455
1456 pthread_mutex_unlock(&lib->mutex);
1457
1458 res = nxt_unit_get_port(ctx, port_id);
1459 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
1460 return NXT_UNIT_ERROR;
1461 }
1462
1463 nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);
1464
1465 return NXT_UNIT_AGAIN;
1466}
1467
1468
1469static int
1470nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req)
1471{
1472 ssize_t res;
1473 nxt_port_msg_t msg;
1474 nxt_unit_impl_t *lib;
1475 nxt_unit_ctx_impl_t *ctx_impl;
1476 nxt_unit_request_info_impl_t *req_impl;
1477
1478 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
1479 ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);
1480 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1481
1482 memset(&msg, 0, sizeof(nxt_port_msg_t));
1483
1484 msg.stream = req_impl->stream;
1485 msg.pid = lib->pid;
1486 msg.reply_port = ctx_impl->read_port->id.id;
1487 msg.type = _NXT_PORT_MSG_REQ_HEADERS_ACK;
1488
1489 res = nxt_unit_port_send(req->ctx, req->response_port,
1490 &msg, sizeof(msg), NULL, 0);
1491 if (nxt_slow_path(res != sizeof(msg))) {
1492 return NXT_UNIT_ERROR;
1493 }
1494
1495 return NXT_UNIT_OK;
1496}
1497
1498
1499static int
1500nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg)
1501{
1502 size_t hsize;
1503 nxt_unit_impl_t *lib;
1504 nxt_unit_mmap_buf_t *b;
1505 nxt_unit_callbacks_t *cb;
1506 nxt_unit_request_info_t *req;
1507 nxt_unit_request_info_impl_t *req_impl;
1508 nxt_unit_websocket_frame_impl_t *ws_impl;
1509
1510 req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last);
1511 if (nxt_slow_path(req == NULL)) {
1512 return NXT_UNIT_OK;
1513 }
1514
1515 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1516
1517 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1518 cb = &lib->callbacks;
1519
1520 if (cb->websocket_handler && recv_msg->size >= 2) {
1521 ws_impl = nxt_unit_websocket_frame_get(ctx);
1522 if (nxt_slow_path(ws_impl == NULL)) {
1523 nxt_unit_warn(ctx, "#%"PRIu32": websocket frame allocation failed",
1524 req_impl->stream);
1525
1526 return NXT_UNIT_ERROR;
1527 }
1528
1529 ws_impl->ws.req = req;
1530
1531 ws_impl->buf = NULL;
1532
1533 if (recv_msg->mmap) {
1534 for (b = recv_msg->incoming_buf; b != NULL; b = b->next) {
1535 b->req = req;
1536 }
1537
1538 /* "Move" incoming buffer list to ws_impl. */
1539 ws_impl->buf = recv_msg->incoming_buf;
1540 ws_impl->buf->prev = &ws_impl->buf;
1541 recv_msg->incoming_buf = NULL;
1542
1543 b = ws_impl->buf;
1544
1545 } else {
1546 b = nxt_unit_mmap_buf_get(ctx);
1547 if (nxt_slow_path(b == NULL)) {
1548 nxt_unit_alert(ctx, "#%"PRIu32": failed to allocate buf",
1549 req_impl->stream);
1550
1551 nxt_unit_websocket_frame_release(&ws_impl->ws);
1552
1553 return NXT_UNIT_ERROR;
1554 }
1555
1556 b->req = req;
1557 b->buf.start = recv_msg->start;
1558 b->buf.free = b->buf.start;
1559 b->buf.end = b->buf.start + recv_msg->size;
1560
1561 nxt_unit_mmap_buf_insert(&ws_impl->buf, b);
1562 }
1563
1564 ws_impl->ws.header = (void *) b->buf.start;
1565 ws_impl->ws.payload_len = nxt_websocket_frame_payload_len(
1566 ws_impl->ws.header);
1567
1568 hsize = nxt_websocket_frame_header_size(ws_impl->ws.header);
1569
1570 if (ws_impl->ws.header->mask) {
1571 ws_impl->ws.mask = (uint8_t *) b->buf.start + hsize - 4;
1572
1573 } else {
1574 ws_impl->ws.mask = NULL;
1575 }
1576
1577 b->buf.free += hsize;
1578
1579 ws_impl->ws.content_buf = &b->buf;
1580 ws_impl->ws.content_length = ws_impl->ws.payload_len;
1581
1582 nxt_unit_req_debug(req, "websocket_handler: opcode=%d, "
1583 "payload_len=%"PRIu64,
1584 ws_impl->ws.header->opcode,
1585 ws_impl->ws.payload_len);
1586
1587 cb->websocket_handler(&ws_impl->ws);
1588 }
1589
1590 if (recv_msg->last) {
1591 req_impl->websocket = 0;
1592
1593 if (cb->close_handler) {
1594 nxt_unit_req_debug(req, "close_handler");
1595
1596 cb->close_handler(req);
1597
1598 } else {
1599 nxt_unit_request_done(req, NXT_UNIT_ERROR);
1600 }
1601 }
1602
1603 return NXT_UNIT_OK;
1604}
1605
1606
1607static int
1608nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx)
1609{
1610 nxt_unit_impl_t *lib;
1611 nxt_unit_callbacks_t *cb;
1612
1613 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1614 cb = &lib->callbacks;
1615
1616 if (cb->shm_ack_handler != NULL) {
1617 cb->shm_ack_handler(ctx);
1618 }
1619
1620 return NXT_UNIT_OK;
1621}
1622
1623
1624static nxt_unit_request_info_impl_t *
1625nxt_unit_request_info_get(nxt_unit_ctx_t *ctx)
1626{
1627 nxt_unit_impl_t *lib;
1628 nxt_queue_link_t *lnk;
1629 nxt_unit_ctx_impl_t *ctx_impl;
1630 nxt_unit_request_info_impl_t *req_impl;
1631
1632 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
1633
1634 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
1635
1636 pthread_mutex_lock(&ctx_impl->mutex);
1637
1638 if (nxt_queue_is_empty(&ctx_impl->free_req)) {
1639 pthread_mutex_unlock(&ctx_impl->mutex);
1640
1641 req_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_request_info_impl_t)
1642 + lib->request_data_size);
1643 if (nxt_slow_path(req_impl == NULL)) {
1644 return NULL;
1645 }
1646
1647 req_impl->req.unit = ctx->unit;
1648 req_impl->req.ctx = ctx;
1649
1650 pthread_mutex_lock(&ctx_impl->mutex);
1651
1652 } else {
1653 lnk = nxt_queue_first(&ctx_impl->free_req);
1654 nxt_queue_remove(lnk);
1655
1656 req_impl = nxt_container_of(lnk, nxt_unit_request_info_impl_t, link);
1657 }
1658
1659 nxt_queue_insert_tail(&ctx_impl->active_req, &req_impl->link);
1660
1661 pthread_mutex_unlock(&ctx_impl->mutex);
1662
1663 req_impl->req.data = lib->request_data_size ? req_impl->extra_data : NULL;
1664
1665 return req_impl;
1666}
1667
1668
1669static void
1670nxt_unit_request_info_release(nxt_unit_request_info_t *req)
1671{
1672 nxt_unit_ctx_impl_t *ctx_impl;
1673 nxt_unit_request_info_impl_t *req_impl;
1674
1675 ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx);
1676 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1677
1678 req->response = NULL;
1679 req->response_buf = NULL;
1680
1681 if (req_impl->in_hash) {
1682 nxt_unit_request_hash_find(req->ctx, req_impl->stream, 1);
1683 }
1684
1685 req_impl->websocket = 0;
1686
1687 while (req_impl->outgoing_buf != NULL) {
1688 nxt_unit_mmap_buf_free(req_impl->outgoing_buf);
1689 }
1690
1691 while (req_impl->incoming_buf != NULL) {
1692 nxt_unit_mmap_buf_free(req_impl->incoming_buf);
1693 }
1694
1695 if (req->content_fd != -1) {
1696 nxt_unit_close(req->content_fd);
1697
1698 req->content_fd = -1;
1699 }
1700
1701 if (req->response_port != NULL) {
1702 nxt_unit_port_release(req->response_port);
1703
1704 req->response_port = NULL;
1705 }
1706
1707 pthread_mutex_lock(&ctx_impl->mutex);
1708
1709 nxt_queue_remove(&req_impl->link);
1710
1711 nxt_queue_insert_tail(&ctx_impl->free_req, &req_impl->link);
1712
1713 pthread_mutex_unlock(&ctx_impl->mutex);
1714
1715 req_impl->state = NXT_UNIT_RS_RELEASED;
1716}
1717
1718
1719static void
1720nxt_unit_request_info_free(nxt_unit_request_info_impl_t *req_impl)
1721{
1722 nxt_unit_ctx_impl_t *ctx_impl;
1723
1724 ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t, ctx);
1725
1726 nxt_queue_remove(&req_impl->link);
1727
1728 if (req_impl != &ctx_impl->req) {
1729 nxt_unit_free(&ctx_impl->ctx, req_impl);
1730 }
1731}
1732
1733
1734static nxt_unit_websocket_frame_impl_t *
1735nxt_unit_websocket_frame_get(nxt_unit_ctx_t *ctx)
1736{
1737 nxt_queue_link_t *lnk;
1738 nxt_unit_ctx_impl_t *ctx_impl;
1739 nxt_unit_websocket_frame_impl_t *ws_impl;
1740
1741 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
1742
1743 pthread_mutex_lock(&ctx_impl->mutex);
1744
1745 if (nxt_queue_is_empty(&ctx_impl->free_ws)) {
1746 pthread_mutex_unlock(&ctx_impl->mutex);
1747
1748 ws_impl = nxt_unit_malloc(ctx, sizeof(nxt_unit_websocket_frame_impl_t));
1749 if (nxt_slow_path(ws_impl == NULL)) {
1750 return NULL;
1751 }
1752
1753 } else {
1754 lnk = nxt_queue_first(&ctx_impl->free_ws);
1755 nxt_queue_remove(lnk);
1756
1757 pthread_mutex_unlock(&ctx_impl->mutex);
1758
1759 ws_impl = nxt_container_of(lnk, nxt_unit_websocket_frame_impl_t, link);
1760 }
1761
1762 ws_impl->ctx_impl = ctx_impl;
1763
1764 return ws_impl;
1765}
1766
1767
1768static void
1769nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws)
1770{
1771 nxt_unit_websocket_frame_impl_t *ws_impl;
1772
1773 ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);
1774
1775 while (ws_impl->buf != NULL) {
1776 nxt_unit_mmap_buf_free(ws_impl->buf);
1777 }
1778
1779 ws->req = NULL;
1780
1781 pthread_mutex_lock(&ws_impl->ctx_impl->mutex);
1782
1783 nxt_queue_insert_tail(&ws_impl->ctx_impl->free_ws, &ws_impl->link);
1784
1785 pthread_mutex_unlock(&ws_impl->ctx_impl->mutex);
1786}
1787
1788
1789static void
1790nxt_unit_websocket_frame_free(nxt_unit_ctx_t *ctx,
1791 nxt_unit_websocket_frame_impl_t *ws_impl)
1792{
1793 nxt_queue_remove(&ws_impl->link);
1794
1795 nxt_unit_free(ctx, ws_impl);
1796}
1797
1798
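/*
 * Case-insensitive header name hash: hash = hash * 17 + lowercase(ch),
 * folded to 16 bits, so for example "Content-Type" and "content-type"
 * produce the same value.  The constants match those used by the router
 * in nxt_http_parse.c.
 */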
1799uint16_t
1800nxt_unit_field_hash(const char *name, size_t name_length)
1801{
1802 u_char ch;
1803 uint32_t hash;
1804 const char *p, *end;
1805
1806 hash = 159406; /* Magic value copied from nxt_http_parse.c */
1807 end = name + name_length;
1808
1809 for (p = name; p < end; p++) {
1810 ch = *p;
1811 hash = (hash << 4) + hash + nxt_lowcase(ch);
1812 }
1813
1814 hash = (hash >> 16) ^ hash;
1815
1816 return hash;
1817}
1818
1819
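/*
 * Groups repeated header fields so that entries with the same name
 * become adjacent to the first occurrence (for illustration, a name
 * sequence A, B, A, C is reordered to A, A, B, C), and records the
 * indexes of the well-known content-length, content-type and cookie
 * fields.  Since field name/value offsets are self-relative, they are
 * adjusted as entries are shifted.
 */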
1820void
1821nxt_unit_request_group_dup_fields(nxt_unit_request_info_t *req)
1822{
1823 char *name;
1824 uint32_t i, j;
1825 nxt_unit_field_t *fields, f;
1826 nxt_unit_request_t *r;
1827
1828 static nxt_str_t content_length = nxt_string("content-length");
1829 static nxt_str_t content_type = nxt_string("content-type");
1830 static nxt_str_t cookie = nxt_string("cookie");
1831
1832 nxt_unit_req_debug(req, "group_dup_fields");
1833
1834 r = req->request;
1835 fields = r->fields;
1836
1837 for (i = 0; i < r->fields_count; i++) {
1838 name = nxt_unit_sptr_get(&fields[i].name);
1839
1840 switch (fields[i].hash) {
1841 case NXT_UNIT_HASH_CONTENT_LENGTH:
1842 if (fields[i].name_length == content_length.length
1843 && nxt_unit_memcasecmp(name, content_length.start,
1844 content_length.length) == 0)
1845 {
1846 r->content_length_field = i;
1847 }
1848
1849 break;
1850
1851 case NXT_UNIT_HASH_CONTENT_TYPE:
1852 if (fields[i].name_length == content_type.length
1853 && nxt_unit_memcasecmp(name, content_type.start,
1854 content_type.length) == 0)
1855 {
1856 r->content_type_field = i;
1857 }
1858
1859 break;
1860
1861 case NXT_UNIT_HASH_COOKIE:
1862 if (fields[i].name_length == cookie.length
1863 && nxt_unit_memcasecmp(name, cookie.start,
1864 cookie.length) == 0)
1865 {
1866 r->cookie_field = i;
1867 }
1868
1869 break;
1870 }
1871
1872 for (j = i + 1; j < r->fields_count; j++) {
1873 if (fields[i].hash != fields[j].hash
1874 || fields[i].name_length != fields[j].name_length
1875 || nxt_unit_memcasecmp(name,
1876 nxt_unit_sptr_get(&fields[j].name),
1877 fields[j].name_length) != 0)
1878 {
1879 continue;
1880 }
1881
1882 f = fields[j];
1883 f.value.offset += (j - (i + 1)) * sizeof(f);
1884
1885 while (j > i + 1) {
1886 fields[j] = fields[j - 1];
1887 fields[j].name.offset -= sizeof(f);
1888 fields[j].value.offset -= sizeof(f);
1889 j--;
1890 }
1891
1892 fields[j] = f;
1893
1894            /* Assign the same name pointer to simplify further grouping. */
1895 nxt_unit_sptr_set(&fields[j].name, name);
1896
1897 i++;
1898 }
1899 }
1900}
1901
1902
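/*
 * Starts a response: allocates (or reuses) a response buffer large
 * enough for the nxt_unit_response_t header, max_fields_count field
 * structures with two terminating zero bytes each, and max_fields_size
 * bytes of field names and values.  buf->free is positioned past the
 * header and the field array, so names and values are appended behind
 * them.
 */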
1903int
1904nxt_unit_response_init(nxt_unit_request_info_t *req,
1905 uint16_t status, uint32_t max_fields_count, uint32_t max_fields_size)
1906{
1907 uint32_t buf_size;
1908 nxt_unit_buf_t *buf;
1909 nxt_unit_request_info_impl_t *req_impl;
1910
1911 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1912
1913 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
1914 nxt_unit_req_warn(req, "init: response already sent");
1915
1916 return NXT_UNIT_ERROR;
1917 }
1918
1919 nxt_unit_req_debug(req, "init: %d, max fields %d/%d", (int) status,
1920 (int) max_fields_count, (int) max_fields_size);
1921
1922 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT)) {
1923 nxt_unit_req_debug(req, "duplicate response init");
1924 }
1925
1926    /*
1927     * Each field name and value is 0-terminated by libunit;
1928     * this is the reason for the '+ 2' below.
1929     */
1930 buf_size = sizeof(nxt_unit_response_t)
1931 + max_fields_count * (sizeof(nxt_unit_field_t) + 2)
1932 + max_fields_size;
1933
1934 if (nxt_slow_path(req->response_buf != NULL)) {
1935 buf = req->response_buf;
1936
1937 if (nxt_fast_path(buf_size <= (uint32_t) (buf->end - buf->start))) {
1938 goto init_response;
1939 }
1940
1941 nxt_unit_buf_free(buf);
1942
1943 req->response_buf = NULL;
1944 req->response = NULL;
1945 req->response_max_fields = 0;
1946
1947 req_impl->state = NXT_UNIT_RS_START;
1948 }
1949
1950 buf = nxt_unit_response_buf_alloc(req, buf_size);
1951 if (nxt_slow_path(buf == NULL)) {
1952 return NXT_UNIT_ERROR;
1953 }
1954
1955init_response:
1956
1957 memset(buf->start, 0, sizeof(nxt_unit_response_t));
1958
1959 req->response_buf = buf;
1960
1961 req->response = (nxt_unit_response_t *) buf->start;
1962 req->response->status = status;
1963
1964 buf->free = buf->start + sizeof(nxt_unit_response_t)
1965 + max_fields_count * sizeof(nxt_unit_field_t);
1966
1967 req->response_max_fields = max_fields_count;
1968 req_impl->state = NXT_UNIT_RS_RESPONSE_INIT;
1969
1970 return NXT_UNIT_OK;
1971}
1972
1973
1974int
1975nxt_unit_response_realloc(nxt_unit_request_info_t *req,
1976 uint32_t max_fields_count, uint32_t max_fields_size)
1977{
1978 char *p;
1979 uint32_t i, buf_size;
1980 nxt_unit_buf_t *buf;
1981 nxt_unit_field_t *f, *src;
1982 nxt_unit_response_t *resp;
1983 nxt_unit_request_info_impl_t *req_impl;
1984
1985 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
1986
1987 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
1988 nxt_unit_req_warn(req, "realloc: response not init");
1989
1990 return NXT_UNIT_ERROR;
1991 }
1992
1993 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
1994 nxt_unit_req_warn(req, "realloc: response already sent");
1995
1996 return NXT_UNIT_ERROR;
1997 }
1998
1999 if (nxt_slow_path(max_fields_count < req->response->fields_count)) {
2000 nxt_unit_req_warn(req, "realloc: new max_fields_count is too small");
2001
2002 return NXT_UNIT_ERROR;
2003 }
2004
2005    /*
2006     * Each field name and value is 0-terminated by libunit;
2007     * this is the reason for the '+ 2' below.
2008     */
2009 buf_size = sizeof(nxt_unit_response_t)
2010 + max_fields_count * (sizeof(nxt_unit_field_t) + 2)
2011 + max_fields_size;
2012
2013 nxt_unit_req_debug(req, "realloc %"PRIu32"", buf_size);
2014
2015 buf = nxt_unit_response_buf_alloc(req, buf_size);
2016 if (nxt_slow_path(buf == NULL)) {
2017 nxt_unit_req_warn(req, "realloc: new buf allocation failed");
2018 return NXT_UNIT_ERROR;
2019 }
2020
2021 resp = (nxt_unit_response_t *) buf->start;
2022
2023 memset(resp, 0, sizeof(nxt_unit_response_t));
2024
2025 resp->status = req->response->status;
2026 resp->content_length = req->response->content_length;
2027
2028 p = buf->start + max_fields_count * sizeof(nxt_unit_field_t);
2029 f = resp->fields;
2030
2031 for (i = 0; i < req->response->fields_count; i++) {
2032 src = req->response->fields + i;
2033
2034 if (nxt_slow_path(src->skip != 0)) {
2035 continue;
2036 }
2037
2038 if (nxt_slow_path(src->name_length + src->value_length + 2
2039 > (uint32_t) (buf->end - p)))
2040 {
2041 nxt_unit_req_warn(req, "realloc: not enough space for field"
2042 " #%"PRIu32" (%p), (%"PRIu32" + %"PRIu32") required",
2043 i, src, src->name_length, src->value_length);
2044
2045 goto fail;
2046 }
2047
2048 nxt_unit_sptr_set(&f->name, p);
2049 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->name), src->name_length);
2050 *p++ = '\0';
2051
2052 nxt_unit_sptr_set(&f->value, p);
2053 p = nxt_cpymem(p, nxt_unit_sptr_get(&src->value), src->value_length);
2054 *p++ = '\0';
2055
2056 f->hash = src->hash;
2057 f->skip = 0;
2058 f->name_length = src->name_length;
2059 f->value_length = src->value_length;
2060
2061 resp->fields_count++;
2062 f++;
2063 }
2064
2065 if (req->response->piggyback_content_length > 0) {
2066 if (nxt_slow_path(req->response->piggyback_content_length
2067 > (uint32_t) (buf->end - p)))
2068 {
2069            nxt_unit_req_warn(req, "realloc: not enough space for content"
2070 " #%"PRIu32", %"PRIu32" required",
2071 i, req->response->piggyback_content_length);
2072
2073 goto fail;
2074 }
2075
2076 resp->piggyback_content_length =
2077 req->response->piggyback_content_length;
2078
2079 nxt_unit_sptr_set(&resp->piggyback_content, p);
2080 p = nxt_cpymem(p, nxt_unit_sptr_get(&req->response->piggyback_content),
2081 req->response->piggyback_content_length);
2082 }
2083
2084 buf->free = p;
2085
2086 nxt_unit_buf_free(req->response_buf);
2087
2088 req->response = resp;
2089 req->response_buf = buf;
2090 req->response_max_fields = max_fields_count;
2091
2092 return NXT_UNIT_OK;
2093
2094fail:
2095
2096 nxt_unit_buf_free(buf);
2097
2098 return NXT_UNIT_ERROR;
2099}
2100
2101
2102int
2103nxt_unit_response_is_init(nxt_unit_request_info_t *req)
2104{
2105 nxt_unit_request_info_impl_t *req_impl;
2106
2107 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2108
2109 return req_impl->state >= NXT_UNIT_RS_RESPONSE_INIT;
2110}
2111
2112
2113int
2114nxt_unit_response_add_field(nxt_unit_request_info_t *req,
2115 const char *name, uint8_t name_length,
2116 const char *value, uint32_t value_length)
2117{
2118 nxt_unit_buf_t *buf;
2119 nxt_unit_field_t *f;
2120 nxt_unit_response_t *resp;
2121 nxt_unit_request_info_impl_t *req_impl;
2122
2123 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2124
2125 if (nxt_slow_path(req_impl->state != NXT_UNIT_RS_RESPONSE_INIT)) {
2126 nxt_unit_req_warn(req, "add_field: response not initialized or "
2127 "already sent");
2128
2129 return NXT_UNIT_ERROR;
2130 }
2131
2132 resp = req->response;
2133
2134 if (nxt_slow_path(resp->fields_count >= req->response_max_fields)) {
2135 nxt_unit_req_warn(req, "add_field: too many response fields");
2136
2137 return NXT_UNIT_ERROR;
2138 }
2139
2140 buf = req->response_buf;
2141
2142 if (nxt_slow_path(name_length + value_length + 2
2143 > (uint32_t) (buf->end - buf->free)))
2144 {
2145 nxt_unit_req_warn(req, "add_field: response buffer overflow");
2146
2147 return NXT_UNIT_ERROR;
2148 }
2149
2150 nxt_unit_req_debug(req, "add_field #%"PRIu32": %.*s: %.*s",
2151 resp->fields_count,
2152 (int) name_length, name,
2153 (int) value_length, value);
2154
2155 f = resp->fields + resp->fields_count;
2156
2157 nxt_unit_sptr_set(&f->name, buf->free);
2158 buf->free = nxt_cpymem(buf->free, name, name_length);
2159 *buf->free++ = '\0';
2160
2161 nxt_unit_sptr_set(&f->value, buf->free);
2162 buf->free = nxt_cpymem(buf->free, value, value_length);
2163 *buf->free++ = '\0';
2164
2165 f->hash = nxt_unit_field_hash(name, name_length);
2166 f->skip = 0;
2167 f->name_length = name_length;
2168 f->value_length = value_length;
2169
2170 resp->fields_count++;
2171
2172 return NXT_UNIT_OK;
2173}
2174
2175
2176int
2177nxt_unit_response_add_content(nxt_unit_request_info_t *req,
2178 const void* src, uint32_t size)
2179{
2180 nxt_unit_buf_t *buf;
2181 nxt_unit_response_t *resp;
2182 nxt_unit_request_info_impl_t *req_impl;
2183
2184 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2185
2186 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2187 nxt_unit_req_warn(req, "add_content: response not initialized yet");
2188
2189 return NXT_UNIT_ERROR;
2190 }
2191
2192 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
2193 nxt_unit_req_warn(req, "add_content: response already sent");
2194
2195 return NXT_UNIT_ERROR;
2196 }
2197
2198 buf = req->response_buf;
2199
2200 if (nxt_slow_path(size > (uint32_t) (buf->end - buf->free))) {
2201 nxt_unit_req_warn(req, "add_content: buffer overflow");
2202
2203 return NXT_UNIT_ERROR;
2204 }
2205
2206 resp = req->response;
2207
2208 if (resp->piggyback_content_length == 0) {
2209 nxt_unit_sptr_set(&resp->piggyback_content, buf->free);
2210 req_impl->state = NXT_UNIT_RS_RESPONSE_HAS_CONTENT;
2211 }
2212
2213 resp->piggyback_content_length += size;
2214
2215 buf->free = nxt_cpymem(buf->free, src, size);
2216
2217 return NXT_UNIT_OK;
2218}
2219
2220
2221int
2222nxt_unit_response_send(nxt_unit_request_info_t *req)
2223{
2224 int rc;
2225 nxt_unit_mmap_buf_t *mmap_buf;
2226 nxt_unit_request_info_impl_t *req_impl;
2227
2228 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2229
2230 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2231 nxt_unit_req_warn(req, "send: response is not initialized yet");
2232
2233 return NXT_UNIT_ERROR;
2234 }
2235
2236 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
2237 nxt_unit_req_warn(req, "send: response already sent");
2238
2239 return NXT_UNIT_ERROR;
2240 }
2241
2242 if (req->request->websocket_handshake && req->response->status == 101) {
2243 nxt_unit_response_upgrade(req);
2244 }
2245
2246 nxt_unit_req_debug(req, "send: %"PRIu32" fields, %d bytes",
2247 req->response->fields_count,
2248 (int) (req->response_buf->free
2249 - req->response_buf->start));
2250
2251 mmap_buf = nxt_container_of(req->response_buf, nxt_unit_mmap_buf_t, buf);
2252
2253 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0);
2254 if (nxt_fast_path(rc == NXT_UNIT_OK)) {
2255 req->response = NULL;
2256 req->response_buf = NULL;
2257 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT;
2258
2259 nxt_unit_mmap_buf_free(mmap_buf);
2260 }
2261
2262 return rc;
2263}
2264
2265
2266int
2267nxt_unit_response_is_sent(nxt_unit_request_info_t *req)
2268{
2269 nxt_unit_request_info_impl_t *req_impl;
2270
2271 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2272
2273 return req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT;
2274}
2275
2276
2277nxt_unit_buf_t *
2278nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size)
2279{
2280 int rc;
2281 nxt_unit_mmap_buf_t *mmap_buf;
2282 nxt_unit_request_info_impl_t *req_impl;
2283
2284 if (nxt_slow_path(size > PORT_MMAP_DATA_SIZE)) {
2285 nxt_unit_req_warn(req, "response_buf_alloc: "
2286 "requested buffer (%"PRIu32") too big", size);
2287
2288 return NULL;
2289 }
2290
2291 nxt_unit_req_debug(req, "response_buf_alloc: %"PRIu32, size);
2292
2293 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2294
2295 mmap_buf = nxt_unit_mmap_buf_get(req->ctx);
2296 if (nxt_slow_path(mmap_buf == NULL)) {
2297 nxt_unit_req_alert(req, "response_buf_alloc: failed to allocate buf");
2298
2299 return NULL;
2300 }
2301
2302 mmap_buf->req = req;
2303
2304 nxt_unit_mmap_buf_insert_tail(&req_impl->outgoing_buf, mmap_buf);
2305
2306 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
2307 size, size, mmap_buf,
2308 NULL);
2309 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2310 nxt_unit_mmap_buf_release(mmap_buf);
2311
2312 return NULL;
2313 }
2314
2315 return &mmap_buf->buf;
2316}
2317
2318
2319static nxt_unit_mmap_buf_t *
2320nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx)
2321{
2322 nxt_unit_mmap_buf_t *mmap_buf;
2323 nxt_unit_ctx_impl_t *ctx_impl;
2324
2325 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
2326
2327 pthread_mutex_lock(&ctx_impl->mutex);
2328
2329 if (ctx_impl->free_buf == NULL) {
2330 pthread_mutex_unlock(&ctx_impl->mutex);
2331
2332 mmap_buf = nxt_unit_malloc(ctx, sizeof(nxt_unit_mmap_buf_t));
2333 if (nxt_slow_path(mmap_buf == NULL)) {
2334 return NULL;
2335 }
2336
2337 } else {
2338 mmap_buf = ctx_impl->free_buf;
2339
2340 nxt_unit_mmap_buf_unlink(mmap_buf);
2341
2342 pthread_mutex_unlock(&ctx_impl->mutex);
2343 }
2344
2345 mmap_buf->ctx_impl = ctx_impl;
2346
2347 mmap_buf->hdr = NULL;
2348 mmap_buf->free_ptr = NULL;
2349
2350 return mmap_buf;
2351}
2352
2353
2354static void
2355nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf)
2356{
2357 nxt_unit_mmap_buf_unlink(mmap_buf);
2358
2359 pthread_mutex_lock(&mmap_buf->ctx_impl->mutex);
2360
2361 nxt_unit_mmap_buf_insert(&mmap_buf->ctx_impl->free_buf, mmap_buf);
2362
2363 pthread_mutex_unlock(&mmap_buf->ctx_impl->mutex);
2364}
2365
2366
2367int
2368nxt_unit_request_is_websocket_handshake(nxt_unit_request_info_t *req)
2369{
2370 return req->request->websocket_handshake;
2371}
2372
2373
2374int
2375nxt_unit_response_upgrade(nxt_unit_request_info_t *req)
2376{
2377 int rc;
2378 nxt_unit_request_info_impl_t *req_impl;
2379
2380 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2381
2382 if (nxt_slow_path(req_impl->websocket != 0)) {
2383 nxt_unit_req_debug(req, "upgrade: already upgraded");
2384
2385 return NXT_UNIT_OK;
2386 }
2387
2388 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2389 nxt_unit_req_warn(req, "upgrade: response is not initialized yet");
2390
2391 return NXT_UNIT_ERROR;
2392 }
2393
2394 if (nxt_slow_path(req_impl->state >= NXT_UNIT_RS_RESPONSE_SENT)) {
2395 nxt_unit_req_warn(req, "upgrade: response already sent");
2396
2397 return NXT_UNIT_ERROR;
2398 }
2399
2400 rc = nxt_unit_request_hash_add(req->ctx, req);
2401 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2402 nxt_unit_req_warn(req, "upgrade: failed to add request to hash");
2403
2404 return NXT_UNIT_ERROR;
2405 }
2406
2407 req_impl->websocket = 1;
2408
2409 req->response->status = 101;
2410
2411 return NXT_UNIT_OK;
2412}
2413
2414
2415int
2416nxt_unit_response_is_websocket(nxt_unit_request_info_t *req)
2417{
2418 nxt_unit_request_info_impl_t *req_impl;
2419
2420 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2421
2422 return req_impl->websocket;
2423}
2424
2425
2426nxt_unit_request_info_t *
2427nxt_unit_get_request_info_from_data(void *data)
2428{
2429 nxt_unit_request_info_impl_t *req_impl;
2430
2431 req_impl = nxt_container_of(data, nxt_unit_request_info_impl_t, extra_data);
2432
2433 return &req_impl->req;
2434}
2435
2436
2437int
2438nxt_unit_buf_send(nxt_unit_buf_t *buf)
2439{
2440 int rc;
2441 nxt_unit_mmap_buf_t *mmap_buf;
2442 nxt_unit_request_info_t *req;
2443 nxt_unit_request_info_impl_t *req_impl;
2444
2445 mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);
2446
2447 req = mmap_buf->req;
2448 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2449
2450 nxt_unit_req_debug(req, "buf_send: %d bytes",
2451 (int) (buf->free - buf->start));
2452
2453 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2454 nxt_unit_req_warn(req, "buf_send: response not initialized yet");
2455
2456 return NXT_UNIT_ERROR;
2457 }
2458
2459 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
2460 nxt_unit_req_warn(req, "buf_send: headers not sent yet");
2461
2462 return NXT_UNIT_ERROR;
2463 }
2464
2465 if (nxt_fast_path(buf->free > buf->start)) {
2466 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0);
2467 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2468 return rc;
2469 }
2470 }
2471
2472 nxt_unit_mmap_buf_free(mmap_buf);
2473
2474 return NXT_UNIT_OK;
2475}
2476
2477
2478static void
2479nxt_unit_buf_send_done(nxt_unit_buf_t *buf)
2480{
2481 int rc;
2482 nxt_unit_mmap_buf_t *mmap_buf;
2483 nxt_unit_request_info_t *req;
2484
2485 mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);
2486
2487 req = mmap_buf->req;
2488
2489 rc = nxt_unit_mmap_buf_send(req, mmap_buf, 1);
2490 if (nxt_slow_path(rc == NXT_UNIT_OK)) {
2491 nxt_unit_mmap_buf_free(mmap_buf);
2492
2493 nxt_unit_request_info_release(req);
2494
2495 } else {
2496 nxt_unit_request_done(req, rc);
2497 }
2498}
2499
2500
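/*
 * Sends a response buffer to the request's response port.  A buffer
 * backed by a shared memory segment (hdr != NULL) is announced with a
 * small nxt_port_mmap_msg_t describing the mmap id, chunk and size; the
 * chunks handed over are subtracted from outgoing.allocated_chunks and
 * the buffer is rewound to the next free chunk, if any.  A plain buffer
 * is sent inline, with the nxt_port_msg_t header copied into the space
 * reserved just before buf->start.
 */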
2501static int
2502nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req,
2503 nxt_unit_mmap_buf_t *mmap_buf, int last)
2504{
2505 struct {
2506 nxt_port_msg_t msg;
2507 nxt_port_mmap_msg_t mmap_msg;
2508 } m;
2509
2510 int rc;
2511 u_char *last_used, *first_free;
2512 ssize_t res;
2513 nxt_chunk_id_t first_free_chunk;
2514 nxt_unit_buf_t *buf;
2515 nxt_unit_impl_t *lib;
2516 nxt_port_mmap_header_t *hdr;
2517 nxt_unit_request_info_impl_t *req_impl;
2518
2519 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
2520 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2521
2522 buf = &mmap_buf->buf;
2523 hdr = mmap_buf->hdr;
2524
2525 m.mmap_msg.size = buf->free - buf->start;
2526
2527 m.msg.stream = req_impl->stream;
2528 m.msg.pid = lib->pid;
2529 m.msg.reply_port = 0;
2530 m.msg.type = _NXT_PORT_MSG_DATA;
2531 m.msg.last = last != 0;
2532 m.msg.mmap = hdr != NULL && m.mmap_msg.size > 0;
2533 m.msg.nf = 0;
2534 m.msg.mf = 0;
2535 m.msg.tracking = 0;
2536
2537 rc = NXT_UNIT_ERROR;
2538
2539 if (m.msg.mmap) {
2540 m.mmap_msg.mmap_id = hdr->id;
2541 m.mmap_msg.chunk_id = nxt_port_mmap_chunk_id(hdr,
2542 (u_char *) buf->start);
2543
2544 nxt_unit_debug(req->ctx, "#%"PRIu32": send mmap: (%d,%d,%d)",
2545 req_impl->stream,
2546 (int) m.mmap_msg.mmap_id,
2547 (int) m.mmap_msg.chunk_id,
2548 (int) m.mmap_msg.size);
2549
2550 res = nxt_unit_port_send(req->ctx, req->response_port, &m, sizeof(m),
2551 NULL, 0);
2552 if (nxt_slow_path(res != sizeof(m))) {
2553 goto free_buf;
2554 }
2555
2556 last_used = (u_char *) buf->free - 1;
2557 first_free_chunk = nxt_port_mmap_chunk_id(hdr, last_used) + 1;
2558
2559 if (buf->end - buf->free >= PORT_MMAP_CHUNK_SIZE) {
2560 first_free = nxt_port_mmap_chunk_start(hdr, first_free_chunk);
2561
2562 buf->start = (char *) first_free;
2563 buf->free = buf->start;
2564
2565 if (buf->end < buf->start) {
2566 buf->end = buf->start;
2567 }
2568
2569 } else {
2570 buf->start = NULL;
2571 buf->free = NULL;
2572 buf->end = NULL;
2573
2574 mmap_buf->hdr = NULL;
2575 }
2576
2577 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks,
2578 (int) m.mmap_msg.chunk_id - (int) first_free_chunk);
2579
2580 nxt_unit_debug(req->ctx, "allocated_chunks %d",
2581 (int) lib->outgoing.allocated_chunks);
2582
2583 } else {
2584 if (nxt_slow_path(mmap_buf->plain_ptr == NULL
2585 || mmap_buf->plain_ptr > buf->start - sizeof(m.msg)))
2586 {
2587 nxt_unit_alert(req->ctx,
2588 "#%"PRIu32": failed to send plain memory buffer"
2589 ": no space reserved for message header",
2590 req_impl->stream);
2591
2592 goto free_buf;
2593 }
2594
2595 memcpy(buf->start - sizeof(m.msg), &m.msg, sizeof(m.msg));
2596
2597 nxt_unit_debug(req->ctx, "#%"PRIu32": send plain: %d",
2598 req_impl->stream,
2599 (int) (sizeof(m.msg) + m.mmap_msg.size));
2600
2601 res = nxt_unit_port_send(req->ctx, req->response_port,
2602 buf->start - sizeof(m.msg),
2603 m.mmap_msg.size + sizeof(m.msg),
2604 NULL, 0);
2605 if (nxt_slow_path(res != (ssize_t) (m.mmap_msg.size + sizeof(m.msg)))) {
2606 goto free_buf;
2607 }
2608 }
2609
2610 rc = NXT_UNIT_OK;
2611
2612free_buf:
2613
2614 nxt_unit_free_outgoing_buf(mmap_buf);
2615
2616 return rc;
2617}
2618
2619
2620void
2621nxt_unit_buf_free(nxt_unit_buf_t *buf)
2622{
2623 nxt_unit_mmap_buf_free(nxt_container_of(buf, nxt_unit_mmap_buf_t, buf));
2624}
2625
2626
2627static void
2628nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf)
2629{
2630 nxt_unit_free_outgoing_buf(mmap_buf);
2631
2632 nxt_unit_mmap_buf_release(mmap_buf);
2633}
2634
2635
2636static void
2637nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf)
2638{
2639 if (mmap_buf->hdr != NULL) {
2640 nxt_unit_mmap_release(&mmap_buf->ctx_impl->ctx,
2641 mmap_buf->hdr, mmap_buf->buf.start,
2642 mmap_buf->buf.end - mmap_buf->buf.start);
2643
2644 mmap_buf->hdr = NULL;
2645
2646 return;
2647 }
2648
2649 if (mmap_buf->free_ptr != NULL) {
2650 nxt_unit_free(&mmap_buf->ctx_impl->ctx, mmap_buf->free_ptr);
2651
2652 mmap_buf->free_ptr = NULL;
2653 }
2654}
2655
2656
2657static nxt_unit_read_buf_t *
2658nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx)
2659{
2660 nxt_unit_ctx_impl_t *ctx_impl;
2661 nxt_unit_read_buf_t *rbuf;
2662
2663 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
2664
2665 pthread_mutex_lock(&ctx_impl->mutex);
2666
2667 rbuf = nxt_unit_read_buf_get_impl(ctx_impl);
2668
2669 pthread_mutex_unlock(&ctx_impl->mutex);
2670
2671 memset(rbuf->oob, 0, sizeof(struct cmsghdr));
2672
2673 return rbuf;
2674}
2675
2676
2677static nxt_unit_read_buf_t *
2678nxt_unit_read_buf_get_impl(nxt_unit_ctx_impl_t *ctx_impl)
2679{
2680 nxt_queue_link_t *link;
2681 nxt_unit_read_buf_t *rbuf;
2682
2683 if (!nxt_queue_is_empty(&ctx_impl->free_rbuf)) {
2684 link = nxt_queue_first(&ctx_impl->free_rbuf);
2685 nxt_queue_remove(link);
2686
2687 rbuf = nxt_container_of(link, nxt_unit_read_buf_t, link);
2688
2689 return rbuf;
2690 }
2691
2692 rbuf = nxt_unit_malloc(&ctx_impl->ctx, sizeof(nxt_unit_read_buf_t));
2693
2694 if (nxt_fast_path(rbuf != NULL)) {
2695 rbuf->ctx_impl = ctx_impl;
2696 }
2697
2698 return rbuf;
2699}
2700
2701
2702static void
2703nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx,
2704 nxt_unit_read_buf_t *rbuf)
2705{
2706 nxt_unit_ctx_impl_t *ctx_impl;
2707
2708 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
2709
2710 pthread_mutex_lock(&ctx_impl->mutex);
2711
2712 nxt_queue_insert_head(&ctx_impl->free_rbuf, &rbuf->link);
2713
2714 pthread_mutex_unlock(&ctx_impl->mutex);
2715}
2716
2717
2718nxt_unit_buf_t *
2719nxt_unit_buf_next(nxt_unit_buf_t *buf)
2720{
2721 nxt_unit_mmap_buf_t *mmap_buf;
2722
2723 mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf);
2724
2725 if (mmap_buf->next == NULL) {
2726 return NULL;
2727 }
2728
2729 return &mmap_buf->next->buf;
2730}
2731
2732
2733uint32_t
2734nxt_unit_buf_max(void)
2735{
2736 return PORT_MMAP_DATA_SIZE;
2737}
2738
2739
2740uint32_t
2741nxt_unit_buf_min(void)
2742{
2743 return PORT_MMAP_CHUNK_SIZE;
2744}
2745
2746
2747int
2748nxt_unit_response_write(nxt_unit_request_info_t *req, const void *start,
2749 size_t size)
2750{
2751 ssize_t res;
2752
2753 res = nxt_unit_response_write_nb(req, start, size, size);
2754
2755 return res < 0 ? -res : NXT_UNIT_OK;
2756}
2757
2758
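/*
 * Writes up to "size" bytes of response content and returns the number
 * of bytes actually sent, or a negative NXT_UNIT_* error code.  The
 * pending headers buffer, if any, is filled and sent first; the rest is
 * streamed through outgoing buffers of at most PORT_MMAP_DATA_SIZE
 * bytes, with "min_size" forwarded as the minimal acceptable size when
 * allocating each outgoing buffer.
 */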
2759ssize_t
2760nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start,
2761 size_t size, size_t min_size)
2762{
2763 int rc;
2764 ssize_t sent;
2765 uint32_t part_size, min_part_size, buf_size;
2766 const char *part_start;
2767 nxt_unit_mmap_buf_t mmap_buf;
2768 nxt_unit_request_info_impl_t *req_impl;
2769 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE];
2770
2771 nxt_unit_req_debug(req, "write: %d", (int) size);
2772
2773 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2774
2775 part_start = start;
2776 sent = 0;
2777
2778 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2779 nxt_unit_req_alert(req, "write: response not initialized yet");
2780
2781 return -NXT_UNIT_ERROR;
2782 }
2783
2784    /* Check whether the response has not been sent yet. */
2785 if (nxt_slow_path(req->response_buf != NULL)) {
2786 part_size = req->response_buf->end - req->response_buf->free;
2787 part_size = nxt_min(size, part_size);
2788
2789 rc = nxt_unit_response_add_content(req, part_start, part_size);
2790 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2791 return -rc;
2792 }
2793
2794 rc = nxt_unit_response_send(req);
2795 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2796 return -rc;
2797 }
2798
2799 size -= part_size;
2800 part_start += part_size;
2801 sent += part_size;
2802
2803 min_size -= nxt_min(min_size, part_size);
2804 }
2805
2806 while (size > 0) {
2807 part_size = nxt_min(size, PORT_MMAP_DATA_SIZE);
2808 min_part_size = nxt_min(min_size, part_size);
2809 min_part_size = nxt_min(min_part_size, PORT_MMAP_CHUNK_SIZE);
2810
2811 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, part_size,
2812 min_part_size, &mmap_buf, local_buf);
2813 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2814 return -rc;
2815 }
2816
2817 buf_size = mmap_buf.buf.end - mmap_buf.buf.free;
2818 if (nxt_slow_path(buf_size == 0)) {
2819 return sent;
2820 }
2821 part_size = nxt_min(buf_size, part_size);
2822
2823 mmap_buf.buf.free = nxt_cpymem(mmap_buf.buf.free,
2824 part_start, part_size);
2825
2826 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
2827 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2828 return -rc;
2829 }
2830
2831 size -= part_size;
2832 part_start += part_size;
2833 sent += part_size;
2834
2835 min_size -= nxt_min(min_size, part_size);
2836 }
2837
2838 return sent;
2839}
2840
2841
2842int
2843nxt_unit_response_write_cb(nxt_unit_request_info_t *req,
2844 nxt_unit_read_info_t *read_info)
2845{
2846 int rc;
2847 ssize_t n;
2848 uint32_t buf_size;
2849 nxt_unit_buf_t *buf;
2850 nxt_unit_mmap_buf_t mmap_buf;
2851 nxt_unit_request_info_impl_t *req_impl;
2852 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE];
2853
2854 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
2855
2856 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
2857 nxt_unit_req_alert(req, "write: response not initialized yet");
2858
2859 return NXT_UNIT_ERROR;
2860 }
2861
2862    /* Check whether the response has not been sent yet. */
2863 if (nxt_slow_path(req->response_buf != NULL)) {
2864
2865        /* Enable piggyback content in the headers buffer. */
2866 rc = nxt_unit_response_add_content(req, "", 0);
2867 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2868 nxt_unit_req_error(req, "Failed to add piggyback content");
2869
2870 return rc;
2871 }
2872
2873 buf = req->response_buf;
2874
2875 while (buf->end - buf->free > 0) {
2876 n = read_info->read(read_info, buf->free, buf->end - buf->free);
2877 if (nxt_slow_path(n < 0)) {
2878 nxt_unit_req_error(req, "Read error");
2879
2880 return NXT_UNIT_ERROR;
2881 }
2882
2883 /* Manually increase sizes. */
2884 buf->free += n;
2885 req->response->piggyback_content_length += n;
2886
2887 if (read_info->eof) {
2888 break;
2889 }
2890 }
2891
2892 rc = nxt_unit_response_send(req);
2893 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2894 nxt_unit_req_error(req, "Failed to send headers with content");
2895
2896 return rc;
2897 }
2898
2899 if (read_info->eof) {
2900 return NXT_UNIT_OK;
2901 }
2902 }
2903
2904 while (!read_info->eof) {
2905 nxt_unit_req_debug(req, "write_cb, alloc %"PRIu32"",
2906 read_info->buf_size);
2907
2908 buf_size = nxt_min(read_info->buf_size, PORT_MMAP_DATA_SIZE);
2909
2910 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
2911 buf_size, buf_size,
2912 &mmap_buf, local_buf);
2913 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2914 return rc;
2915 }
2916
2917 buf = &mmap_buf.buf;
2918
2919 while (!read_info->eof && buf->end > buf->free) {
2920 n = read_info->read(read_info, buf->free, buf->end - buf->free);
2921 if (nxt_slow_path(n < 0)) {
2922 nxt_unit_req_error(req, "Read error");
2923
2924 nxt_unit_free_outgoing_buf(&mmap_buf);
2925
2926 return NXT_UNIT_ERROR;
2927 }
2928
2929 buf->free += n;
2930 }
2931
2932 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
2933 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
2934 nxt_unit_req_error(req, "Failed to send content");
2935
2936 return rc;
2937 }
2938 }
2939
2940 return NXT_UNIT_OK;
2941}
2942
2943
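/*
 * Reads up to "size" bytes of request content into dst: buffered content is
 * consumed first via nxt_unit_buf_read(), then req->content_fd, if open, is
 * read for the remainder; the descriptor is closed after a short read.
 */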
2944ssize_t
2945nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size)
2946{
2947 ssize_t buf_res, res;
2948
2949 buf_res = nxt_unit_buf_read(&req->content_buf, &req->content_length,
2950 dst, size);
2951
2952 nxt_unit_req_debug(req, "read: %d", (int) buf_res);
2953
2954 if (buf_res < (ssize_t) size && req->content_fd != -1) {
2955 res = read(req->content_fd, dst, size);
2956 if (nxt_slow_path(res < 0)) {
2957 nxt_unit_req_alert(req, "failed to read content: %s (%d)",
2958 strerror(errno), errno);
2959
2960 return res;
2961 }
2962
2963 if (res < (ssize_t) size) {
2964 nxt_unit_close(req->content_fd);
2965
2966 req->content_fd = -1;
2967 }
2968
2969 req->content_length -= res;
2970 size -= res;
2971
2972 dst = nxt_pointer_to(dst, res);
2973
2974 } else {
2975 res = 0;
2976 }
2977
2978 return buf_res + res;
2979}
2980
2981
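/*
 * Returns the length of the next line (including the '\n') in the buffered
 * request content, capped at max_size.  If the buffer chain ends before a
 * newline is found, additional content is preread from req->content_fd in
 * 16K chunks and appended to the chain.
 */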
2982ssize_t
2983nxt_unit_request_readline_size(nxt_unit_request_info_t *req, size_t max_size)
2984{
2985 char *p;
2986 size_t l_size, b_size;
2987 nxt_unit_buf_t *b;
2988 nxt_unit_mmap_buf_t *mmap_buf, *preread_buf;
2989
2990 if (req->content_length == 0) {
2991 return 0;
2992 }
2993
2994 l_size = 0;
2995
2996 b = req->content_buf;
2997
2998 while (b != NULL) {
2999 b_size = b->end - b->free;
3000 p = memchr(b->free, '\n', b_size);
3001
3002 if (p != NULL) {
3003 p++;
3004 l_size += p - b->free;
3005 break;
3006 }
3007
3008 l_size += b_size;
3009
3010 if (max_size <= l_size) {
3011 break;
3012 }
3013
3014 mmap_buf = nxt_container_of(b, nxt_unit_mmap_buf_t, buf);
3015 if (mmap_buf->next == NULL
3016 && req->content_fd != -1
3017 && l_size < req->content_length)
3018 {
3019 preread_buf = nxt_unit_request_preread(req, 16384);
3020 if (nxt_slow_path(preread_buf == NULL)) {
3021 return -1;
3022 }
3023
3024 nxt_unit_mmap_buf_insert(&mmap_buf->next, preread_buf);
3025 }
3026
3027 b = nxt_unit_buf_next(b);
3028 }
3029
3030 return nxt_min(max_size, l_size);
3031}
3032
3033
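/*
 * Allocates a malloc-backed buffer and fills it with up to "size" bytes
 * read from req->content_fd; the descriptor is closed after a short read.
 */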
3034static nxt_unit_mmap_buf_t *
3035nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size)
3036{
3037 ssize_t res;
3038 nxt_unit_mmap_buf_t *mmap_buf;
3039
3040 if (req->content_fd == -1) {
3041 nxt_unit_req_alert(req, "preread: content_fd == -1");
3042 return NULL;
3043 }
3044
3045 mmap_buf = nxt_unit_mmap_buf_get(req->ctx);
3046 if (nxt_slow_path(mmap_buf == NULL)) {
3047 nxt_unit_req_alert(req, "preread: failed to allocate buf");
3048 return NULL;
3049 }
3050
3051 mmap_buf->free_ptr = nxt_unit_malloc(req->ctx, size);
3052 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) {
3053 nxt_unit_req_alert(req, "preread: failed to allocate buf memory");
3054 nxt_unit_mmap_buf_release(mmap_buf);
3055 return NULL;
3056 }
3057
3058 mmap_buf->plain_ptr = mmap_buf->free_ptr;
3059
3060 mmap_buf->hdr = NULL;
3061 mmap_buf->buf.start = mmap_buf->free_ptr;
3062 mmap_buf->buf.free = mmap_buf->buf.start;
3063 mmap_buf->buf.end = mmap_buf->buf.start + size;
3064
3065 res = read(req->content_fd, mmap_buf->free_ptr, size);
3066 if (res < 0) {
3067 nxt_unit_req_alert(req, "failed to read content: %s (%d)",
3068 strerror(errno), errno);
3069
3070 nxt_unit_mmap_buf_free(mmap_buf);
3071
3072 return NULL;
3073 }
3074
3075 if (res < (ssize_t) size) {
3076 nxt_unit_close(req->content_fd);
3077
3078 req->content_fd = -1;
3079 }
3080
3081 nxt_unit_req_debug(req, "preread: read %d", (int) res);
3082
3083 mmap_buf->buf.end = mmap_buf->buf.free + res;
3084
3085 return mmap_buf;
3086}
3087
3088
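/*
 * Copies up to "size" bytes from the buffer chain "*b" into dst, advancing
 * "*b" to the buffer where reading stopped and decreasing "*len" by the
 * number of bytes copied.
 */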
3089static ssize_t
3090nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, size_t size)
3091{
3092 u_char *p;
3093 size_t rest, copy, read;
3094 nxt_unit_buf_t *buf, *last_buf;
3095
3096 p = dst;
3097 rest = size;
3098
3099 buf = *b;
3100 last_buf = buf;
3101
3102 while (buf != NULL) {
3103 last_buf = buf;
3104
3105 copy = buf->end - buf->free;
3106 copy = nxt_min(rest, copy);
3107
3108 p = nxt_cpymem(p, buf->free, copy);
3109
3110 buf->free += copy;
3111 rest -= copy;
3112
3113 if (rest == 0) {
3114 if (buf->end == buf->free) {
3115 buf = nxt_unit_buf_next(buf);
3116 }
3117
3118 break;
3119 }
3120
3121 buf = nxt_unit_buf_next(buf);
3122 }
3123
3124 *b = last_buf;
3125
3126 read = size - rest;
3127
3128 *len -= read;
3129
3130 return read;
3131}
3132
3133
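/*
 * Finishes request processing.  On success, a minimal 200 "text/plain"
 * response is initialized if the application has not started one, and the
 * pending response buffer is flushed; if the response was already sent, or
 * rc indicates an error, a final DATA or RPC_ERROR message is sent to the
 * response port and the request is released.
 */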
3134void
3135nxt_unit_request_done(nxt_unit_request_info_t *req, int rc)
3136{
3137 uint32_t size;
3138 nxt_port_msg_t msg;
3139 nxt_unit_impl_t *lib;
3140 nxt_unit_request_info_impl_t *req_impl;
3141
3142 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
3143
3144 nxt_unit_req_debug(req, "done: %d", rc);
3145
3146 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3147 goto skip_response_send;
3148 }
3149
3150 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) {
3151
3152 size = nxt_length("Content-Type") + nxt_length("text/plain");
3153
3154 rc = nxt_unit_response_init(req, 200, 1, size);
3155 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3156 goto skip_response_send;
3157 }
3158
3159 rc = nxt_unit_response_add_field(req, "Content-Type",
3160 nxt_length("Content-Type"),
3161 "text/plain", nxt_length("text/plain"));
3162 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3163 goto skip_response_send;
3164 }
3165 }
3166
3167 if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_SENT)) {
3168
3169 req_impl->state = NXT_UNIT_RS_RESPONSE_SENT;
3170
3171 nxt_unit_buf_send_done(req->response_buf);
3172
3173 return;
3174 }
3175
3176skip_response_send:
3177
3178 lib = nxt_container_of(req->unit, nxt_unit_impl_t, unit);
3179
3180 msg.stream = req_impl->stream;
3181 msg.pid = lib->pid;
3182 msg.reply_port = 0;
3183 msg.type = (rc == NXT_UNIT_OK) ? _NXT_PORT_MSG_DATA
3184 : _NXT_PORT_MSG_RPC_ERROR;
3185 msg.last = 1;
3186 msg.mmap = 0;
3187 msg.nf = 0;
3188 msg.mf = 0;
3189 msg.tracking = 0;
3190
3191 (void) nxt_unit_port_send(req->ctx, req->response_port,
3192 &msg, sizeof(msg), NULL, 0);
3193
3194 nxt_unit_request_info_release(req);
3195}
3196
3197
3198int
3199nxt_unit_websocket_send(nxt_unit_request_info_t *req, uint8_t opcode,
3200 uint8_t last, const void *start, size_t size)
3201{
3202 const struct iovec iov = { (void *) start, size };
3203
3204 return nxt_unit_websocket_sendv(req, opcode, last, &iov, 1);
3205}
3206
3207
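/*
 * Sends a websocket frame: a frame header for the total payload length is
 * written first, then the iovec contents are copied into outgoing buffers,
 * sending and re-allocating buffers whenever the current one fills up.
 */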
3208int
3209nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode,
3210 uint8_t last, const struct iovec *iov, int iovcnt)
3211{
3212 int i, rc;
3213 size_t l, copy;
3214 uint32_t payload_len, buf_size, alloc_size;
3215 const uint8_t *b;
3216 nxt_unit_buf_t *buf;
3217 nxt_unit_mmap_buf_t mmap_buf;
3218 nxt_websocket_header_t *wh;
3219 char local_buf[NXT_UNIT_LOCAL_BUF_SIZE];
3220
3221 payload_len = 0;
3222
3223 for (i = 0; i < iovcnt; i++) {
3224 payload_len += iov[i].iov_len;
3225 }
3226
3227 buf_size = 10 + payload_len;
3228 alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);
3229
3230 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
3231 alloc_size, alloc_size,
3232 &mmap_buf, local_buf);
3233 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3234 return rc;
3235 }
3236
3237 buf = &mmap_buf.buf;
3238
3239 buf->start[0] = 0;
3240 buf->start[1] = 0;
3241
3242 buf_size -= buf->end - buf->start;
3243
3244 wh = (void *) buf->free;
3245
3246 buf->free = nxt_websocket_frame_init(wh, payload_len);
3247 wh->fin = last;
3248 wh->opcode = opcode;
3249
3250 for (i = 0; i < iovcnt; i++) {
3251 b = iov[i].iov_base;
3252 l = iov[i].iov_len;
3253
3254 while (l > 0) {
3255 copy = buf->end - buf->free;
3256 copy = nxt_min(l, copy);
3257
3258 buf->free = nxt_cpymem(buf->free, b, copy);
3259 b += copy;
3260 l -= copy;
3261
3262 if (l > 0) {
3263 if (nxt_fast_path(buf->free > buf->start)) {
3264 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
3265
3266 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3267 return rc;
3268 }
3269 }
3270
3271 alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE);
3272
3273 rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port,
3274 alloc_size, alloc_size,
3275 &mmap_buf, local_buf);
3276 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3277 return rc;
3278 }
3279
3280 buf_size -= buf->end - buf->start;
3281 }
3282 }
3283 }
3284
3285 if (buf->free > buf->start) {
3286 rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0);
3287 }
3288
3289 return rc;
3290}
3291
3292
3293ssize_t
3294nxt_unit_websocket_read(nxt_unit_websocket_frame_t *ws, void *dst,
3295 size_t size)
3296{
3297 ssize_t res;
3298 uint8_t *b;
3299 uint64_t i, d;
3300
3301 res = nxt_unit_buf_read(&ws->content_buf, &ws->content_length,
3302 dst, size);
3303
3304 if (ws->mask == NULL) {
3305 return res;
3306 }
3307
3308 b = dst;
3309 d = (ws->payload_len - ws->content_length - res) % 4;
3310
3311 for (i = 0; i < (uint64_t) res; i++) {
3312 b[i] ^= ws->mask[ (i + d) % 4 ];
3313 }
3314
3315 return res;
3316}
3317
3318
3319int
3320nxt_unit_websocket_retain(nxt_unit_websocket_frame_t *ws)
3321{
3322 char *b;
3323 size_t size, hsize;
3324 nxt_unit_websocket_frame_impl_t *ws_impl;
3325
3326 ws_impl = nxt_container_of(ws, nxt_unit_websocket_frame_impl_t, ws);
3327
3328 if (ws_impl->buf->free_ptr != NULL || ws_impl->buf->hdr != NULL) {
3329 return NXT_UNIT_OK;
3330 }
3331
3332 size = ws_impl->buf->buf.end - ws_impl->buf->buf.start;
3333
3334 b = nxt_unit_malloc(ws->req->ctx, size);
3335 if (nxt_slow_path(b == NULL)) {
3336 return NXT_UNIT_ERROR;
3337 }
3338
3339 memcpy(b, ws_impl->buf->buf.start, size);
3340
3341 hsize = nxt_websocket_frame_header_size(b);
3342
3343 ws_impl->buf->buf.start = b;
3344 ws_impl->buf->buf.free = b + hsize;
3345 ws_impl->buf->buf.end = b + size;
3346
3347 ws_impl->buf->free_ptr = b;
3348
3349 ws_impl->ws.header = (nxt_websocket_header_t *) b;
3350
3351 if (ws_impl->ws.header->mask) {
3352 ws_impl->ws.mask = (uint8_t *) b + hsize - 4;
3353
3354 } else {
3355 ws_impl->ws.mask = NULL;
3356 }
3357
3358 return NXT_UNIT_OK;
3359}
3360
3361
3362void
3363nxt_unit_websocket_done(nxt_unit_websocket_frame_t *ws)
3364{
3365 nxt_unit_websocket_frame_release(ws);
3366}
3367
3368
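/*
 * Finds "*n" contiguous free chunks (at least min_n of them) in an outgoing
 * shared memory segment usable with "port".  When all segments are full and
 * the mmap limit is reached, the router is notified with an OOSM message
 * and, unless min_n is 0, the call blocks until an SHM_ACK arrives;
 * otherwise a new segment is created.
 */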
3369static nxt_port_mmap_header_t *
3370nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
3371 nxt_chunk_id_t *c, int *n, int min_n)
3372{
3373 int res, nchunks, i;
3374 uint32_t outgoing_size;
3375 nxt_unit_mmap_t *mm, *mm_end;
3376 nxt_unit_impl_t *lib;
3377 nxt_port_mmap_header_t *hdr;
3378
3379 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3380
3381 pthread_mutex_lock(&lib->outgoing.mutex);
3382
3383retry:
3384
3385 outgoing_size = lib->outgoing.size;
3386
3387 mm_end = lib->outgoing.elts + outgoing_size;
3388
3389 for (mm = lib->outgoing.elts; mm < mm_end; mm++) {
3390 hdr = mm->hdr;
3391
3392 if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port->id.id) {
3393 continue;
3394 }
3395
3396 *c = 0;
3397
3398 while (nxt_port_mmap_get_free_chunk(hdr->free_map, c)) {
3399 nchunks = 1;
3400
3401 while (nchunks < *n) {
3402 res = nxt_port_mmap_chk_set_chunk_busy(hdr->free_map,
3403 *c + nchunks);
3404
3405 if (res == 0) {
3406 if (nchunks >= min_n) {
3407 *n = nchunks;
3408
3409 goto unlock;
3410 }
3411
3412 for (i = 0; i < nchunks; i++) {
3413 nxt_port_mmap_set_chunk_free(hdr->free_map, *c + i);
3414 }
3415
3416 *c += nchunks + 1;
3417 nchunks = 0;
3418 break;
3419 }
3420
3421 nchunks++;
3422 }
3423
3424 if (nchunks >= min_n) {
3425 *n = nchunks;
3426
3427 goto unlock;
3428 }
3429 }
3430
3431 hdr->oosm = 1;
3432 }
3433
3434 if (outgoing_size >= lib->shm_mmap_limit) {
3435 /* Cannot allocate more shared memory. */
3436 pthread_mutex_unlock(&lib->outgoing.mutex);
3437
3438 if (min_n == 0) {
3439 *n = 0;
3440 }
3441
3442 if (nxt_slow_path(lib->outgoing.allocated_chunks + min_n
3443 >= lib->shm_mmap_limit * PORT_MMAP_CHUNK_COUNT))
3444 {
3445 /* Memory allocated by the application but not yet sent to the router. */
3446 return NULL;
3447 }
3448
3449 /* Notify router about OOSM condition. */
3450
3451 res = nxt_unit_send_oosm(ctx, port);
3452 if (nxt_slow_path(res != NXT_UNIT_OK)) {
3453 return NULL;
3454 }
3455
3456 /* Return if the caller can handle the OOSM condition (non-blocking mode). */
3457
3458 if (min_n == 0) {
3459 return NULL;
3460 }
3461
3462 nxt_unit_debug(ctx, "oosm: waiting for ACK");
3463
3464 res = nxt_unit_wait_shm_ack(ctx);
3465 if (nxt_slow_path(res != NXT_UNIT_OK)) {
3466 return NULL;
3467 }
3468
3469 nxt_unit_debug(ctx, "oosm: retry");
3470
3471 pthread_mutex_lock(&lib->outgoing.mutex);
3472
3473 goto retry;
3474 }
3475
3476 *c = 0;
3477 hdr = nxt_unit_new_mmap(ctx, port, *n);
3478
3479unlock:
3480
3481 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, *n);
3482
3483 nxt_unit_debug(ctx, "allocated_chunks %d",
3484 (int) lib->outgoing.allocated_chunks);
3485
3486 pthread_mutex_unlock(&lib->outgoing.mutex);
3487
3488 return hdr;
3489}
3490
3491
3492static int
3493nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
3494{
3495 ssize_t res;
3496 nxt_port_msg_t msg;
3497 nxt_unit_impl_t *lib;
3498
3499 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3500
3501 msg.stream = 0;
3502 msg.pid = lib->pid;
3503 msg.reply_port = 0;
3504 msg.type = _NXT_PORT_MSG_OOSM;
3505 msg.last = 0;
3506 msg.mmap = 0;
3507 msg.nf = 0;
3508 msg.mf = 0;
3509 msg.tracking = 0;
3510
3511 res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL, 0);
3512 if (nxt_slow_path(res != sizeof(msg))) {
3513 return NXT_UNIT_ERROR;
3514 }
3515
3516 return NXT_UNIT_OK;
3517}
3518
3519
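/*
 * Blocks on the context read port until an SHM_ACK message arrives; other
 * messages received meanwhile are queued as pending, and a QUIT message
 * aborts the wait with an error.
 */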
3520static int
3521nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx)
3522{
3523 int res;
3524 nxt_unit_ctx_impl_t *ctx_impl;
3525 nxt_unit_read_buf_t *rbuf;
3526
3527 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
3528
3529 while (1) {
3530 rbuf = nxt_unit_read_buf_get(ctx);
3531 if (nxt_slow_path(rbuf == NULL)) {
3532 return NXT_UNIT_ERROR;
3533 }
3534
3535 res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
3536 if (res == NXT_UNIT_ERROR) {
3537 nxt_unit_read_buf_release(ctx, rbuf);
3538
3539 return NXT_UNIT_ERROR;
3540 }
3541
3542 if (nxt_unit_is_shm_ack(rbuf)) {
3543 nxt_unit_read_buf_release(ctx, rbuf);
3544 break;
3545 }
3546
3547 pthread_mutex_lock(&ctx_impl->mutex);
3548
3549 nxt_queue_insert_tail(&ctx_impl->pending_rbuf, &rbuf->link);
3550
3551 pthread_mutex_unlock(&ctx_impl->mutex);
3552
3553 if (nxt_unit_is_quit(rbuf)) {
3554 nxt_unit_debug(ctx, "oosm: quit received");
3555
3556 return NXT_UNIT_ERROR;
3557 }
3558 }
3559
3560 return NXT_UNIT_OK;
3561}
3562
3563
3564static nxt_unit_mmap_t *
3565nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i)
3566{
3567 uint32_t cap, n;
3568 nxt_unit_mmap_t *e;
3569
3570 if (nxt_fast_path(mmaps->size > i)) {
3571 return mmaps->elts + i;
3572 }
3573
3574 cap = mmaps->cap;
3575
3576 if (cap == 0) {
3577 cap = i + 1;
3578 }
3579
3580 while (i + 1 > cap) {
3581
3582 if (cap < 16) {
3583 cap = cap * 2;
3584
3585 } else {
3586 cap = cap + cap / 2;
3587 }
3588 }
3589
3590 if (cap != mmaps->cap) {
3591
3592 e = realloc(mmaps->elts, cap * sizeof(nxt_unit_mmap_t));
3593 if (nxt_slow_path(e == NULL)) {
3594 return NULL;
3595 }
3596
3597 mmaps->elts = e;
3598
3599 for (n = mmaps->cap; n < cap; n++) {
3600 e = mmaps->elts + n;
3601
3602 e->hdr = NULL;
3603 nxt_queue_init(&e->awaiting_rbuf);
3604 }
3605
3606 mmaps->cap = cap;
3607 }
3608
3609 if (i + 1 > mmaps->size) {
3610 mmaps->size = i + 1;
3611 }
3612
3613 return mmaps->elts + i;
3614}
3615
3616
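/*
 * Creates a new outgoing shared memory segment: opens an anonymous shared
 * memory object, maps it, marks the first n chunks as busy, and passes the
 * descriptor to the router over "port".  Called with the outgoing mutex
 * held; the mutex is released temporarily around the send.
 */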
3617static nxt_port_mmap_header_t *
3618nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n)
3619{
3620 int i, fd, rc;
3621 void *mem;
3622 nxt_unit_mmap_t *mm;
3623 nxt_unit_impl_t *lib;
3624 nxt_port_mmap_header_t *hdr;
3625
3626 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3627
3628 mm = nxt_unit_mmap_at(&lib->outgoing, lib->outgoing.size);
3629 if (nxt_slow_path(mm == NULL)) {
3630 nxt_unit_alert(ctx, "failed to add mmap to outgoing array");
3631
3632 return NULL;
3633 }
3634
3635 fd = nxt_unit_shm_open(ctx, PORT_MMAP_SIZE);
3636 if (nxt_slow_path(fd == -1)) {
3637 goto remove_fail;
3638 }
3639
3640 mem = mmap(NULL, PORT_MMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
3641 if (nxt_slow_path(mem == MAP_FAILED)) {
3642 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", fd,
3643 strerror(errno), errno);
3644
3645 nxt_unit_close(fd);
3646
3647 goto remove_fail;
3648 }
3649
3650 mm->hdr = mem;
3651 hdr = mem;
3652
3653 memset(hdr->free_map, 0xFFU, sizeof(hdr->free_map));
3654 memset(hdr->free_tracking_map, 0xFFU, sizeof(hdr->free_tracking_map));
3655
3656 hdr->id = lib->outgoing.size - 1;
3657 hdr->src_pid = lib->pid;
3658 hdr->dst_pid = port->id.pid;
3659 hdr->sent_over = port->id.id;
3660
3661 /* Mark the first n chunk(s) as busy. */
3662 for (i = 0; i < n; i++) {
3663 nxt_port_mmap_set_chunk_busy(hdr->free_map, i);
3664 }
3665
3666 /* Mark the chunk following the last available chunk as busy. */
3667 nxt_port_mmap_set_chunk_busy(hdr->free_map, PORT_MMAP_CHUNK_COUNT);
3668 nxt_port_mmap_set_chunk_busy(hdr->free_tracking_map, PORT_MMAP_CHUNK_COUNT);
3669
3670 pthread_mutex_unlock(&lib->outgoing.mutex);
3671
3672 rc = nxt_unit_send_mmap(ctx, port, fd);
3673 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
3674 munmap(mem, PORT_MMAP_SIZE);
3675 hdr = NULL;
3676
3677 } else {
3678 nxt_unit_debug(ctx, "new mmap #%"PRIu32" created for %d -> %d",
3679 hdr->id, (int) lib->pid, (int) port->id.pid);
3680 }
3681
3682 nxt_unit_close(fd);
3683
3684 pthread_mutex_lock(&lib->outgoing.mutex);
3685
3686 if (nxt_fast_path(hdr != NULL)) {
3687 return hdr;
3688 }
3689
3690remove_fail:
3691
3692 lib->outgoing.size--;
3693
3694 return NULL;
3695}
3696
3697
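/*
 * Opens an anonymous shared memory object of the given size using
 * memfd_create(), shm_open(SHM_ANON) or a shm_open()/shm_unlink() pair,
 * depending on what the platform provides.
 */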
3698static int
3699nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size)
3700{
3701 int fd;
3702 nxt_unit_impl_t *lib;
3703
3704 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3705
3706#if (NXT_HAVE_MEMFD_CREATE || NXT_HAVE_SHM_OPEN)
3707 char name[64];
3708
3709 snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p",
3710 lib->pid, (void *) pthread_self());
3711#endif
3712
3713#if (NXT_HAVE_MEMFD_CREATE)
3714
3715 fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC);
3716 if (nxt_slow_path(fd == -1)) {
3717 nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name,
3718 strerror(errno), errno);
3719
3720 return -1;
3721 }
3722
3723 nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd);
3724
3725#elif (NXT_HAVE_SHM_OPEN_ANON)
3726
3727 fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR);
3728 if (nxt_slow_path(fd == -1)) {
3729 nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)",
3730 strerror(errno), errno);
3731
3732 return -1;
3733 }
3734
3735#elif (NXT_HAVE_SHM_OPEN)
3736
3737 /* Remove a possibly stale object left from a previous run, just in case. */
3738 shm_unlink(name);
3739
3740 fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
3741 if (nxt_slow_path(fd == -1)) {
3742 nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name,
3743 strerror(errno), errno);
3744
3745 return -1;
3746 }
3747
3748 if (nxt_slow_path(shm_unlink(name) == -1)) {
3749 nxt_unit_alert(ctx, "shm_unlink(%s) failed: %s (%d)", name,
3750 strerror(errno), errno);
3751 }
3752
3753#else
3754
3755#error No working shared memory implementation.
3756
3757#endif
3758
3759 if (nxt_slow_path(ftruncate(fd, size) == -1)) {
3760 nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd,
3761 strerror(errno), errno);
3762
3763 nxt_unit_close(fd);
3764
3765 return -1;
3766 }
3767
3768 return fd;
3769}
3770
3771
3772static int
3773nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int fd)
3774{
3775 ssize_t res;
3776 nxt_port_msg_t msg;
3777 nxt_unit_impl_t *lib;
3778 union {
3779 struct cmsghdr cm;
3780 char space[CMSG_SPACE(sizeof(int))];
3781 } cmsg;
3782
3783 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3784
3785 msg.stream = 0;
3786 msg.pid = lib->pid;
3787 msg.reply_port = 0;
3788 msg.type = _NXT_PORT_MSG_MMAP;
3789 msg.last = 0;
3790 msg.mmap = 0;
3791 msg.nf = 0;
3792 msg.mf = 0;
3793 msg.tracking = 0;
3794
3795 /*
3796 * Fill all padding fields with 0.
3797 * Code in Go 1.11 validates cmsghdr using the padding field as part of len.
3798 * See Cmsghdr definition and socketControlMessageHeaderAndData function.
3799 */
3800 memset(&cmsg, 0, sizeof(cmsg));
3801
3802 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
3803 cmsg.cm.cmsg_level = SOL_SOCKET;
3804 cmsg.cm.cmsg_type = SCM_RIGHTS;
3805
3806 /*
3807 * memcpy() is used instead of simple
3808 * *(int *) CMSG_DATA(&cmsg.cm) = fd;
3809 * because GCC 4.4 with -O2/3/s optimization may issue a warning:
3810 * dereferencing type-punned pointer will break strict-aliasing rules
3811 *
3812 * Fortunately, GCC with -O1 compiles this memcpy()
3813 * into the same simple assignment as in the code above.
3814 */
3815 memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int));
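/*
 * For reference only (not part of this library): a minimal sketch of how a
 * receiving peer could extract the descriptor from the ancillary data,
 * assuming "mh" is the struct msghdr filled in by recvmsg():
 *
 *     struct cmsghdr  *cm;
 *     int             fd = -1;
 *
 *     for (cm = CMSG_FIRSTHDR(&mh); cm != NULL; cm = CMSG_NXTHDR(&mh, cm)) {
 *         if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) {
 *             memcpy(&fd, CMSG_DATA(cm), sizeof(int));
 *         }
 *     }
 */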
3816
3817 res = nxt_unit_port_send(ctx, port, &msg, sizeof(msg),
3818 &cmsg, sizeof(cmsg));
3819 if (nxt_slow_path(res != sizeof(msg))) {
3820 return NXT_UNIT_ERROR;
3821 }
3822
3823 return NXT_UNIT_OK;
3824}
3825
3826
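/*
 * Prepares an outgoing buffer of "size" bytes.  Payloads small enough for a
 * plain message use "local_buf" (or a malloc'ed buffer) with room reserved
 * for the port message header; larger payloads are carved out of shared
 * memory chunks obtained from nxt_unit_mmap_get().
 */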
3827static int
3828nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
3829 uint32_t size, uint32_t min_size,
3830 nxt_unit_mmap_buf_t *mmap_buf, char *local_buf)
3831{
3832 int nchunks, min_nchunks;
3833 nxt_chunk_id_t c;
3834 nxt_port_mmap_header_t *hdr;
3835
3836 if (size <= NXT_UNIT_MAX_PLAIN_SIZE) {
3837 if (local_buf != NULL) {
3838 mmap_buf->free_ptr = NULL;
3839 mmap_buf->plain_ptr = local_buf;
3840
3841 } else {
3842 mmap_buf->free_ptr = nxt_unit_malloc(ctx,
3843 size + sizeof(nxt_port_msg_t));
3844 if (nxt_slow_path(mmap_buf->free_ptr == NULL)) {
3845 return NXT_UNIT_ERROR;
3846 }
3847
3848 mmap_buf->plain_ptr = mmap_buf->free_ptr;
3849 }
3850
3851 mmap_buf->hdr = NULL;
3852 mmap_buf->buf.start = mmap_buf->plain_ptr + sizeof(nxt_port_msg_t);
3853 mmap_buf->buf.free = mmap_buf->buf.start;
3854 mmap_buf->buf.end = mmap_buf->buf.start + size;
3855
3856 nxt_unit_debug(ctx, "outgoing plain buffer allocation: (%p, %d)",
3857 mmap_buf->buf.start, (int) size);
3858
3859 return NXT_UNIT_OK;
3860 }
3861
3862 nchunks = (size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;
3863 min_nchunks = (min_size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE;
3864
3865 hdr = nxt_unit_mmap_get(ctx, port, &c, &nchunks, min_nchunks);
3866 if (nxt_slow_path(hdr == NULL)) {
3867 if (nxt_fast_path(min_nchunks == 0 && nchunks == 0)) {
3868 mmap_buf->hdr = NULL;
3869 mmap_buf->buf.start = NULL;
3870 mmap_buf->buf.free = NULL;
3871 mmap_buf->buf.end = NULL;
3872 mmap_buf->free_ptr = NULL;
3873
3874 return NXT_UNIT_OK;
3875 }
3876
3877 return NXT_UNIT_ERROR;
3878 }
3879
3880 mmap_buf->hdr = hdr;
3881 mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c);
3882 mmap_buf->buf.free = mmap_buf->buf.start;
3883 mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE;
3884 mmap_buf->free_ptr = NULL;
3885 mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
3886
3887 nxt_unit_debug(ctx, "outgoing mmap allocation: (%d,%d,%d)",
3888 (int) hdr->id, (int) c,
3889 (int) (nchunks * PORT_MMAP_CHUNK_SIZE));
3890
3891 return NXT_UNIT_OK;
3892}
3893
3894
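/*
 * Maps a shared memory segment received from process "pid", stores it in
 * the incoming array under the id recorded in its header, and re-queues any
 * read buffers that were waiting for this segment to arrive.
 */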
3895static int
3896nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd)
3897{
3898 int rc;
3899 void *mem;
3900 nxt_queue_t awaiting_rbuf;
3901 struct stat mmap_stat;
3902 nxt_unit_mmap_t *mm;
3903 nxt_unit_impl_t *lib;
3904 nxt_unit_ctx_impl_t *ctx_impl;
3905 nxt_unit_read_buf_t *rbuf;
3906 nxt_port_mmap_header_t *hdr;
3907
3908 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
3909
3910 nxt_unit_debug(ctx, "incoming_mmap: fd %d from process %d", fd, (int) pid);
3911
3912 if (fstat(fd, &mmap_stat) == -1) {
3913 nxt_unit_alert(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd,
3914 strerror(errno), errno);
3915
3916 return NXT_UNIT_ERROR;
3917 }
3918
3919 mem = mmap(NULL, mmap_stat.st_size, PROT_READ | PROT_WRITE,
3920 MAP_SHARED, fd, 0);
3921 if (nxt_slow_path(mem == MAP_FAILED)) {
3922 nxt_unit_alert(ctx, "incoming_mmap: mmap() failed: %s (%d)",
3923 strerror(errno), errno);
3924
3925 return NXT_UNIT_ERROR;
3926 }
3927
3928 hdr = mem;
3929
3930 if (nxt_slow_path(hdr->src_pid != pid)) {
3931
3932 nxt_unit_alert(ctx, "incoming_mmap: unexpected pid in mmap header "
3933 "detected: %d != %d or %d != %d", (int) hdr->src_pid,
3934 (int) pid, (int) hdr->dst_pid, (int) lib->pid);
3935
3936 munmap(mem, PORT_MMAP_SIZE);
3937
3938 return NXT_UNIT_ERROR;
3939 }
3940
3941 nxt_queue_init(&awaiting_rbuf);
3942
3943 pthread_mutex_lock(&lib->incoming.mutex);
3944
3945 mm = nxt_unit_mmap_at(&lib->incoming, hdr->id);
3946 if (nxt_slow_path(mm == NULL)) {
3947 nxt_unit_alert(ctx, "incoming_mmap: failed to add to incoming array");
3948
3949 munmap(mem, PORT_MMAP_SIZE);
3950
3951 rc = NXT_UNIT_ERROR;
3952
3953 } else {
3954 mm->hdr = hdr;
3955
3956 hdr->sent_over = 0xFFFFu;
3957
3958 nxt_queue_add(&awaiting_rbuf, &mm->awaiting_rbuf);
3959 nxt_queue_init(&mm->awaiting_rbuf);
3960
3961 rc = NXT_UNIT_OK;
3962 }
3963
3964 pthread_mutex_unlock(&lib->incoming.mutex);
3965
3966 nxt_queue_each(rbuf, &awaiting_rbuf, nxt_unit_read_buf_t, link) {
3967
3968 ctx_impl = rbuf->ctx_impl;
3969
3970 pthread_mutex_lock(&ctx_impl->mutex);
3971
3972 nxt_queue_insert_head(&ctx_impl->pending_rbuf, &rbuf->link);
3973
3974 pthread_mutex_unlock(&ctx_impl->mutex);
3975
3976 nxt_atomic_fetch_add(&ctx_impl->wait_items, -1);
3977
3978 } nxt_queue_loop;
3979
3980 return rc;
3981}
3982
3983
3984static void
3985nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps)
3986{
3987 pthread_mutex_init(&mmaps->mutex, NULL);
3988
3989 mmaps->size = 0;
3990 mmaps->cap = 0;
3991 mmaps->elts = NULL;
3992 mmaps->allocated_chunks = 0;
3993}
3994
3995
3996nxt_inline void
3997nxt_unit_process_use(nxt_unit_process_t *process)
3998{
3999 nxt_atomic_fetch_add(&process->use_count, 1);
4000}
4001
4002
4003nxt_inline void
4004nxt_unit_process_release(nxt_unit_process_t *process)
4005{
4006 long c;
4007
4008 c = nxt_atomic_fetch_add(&process->use_count, -1);
4009
4010 if (c == 1) {
4011 nxt_unit_debug(NULL, "destroy process #%d", (int) process->pid);
4012
4013 nxt_unit_free(NULL, process);
4014 }
4015}
4016
4017
4018static void
4019nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps)
4020{
4021 nxt_unit_mmap_t *mm, *end;
4022
4023 if (mmaps->elts != NULL) {
4024 end = mmaps->elts + mmaps->size;
4025
4026 for (mm = mmaps->elts; mm < end; mm++) {
4027 munmap(mm->hdr, PORT_MMAP_SIZE);
4028 }
4029
4030 nxt_unit_free(NULL, mmaps->elts);
4031 }
4032
4033 pthread_mutex_destroy(&mmaps->mutex);
4034}
4035
4036
4037static int
4038nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps,
4039 pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr,
4040 nxt_unit_read_buf_t *rbuf)
4041{
4042 int res, need_rbuf;
4043 nxt_unit_mmap_t *mm;
4044 nxt_unit_ctx_impl_t *ctx_impl;
4045
4046 mm = nxt_unit_mmap_at(mmaps, id);
4047 if (nxt_slow_path(mm == NULL)) {
4048 nxt_unit_alert(ctx, "failed to allocate mmap");
4049
4050 pthread_mutex_unlock(&mmaps->mutex);
4051
4052 *hdr = NULL;
4053
4054 return NXT_UNIT_ERROR;
4055 }
4056
4057 *hdr = mm->hdr;
4058
4059 if (nxt_fast_path(*hdr != NULL)) {
4060 return NXT_UNIT_OK;
4061 }
4062
4063 need_rbuf = nxt_queue_is_empty(&mm->awaiting_rbuf);
4064
4065 nxt_queue_insert_tail(&mm->awaiting_rbuf, &rbuf->link);
4066
4067 pthread_mutex_unlock(&mmaps->mutex);
4068
4069 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4070
4071 nxt_atomic_fetch_add(&ctx_impl->wait_items, 1);
4072
4073 if (need_rbuf) {
4074 res = nxt_unit_get_mmap(ctx, pid, id);
4075 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
4076 return NXT_UNIT_ERROR;
4077 }
4078 }
4079
4080 return NXT_UNIT_AGAIN;
4081}
4082
4083
4084static int
4085nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg,
4086 nxt_unit_read_buf_t *rbuf)
4087{
4088 int res;
4089 void *start;
4090 uint32_t size;
4091 nxt_unit_impl_t *lib;
4092 nxt_unit_mmaps_t *mmaps;
4093 nxt_unit_mmap_buf_t *b, **incoming_tail;
4094 nxt_port_mmap_msg_t *mmap_msg, *end;
4095 nxt_port_mmap_header_t *hdr;
4096
4097 if (nxt_slow_path(recv_msg->size < sizeof(nxt_port_mmap_msg_t))) {
4098 nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: too small message (%d)",
4099 recv_msg->stream, (int) recv_msg->size);
4100
4101 return NXT_UNIT_ERROR;
4102 }
4103
4104 mmap_msg = recv_msg->start;
4105 end = nxt_pointer_to(recv_msg->start, recv_msg->size);
4106
4107 incoming_tail = &recv_msg->incoming_buf;
4108
4109 /* Allocating buffer structures. */
4110 for (; mmap_msg < end; mmap_msg++) {
4111 b = nxt_unit_mmap_buf_get(ctx);
4112 if (nxt_slow_path(b == NULL)) {
4113 nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: failed to allocate buf",
4114 recv_msg->stream);
4115
4116 while (recv_msg->incoming_buf != NULL) {
4117 nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
4118 }
4119
4120 return NXT_UNIT_ERROR;
4121 }
4122
4123 nxt_unit_mmap_buf_insert(incoming_tail, b);
4124 incoming_tail = &b->next;
4125 }
4126
4127 b = recv_msg->incoming_buf;
4128 mmap_msg = recv_msg->start;
4129
4130 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4131
4132 mmaps = &lib->incoming;
4133
4134 pthread_mutex_lock(&mmaps->mutex);
4135
4136 for (; mmap_msg < end; mmap_msg++) {
4137 res = nxt_unit_check_rbuf_mmap(ctx, mmaps,
4138 recv_msg->pid, mmap_msg->mmap_id,
4139 &hdr, rbuf);
4140
4141 if (nxt_slow_path(res != NXT_UNIT_OK)) {
4142 while (recv_msg->incoming_buf != NULL) {
4143 nxt_unit_mmap_buf_release(recv_msg->incoming_buf);
4144 }
4145
4146 return res;
4147 }
4148
4149 start = nxt_port_mmap_chunk_start(hdr, mmap_msg->chunk_id);
4150 size = mmap_msg->size;
4151
4152 if (recv_msg->start == mmap_msg) {
4153 recv_msg->start = start;
4154 recv_msg->size = size;
4155 }
4156
4157 b->buf.start = start;
4158 b->buf.free = start;
4159 b->buf.end = b->buf.start + size;
4160 b->hdr = hdr;
4161
4162 b = b->next;
4163
4164 nxt_unit_debug(ctx, "#%"PRIu32": mmap_read: [%p,%d] %d->%d,(%d,%d,%d)",
4165 recv_msg->stream,
4166 start, (int) size,
4167 (int) hdr->src_pid, (int) hdr->dst_pid,
4168 (int) hdr->id, (int) mmap_msg->chunk_id,
4169 (int) mmap_msg->size);
4170 }
4171
4172 pthread_mutex_unlock(&mmaps->mutex);
4173
4174 return NXT_UNIT_OK;
4175}
4176
4177
4178static int
4179nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id)
4180{
4181 ssize_t res;
4182 nxt_unit_impl_t *lib;
4183 nxt_unit_ctx_impl_t *ctx_impl;
4184
4185 struct {
4186 nxt_port_msg_t msg;
4187 nxt_port_msg_get_mmap_t get_mmap;
4188 } m;
4189
4190 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4191 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4192
4193 memset(&m.msg, 0, sizeof(nxt_port_msg_t));
4194
4195 m.msg.pid = lib->pid;
4196 m.msg.reply_port = ctx_impl->read_port->id.id;
4197 m.msg.type = _NXT_PORT_MSG_GET_MMAP;
4198
4199 m.get_mmap.id = id;
4200
4201 nxt_unit_debug(ctx, "get_mmap: %d %d", (int) pid, (int) id);
4202
4203 res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL, 0);
4204 if (nxt_slow_path(res != sizeof(m))) {
4205 return NXT_UNIT_ERROR;
4206 }
4207
4208 return NXT_UNIT_OK;
4209}
4210
4211
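/*
 * Returns the chunks covering [start, start + size) to the segment's free
 * map, fills the released memory with a 0xA5 pattern, updates the outgoing
 * allocated chunk counter, and sends an SHM_ACK when the segment's producer
 * has flagged an out-of-shared-memory condition.
 */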
4212static void
4213nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, nxt_port_mmap_header_t *hdr,
4214 void *start, uint32_t size)
4215{
4216 int freed_chunks;
4217 u_char *p, *end;
4218 nxt_chunk_id_t c;
4219 nxt_unit_impl_t *lib;
4220
4221 memset(start, 0xA5, size);
4222
4223 p = start;
4224 end = p + size;
4225 c = nxt_port_mmap_chunk_id(hdr, p);
4226 freed_chunks = 0;
4227
4228 while (p < end) {
4229 nxt_port_mmap_set_chunk_free(hdr->free_map, c);
4230
4231 p += PORT_MMAP_CHUNK_SIZE;
4232 c++;
4233 freed_chunks++;
4234 }
4235
4236 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4237
4238 if (hdr->src_pid == lib->pid && freed_chunks != 0) {
4239 nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, -freed_chunks);
4240
4241 nxt_unit_debug(ctx, "allocated_chunks %d",
4242 (int) lib->outgoing.allocated_chunks);
4243 }
4244
4245 if (hdr->dst_pid == lib->pid
4246 && freed_chunks != 0
4247 && nxt_atomic_cmp_set(&hdr->oosm, 1, 0))
4248 {
4249 nxt_unit_send_shm_ack(ctx, hdr->src_pid);
4250 }
4251}
4252
4253
4254static int
4255nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid)
4256{
4257 ssize_t res;
4258 nxt_port_msg_t msg;
4259 nxt_unit_impl_t *lib;
4260
4261 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4262
4263 msg.stream = 0;
4264 msg.pid = lib->pid;
4265 msg.reply_port = 0;
4266 msg.type = _NXT_PORT_MSG_SHM_ACK;
4267 msg.last = 0;
4268 msg.mmap = 0;
4269 msg.nf = 0;
4270 msg.mf = 0;
4271 msg.tracking = 0;
4272
4273 res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL, 0);
4274 if (nxt_slow_path(res != sizeof(msg))) {
4275 return NXT_UNIT_ERROR;
4276 }
4277
4278 return NXT_UNIT_OK;
4279}
4280
4281
4282static nxt_int_t
4283nxt_unit_lvlhsh_pid_test(nxt_lvlhsh_query_t *lhq, void *data)
4284{
4285 nxt_process_t *process;
4286
4287 process = data;
4288
4289 if (lhq->key.length == sizeof(pid_t)
4290 && *(pid_t *) lhq->key.start == process->pid)
4291 {
4292 return NXT_OK;
4293 }
4294
4295 return NXT_DECLINED;
4296}
4297
4298
4299static const nxt_lvlhsh_proto_t lvlhsh_processes_proto nxt_aligned(64) = {
4300 NXT_LVLHSH_DEFAULT,
4301 nxt_unit_lvlhsh_pid_test,
4302 nxt_unit_lvlhsh_alloc,
4303 nxt_unit_lvlhsh_free,
4304};
4305
4306
4307static inline void
4308nxt_unit_process_lhq_pid(nxt_lvlhsh_query_t *lhq, pid_t *pid)
4309{
4310 lhq->key_hash = nxt_murmur_hash2(pid, sizeof(*pid));
4311 lhq->key.length = sizeof(*pid);
4312 lhq->key.start = (u_char *) pid;
4313 lhq->proto = &lvlhsh_processes_proto;
4314}
4315
4316
4317static nxt_unit_process_t *
4318nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid)
4319{
4320 nxt_unit_impl_t *lib;
4321 nxt_unit_process_t *process;
4322 nxt_lvlhsh_query_t lhq;
4323
4324 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4325
4326 nxt_unit_process_lhq_pid(&lhq, &pid);
4327
4328 if (nxt_lvlhsh_find(&lib->processes, &lhq) == NXT_OK) {
4329 process = lhq.value;
4330 nxt_unit_process_use(process);
4331
4332 return process;
4333 }
4334
4335 process = nxt_unit_malloc(ctx, sizeof(nxt_unit_process_t));
4336 if (nxt_slow_path(process == NULL)) {
4337 nxt_unit_alert(ctx, "failed to allocate process for #%d", (int) pid);
4338
4339 return NULL;
4340 }
4341
4342 process->pid = pid;
4343 process->use_count = 2;
4344 process->next_port_id = 0;
4345 process->lib = lib;
4346
4347 nxt_queue_init(&process->ports);
4348
4349 lhq.replace = 0;
4350 lhq.value = process;
4351
4352 switch (nxt_lvlhsh_insert(&lib->processes, &lhq)) {
4353
4354 case NXT_OK:
4355 break;
4356
4357 default:
4358 nxt_unit_alert(ctx, "process %d insert failed", (int) pid);
4359
4360 nxt_unit_free(ctx, process);
4361 process = NULL;
4362 break;
4363 }
4364
4365 return process;
4366}
4367
4368
4369static nxt_unit_process_t *
4370nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove)
4371{
4372 int rc;
4373 nxt_lvlhsh_query_t lhq;
4374
4375 nxt_unit_process_lhq_pid(&lhq, &pid);
4376
4377 if (remove) {
4378 rc = nxt_lvlhsh_delete(&lib->processes, &lhq);
4379
4380 } else {
4381 rc = nxt_lvlhsh_find(&lib->processes, &lhq);
4382 }
4383
4384 if (rc == NXT_OK) {
4385 if (!remove) {
4386 nxt_unit_process_use(lhq.value);
4387 }
4388
4389 return lhq.value;
4390 }
4391
4392 return NULL;
4393}
4394
4395
4396static nxt_unit_process_t *
4397nxt_unit_process_pop_first(nxt_unit_impl_t *lib)
4398{
4399 return nxt_lvlhsh_retrieve(&lib->processes, &lvlhsh_processes_proto, NULL);
4400}
4401
4402
4403int
4404nxt_unit_run(nxt_unit_ctx_t *ctx)
4405{
4406 int rc;
4407 nxt_unit_impl_t *lib;
4408
4409 nxt_unit_ctx_use(ctx);
4410
4411 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4412 rc = NXT_UNIT_OK;
4413
4414 while (nxt_fast_path(lib->online)) {
4415 rc = nxt_unit_run_once_impl(ctx);
4416
4417 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4418 break;
4419 }
4420 }
4421
4422 nxt_unit_ctx_release(ctx);
4423
4424 return rc;
4425}
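/*
 * For illustration only (not part of the library): a minimal sketch of an
 * embedding application driving the blocking nxt_unit_run() loop, assuming
 * the public declarations from nxt_unit.h:
 *
 *     static void greet(nxt_unit_request_info_t *req)
 *     {
 *         (void) nxt_unit_response_init(req, 200, 0, 0);
 *         (void) nxt_unit_response_write(req, "Hello\n", 6);
 *         nxt_unit_request_done(req, NXT_UNIT_OK);
 *     }
 *
 *     int main(void)
 *     {
 *         nxt_unit_init_t  init = { .callbacks.request_handler = greet };
 *         nxt_unit_ctx_t   *ctx;
 *
 *         ctx = nxt_unit_init(&init);
 *         if (ctx == NULL) {
 *             return 1;
 *         }
 *
 *         nxt_unit_run(ctx);
 *         nxt_unit_done(ctx);
 *
 *         return 0;
 *     }
 */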
4426
4427
4428int
4429nxt_unit_run_once(nxt_unit_ctx_t *ctx)
4430{
4431 int rc;
4432
4433 nxt_unit_ctx_use(ctx);
4434
4435 rc = nxt_unit_run_once_impl(ctx);
4436
4437 nxt_unit_ctx_release(ctx);
4438
4439 return rc;
4440}
4441
4442
4443static int
4444nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx)
4445{
4446 int rc;
4447 nxt_unit_read_buf_t *rbuf;
4448
4449 rbuf = nxt_unit_read_buf_get(ctx);
4450 if (nxt_slow_path(rbuf == NULL)) {
4451 return NXT_UNIT_ERROR;
4452 }
4453
4454 rc = nxt_unit_read_buf(ctx, rbuf);
4455 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
4456 nxt_unit_read_buf_release(ctx, rbuf);
4457
4458 return rc;
4459 }
4460
4461 rc = nxt_unit_process_msg(ctx, rbuf);
4462 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4463 return NXT_UNIT_ERROR;
4464 }
4465
4466 rc = nxt_unit_process_pending_rbuf(ctx);
4467 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4468 return NXT_UNIT_ERROR;
4469 }
4470
4471 nxt_unit_process_ready_req(ctx);
4472
4473 return rc;
4474}
4475
4476
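/*
 * Obtains the next incoming message for the context.  If wait items are
 * pending (or there is no shared port), the context port is read directly;
 * otherwise the port queue and the shared application queue are tried
 * first, and if both are empty the call blocks in poll() on the two
 * descriptors until one of them becomes readable.
 */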
4477static int
4478nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf)
4479{
4480 int nevents, res, err;
4481 nxt_unit_impl_t *lib;
4482 nxt_unit_ctx_impl_t *ctx_impl;
4483 nxt_unit_port_impl_t *port_impl;
4484 struct pollfd fds[2];
4485
4486 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4487 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4488
4489 if (ctx_impl->wait_items > 0 || lib->shared_port == NULL) {
4490
4491 return nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
4492 }
4493
4494 port_impl = nxt_container_of(ctx_impl->read_port, nxt_unit_port_impl_t,
4495 port);
4496
4497retry:
4498
4499 if (port_impl->from_socket == 0) {
4500 res = nxt_unit_port_queue_recv(ctx_impl->read_port, rbuf);
4501 if (res == NXT_UNIT_OK) {
4502 if (nxt_unit_is_read_socket(rbuf)) {
4503 port_impl->from_socket++;
4504
4505 nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d",
4506 (int) ctx_impl->read_port->id.pid,
4507 (int) ctx_impl->read_port->id.id,
4508 port_impl->from_socket);
4509
4510 } else {
4511 nxt_unit_debug(ctx, "port{%d,%d} dequeue %d",
4512 (int) ctx_impl->read_port->id.pid,
4513 (int) ctx_impl->read_port->id.id,
4514 (int) rbuf->size);
4515
4516 return NXT_UNIT_OK;
4517 }
4518 }
4519 }
4520
4521 res = nxt_unit_app_queue_recv(lib->shared_port, rbuf);
4522 if (res == NXT_UNIT_OK) {
4523 return NXT_UNIT_OK;
4524 }
4525
4526 fds[0].fd = ctx_impl->read_port->in_fd;
4527 fds[0].events = POLLIN;
4528 fds[0].revents = 0;
4529
4530 fds[1].fd = lib->shared_port->in_fd;
4531 fds[1].events = POLLIN;
4532 fds[1].revents = 0;
4533
4534 nevents = poll(fds, 2, -1);
4535 if (nxt_slow_path(nevents == -1)) {
4536 err = errno;
4537
4538 if (err == EINTR) {
4539 goto retry;
4540 }
4541
4542 nxt_unit_alert(ctx, "poll(%d,%d) failed: %s (%d)",
4543 fds[0].fd, fds[1].fd, strerror(err), err);
4544
4545 rbuf->size = -1;
4546
4547 return (err == EAGAIN) ? NXT_UNIT_AGAIN : NXT_UNIT_ERROR;
4548 }
4549
4550 nxt_unit_debug(ctx, "poll(%d,%d): %d, revents [%04uXi, %04uXi]",
4551 fds[0].fd, fds[1].fd, nevents, fds[0].revents,
4552 fds[1].revents);
4553
4554 if ((fds[0].revents & POLLIN) != 0) {
4555 res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
4556 if (res == NXT_UNIT_AGAIN) {
4557 goto retry;
4558 }
4559
4560 return res;
4561 }
4562
4563 if ((fds[1].revents & POLLIN) != 0) {
4564 res = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
4565 if (res == NXT_UNIT_AGAIN) {
4566 goto retry;
4567 }
4568
4569 return res;
4570 }
4571
4572 nxt_unit_alert(ctx, "poll(%d,%d): %d unexpected revents [%04uXi, %04uXi]",
4573 fds[0].fd, fds[1].fd, nevents, fds[0].revents,
4574 fds[1].revents);
4575
4576 return NXT_UNIT_ERROR;
4577}
4578
4579
4580static int
4581nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx)
4582{
4583 int rc;
4584 nxt_queue_t pending_rbuf;
4585 nxt_unit_ctx_impl_t *ctx_impl;
4586 nxt_unit_read_buf_t *rbuf;
4587
4588 nxt_queue_init(&pending_rbuf);
4589
4590 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4591
4592 pthread_mutex_lock(&ctx_impl->mutex);
4593
4594 if (nxt_queue_is_empty(&ctx_impl->pending_rbuf)) {
4595 pthread_mutex_unlock(&ctx_impl->mutex);
4596
4597 return NXT_UNIT_OK;
4598 }
4599
4600 nxt_queue_add(&pending_rbuf, &ctx_impl->pending_rbuf);
4601 nxt_queue_init(&ctx_impl->pending_rbuf);
4602
4603 pthread_mutex_unlock(&ctx_impl->mutex);
4604
4605 rc = NXT_UNIT_OK;
4606
4607 nxt_queue_each(rbuf, &pending_rbuf, nxt_unit_read_buf_t, link) {
4608
4609 if (nxt_fast_path(rc != NXT_UNIT_ERROR)) {
4610 rc = nxt_unit_process_msg(&ctx_impl->ctx, rbuf);
4611
4612 } else {
4613 nxt_unit_read_buf_release(ctx, rbuf);
4614 }
4615
4616 } nxt_queue_loop;
4617
4618 return rc;
4619}
4620
4621
4622static void
4623nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx)
4624{
4625 int res;
4626 nxt_queue_t ready_req;
4627 nxt_unit_impl_t *lib;
4628 nxt_unit_ctx_impl_t *ctx_impl;
4629 nxt_unit_request_info_t *req;
4630 nxt_unit_request_info_impl_t *req_impl;
4631
4632 nxt_queue_init(&ready_req);
4633
4634 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4635
4636 pthread_mutex_lock(&ctx_impl->mutex);
4637
4638 if (nxt_queue_is_empty(&ctx_impl->ready_req)) {
4639 pthread_mutex_unlock(&ctx_impl->mutex);
4640
4641 return;
4642 }
4643
4644 nxt_queue_add(&ready_req, &ctx_impl->ready_req);
4645 nxt_queue_init(&ctx_impl->ready_req);
4646
4647 pthread_mutex_unlock(&ctx_impl->mutex);
4648
4649 nxt_queue_each(req_impl, &ready_req,
4650 nxt_unit_request_info_impl_t, port_wait_link)
4651 {
4652 lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit);
4653
4654 req = &req_impl->req;
4655
4656 res = nxt_unit_send_req_headers_ack(req);
4657 if (nxt_slow_path(res != NXT_UNIT_OK)) {
4658 nxt_unit_request_done(req, NXT_UNIT_ERROR);
4659
4660 continue;
4661 }
4662
4663 if (req->content_length
4664 > (uint64_t) (req->content_buf->end - req->content_buf->free))
4665 {
4666 res = nxt_unit_request_hash_add(ctx, req);
4667 if (nxt_slow_path(res != NXT_UNIT_OK)) {
4668 nxt_unit_req_warn(req, "failed to add request to hash");
4669
4670 nxt_unit_request_done(req, NXT_UNIT_ERROR);
4671
4672 continue;
4673 }
4674
4675 /*
4676 * If the application has a separate data handler, we may start
4677 * request processing now and handle the data when it arrives.
4678 */
4679 if (lib->callbacks.data_handler == NULL) {
4680 continue;
4681 }
4682 }
4683
4684 lib->callbacks.request_handler(&req_impl->req);
4685
4686 } nxt_queue_loop;
4687}
4688
4689
4690int
4691nxt_unit_run_ctx(nxt_unit_ctx_t *ctx)
4692{
4693 int rc;
4694 nxt_unit_impl_t *lib;
4695 nxt_unit_read_buf_t *rbuf;
4696 nxt_unit_ctx_impl_t *ctx_impl;
4697
4698 nxt_unit_ctx_use(ctx);
4699
4700 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4701 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
4702
4703 rc = NXT_UNIT_OK;
4704
4705 while (nxt_fast_path(lib->online)) {
4706 rbuf = nxt_unit_read_buf_get(ctx);
4707 if (nxt_slow_path(rbuf == NULL)) {
4708 rc = NXT_UNIT_ERROR;
4709 break;
4710 }
4711
4712 retry:
4713
4714 rc = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf);
4715 if (rc == NXT_UNIT_AGAIN) {
4716 goto retry;
4717 }
4718
4719 rc = nxt_unit_process_msg(ctx, rbuf);
4720 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4721 break;
4722 }
4723
4724 rc = nxt_unit_process_pending_rbuf(ctx);
4725 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4726 break;
4727 }
4728
4729 nxt_unit_process_ready_req(ctx);
4730 }
4731
4732 nxt_unit_ctx_release(ctx);
4733
4734 return rc;
4735}
4736
4737
4738nxt_inline int
4739nxt_unit_is_read_queue(nxt_unit_read_buf_t *rbuf)
4740{
4741 nxt_port_msg_t *port_msg;
4742
4743 if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
4744 port_msg = (nxt_port_msg_t *) rbuf->buf;
4745
4746 return port_msg->type == _NXT_PORT_MSG_READ_QUEUE;
4747 }
4748
4749 return 0;
4750}
4751
4752
4753nxt_inline int
4754nxt_unit_is_read_socket(nxt_unit_read_buf_t *rbuf)
4755{
4756 if (nxt_fast_path(rbuf->size == 1)) {
4757 return rbuf->buf[0] == _NXT_PORT_MSG_READ_SOCKET;
4758 }
4759
4760 return 0;
4761}
4762
4763
4764nxt_inline int
4765nxt_unit_is_shm_ack(nxt_unit_read_buf_t *rbuf)
4766{
4767 nxt_port_msg_t *port_msg;
4768
4769 if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
4770 port_msg = (nxt_port_msg_t *) rbuf->buf;
4771
4772 return port_msg->type == _NXT_PORT_MSG_SHM_ACK;
4773 }
4774
4775 return 0;
4776}
4777
4778
4779nxt_inline int
4780nxt_unit_is_quit(nxt_unit_read_buf_t *rbuf)
4781{
4782 nxt_port_msg_t *port_msg;
4783
4784 if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) {
4785 port_msg = (nxt_port_msg_t *) rbuf->buf;
4786
4787 return port_msg->type == _NXT_PORT_MSG_QUIT;
4788 }
4789
4790 return 0;
4791}
4792
4793
4794int
4795nxt_unit_run_shared(nxt_unit_ctx_t *ctx)
4796{
4797 int rc;
4798 nxt_unit_impl_t *lib;
4799 nxt_unit_read_buf_t *rbuf;
4800
4801 nxt_unit_ctx_use(ctx);
4802
4803 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4804 rc = NXT_UNIT_OK;
4805
4806 while (nxt_fast_path(lib->online)) {
4807 rbuf = nxt_unit_read_buf_get(ctx);
4808 if (nxt_slow_path(rbuf == NULL)) {
4809 rc = NXT_UNIT_ERROR;
4810 break;
4811 }
4812
4813 retry:
4814
4815 rc = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf);
4816 if (rc == NXT_UNIT_AGAIN) {
4817 goto retry;
4818 }
4819
4820 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4821 nxt_unit_read_buf_release(ctx, rbuf);
4822 break;
4823 }
4824
4825 rc = nxt_unit_process_msg(ctx, rbuf);
4826 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4827 break;
4828 }
4829
4830 rc = nxt_unit_process_pending_rbuf(ctx);
4831 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4832 break;
4833 }
4834
4835 nxt_unit_process_ready_req(ctx);
4836 }
4837
4838 nxt_unit_ctx_release(ctx);
4839
4840 return rc;
4841}
4842
4843
4844int
4845nxt_unit_process_port_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
4846{
4847 int rc;
4848
4849 nxt_unit_ctx_use(ctx);
4850
4851 rc = nxt_unit_process_port_msg_impl(ctx, port);
4852
4853 nxt_unit_ctx_release(ctx);
4854
4855 return rc;
4856}
4857
4858
4859static int
4860nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port)
4861{
4862 int rc;
4863 nxt_unit_impl_t *lib;
4864 nxt_unit_read_buf_t *rbuf;
4865
4866 rbuf = nxt_unit_read_buf_get(ctx);
4867 if (nxt_slow_path(rbuf == NULL)) {
4868 return NXT_UNIT_ERROR;
4869 }
4870
4871 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4872
4873retry:
4874
4875 if (port == lib->shared_port) {
4876 rc = nxt_unit_shared_port_recv(ctx, port, rbuf);
4877
4878 } else {
4879 rc = nxt_unit_ctx_port_recv(ctx, port, rbuf);
4880 }
4881
4882 if (rc != NXT_UNIT_OK) {
4883 nxt_unit_read_buf_release(ctx, rbuf);
4884 return rc;
4885 }
4886
4887 rc = nxt_unit_process_msg(ctx, rbuf);
4888 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4889 return NXT_UNIT_ERROR;
4890 }
4891
4892 rc = nxt_unit_process_pending_rbuf(ctx);
4893 if (nxt_slow_path(rc == NXT_UNIT_ERROR)) {
4894 return NXT_UNIT_ERROR;
4895 }
4896
4897 nxt_unit_process_ready_req(ctx);
4898
4899 rbuf = nxt_unit_read_buf_get(ctx);
4900 if (nxt_slow_path(rbuf == NULL)) {
4901 return NXT_UNIT_ERROR;
4902 }
4903
4904 if (lib->online) {
4905 goto retry;
4906 }
4907
4908 return rc;
4909}
4910
4911
4912void
4913nxt_unit_done(nxt_unit_ctx_t *ctx)
4914{
4915 nxt_unit_ctx_release(ctx);
4916}
4917
4918
4919nxt_unit_ctx_t *
4920nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data)
4921{
4922 int rc, queue_fd;
4923 void *mem;
4924 nxt_unit_impl_t *lib;
4925 nxt_unit_port_t *port;
4926 nxt_unit_ctx_impl_t *new_ctx;
4927 nxt_unit_port_impl_t *port_impl;
4928
4929 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
4930
4931 new_ctx = nxt_unit_malloc(ctx, sizeof(nxt_unit_ctx_impl_t)
4932 + lib->request_data_size);
4933 if (nxt_slow_path(new_ctx == NULL)) {
4934 nxt_unit_alert(ctx, "failed to allocate context");
4935
4936 return NULL;
4937 }
4938
4939 rc = nxt_unit_ctx_init(lib, new_ctx, data);
4940 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
4941 nxt_unit_free(ctx, new_ctx);
4942
4943 return NULL;
4944 }
4945
4946 queue_fd = -1;
4947
4948 port = nxt_unit_create_port(ctx);
4949 if (nxt_slow_path(port == NULL)) {
4950 goto fail;
4951 }
4952
4953 new_ctx->read_port = port;
4954
4955 queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t));
4956 if (nxt_slow_path(queue_fd == -1)) {
4957 goto fail;
4958 }
4959
4960 mem = mmap(NULL, sizeof(nxt_port_queue_t),
4961 PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0);
4962 if (nxt_slow_path(mem == MAP_FAILED)) {
4963 nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd,
4964 strerror(errno), errno);
4965
4966 goto fail;
4967 }
4968
4969 nxt_port_queue_init(mem);
4970
4971 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
4972 port_impl->queue = mem;
4973
4974 rc = nxt_unit_send_port(ctx, lib->router_port, port, queue_fd);
4975 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
4976 goto fail;
4977 }
4978
4979 nxt_unit_close(queue_fd);
4980
4981 return &new_ctx->ctx;
4982
4983fail:
4984
4985 if (queue_fd != -1) {
4986 nxt_unit_close(queue_fd);
4987 }
4988
4989 nxt_unit_ctx_release(&new_ctx->ctx);
4990
4991 return NULL;
4992}
4993
4994
4995static void
4996nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl)
4997{
4998 nxt_unit_impl_t *lib;
4999 nxt_unit_mmap_buf_t *mmap_buf;
5000 nxt_unit_request_info_impl_t *req_impl;
5001 nxt_unit_websocket_frame_impl_t *ws_impl;
5002
5003 lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit);
5004
5005 nxt_queue_each(req_impl, &ctx_impl->active_req,
5006 nxt_unit_request_info_impl_t, link)
5007 {
5008 nxt_unit_req_warn(&req_impl->req, "active request on ctx free");
5009
5010 nxt_unit_request_done(&req_impl->req, NXT_UNIT_ERROR);
5011
5012 } nxt_queue_loop;
5013
5014 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[0]);
5015 nxt_unit_mmap_buf_unlink(&ctx_impl->ctx_buf[1]);
5016
5017 while (ctx_impl->free_buf != NULL) {
5018 mmap_buf = ctx_impl->free_buf;
5019 nxt_unit_mmap_buf_unlink(mmap_buf);
5020 nxt_unit_free(&ctx_impl->ctx, mmap_buf);
5021 }
5022
5023 nxt_queue_each(req_impl, &ctx_impl->free_req,
5024 nxt_unit_request_info_impl_t, link)
5025 {
5026 nxt_unit_request_info_free(req_impl);
5027
5028 } nxt_queue_loop;
5029
5030 nxt_queue_each(ws_impl, &ctx_impl->free_ws,
5031 nxt_unit_websocket_frame_impl_t, link)
5032 {
5033 nxt_unit_websocket_frame_free(&ctx_impl->ctx, ws_impl);
5034
5035 } nxt_queue_loop;
5036
5037 pthread_mutex_destroy(&ctx_impl->mutex);
5038
5039 nxt_queue_remove(&ctx_impl->link);
5040
5041 if (nxt_fast_path(ctx_impl->read_port != NULL)) {
5042 nxt_unit_remove_port(lib, &ctx_impl->read_port->id);
5043 nxt_unit_port_release(ctx_impl->read_port);
5044 }
5045
5046 if (ctx_impl != &lib->main_ctx) {
5047 nxt_unit_free(&lib->main_ctx.ctx, ctx_impl);
5048 }
5049
5050 nxt_unit_lib_release(lib);
5051}
5052
5053
5054/* SOCK_SEQPACKET is disabled to test SOCK_DGRAM on all platforms. */
5055#if (0 || NXT_HAVE_AF_UNIX_SOCK_SEQPACKET)
5056#define NXT_UNIX_SOCKET SOCK_SEQPACKET
5057#else
5058#define NXT_UNIX_SOCKET SOCK_DGRAM
5059#endif
5060
5061
5062void
5063nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id)
5064{
5065 nxt_unit_port_hash_id_t port_hash_id;
5066
5067 port_hash_id.pid = pid;
5068 port_hash_id.id = id;
5069
5070 port_id->pid = pid;
5071 port_id->hash = nxt_murmur_hash2(&port_hash_id, sizeof(port_hash_id));
5072 port_id->id = id;
5073}
5074
5075
5076static nxt_unit_port_t *
5077nxt_unit_create_port(nxt_unit_ctx_t *ctx)
5078{
5079 int rc, port_sockets[2];
5080 nxt_unit_impl_t *lib;
5081 nxt_unit_port_t new_port, *port;
5082 nxt_unit_process_t *process;
5083
5084 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5085
5086 rc = socketpair(AF_UNIX, NXT_UNIX_SOCKET, 0, port_sockets);
5087 if (nxt_slow_path(rc != 0)) {
5088 nxt_unit_warn(ctx, "create_port: socketpair() failed: %s (%d)",
5089 strerror(errno), errno);
5090
5091 return NULL;
5092 }
5093
5094 nxt_unit_debug(ctx, "create_port: new socketpair: %d->%d",
5095 port_sockets[0], port_sockets[1]);
5096
5097 pthread_mutex_lock(&lib->mutex);
5098
5099 process = nxt_unit_process_get(ctx, lib->pid);
5100 if (nxt_slow_path(process == NULL)) {
5101 pthread_mutex_unlock(&lib->mutex);
5102
5103 nxt_unit_close(port_sockets[0]);
5104 nxt_unit_close(port_sockets[1]);
5105
5106 return NULL;
5107 }
5108
5109 nxt_unit_port_id_init(&new_port.id, lib->pid, process->next_port_id++);
5110
5111 new_port.in_fd = port_sockets[0];
5112 new_port.out_fd = port_sockets[1];
5113 new_port.data = NULL;
5114
5115 pthread_mutex_unlock(&lib->mutex);
5116
5117 nxt_unit_process_release(process);
5118
5119 port = nxt_unit_add_port(ctx, &new_port, NULL);
5120 if (nxt_slow_path(port == NULL)) {
5121 nxt_unit_close(port_sockets[0]);
5122 nxt_unit_close(port_sockets[1]);
5123 }
5124
5125 return port;
5126}
5127
5128
5129static int
5130nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst,
5131 nxt_unit_port_t *port, int queue_fd)
5132{
5133 ssize_t res;
5134 nxt_unit_impl_t *lib;
5135 int fds[2] = { port->out_fd, queue_fd };
5136
5137 struct {
5138 nxt_port_msg_t msg;
5139 nxt_port_msg_new_port_t new_port;
5140 } m;
5141
5142 union {
5143 struct cmsghdr cm;
5144 char space[CMSG_SPACE(sizeof(int) * 2)];
5145 } cmsg;
5146
5147 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5148
5149 m.msg.stream = 0;
5150 m.msg.pid = lib->pid;
5151 m.msg.reply_port = 0;
5152 m.msg.type = _NXT_PORT_MSG_NEW_PORT;
5153 m.msg.last = 0;
5154 m.msg.mmap = 0;
5155 m.msg.nf = 0;
5156 m.msg.mf = 0;
5157 m.msg.tracking = 0;
5158
5159 m.new_port.id = port->id.id;
5160 m.new_port.pid = port->id.pid;
5161 m.new_port.type = NXT_PROCESS_APP;
5162 m.new_port.max_size = 16 * 1024;
5163 m.new_port.max_share = 64 * 1024;
5164
5165 memset(&cmsg, 0, sizeof(cmsg));
5166
5167 cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int) * 2);
5168 cmsg.cm.cmsg_level = SOL_SOCKET;
5169 cmsg.cm.cmsg_type = SCM_RIGHTS;
5170
5171 /*
5172 * memcpy() is used instead of simple
5173 * *(int *) CMSG_DATA(&cmsg.cm) = fd;
5174 * because GCC 4.4 with -O2/3/s optimization may issue a warning:
5175 * dereferencing type-punned pointer will break strict-aliasing rules
5176 *
5177 * Fortunately, GCC with -O1 compiles this memcpy()
5178 * into the same simple assignment as in the code above.
5179 */
5180 memcpy(CMSG_DATA(&cmsg.cm), fds, sizeof(int) * 2);
5181
5182 res = nxt_unit_port_send(ctx, dst, &m, sizeof(m), &cmsg, sizeof(cmsg));
5183
5184 return (res == sizeof(m)) ? NXT_UNIT_OK : NXT_UNIT_ERROR;
5185}
5186
5187
5188nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port)
5189{
5190 nxt_unit_port_impl_t *port_impl;
5191
5192 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5193
5194 nxt_atomic_fetch_add(&port_impl->use_count, 1);
5195}
5196
5197
5198nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port)
5199{
5200 long c;
5201 nxt_unit_port_impl_t *port_impl;
5202
5203 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5204
5205 c = nxt_atomic_fetch_add(&port_impl->use_count, -1);
5206
5207 if (c == 1) {
5208 nxt_unit_debug(NULL, "destroy port{%d,%d} in_fd %d out_fd %d",
5209 (int) port->id.pid, (int) port->id.id,
5210 port->in_fd, port->out_fd);
5211
5212 nxt_unit_process_release(port_impl->process);
5213
5214 if (port->in_fd != -1) {
5215 nxt_unit_close(port->in_fd);
5216
5217 port->in_fd = -1;
5218 }
5219
5220 if (port->out_fd != -1) {
5221 nxt_unit_close(port->out_fd);
5222
5223 port->out_fd = -1;
5224 }
5225
5226 if (port_impl->queue != NULL) {
5227 munmap(port_impl->queue, (port->id.id == (nxt_port_id_t) -1)
5228 ? sizeof(nxt_app_queue_t)
5229 : sizeof(nxt_port_queue_t));
5230 }
5231
5232 nxt_unit_free(NULL, port_impl);
5233 }
5234}
5235
5236
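/*
 * Registers a port in the library port hash.  If an entry with the same id
 * already exists, it adopts the new descriptors and queue, requests waiting
 * for the port are moved to their contexts' ready queues, and the existing
 * entry is returned; otherwise a new entry is created and linked to its
 * owning process.
 */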
5237static nxt_unit_port_t *
5238nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *queue)
5239{
5240 int rc;
5241 nxt_queue_t awaiting_req;
5242 nxt_unit_impl_t *lib;
5243 nxt_unit_port_t *old_port;
5244 nxt_unit_process_t *process;
5245 nxt_unit_ctx_impl_t *ctx_impl;
5246 nxt_unit_port_impl_t *new_port, *old_port_impl;
5247 nxt_unit_request_info_impl_t *req_impl;
5248
5249 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5250
5251 pthread_mutex_lock(&lib->mutex);
5252
5253 old_port = nxt_unit_port_hash_find(&lib->ports, &port->id, 0);
5254
5255 if (nxt_slow_path(old_port != NULL)) {
5256 nxt_unit_debug(ctx, "add_port: duplicate port{%d,%d} "
5257 "in_fd %d out_fd %d queue %p",
5258 port->id.pid, port->id.id,
5259 port->in_fd, port->out_fd, queue);
5260
5261 if (old_port->data == NULL) {
5262 old_port->data = port->data;
5263 port->data = NULL;
5264 }
5265
5266 if (old_port->in_fd == -1) {
5267 old_port->in_fd = port->in_fd;
5268 port->in_fd = -1;
5269 }
5270
5271 if (port->in_fd != -1) {
5272 nxt_unit_close(port->in_fd);
5273 port->in_fd = -1;
5274 }
5275
5276 if (old_port->out_fd == -1) {
5277 old_port->out_fd = port->out_fd;
5278 port->out_fd = -1;
5279 }
5280
5281 if (port->out_fd != -1) {
5282 nxt_unit_close(port->out_fd);
5283 port->out_fd = -1;
5284 }
5285
5286 *port = *old_port;
5287
5288 nxt_queue_init(&awaiting_req);
5289
5290 old_port_impl = nxt_container_of(old_port, nxt_unit_port_impl_t, port);
5291
5292 if (old_port_impl->queue == NULL) {
5293 old_port_impl->queue = queue;
5294 }
5295
5296 if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) {
5297 nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req);
5298 nxt_queue_init(&old_port_impl->awaiting_req);
5299 }
5300
5301 old_port_impl->ready = (port->in_fd != -1 || port->out_fd != -1);
5302
5303 pthread_mutex_unlock(&lib->mutex);
5304
5305 if (lib->callbacks.add_port != NULL
5306 && (port->in_fd != -1 || port->out_fd != -1))
5307 {
5308 lib->callbacks.add_port(ctx, old_port);
5309 }
5310
5311 nxt_queue_each(req_impl, &awaiting_req,
5312 nxt_unit_request_info_impl_t, port_wait_link)
5313 {
5314 nxt_queue_remove(&req_impl->port_wait_link);
5315
5316 ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t,
5317 ctx);
5318
5319 pthread_mutex_lock(&ctx_impl->mutex);
5320
5321 nxt_queue_insert_tail(&ctx_impl->ready_req,
5322 &req_impl->port_wait_link);
5323
5324 pthread_mutex_unlock(&ctx_impl->mutex);
5325
5326 nxt_atomic_fetch_add(&ctx_impl->wait_items, -1);
5327
5328 } nxt_queue_loop;
5329
5330 return old_port;
5331 }
5332
5333 new_port = NULL;
5334
5335 nxt_unit_debug(ctx, "add_port: port{%d,%d} in_fd %d out_fd %d queue %p",
5336 port->id.pid, port->id.id,
5337 port->in_fd, port->out_fd, queue);
5338
5339 process = nxt_unit_process_get(ctx, port->id.pid);
5340 if (nxt_slow_path(process == NULL)) {
5341 goto unlock;
5342 }
5343
5344 if (port->id.id >= process->next_port_id) {
5345 process->next_port_id = port->id.id + 1;
5346 }
5347
5348 new_port = nxt_unit_malloc(ctx, sizeof(nxt_unit_port_impl_t));
5349 if (nxt_slow_path(new_port == NULL)) {
5350 nxt_unit_alert(ctx, "add_port: %d,%d malloc() failed",
5351 port->id.pid, port->id.id);
5352
5353 goto unlock;
5354 }
5355
5356 new_port->port = *port;
5357
5358 rc = nxt_unit_port_hash_add(&lib->ports, &new_port->port);
5359 if (nxt_slow_path(rc != NXT_UNIT_OK)) {
5360 nxt_unit_alert(ctx, "add_port: %d,%d hash_add failed",
5361 port->id.pid, port->id.id);
5362
5363 nxt_unit_free(ctx, new_port);
5364
5365 new_port = NULL;
5366
5367 goto unlock;
5368 }
5369
5370 nxt_queue_insert_tail(&process->ports, &new_port->link);
5371
5372 new_port->use_count = 2;
5373 new_port->process = process;
5374 new_port->ready = (port->in_fd != -1 || port->out_fd != -1);
5375 new_port->queue = queue;
5376 new_port->from_socket = 0;
5377 new_port->socket_rbuf = NULL;
5378
5379 nxt_queue_init(&new_port->awaiting_req);
5380
5381 process = NULL;
5382
5383unlock:
5384
5385 pthread_mutex_unlock(&lib->mutex);
5386
5387 if (nxt_slow_path(process != NULL)) {
5388 nxt_unit_process_release(process);
5389 }
5390
5391 if (lib->callbacks.add_port != NULL
5392 && new_port != NULL
5393 && (port->in_fd != -1 || port->out_fd != -1))
5394 {
5395 lib->callbacks.add_port(ctx, &new_port->port);
5396 }
5397
5398 return &new_port->port;
5399}
5400
5401
5402static void
5403nxt_unit_remove_port(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id)
5404{
5405 nxt_unit_port_t *port;
5406 nxt_unit_port_impl_t *port_impl;
5407
5408 pthread_mutex_lock(&lib->mutex);
5409
5410 port = nxt_unit_remove_port_unsafe(lib, port_id);
5411
5412 if (nxt_fast_path(port != NULL)) {
5413 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5414
5415 nxt_queue_remove(&port_impl->link);
5416 }
5417
5418 pthread_mutex_unlock(&lib->mutex);
5419
5420 if (lib->callbacks.remove_port != NULL && port != NULL) {
5421 lib->callbacks.remove_port(&lib->unit, port);
5422 }
5423
5424 if (nxt_fast_path(port != NULL)) {
5425 nxt_unit_port_release(port);
5426 }
5427}
5428
5429
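/*
 * Removes a port from the hash and returns it without taking an extra
 * reference, so the reference formerly held by the hash passes to the
 * caller, which eventually drops it with nxt_unit_port_release().
 * Must be called with lib->mutex held.
 */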
5430static nxt_unit_port_t *
5431nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id)
5432{
5433 nxt_unit_port_t *port;
5434
5435 port = nxt_unit_port_hash_find(&lib->ports, port_id, 1);
5436 if (nxt_slow_path(port == NULL)) {
5437 nxt_unit_debug(NULL, "remove_port: port{%d,%d} not found",
5438 (int) port_id->pid, (int) port_id->id);
5439
5440 return NULL;
5441 }
5442
5443 nxt_unit_debug(NULL, "remove_port: port{%d,%d}, fds %d,%d, data %p",
5444 (int) port_id->pid, (int) port_id->id,
5445 port->in_fd, port->out_fd, port->data);
5446
5447 return port;
5448}
5449
5450
5451static void
5452nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid)
5453{
5454 nxt_unit_process_t *process;
5455
5456 pthread_mutex_lock(&lib->mutex);
5457
5458 process = nxt_unit_process_find(lib, pid, 1);
5459 if (nxt_slow_path(process == NULL)) {
5460 nxt_unit_debug(NULL, "remove_pid: process %d not found", (int) pid);
5461
5462 pthread_mutex_unlock(&lib->mutex);
5463
5464 return;
5465 }
5466
5467 nxt_unit_remove_process(lib, process);
5468
5469 if (lib->callbacks.remove_pid != NULL) {
5470 lib->callbacks.remove_pid(&lib->unit, pid);
5471 }
5472}
5473
5474
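/*
 * Called with lib->mutex locked; the function unlocks it itself.  While
 * still locked it detaches every port of the process from the hash, then,
 * outside the lock, runs the remove_port callback for each port, releases
 * the ports and finally drops the process reference.
 */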
5475static void
5476nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process)
5477{
5478 nxt_queue_t ports;
5479 nxt_unit_port_impl_t *port;
5480
5481 nxt_queue_init(&ports);
5482
5483 nxt_queue_add(&ports, &process->ports);
5484
5485 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {
5486
5487 nxt_unit_remove_port_unsafe(lib, &port->port.id);
5488
5489 } nxt_queue_loop;
5490
5491 pthread_mutex_unlock(&lib->mutex);
5492
5493 nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) {
5494
5495 nxt_queue_remove(&port->link);
5496
5497 if (lib->callbacks.remove_port != NULL) {
5498 lib->callbacks.remove_port(&lib->unit, &port->port);
5499 }
5500
5501 nxt_unit_port_release(&port->port);
5502
5503 } nxt_queue_loop;
5504
5505 nxt_unit_process_release(process);
5506}
5507
5508
5509static void
5510nxt_unit_quit(nxt_unit_ctx_t *ctx)
5511{
5512 nxt_unit_impl_t *lib;
5513
5514 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5515
5516 if (lib->online) {
5517 lib->online = 0;
5518
5519 if (lib->callbacks.quit != NULL) {
5520 lib->callbacks.quit(ctx);
5521 }
5522 }
5523}
5524
5525
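/*
 * Sends a _NXT_PORT_MSG_GET_PORT request for the given port to the router,
 * naming this context's read port as the reply port; presumably the router
 * answers with a NEW_PORT message carrying the port's descriptors.
 */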
5526static int
5527nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id)
5528{
5529 ssize_t res;
5530 nxt_unit_impl_t *lib;
5531 nxt_unit_ctx_impl_t *ctx_impl;
5532
5533 struct {
5534 nxt_port_msg_t msg;
5535 nxt_port_msg_get_port_t get_port;
5536 } m;
5537
5538 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5539 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
5540
5541 memset(&m.msg, 0, sizeof(nxt_port_msg_t));
5542
5543 m.msg.pid = lib->pid;
5544 m.msg.reply_port = ctx_impl->read_port->id.id;
5545 m.msg.type = _NXT_PORT_MSG_GET_PORT;
5546
5547 m.get_port.id = port_id->id;
5548 m.get_port.pid = port_id->pid;
5549
5550 nxt_unit_debug(ctx, "get_port: %d %d", (int) port_id->pid,
5551 (int) port_id->id);
5552
5553 res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL, 0);
5554 if (nxt_slow_path(res != sizeof(m))) {
5555 return NXT_UNIT_ERROR;
5556 }
5557
5558 return NXT_UNIT_OK;
5559}
5560
5561
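/*
 * Two delivery paths.  A small message without ancillary data is written
 * into the receiver's shared-memory port queue; when the enqueue reports
 * that a notification is needed, a _NXT_PORT_MSG_READ_QUEUE message is
 * additionally sent over the socket (or via the port_send callback) to
 * wake the reader.  A message that is too large or carries descriptors
 * goes over the socket instead, preceded by a one-byte
 * _NXT_PORT_MSG_READ_SOCKET marker pushed into the queue, which appears
 * to keep socket messages ordered relative to queued ones.
 */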
5562static ssize_t
5563nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
5564 const void *buf, size_t buf_size, const void *oob, size_t oob_size)
5565{
5566 int notify;
5567 ssize_t ret;
5568 nxt_int_t rc;
5569 nxt_port_msg_t msg;
5570 nxt_unit_impl_t *lib;
5571 nxt_unit_port_impl_t *port_impl;
5572
5573 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5574
5575 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5576 if (port_impl->queue != NULL && oob_size == 0
5577 && buf_size <= NXT_PORT_QUEUE_MSG_SIZE)
5578 {
5579 rc = nxt_port_queue_send(port_impl->queue, buf, buf_size, &notify);
5580 if (nxt_slow_path(rc != NXT_OK)) {
5581 nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow",
5582 (int) port->id.pid, (int) port->id.id);
5583
5584 return -1;
5585 }
5586
5587 nxt_unit_debug(ctx, "port{%d,%d} enqueue %d notify %d",
5588 (int) port->id.pid, (int) port->id.id,
5589 (int) buf_size, notify);
5590
5591 if (notify) {
5592 memcpy(&msg, buf, sizeof(nxt_port_msg_t));
5593
5594 msg.type = _NXT_PORT_MSG_READ_QUEUE;
5595
5596 if (lib->callbacks.port_send == NULL) {
5597 ret = nxt_unit_sendmsg(ctx, port->out_fd, &msg,
5598 sizeof(nxt_port_msg_t), NULL, 0);
5599
5600 nxt_unit_debug(ctx, "port{%d,%d} send %d read_queue",
5601 (int) port->id.pid, (int) port->id.id,
5602 (int) ret);
5603
5604 } else {
5605 ret = lib->callbacks.port_send(ctx, port, &msg,
5606 sizeof(nxt_port_msg_t), NULL, 0);
5607
5608 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d read_queue",
5609 (int) port->id.pid, (int) port->id.id,
5610 (int) ret);
5611 }
5612
5613 }
5614
5615 return buf_size;
5616 }
5617
5618 if (port_impl->queue != NULL) {
5619 msg.type = _NXT_PORT_MSG_READ_SOCKET;
5620
5621 rc = nxt_port_queue_send(port_impl->queue, &msg.type, 1, &notify);
5622 if (nxt_slow_path(rc != NXT_OK)) {
5623 nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow",
5624 (int) port->id.pid, (int) port->id.id);
5625
5626 return -1;
5627 }
5628
5629 nxt_unit_debug(ctx, "port{%d,%d} enqueue 1 read_socket notify %d",
5630 (int) port->id.pid, (int) port->id.id, notify);
5631 }
5632
5633 if (lib->callbacks.port_send != NULL) {
5634 ret = lib->callbacks.port_send(ctx, port, buf, buf_size,
5635 oob, oob_size);
5636
5637 nxt_unit_debug(ctx, "port{%d,%d} sendcb %d",
5638 (int) port->id.pid, (int) port->id.id,
5639 (int) ret);
5640
5641 } else {
5642 ret = nxt_unit_sendmsg(ctx, port->out_fd, buf, buf_size,
5643 oob, oob_size);
5644
5645 nxt_unit_debug(ctx, "port{%d,%d} sendmsg %d",
5646 (int) port->id.pid, (int) port->id.id,
5647 (int) ret);
5648 }
5649
5650 return ret;
5651}
5652
5653
5654static ssize_t
5655nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd,
5656 const void *buf, size_t buf_size, const void *oob, size_t oob_size)
5657{
5658 int err;
5659 ssize_t res;
5660 struct iovec iov[1];
5661 struct msghdr msg;
5662
5663 iov[0].iov_base = (void *) buf;
5664 iov[0].iov_len = buf_size;
5665
5666 msg.msg_name = NULL;
5667 msg.msg_namelen = 0;
5668 msg.msg_iov = iov;
5669 msg.msg_iovlen = 1;
5670 msg.msg_flags = 0;
5671 msg.msg_control = (void *) oob;
5672 msg.msg_controllen = oob_size;
5673
5674retry:
5675
5676 res = sendmsg(fd, &msg, 0);
5677
5678 if (nxt_slow_path(res == -1)) {
5679 err = errno;
5680
5681 if (err == EINTR) {
5682 goto retry;
5683 }
5684
5685        /*
5686         * FIXME: This should be an "alert" once graceful shutdown of the
5687         * router is implemented.
5688         */
5689 nxt_unit_warn(ctx, "sendmsg(%d, %d) failed: %s (%d)",
5690 fd, (int) buf_size, strerror(err), err);
5691
5692 } else {
5693 nxt_unit_debug(ctx, "sendmsg(%d, %d): %d", fd, (int) buf_size,
5694 (int) res);
5695 }
5696
5697 return res;
5698}
5699
5700
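/*
 * Receives the next message for a context port, honouring the
 * READ_QUEUE / READ_SOCKET protocol.  A pending READ_SOCKET marker means
 * the next message must be taken from the socket; a message read from the
 * socket while no marker is pending is stashed in socket_rbuf and replayed
 * once its marker is dequeued.  A READ_QUEUE message received over the
 * socket simply redirects the reader back to the queue.
 */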
5701static int
5702nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
5703 nxt_unit_read_buf_t *rbuf)
5704{
5705 int res, read;
5706 nxt_unit_port_impl_t *port_impl;
5707
5708 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5709
5710 read = 0;
5711
5712retry:
5713
5714 if (port_impl->from_socket > 0) {
5715 if (port_impl->socket_rbuf != NULL
5716 && port_impl->socket_rbuf->size > 0)
5717 {
5718 port_impl->from_socket--;
5719
5720 nxt_unit_rbuf_cpy(rbuf, port_impl->socket_rbuf);
5721 port_impl->socket_rbuf->size = 0;
5722
5723 nxt_unit_debug(ctx, "port{%d,%d} use suspended message %d",
5724 (int) port->id.pid, (int) port->id.id,
5725 (int) rbuf->size);
5726
5727 return NXT_UNIT_OK;
5728 }
5729
5730 } else {
5731 res = nxt_unit_port_queue_recv(port, rbuf);
5732
5733 if (res == NXT_UNIT_OK) {
5734 if (nxt_unit_is_read_socket(rbuf)) {
5735 port_impl->from_socket++;
5736
5737 nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d",
5738 (int) port->id.pid, (int) port->id.id,
5739 port_impl->from_socket);
5740
5741 goto retry;
5742 }
5743
5744 nxt_unit_debug(ctx, "port{%d,%d} dequeue %d",
5745 (int) port->id.pid, (int) port->id.id,
5746 (int) rbuf->size);
5747
5748 return NXT_UNIT_OK;
5749 }
5750 }
5751
5752 if (read) {
5753 return NXT_UNIT_AGAIN;
5754 }
5755
5756 res = nxt_unit_port_recv(ctx, port, rbuf);
5757 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
5758 return NXT_UNIT_ERROR;
5759 }
5760
5761 read = 1;
5762
5763 if (nxt_unit_is_read_queue(rbuf)) {
5764 nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue",
5765 (int) port->id.pid, (int) port->id.id, (int) rbuf->size);
5766
5767 if (port_impl->from_socket) {
5768 nxt_unit_warn(ctx, "port protocol warning: READ_QUEUE after READ_SOCKET");
5769 }
5770
5771 goto retry;
5772 }
5773
5774 nxt_unit_debug(ctx, "port{%d,%d} recvmsg %d",
5775 (int) port->id.pid, (int) port->id.id,
5776 (int) rbuf->size);
5777
5778 if (res == NXT_UNIT_AGAIN) {
5779 return NXT_UNIT_AGAIN;
5780 }
5781
5782 if (port_impl->from_socket > 0) {
5783 port_impl->from_socket--;
5784
5785 return NXT_UNIT_OK;
5786 }
5787
5788 nxt_unit_debug(ctx, "port{%d,%d} suspend message %d",
5789 (int) port->id.pid, (int) port->id.id,
5790 (int) rbuf->size);
5791
5792 if (port_impl->socket_rbuf == NULL) {
5793 port_impl->socket_rbuf = nxt_unit_read_buf_get(ctx);
5794
5795 if (nxt_slow_path(port_impl->socket_rbuf == NULL)) {
5796 return NXT_UNIT_ERROR;
5797 }
5798
5799 port_impl->socket_rbuf->size = 0;
5800 }
5801
5802 if (port_impl->socket_rbuf->size > 0) {
5803 nxt_unit_alert(ctx, "too many port socket messages");
5804
5805 return NXT_UNIT_ERROR;
5806 }
5807
5808 nxt_unit_rbuf_cpy(port_impl->socket_rbuf, rbuf);
5809
5810 memset(rbuf->oob, 0, sizeof(struct cmsghdr));
5811
5812 goto retry;
5813}
5814
5815
5816nxt_inline void
5817nxt_unit_rbuf_cpy(nxt_unit_read_buf_t *dst, nxt_unit_read_buf_t *src)
5818{
5819 memcpy(dst->buf, src->buf, src->size);
5820 dst->size = src->size;
5821 memcpy(dst->oob, src->oob, sizeof(src->oob));
5822}
5823
5824
5825static int
5826nxt_unit_shared_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
5827 nxt_unit_read_buf_t *rbuf)
5828{
5829 int res;
5830
5831retry:
5832
5833 res = nxt_unit_app_queue_recv(port, rbuf);
5834
5835 if (res == NXT_UNIT_AGAIN) {
5836 res = nxt_unit_port_recv(ctx, port, rbuf);
5837 if (nxt_slow_path(res == NXT_UNIT_ERROR)) {
5838 return NXT_UNIT_ERROR;
5839 }
5840
5841 if (nxt_unit_is_read_queue(rbuf)) {
5842 nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue",
5843 (int) port->id.pid, (int) port->id.id, (int) rbuf->size);
5844
5845 goto retry;
5846 }
5847 }
5848
5849 return res;
5850}
5851
5852
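/*
 * Reads a single message (plus ancillary data) from the port, either via
 * the application-supplied port_recv callback or with recvmsg() on the
 * port's in_fd; in the latter case EINTR is retried and EAGAIN is reported
 * as NXT_UNIT_AGAIN.
 */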
5853static int
5854nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port,
5855 nxt_unit_read_buf_t *rbuf)
5856{
5857 int fd, err;
5858 struct iovec iov[1];
5859 struct msghdr msg;
5860 nxt_unit_impl_t *lib;
5861
5862 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
5863
5864 if (lib->callbacks.port_recv != NULL) {
5865 rbuf->size = lib->callbacks.port_recv(ctx, port,
5866 rbuf->buf, sizeof(rbuf->buf),
5867 rbuf->oob, sizeof(rbuf->oob));
5868
5869 nxt_unit_debug(ctx, "port{%d,%d} recvcb %d",
5870 (int) port->id.pid, (int) port->id.id, (int) rbuf->size);
5871
5872 if (nxt_slow_path(rbuf->size < 0)) {
5873 return NXT_UNIT_ERROR;
5874 }
5875
5876 return NXT_UNIT_OK;
5877 }
5878
5879 iov[0].iov_base = rbuf->buf;
5880 iov[0].iov_len = sizeof(rbuf->buf);
5881
5882 msg.msg_name = NULL;
5883 msg.msg_namelen = 0;
5884 msg.msg_iov = iov;
5885 msg.msg_iovlen = 1;
5886 msg.msg_flags = 0;
5887 msg.msg_control = rbuf->oob;
5888 msg.msg_controllen = sizeof(rbuf->oob);
5889
5890 fd = port->in_fd;
5891
5892retry:
5893
5894 rbuf->size = recvmsg(fd, &msg, 0);
5895
5896 if (nxt_slow_path(rbuf->size == -1)) {
5897 err = errno;
5898
5899 if (err == EINTR) {
5900 goto retry;
5901 }
5902
5903 if (err == EAGAIN) {
5904 nxt_unit_debug(ctx, "recvmsg(%d) failed: %s (%d)",
5905 fd, strerror(err), err);
5906
5907 return NXT_UNIT_AGAIN;
5908 }
5909
5910 nxt_unit_alert(ctx, "recvmsg(%d) failed: %s (%d)",
5911 fd, strerror(err), err);
5912
5913 return NXT_UNIT_ERROR;
5914 }
5915
5916 nxt_unit_debug(ctx, "recvmsg(%d): %d", fd, (int) rbuf->size);
5917
5918 return NXT_UNIT_OK;
5919}
5920
5921
5922static int
5923nxt_unit_port_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf)
5924{
5925 nxt_unit_port_impl_t *port_impl;
5926
5927 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5928
5929 rbuf->size = nxt_port_queue_recv(port_impl->queue, rbuf->buf);
5930
5931 return (rbuf->size == -1) ? NXT_UNIT_AGAIN : NXT_UNIT_OK;
5932}
5933
5934
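/*
 * Dequeues a message from the shared application queue.  Each dequeued
 * message is confirmed with nxt_app_queue_cancel() using the cookie and
 * the stream number; if the confirmation fails, the message was cancelled
 * (presumably re-sent through another channel), so it is skipped and the
 * read is retried.
 */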
5935static int
5936nxt_unit_app_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf)
5937{
5938 uint32_t cookie;
5939 nxt_port_msg_t *port_msg;
5940 nxt_app_queue_t *queue;
5941 nxt_unit_port_impl_t *port_impl;
5942
5943 port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port);
5944 queue = port_impl->queue;
5945
5946retry:
5947
5948 rbuf->size = nxt_app_queue_recv(queue, rbuf->buf, &cookie);
5949
5950 nxt_unit_debug(NULL, "app_queue_recv: %d", (int) rbuf->size);
5951
5952 if (rbuf->size >= (ssize_t) sizeof(nxt_port_msg_t)) {
5953 port_msg = (nxt_port_msg_t *) rbuf->buf;
5954
5955 if (nxt_app_queue_cancel(queue, cookie, port_msg->stream)) {
5956 return NXT_UNIT_OK;
5957 }
5958
5959 nxt_unit_debug(NULL, "app_queue_recv: message cancelled");
5960
5961 goto retry;
5962 }
5963
5964 return (rbuf->size == -1) ? NXT_UNIT_AGAIN : NXT_UNIT_OK;
5965}
5966
5967
5968nxt_inline int
5969nxt_unit_close(int fd)
5970{
5971 int res;
5972
5973 res = close(fd);
5974
5975 if (nxt_slow_path(res == -1)) {
5976 nxt_unit_alert(NULL, "close(%d) failed: %s (%d)",
5977 fd, strerror(errno), errno);
5978
5979 } else {
5980 nxt_unit_debug(NULL, "close(%d): %d", fd, res);
5981 }
5982
5983 return res;
5984}
5985
5986
5987static int
5988nxt_unit_fd_blocking(int fd)
5989{
5990 int nb;
5991
5992 nb = 0;
5993
5994 if (nxt_slow_path(ioctl(fd, FIONBIO, &nb) == -1)) {
5995 nxt_unit_alert(NULL, "ioctl(%d, FIONBIO, 0) failed: %s (%d)",
5996 fd, strerror(errno), errno);
5997
5998 return NXT_UNIT_ERROR;
5999 }
6000
6001 return NXT_UNIT_OK;
6002}
6003
6004
6005static nxt_int_t
6006nxt_unit_port_hash_test(nxt_lvlhsh_query_t *lhq, void *data)
6007{
6008 nxt_unit_port_t *port;
6009 nxt_unit_port_hash_id_t *port_id;
6010
6011 port = data;
6012 port_id = (nxt_unit_port_hash_id_t *) lhq->key.start;
6013
6014 if (lhq->key.length == sizeof(nxt_unit_port_hash_id_t)
6015 && port_id->pid == port->id.pid
6016 && port_id->id == port->id.id)
6017 {
6018 return NXT_OK;
6019 }
6020
6021 return NXT_DECLINED;
6022}
6023
6024
6025static const nxt_lvlhsh_proto_t lvlhsh_ports_proto nxt_aligned(64) = {
6026 NXT_LVLHSH_DEFAULT,
6027 nxt_unit_port_hash_test,
6028 nxt_unit_lvlhsh_alloc,
6029 nxt_unit_lvlhsh_free,
6030};
6031
6032
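/*
 * Fills an lvlhsh query keyed by {pid, id}.  The murmur hash of the key is
 * computed lazily and cached in port_id->hash, so repeated lookups of the
 * same port id avoid rehashing.
 */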
6033static inline void
6034nxt_unit_port_hash_lhq(nxt_lvlhsh_query_t *lhq,
6035 nxt_unit_port_hash_id_t *port_hash_id,
6036 nxt_unit_port_id_t *port_id)
6037{
6038 port_hash_id->pid = port_id->pid;
6039 port_hash_id->id = port_id->id;
6040
6041 if (nxt_fast_path(port_id->hash != 0)) {
6042 lhq->key_hash = port_id->hash;
6043
6044 } else {
6045 lhq->key_hash = nxt_murmur_hash2(port_hash_id, sizeof(*port_hash_id));
6046
6047 port_id->hash = lhq->key_hash;
6048
6049 nxt_unit_debug(NULL, "calculate hash for port_id (%d, %d): %04X",
6050 (int) port_id->pid, (int) port_id->id,
6051 (int) port_id->hash);
6052 }
6053
6054 lhq->key.length = sizeof(nxt_unit_port_hash_id_t);
6055 lhq->key.start = (u_char *) port_hash_id;
6056 lhq->proto = &lvlhsh_ports_proto;
6057 lhq->pool = NULL;
6058}
6059
6060
6061static int
6062nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port)
6063{
6064 nxt_int_t res;
6065 nxt_lvlhsh_query_t lhq;
6066 nxt_unit_port_hash_id_t port_hash_id;
6067
6068 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, &port->id);
6069 lhq.replace = 0;
6070 lhq.value = port;
6071
6072 res = nxt_lvlhsh_insert(port_hash, &lhq);
6073
6074 switch (res) {
6075
6076 case NXT_OK:
6077 return NXT_UNIT_OK;
6078
6079 default:
6080 return NXT_UNIT_ERROR;
6081 }
6082}
6083
6084
6085static nxt_unit_port_t *
6086nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id,
6087 int remove)
6088{
6089 nxt_int_t res;
6090 nxt_lvlhsh_query_t lhq;
6091 nxt_unit_port_hash_id_t port_hash_id;
6092
6093 nxt_unit_port_hash_lhq(&lhq, &port_hash_id, port_id);
6094
6095 if (remove) {
6096 res = nxt_lvlhsh_delete(port_hash, &lhq);
6097
6098 } else {
6099 res = nxt_lvlhsh_find(port_hash, &lhq);
6100 }
6101
6102 switch (res) {
6103
6104 case NXT_OK:
6105 if (!remove) {
6106 nxt_unit_port_use(lhq.value);
6107 }
6108
6109 return lhq.value;
6110
6111 default:
6112 return NULL;
6113 }
6114}
6115
6116
6117static nxt_int_t
6118nxt_unit_request_hash_test(nxt_lvlhsh_query_t *lhq, void *data)
6119{
6120 return NXT_OK;
6121}
6122
6123
6124static const nxt_lvlhsh_proto_t lvlhsh_requests_proto nxt_aligned(64) = {
6125 NXT_LVLHSH_DEFAULT,
6126 nxt_unit_request_hash_test,
6127 nxt_unit_lvlhsh_alloc,
6128 nxt_unit_lvlhsh_free,
6129};
6130
6131
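/*
 * The per-context request hash is keyed by the request stream number; the
 * in_hash flag prevents double insertion.  Later messages that refer to
 * the same stream (request body, acknowledgements, websocket frames) can
 * presumably be matched to their request through this hash.
 */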
6132static int
6133nxt_unit_request_hash_add(nxt_unit_ctx_t *ctx,
6134 nxt_unit_request_info_t *req)
6135{
6136 uint32_t *stream;
6137 nxt_int_t res;
6138 nxt_lvlhsh_query_t lhq;
6139 nxt_unit_ctx_impl_t *ctx_impl;
6140 nxt_unit_request_info_impl_t *req_impl;
6141
6142 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
6143 if (req_impl->in_hash) {
6144 return NXT_UNIT_OK;
6145 }
6146
6147 stream = &req_impl->stream;
6148
6149 lhq.key_hash = nxt_murmur_hash2(stream, sizeof(*stream));
6150 lhq.key.length = sizeof(*stream);
6151 lhq.key.start = (u_char *) stream;
6152 lhq.proto = &lvlhsh_requests_proto;
6153 lhq.pool = NULL;
6154 lhq.replace = 0;
6155 lhq.value = req_impl;
6156
6157 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
6158
6159 pthread_mutex_lock(&ctx_impl->mutex);
6160
6161 res = nxt_lvlhsh_insert(&ctx_impl->requests, &lhq);
6162
6163 pthread_mutex_unlock(&ctx_impl->mutex);
6164
6165 switch (res) {
6166
6167 case NXT_OK:
6168 req_impl->in_hash = 1;
6169 return NXT_UNIT_OK;
6170
6171 default:
6172 return NXT_UNIT_ERROR;
6173 }
6174}
6175
6176
6177static nxt_unit_request_info_t *
6178nxt_unit_request_hash_find(nxt_unit_ctx_t *ctx, uint32_t stream, int remove)
6179{
6180 nxt_int_t res;
6181 nxt_lvlhsh_query_t lhq;
6182 nxt_unit_ctx_impl_t *ctx_impl;
6183 nxt_unit_request_info_impl_t *req_impl;
6184
6185 lhq.key_hash = nxt_murmur_hash2(&stream, sizeof(stream));
6186 lhq.key.length = sizeof(stream);
6187 lhq.key.start = (u_char *) &stream;
6188 lhq.proto = &lvlhsh_requests_proto;
6189 lhq.pool = NULL;
6190
6191 ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx);
6192
6193 pthread_mutex_lock(&ctx_impl->mutex);
6194
6195 if (remove) {
6196 res = nxt_lvlhsh_delete(&ctx_impl->requests, &lhq);
6197
6198 } else {
6199 res = nxt_lvlhsh_find(&ctx_impl->requests, &lhq);
6200 }
6201
6202 pthread_mutex_unlock(&ctx_impl->mutex);
6203
6204 switch (res) {
6205
6206 case NXT_OK:
6207 req_impl = nxt_container_of(lhq.value, nxt_unit_request_info_impl_t,
6208 req);
6209 if (remove) {
6210 req_impl->in_hash = 0;
6211 }
6212
6213 return lhq.value;
6214
6215 default:
6216 return NULL;
6217 }
6218}
6219
6220
6221void
6222nxt_unit_log(nxt_unit_ctx_t *ctx, int level, const char *fmt, ...)
6223{
6224 int log_fd, n;
6225 char msg[NXT_MAX_ERROR_STR], *p, *end;
6226 pid_t pid;
6227 va_list ap;
6228 nxt_unit_impl_t *lib;
6229
6230 if (nxt_fast_path(ctx != NULL)) {
6231 lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit);
6232
6233 pid = lib->pid;
6234 log_fd = lib->log_fd;
6235
6236 } else {
6237 pid = getpid();
6238 log_fd = STDERR_FILENO;
6239 }
6240
6241 p = msg;
6242 end = p + sizeof(msg) - 1;
6243
6244 p = nxt_unit_snprint_prefix(p, end, pid, level);
6245
6246 va_start(ap, fmt);
6247 p += vsnprintf(p, end - p, fmt, ap);
6248 va_end(ap);
6249
6250 if (nxt_slow_path(p > end)) {
6251 memcpy(end - 5, "[...]", 5);
6252 p = end;
6253 }
6254
6255 *p++ = '\n';
6256
6257 n = write(log_fd, msg, p - msg);
6258 if (nxt_slow_path(n < 0)) {
6259 fprintf(stderr, "Failed to write log: %.*s", (int) (p - msg), msg);
6260 }
6261}
6262
6263
6264void
6265nxt_unit_req_log(nxt_unit_request_info_t *req, int level, const char *fmt, ...)
6266{
6267 int log_fd, n;
6268 char msg[NXT_MAX_ERROR_STR], *p, *end;
6269 pid_t pid;
6270 va_list ap;
6271 nxt_unit_impl_t *lib;
6272 nxt_unit_request_info_impl_t *req_impl;
6273
6274 if (nxt_fast_path(req != NULL)) {
6275 lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit);
6276
6277 pid = lib->pid;
6278 log_fd = lib->log_fd;
6279
6280 } else {
6281 pid = getpid();
6282 log_fd = STDERR_FILENO;
6283 }
6284
6285 p = msg;
6286 end = p + sizeof(msg) - 1;
6287
6288 p = nxt_unit_snprint_prefix(p, end, pid, level);
6289
6290 if (nxt_fast_path(req != NULL)) {
6291 req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req);
6292
6293 p += snprintf(p, end - p, "#%"PRIu32": ", req_impl->stream);
6294 }
6295
6296 va_start(ap, fmt);
6297 p += vsnprintf(p, end - p, fmt, ap);
6298 va_end(ap);
6299
6300 if (nxt_slow_path(p > end)) {
6301 memcpy(end - 5, "[...]", 5);
6302 p = end;
6303 }
6304
6305 *p++ = '\n';
6306
6307 n = write(log_fd, msg, p - msg);
6308 if (nxt_slow_path(n < 0)) {
6309 fprintf(stderr, "Failed to write log: %.*s", (int) (p - msg), msg);
6310 }
6311}
6312
6313
6314static const char * nxt_unit_log_levels[] = {
6315 "alert",
6316 "error",
6317 "warn",
6318 "notice",
6319 "info",
6320 "debug",
6321};
6322
6323
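/*
 * Writes the common log prefix: "YYYY/MM/DD hh:mm:ss[.msec] [level]
 * pid#tid [unit] ", with milliseconds included only in debug builds.
 */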
6324static char *
6325nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level)
6326{
6327 struct tm tm;
6328 struct timespec ts;
6329
6330 (void) clock_gettime(CLOCK_REALTIME, &ts);
6331
6332#if (NXT_HAVE_LOCALTIME_R)
6333 (void) localtime_r(&ts.tv_sec, &tm);
6334#else
6335 tm = *localtime(&ts.tv_sec);
6336#endif
6337
6338#if (NXT_DEBUG)
6339 p += snprintf(p, end - p,
6340 "%4d/%02d/%02d %02d:%02d:%02d.%03d ",
6341 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
6342 tm.tm_hour, tm.tm_min, tm.tm_sec,
6343 (int) ts.tv_nsec / 1000000);
6344#else
6345 p += snprintf(p, end - p,
6346 "%4d/%02d/%02d %02d:%02d:%02d ",
6347 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
6348 tm.tm_hour, tm.tm_min, tm.tm_sec);
6349#endif
6350
6351 p += snprintf(p, end - p,
6352 "[%s] %d#%"PRIu64" [unit] ", nxt_unit_log_levels[level],
6353 (int) pid,
6354 (uint64_t) (uintptr_t) nxt_thread_get_tid());
6355
6356 return p;
6357}
6358
6359
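/*
 * Allocator pair for the lvlhsh hashes above.  Buckets are allocated with
 * posix_memalign(), aligned to their own size; nxt_lvlhsh presumably only
 * requests power-of-two sizes, which posix_memalign() requires for the
 * alignment argument.
 */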
6360static void *
6361nxt_unit_lvlhsh_alloc(void *data, size_t size)
6362{
6363 int err;
6364 void *p;
6365
6366 err = posix_memalign(&p, size, size);
6367
6368 if (nxt_fast_path(err == 0)) {
6369 nxt_unit_debug(NULL, "posix_memalign(%d, %d): %p",
6370 (int) size, (int) size, p);
6371 return p;
6372 }
6373
6374 nxt_unit_alert(NULL, "posix_memalign(%d, %d) failed: %s (%d)",
6375 (int) size, (int) size, strerror(err), err);
6376 return NULL;
6377}
6378
6379
6380static void
6381nxt_unit_lvlhsh_free(void *data, void *p)
6382{
6383 nxt_unit_free(NULL, p);
6384}
6385
6386
6387void *
6388nxt_unit_malloc(nxt_unit_ctx_t *ctx, size_t size)
6389{
6390 void *p;
6391
6392 p = malloc(size);
6393
6394 if (nxt_fast_path(p != NULL)) {
6395 nxt_unit_debug(ctx, "malloc(%d): %p", (int) size, p);
6396
6397 } else {
6398 nxt_unit_alert(ctx, "malloc(%d) failed: %s (%d)",
6399 (int) size, strerror(errno), errno);
6400 }
6401
6402 return p;
6403}
6404
6405
6406void
6407nxt_unit_free(nxt_unit_ctx_t *ctx, void *p)
6408{
6409 nxt_unit_debug(ctx, "free(%p)", p);
6410
6411 free(p);
6412}
6413
6414
6415static int
6416nxt_unit_memcasecmp(const void *p1, const void *p2, size_t length)
6417{
6418 u_char c1, c2;
6419 nxt_int_t n;
6420 const u_char *s1, *s2;
6421
6422 s1 = p1;
6423 s2 = p2;
6424
6425 while (length-- != 0) {
6426 c1 = *s1++;
6427 c2 = *s2++;
6428
6429 c1 = nxt_lowcase(c1);
6430 c2 = nxt_lowcase(c2);
6431
6432 n = c1 - c2;
6433
6434 if (n != 0) {
6435 return n;
6436 }
6437 }
6438
6439 return 0;
6440}