
/*
 * Copyright (C) Max Romanov
 * Copyright (C) NGINX, Inc.
 */

#ifndef _NXT_PORT_MEMORY_INT_H_INCLUDED_
#define _NXT_PORT_MEMORY_INT_H_INCLUDED_


#include <stdint.h>
#include <nxt_atomic.h>


#ifdef NXT_MMAP_TINY_CHUNK

#define PORT_MMAP_CHUNK_SIZE   16
#define PORT_MMAP_HEADER_SIZE  1024
#define PORT_MMAP_DATA_SIZE    1024

#else

#define PORT_MMAP_CHUNK_SIZE   (1024 * 16)
#define PORT_MMAP_HEADER_SIZE  (1024 * 4)
#define PORT_MMAP_DATA_SIZE    (1024 * 1024 * 10)

#endif


#define PORT_MMAP_SIZE         (PORT_MMAP_HEADER_SIZE + PORT_MMAP_DATA_SIZE)
#define PORT_MMAP_CHUNK_COUNT  (PORT_MMAP_DATA_SIZE / PORT_MMAP_CHUNK_SIZE)
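
/*
 * With the default (non-tiny) sizes above, the data area is 10 MiB split
 * into 16 KiB chunks:
 *
 *     PORT_MMAP_CHUNK_COUNT = (1024 * 1024 * 10) / (1024 * 16) = 640
 *
 * and PORT_MMAP_SIZE is the 4 KiB header plus the 10 MiB data area.
 */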


typedef uint32_t  nxt_chunk_id_t;

typedef nxt_atomic_uint_t  nxt_free_map_t;

#define FREE_BITS  (sizeof(nxt_free_map_t) * 8)

#define FREE_IDX(nchunk)  ((nchunk) / FREE_BITS)

#define FREE_MASK(nchunk)                                                    \
    (1ULL << ((nchunk) % FREE_BITS))

#define MAX_FREE_IDX  FREE_IDX(PORT_MMAP_CHUNK_COUNT)
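
/*
 * Free-chunk bookkeeping: each bit in a free map covers one chunk, and a
 * set bit means "free".  Assuming 64-bit atomic words (FREE_BITS depends on
 * the platform's nxt_atomic_uint_t), the default 640 chunks need
 * 640 / 64 = 10 nxt_free_map_t words, which is what MAX_FREE_IDX yields.
 */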


/* Mapped at the start of the shared memory segment. */
struct nxt_port_mmap_header_s {
    uint32_t        id;
    nxt_pid_t       src_pid;    /* For sanity check. */
    nxt_pid_t       dst_pid;    /* For sanity check. */
    nxt_port_id_t   sent_over;
    nxt_atomic_t    oosm;       /* "Out of shared memory" flag. */
    nxt_free_map_t  free_map[MAX_FREE_IDX];
    nxt_free_map_t  free_map_padding;
    nxt_free_map_t  free_tracking_map[MAX_FREE_IDX];
    nxt_free_map_t  free_tracking_map_padding;
    nxt_atomic_t    tracking[PORT_MMAP_CHUNK_COUNT];
};
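
/*
 * The *_padding words follow each map so that indexing with FREE_IDX() is
 * safe even when PORT_MMAP_CHUNK_COUNT is not a multiple of FREE_BITS and
 * the truncating division in MAX_FREE_IDX would otherwise drop the last,
 * partially used word (a guess from the layout; with the default sizes the
 * 640 chunks fill the 10 words exactly).
 */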


struct nxt_port_mmap_handler_s {
    nxt_port_mmap_header_t  *hdr;
    nxt_atomic_t            use_count;  /* Reference count. */
    nxt_fd_t                fd;         /* Shared memory segment descriptor. */
};

/*
 * Element of nxt_process_t.incoming/outgoing; a shared memory segment
 * descriptor.
 */
struct nxt_port_mmap_s {
    nxt_port_mmap_handler_t  *mmap_handler;
};


typedef struct nxt_port_mmap_msg_s  nxt_port_mmap_msg_t;

/* Passed as a second iov chunk when the 'mmap' bit in nxt_port_msg_t is 1. */
struct nxt_port_mmap_msg_s {
    uint32_t        mmap_id;   /* Mmap index in nxt_process_t.outgoing. */
    nxt_chunk_id_t  chunk_id;  /* Mmap chunk index. */
    uint32_t        size;      /* Payload data size. */
};
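
/*
 * Sketch of how a receiver turns such a message into a payload pointer
 * (illustrative only; looking the header up by mmap_id is elided, and hdr
 * and msg are assumed to be valid):
 *
 *     u_char  *p;
 *
 *     p = nxt_port_mmap_chunk_start(hdr, msg->chunk_id);
 *     ... read msg->size bytes starting at p ...
 */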


typedef struct nxt_port_mmap_tracking_msg_s  nxt_port_mmap_tracking_msg_t;

struct nxt_port_mmap_tracking_msg_s {
    uint32_t        mmap_id;      /* Mmap index in nxt_process_t.outgoing. */
    nxt_chunk_id_t  tracking_id;  /* Tracking index. */
};

nxt_inline nxt_bool_t
nxt_port_mmap_get_free_chunk(nxt_free_map_t *m, nxt_chunk_id_t *c);

/* A set bit in the free map means "free", so a zero result means busy. */
#define nxt_port_mmap_get_chunk_busy(m, c)                                   \
    ((m[FREE_IDX(c)] & FREE_MASK(c)) == 0)

nxt_inline void
nxt_port_mmap_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c);

nxt_inline nxt_bool_t
nxt_port_mmap_chk_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c);

nxt_inline void
nxt_port_mmap_set_chunk_free(nxt_free_map_t *m, nxt_chunk_id_t c);


nxt_inline nxt_chunk_id_t
nxt_port_mmap_chunk_id(nxt_port_mmap_header_t *hdr, u_char *p)
{
    u_char  *mm_start;

    mm_start = (u_char *) hdr;

    return ((p - mm_start) - PORT_MMAP_HEADER_SIZE) / PORT_MMAP_CHUNK_SIZE;
}


nxt_inline u_char *
nxt_port_mmap_chunk_start(nxt_port_mmap_header_t *hdr, nxt_chunk_id_t c)
{
    u_char  *mm_start;

    mm_start = (u_char *) hdr;

    return mm_start + PORT_MMAP_HEADER_SIZE + c * PORT_MMAP_CHUNK_SIZE;
}
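
/*
 * The two helpers above are inverses.  For the default sizes, chunk 1
 * starts at offset 4096 + 1 * 16384 = 20480 from the segment base, and
 * mapping that address back gives (20480 - 4096) / 16384 = 1.
 */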


nxt_inline nxt_bool_t
nxt_port_mmap_get_free_chunk(nxt_free_map_t *m, nxt_chunk_id_t *c)
{
    const nxt_free_map_t  default_mask = (nxt_free_map_t) -1;

    int             ffs;
    size_t          i, start;
    nxt_chunk_id_t  chunk;
    nxt_free_map_t  bits, mask;

    /* *c is a search hint: scanning starts from that chunk's map word. */
    start = FREE_IDX(*c);

    /* In the first word, ignore the bits below the hint position. */
    mask = default_mask << ((*c) % FREE_BITS);

    for (i = start; i < MAX_FREE_IDX; i++) {
        bits = m[i] & mask;
        mask = default_mask;

        if (bits == 0) {
            continue;
        }

        /* __builtin_ffsll() returns the 1-based index of the lowest set bit. */
        ffs = __builtin_ffsll(bits);
        if (ffs != 0) {
            chunk = i * FREE_BITS + ffs - 1;

            /* The claim can fail if another process raced us to the chunk. */
            if (nxt_port_mmap_chk_set_chunk_busy(m, chunk)) {
                *c = chunk;
                return 1;
            }
        }
    }

    return 0;
}
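
/*
 * Typical allocation pattern (illustrative sketch; hdr is assumed to point
 * at a mapped nxt_port_mmap_header_t):
 *
 *     u_char          *p;
 *     nxt_chunk_id_t  c;
 *
 *     c = 0;
 *
 *     if (nxt_port_mmap_get_free_chunk(hdr->free_map, &c)) {
 *         p = nxt_port_mmap_chunk_start(hdr, c);
 *         ...
 *     }
 */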


/* Clears the chunk's bit in the free map, marking the chunk busy. */
nxt_inline void
nxt_port_mmap_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c)
{
    nxt_atomic_and_fetch(m + FREE_IDX(c), ~FREE_MASK(c));
}
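
/*
 * Unlike the unconditional setter above, the "chk" variant below claims
 * the chunk with a compare-and-swap and reports whether this caller won
 * it, which is what nxt_port_mmap_get_free_chunk() relies on when several
 * processes allocate from the same map concurrently.
 */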

nxt_inline nxt_bool_t
nxt_port_mmap_chk_set_chunk_busy(nxt_free_map_t *m, nxt_chunk_id_t c)
{
    nxt_free_map_t  *f;
    nxt_free_map_t  free_val, busy_val;

    f = m + FREE_IDX(c);

    while ((*f & FREE_MASK(c)) != 0) {

        /* Expected value: the current word with the chunk's bit set. */
        free_val = *f | FREE_MASK(c);

        /* Desired value: the same word with the chunk's bit cleared. */
        busy_val = free_val & ~FREE_MASK(c);

        if (nxt_atomic_cmp_set(f, free_val, busy_val) != 0) {
            return 1;
        }
    }

    return 0;
}


/* Sets the chunk's bit in the free map, marking the chunk free again. */
nxt_inline void
nxt_port_mmap_set_chunk_free(nxt_free_map_t *m, nxt_chunk_id_t c)
{
    nxt_atomic_or_fetch(m + FREE_IDX(c), FREE_MASK(c));
}


#endif /* _NXT_PORT_MEMORY_INT_H_INCLUDED_ */