nxt_http_chunk_parse.c (611:323e11065f83)

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>

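/*
 * nxt_http_chunk_parse() parses a chain of buffers carrying an HTTP/1.1
 * chunked-encoded body and returns a chain of buffers that reference
 * only the chunk data, with the chunk size lines and CRLF delimiters
 * stripped.  The parsing state is kept in nxt_http_chunk_parse_t, so a
 * chunk split across buffers is continued by subsequent calls.  Errors
 * are reported via the hcp->chunk_error and hcp->error flags, and
 * hcp->last is set once the last (zero-sized) chunk has been seen.
 */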

#define NXT_HTTP_CHUNK_MIDDLE         0
#define NXT_HTTP_CHUNK_END_ON_BORDER  1
#define NXT_HTTP_CHUNK_END            2


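/*
 * The chunk size may be safely shifted left by four bits to append the
 * next hexadecimal digit only while its four most significant bits are
 * still zero.
 */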
#define                                                                       \
nxt_size_is_sufficient(cs)                                                    \
    (cs < ((__typeof__(cs)) 1 << (sizeof(cs) * 8 - 4)))


static nxt_int_t nxt_http_chunk_buffer(nxt_http_chunk_parse_t *hcp,
    nxt_buf_t ***tail, nxt_buf_t *in);


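/*
 * Example: for the input "4\r\nWiki\r\n0\r\n\r\n" the chunk size "4" and
 * its CRLF are consumed by the sw_start, sw_chunk_size and
 * sw_chunk_size_linefeed states, the four data bytes "Wiki" are passed
 * to the output chain by the sw_chunk state, the CRLF after the data by
 * the sw_chunk_end_* states, and the final "0" chunk with its two CRLFs
 * sets hcp->last and terminates the output chain with a sync buffer.
 */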
nxt_buf_t *
nxt_http_chunk_parse(nxt_task_t *task, nxt_http_chunk_parse_t *hcp,
    nxt_buf_t *in)
{
    u_char     c, ch;
    nxt_int_t  ret;
    nxt_buf_t  *b, *out, *nb, **tail;
    enum {
        sw_start = 0,
        sw_chunk_size,
        sw_chunk_size_linefeed,
        sw_chunk_end_newline,
        sw_chunk_end_linefeed,
        sw_chunk,
    } state;

    out = NULL;
    tail = &out;

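    /* Continue from the state saved by the previous call, if any. */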
    state = hcp->state;

    for (b = in; b != NULL; b = b->next) {

        hcp->pos = b->mem.pos;

        while (hcp->pos < b->mem.free) {
            /*
             * The sw_chunk state is tested outside the switch
             * to preserve hcp->pos and to not touch memory.
             */
            if (state == sw_chunk) {
                ret = nxt_http_chunk_buffer(hcp, &tail, b);

                if (ret == NXT_HTTP_CHUNK_MIDDLE) {
                    goto next;
                }

                if (nxt_slow_path(ret == NXT_ERROR)) {
                    hcp->error = 1;
                    goto done;
                }

                state = sw_chunk_end_newline;

                if (ret == NXT_HTTP_CHUNK_END_ON_BORDER) {
                    goto next;
                }

                /* ret == NXT_HTTP_CHUNK_END */
            }

            ch = *hcp->pos++;

            switch (state) {

            case sw_start:
                state = sw_chunk_size;

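                /*
                 * The subtraction is done on an unsigned char, so any
                 * character below '0' wraps around and fails the
                 * "c <= 9" test.
                 */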
                c = ch - '0';

                if (c <= 9) {
                    hcp->chunk_size = c;
                    continue;
                }

                c = (ch | 0x20) - 'a';

                if (c <= 5) {
                    hcp->chunk_size = 0x0A + c;
                    continue;
                }

                goto chunk_error;

            case sw_chunk_size:

                c = ch - '0';

                if (c > 9) {
                    c = (ch | 0x20) - 'a';

                    if (nxt_fast_path(c <= 5)) {
                        c += 0x0A;

                    } else if (nxt_fast_path(ch == NXT_CR)) {
                        state = sw_chunk_size_linefeed;
                        continue;

                    } else {
                        goto chunk_error;
                    }
                }

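                /*
                 * Append the next hexadecimal digit unless shifting the
                 * accumulated chunk size would overflow it.
                 */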
                if (nxt_fast_path(nxt_size_is_sufficient(hcp->chunk_size))) {
                    hcp->chunk_size = (hcp->chunk_size << 4) + c;
                    continue;
                }

                goto chunk_error;

            case sw_chunk_size_linefeed:
                if (nxt_fast_path(ch == NXT_LF)) {

                    if (hcp->chunk_size != 0) {
                        state = sw_chunk;
                        continue;
                    }

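                    /* A zero chunk size denotes the last chunk. */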
                    hcp->last = 1;
                    state = sw_chunk_end_newline;
                    continue;
                }

                goto chunk_error;

            case sw_chunk_end_newline:
                if (nxt_fast_path(ch == NXT_CR)) {
                    state = sw_chunk_end_linefeed;
                    continue;
                }

                goto chunk_error;

            case sw_chunk_end_linefeed:
                if (nxt_fast_path(ch == NXT_LF)) {

                    if (!hcp->last) {
                        state = sw_start;
                        continue;
                    }

                    goto done;
                }

                goto chunk_error;

            case sw_chunk:
                /*
                 * This state is processed before the switch.
                 * It is added here just to suppress a warning.
                 */
                continue;
            }
        }

        if (b->retain == 0) {
            /* No chunk data was found in a buffer. */
            nxt_thread_current_work_queue_add(task->thread,
                                              b->completion_handler,
                                              task, b, b->parent);

        }

    next:

        continue;
    }

    hcp->state = state;

    return out;

chunk_error:

    hcp->chunk_error = 1;

done:

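    /*
     * Terminate the output chain with a sync buffer both on successful
     * completion of the body and on a parse error.
     */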
    nb = nxt_buf_sync_alloc(hcp->mem_pool, NXT_BUF_SYNC_LAST);

    if (nxt_fast_path(nb != NULL)) {
        *tail = nb;

    } else {
        hcp->error = 1;
    }

    // STUB: hcp->chunk_error = 1;
    // STUB: hcp->error = 1;

    return out;
}


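/*
 * nxt_http_chunk_buffer() appends the chunk data that starts at hcp->pos
 * to the output chain, either as the original buffer itself or as a new
 * buffer referencing a part of it, and returns NXT_HTTP_CHUNK_MIDDLE,
 * NXT_HTTP_CHUNK_END_ON_BORDER or NXT_HTTP_CHUNK_END depending on whether
 * the chunk continues in the next buffer, ends exactly on the buffer
 * boundary, or ends inside the buffer.
 */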
static nxt_int_t
nxt_http_chunk_buffer(nxt_http_chunk_parse_t *hcp, nxt_buf_t ***tail,
    nxt_buf_t *in)
{
    u_char     *p;
    size_t     size;
    nxt_buf_t  *b;

    p = hcp->pos;
    size = in->mem.free - p;

    if (hcp->chunk_size >= size && in->retain == 0) {
        /*
         * Use the original buffer if its remaining data does not exceed
         * the chunk size and this is the first chunk in the buffer.
         */
        in->mem.pos = p;
        **tail = in;
        *tail = &in->next;

    } else {
        b = nxt_buf_mem_alloc(hcp->mem_pool, 0, 0);
        if (nxt_slow_path(b == NULL)) {
            return NXT_ERROR;
        }

        **tail = b;
        *tail = &b->next;

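        /*
         * The new buffer references the parent buffer's memory; the
         * parent's retain counter is incremented so that the parse loop
         * above does not queue the parent's completion handler.
         */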
        b->parent = in;
        in->retain++;
        b->mem.pos = p;
        b->mem.start = p;

        if (hcp->chunk_size < size) {
            p += hcp->chunk_size;
            hcp->pos = p;

            b->mem.free = p;
            b->mem.end = p;

            return NXT_HTTP_CHUNK_END;
        }

        b->mem.free = in->mem.free;
        b->mem.end = in->mem.free;
    }

    hcp->chunk_size -= size;

    if (hcp->chunk_size == 0) {
        return NXT_HTTP_CHUNK_END_ON_BORDER;
    }

    return NXT_HTTP_CHUNK_MIDDLE;
}