/src/dovecot/src/lib/buffer.c
Line | Count | Source |
1 | | /* Copyright (c) 2002-2018 Dovecot authors, see the included COPYING file */ |
2 | | |
3 | | /* @UNSAFE: whole file */ |
4 | | |
5 | | #include "lib.h" |
6 | | #include "safe-memset.h" |
7 | | #include "buffer.h" |
8 | | |
9 | | /* Disable our memcpy() safety wrapper. This file is very performance sensitive |
10 | | and it's been checked to work correctly with memcpy(). */ |
11 | | #undef memcpy |
12 | | |
/* Internal representation of buffer_t. The anonymous struct overlays the
   public "struct buffer" fields via the union, so the public data/used
   members alias r_buffer/used here and a buffer_t* can be converted with
   container_of(). */
struct real_buffer {
	union {
		struct buffer buf;
		struct {
			/* public: */
			const void *r_buffer;	/* read-only view of the data */
			size_t used;		/* bytes currently in use */
			/* private: */
			unsigned char *w_buffer; /* writable data; NULL for const-data buffers */
			/* dirty: high-water mark of bytes that may contain stale
			   (previously written) data past "used".
			   alloc: current allocation size.
			   writable_size: alloc-1 for allocated buffers, reserving
			   one byte for the str_c() NUL.
			   max_size: growth limit for dynamic buffers. */
			size_t dirty, alloc, writable_size, max_size;

			pool_t pool;	/* allocation pool; NULL for data-based buffers */

			bool alloced:1;	/* w_buffer was allocated from pool */
			bool dynamic:1;	/* buffer grows on demand */
		};
	};
};
/* Compile-time guarantee that real_buffer still fits inside the public
   buffer_t storage. */
typedef int buffer_check_sizes[COMPILE_ERROR_IF_TRUE(sizeof(struct real_buffer) > sizeof(buffer_t)) ?1:1];
32 | | |
33 | | static void buffer_alloc(struct real_buffer *buf, size_t size) |
34 | 904k | { |
35 | 904k | i_assert(buf->w_buffer == NULL || buf->alloced); |
36 | | |
37 | 904k | if (size == buf->alloc) |
38 | 0 | return; |
39 | | |
40 | 904k | i_assert(size > buf->alloc); |
41 | | |
42 | 904k | if (buf->w_buffer == NULL) |
43 | 897k | buf->w_buffer = p_malloc(buf->pool, size); |
44 | 6.62k | else |
45 | 6.62k | buf->w_buffer = p_realloc(buf->pool, buf->w_buffer, buf->alloc, size); |
46 | 904k | buf->alloc = size; |
47 | 904k | buf->writable_size = size-1; /* -1 for str_c() NUL */ |
48 | | |
49 | 904k | buf->r_buffer = buf->w_buffer; |
50 | 904k | buf->alloced = TRUE; |
51 | 904k | } |
52 | | |
/* Ensure a write of data_size bytes at offset pos is possible: panic if it
   would exceed max_size (or a non-dynamic buffer's fixed capacity), zero any
   stale used..dirty bytes the write would expose, grow dynamic buffers as
   needed, and finally advance buf->used to cover the write. */
static inline void
buffer_check_limits(struct real_buffer *buf, size_t pos, size_t data_size)
{
	size_t new_size;

	/* overflow-safe form of "pos + data_size > max_size" */
	if (unlikely(buf->max_size - pos < data_size))
		i_panic("Buffer write out of range (%zu + %zu)", pos, data_size);

	new_size = pos + data_size;

	if (new_size > buf->used && buf->used < buf->dirty) {
		/* clear used..dirty area */
		size_t max = I_MIN(I_MIN(buf->alloc, buf->dirty), new_size);

		memset(buf->w_buffer + buf->used, 0, max - buf->used);
	}

	/* Use buf->writable_size instead of buf->alloc to always keep +1 byte
	   available in case str_c() is called for this buffer. This is mainly
	   for cases where the buffer is allocated from data stack, and str_c()
	   is called in a separate stack frame. */
	if (new_size > buf->writable_size) {
		if (unlikely(!buf->dynamic)) {
			i_panic("Buffer full (%zu > %zu, pool %s)",
				pos + data_size, buf->alloc,
				buf->pool == NULL ? "<none>" :
				pool_get_name(buf->pool));
		}

		/* grow exponentially to amortize the reallocation cost */
		size_t new_alloc_size =
			pool_get_exp_grown_size(buf->pool, buf->alloc,
						new_size + 1);
		if (new_alloc_size > buf->max_size) {
			/* limit to max_size, but do include +1 for
			   str_c() NUL */
			new_alloc_size = buf->max_size + 1;
		}
		buffer_alloc(buf, new_alloc_size);
	}
#if 0
	else if (new_size > buf->used && buf->alloced &&
		 !buf->pool->alloconly_pool && !buf->pool->datastack_pool) {
		void *new_buf;

		/* buffer's size increased: move the buffer's memory elsewhere.
		   this should help catch bugs where old pointers are tried to
		   be used to access the buffer's memory */
		new_buf = p_malloc(buf->pool, buf->alloc);
		memcpy(new_buf, buf->w_buffer, buf->alloc);
		p_free(buf->pool, buf->w_buffer);

		buf->w_buffer = new_buf;
		buf->r_buffer = new_buf;
	}
#endif

	if (new_size > buf->used)
		buf->used = new_size;
	i_assert(buf->used <= buf->alloc);
	i_assert(buf->w_buffer != NULL);
}
114 | | |
115 | | static inline void |
116 | | buffer_check_append_limits(struct real_buffer *buf, size_t data_size) |
117 | 1.44M | { |
118 | | /* Fast path: See if data to be appended fits into allocated buffer. |
119 | | If it does, we don't even need to memset() the dirty buffer since |
120 | | it's going to be filled with the newly appended data. */ |
121 | 1.44M | if (buf->writable_size - buf->used < data_size) |
122 | 6.61k | buffer_check_limits(buf, buf->used, data_size); |
123 | 1.43M | else |
124 | 1.43M | buf->used += data_size; |
125 | 1.44M | } |
126 | | |
127 | | #undef buffer_create_from_data |
128 | | void buffer_create_from_data(buffer_t *buffer, void *data, size_t size) |
129 | 0 | { |
130 | 0 | struct real_buffer *buf; |
131 | |
|
132 | 0 | i_assert(sizeof(*buffer) >= sizeof(struct real_buffer)); |
133 | | |
134 | 0 | buf = container_of(buffer, struct real_buffer, buf); |
135 | 0 | i_zero(buf); |
136 | 0 | buf->alloc = buf->writable_size = buf->max_size = size; |
137 | 0 | buf->r_buffer = buf->w_buffer = data; |
138 | | /* clear the whole memory area. unnecessary usually, but if the |
139 | | buffer is used by e.g. str_c() it tries to access uninitialized |
140 | | memory */ |
141 | 0 | memset(data, 0, size); |
142 | 0 | } |
143 | | |
144 | | #undef buffer_create_from_const_data |
145 | | void buffer_create_from_const_data(buffer_t *buffer, |
146 | | const void *data, size_t size) |
147 | 0 | { |
148 | 0 | struct real_buffer *buf; |
149 | |
|
150 | 0 | i_assert(sizeof(*buffer) >= sizeof(struct real_buffer)); |
151 | | |
152 | 0 | buf = container_of(buffer, struct real_buffer, buf); |
153 | 0 | i_zero(buf); |
154 | |
|
155 | 0 | buf->used = buf->alloc = buf->writable_size = buf->max_size = size; |
156 | 0 | buf->r_buffer = data; |
157 | 0 | i_assert(buf->w_buffer == NULL); |
158 | 0 | } |
159 | | |
/* Create a dynamically growing buffer with no maximum size. */
buffer_t *buffer_create_dynamic(pool_t pool, size_t init_size)
{
	return buffer_create_dynamic_max(pool, init_size, SIZE_MAX);
}
164 | | |
165 | | buffer_t *buffer_create_dynamic_max(pool_t pool, size_t init_size, |
166 | | size_t max_size) |
167 | 897k | { |
168 | 897k | struct real_buffer *buf; |
169 | | |
170 | | #ifdef DEBUG |
171 | | /* we increment this by 1 later on, so if it's SIZE_MAX |
172 | | it turns into 0 and hides a potential bug. |
173 | | |
174 | | Too scary to use in production for now, though. This |
175 | | can change in future. */ |
176 | | i_assert(init_size < SIZE_MAX); |
177 | | #endif |
178 | | |
179 | 897k | buf = p_new(pool, struct real_buffer, 1); |
180 | 897k | buf->pool = pool; |
181 | 897k | buf->dynamic = TRUE; |
182 | 897k | buf->max_size = max_size; |
183 | | /* buffer_alloc() reserves +1 for str_c() NIL, so add +1 here to |
184 | | init_size so we can actually write that much to the buffer without |
185 | | realloc */ |
186 | 897k | buffer_alloc(buf, init_size+1); |
187 | 897k | return &buf->buf; |
188 | 897k | } |
189 | | |
190 | | void buffer_free(buffer_t **_buf) |
191 | 274k | { |
192 | 274k | if (*_buf == NULL) |
193 | 127k | return; |
194 | 146k | struct real_buffer *buf = container_of(*_buf, struct real_buffer, buf); |
195 | | |
196 | 146k | *_buf = NULL; |
197 | 146k | if (buf->alloced) |
198 | 146k | p_free(buf->pool, buf->w_buffer); |
199 | 146k | if (buf->pool != NULL) |
200 | 146k | p_free(buf->pool, buf); |
201 | 146k | } |
202 | | |
203 | | void *buffer_free_without_data(buffer_t **_buf) |
204 | 0 | { |
205 | 0 | struct real_buffer *buf = container_of(*_buf, struct real_buffer, buf); |
206 | 0 | void *data; |
207 | |
|
208 | 0 | *_buf = NULL; |
209 | |
|
210 | 0 | data = buf->w_buffer; |
211 | 0 | p_free(buf->pool, buf); |
212 | 0 | return data; |
213 | 0 | } |
214 | | |
/* Return the pool this buffer allocates from (NULL for buffers created
   over caller-provided data). */
pool_t buffer_get_pool(const buffer_t *_buf)
{
	const struct real_buffer *buf =
		container_of(_buf, const struct real_buffer, buf);

	return buf->pool;
}
222 | | |
223 | | void buffer_write(buffer_t *_buf, size_t pos, |
224 | | const void *data, size_t data_size) |
225 | 18.5k | { |
226 | 18.5k | struct real_buffer *buf = container_of(_buf, struct real_buffer, buf); |
227 | | |
228 | 18.5k | buffer_check_limits(buf, pos, data_size); |
229 | 18.5k | if (data_size > 0) |
230 | 18.5k | memcpy(buf->w_buffer + pos, data, data_size); |
231 | 18.5k | } |
232 | | |
233 | | void buffer_append(buffer_t *_buf, const void *data, size_t data_size) |
234 | 1.19M | { |
235 | 1.19M | struct real_buffer *buf = container_of(_buf, struct real_buffer, buf); |
236 | | |
237 | 1.19M | if (data_size > 0) { |
238 | 1.14M | size_t pos = buf->used; |
239 | 1.14M | buffer_check_append_limits(buf, data_size); |
240 | 1.14M | memcpy(buf->w_buffer + pos, data, data_size); |
241 | 1.14M | } |
242 | 1.19M | } |
243 | | |
244 | | void buffer_append_c(buffer_t *_buf, unsigned char chr) |
245 | 302k | { |
246 | 302k | struct real_buffer *buf = container_of(_buf, struct real_buffer, buf); |
247 | 302k | size_t pos = buf->used; |
248 | | |
249 | 302k | buffer_check_append_limits(buf, 1); |
250 | 302k | buf->w_buffer[pos] = chr; |
251 | 302k | } |
252 | | |
253 | | void buffer_insert(buffer_t *_buf, size_t pos, |
254 | | const void *data, size_t data_size) |
255 | 22.2k | { |
256 | 22.2k | struct real_buffer *buf = container_of(_buf, struct real_buffer, buf); |
257 | | |
258 | 22.2k | if (pos >= buf->used) |
259 | 12.3k | buffer_write(_buf, pos, data, data_size); |
260 | 9.89k | else if (data_size > 0) { |
261 | 9.89k | buffer_copy(_buf, pos + data_size, _buf, pos, SIZE_MAX); |
262 | 9.89k | memcpy(buf->w_buffer + pos, data, data_size); |
263 | 9.89k | } |
264 | 22.2k | } |
265 | | |
266 | | void buffer_delete(buffer_t *_buf, size_t pos, size_t size) |
267 | 64.9k | { |
268 | 64.9k | struct real_buffer *buf = container_of(_buf, struct real_buffer, buf); |
269 | 64.9k | size_t end_size; |
270 | | |
271 | 64.9k | if (pos >= buf->used) |
272 | 0 | return; |
273 | 64.9k | end_size = buf->used - pos; |
274 | | |
275 | 64.9k | if (size < end_size) { |
276 | | /* delete from between */ |
277 | 0 | end_size -= size; |
278 | 0 | memmove(buf->w_buffer + pos, |
279 | 0 | buf->w_buffer + pos + size, end_size); |
280 | 64.9k | } else { |
281 | | /* delete the rest of the buffer */ |
282 | 64.9k | end_size = 0; |
283 | 64.9k | } |
284 | | |
285 | 64.9k | buffer_set_used_size(_buf, pos + end_size); |
286 | 64.9k | } |
287 | | |
/* Replace the bytes in [pos..pos+size) with data (data_size bytes), growing
   or shrinking the buffer as needed - effectively a combined delete+insert.
   Replacing past the current end behaves like buffer_write(). */
void buffer_replace(buffer_t *_buf, size_t pos, size_t size,
		    const void *data, size_t data_size)
{
	struct real_buffer *buf = container_of(_buf, struct real_buffer, buf);
	size_t end_size;

	if (pos >= buf->used) {
		buffer_write(_buf, pos, data, data_size);
		return;
	}
	/* bytes from pos to the current end */
	end_size = buf->used - pos;

	if (size < end_size) {
		/* the replaced range ends before the buffer's end;
		   end_size becomes the size of the preserved tail */
		end_size -= size;
		if (data_size == 0) {
			/* delete from between */
			memmove(buf->w_buffer + pos,
				buf->w_buffer + pos + size, end_size);
		} else {
			/* insert: shift the tail to start at pos+data_size,
			   then overwrite the gap with the new data */
			buffer_copy(_buf, pos + data_size, _buf, pos + size,
				    SIZE_MAX);
			memcpy(buf->w_buffer + pos, data, data_size);
		}
	} else {
		/* overwrite the end */
		end_size = 0;
		buffer_write(_buf, pos, data, data_size);
	}

	buffer_set_used_size(_buf, pos + data_size + end_size);
}
320 | | |
321 | | |
/* Write data_size zero bytes at pos, growing the buffer if needed. */
void buffer_write_zero(buffer_t *_buf, size_t pos, size_t data_size)
{
	struct real_buffer *buf = container_of(_buf, struct real_buffer, buf);

	buffer_check_limits(buf, pos, data_size);
	memset(buf->w_buffer + pos, 0, data_size);
}
329 | | |
/* Grow the buffer by data_size zero bytes. */
void buffer_append_zero(buffer_t *_buf, size_t data_size)
{
	struct real_buffer *buf = container_of(_buf, struct real_buffer, buf);

	/* NOTE: When appending it's enough to check that the limits are
	   valid, because the data is already guaranteed to be zero-filled. */
	buffer_check_limits(buf, buf->used, data_size);
}
338 | | |
339 | | void buffer_insert_zero(buffer_t *_buf, size_t pos, size_t data_size) |
340 | 0 | { |
341 | 0 | struct real_buffer *buf = container_of(_buf, struct real_buffer, buf); |
342 | |
|
343 | 0 | if (pos >= buf->used) |
344 | 0 | buffer_write_zero(_buf, pos, data_size); |
345 | 0 | else { |
346 | 0 | buffer_copy(_buf, pos + data_size, _buf, pos, SIZE_MAX); |
347 | 0 | memset(buf->w_buffer + pos, 0, data_size); |
348 | 0 | } |
349 | 0 | } |
350 | | |
351 | | void buffer_copy(buffer_t *_dest, size_t dest_pos, |
352 | | const buffer_t *_src, size_t src_pos, size_t copy_size) |
353 | 9.89k | { |
354 | 9.89k | struct real_buffer *dest = container_of(_dest, struct real_buffer, buf); |
355 | 9.89k | const struct real_buffer *src = |
356 | 9.89k | container_of(_src, const struct real_buffer, buf); |
357 | 9.89k | size_t max_size; |
358 | | |
359 | 9.89k | i_assert(src_pos <= src->used); |
360 | | |
361 | 9.89k | max_size = src->used - src_pos; |
362 | 9.89k | if (copy_size > max_size) |
363 | 9.89k | copy_size = max_size; |
364 | | |
365 | 9.89k | buffer_check_limits(dest, dest_pos, copy_size); |
366 | 9.89k | i_assert(src->r_buffer != NULL); |
367 | | |
368 | 9.89k | if (src == dest) { |
369 | 9.89k | memmove(dest->w_buffer + dest_pos, |
370 | 9.89k | CONST_PTR_OFFSET(src->r_buffer, src_pos), copy_size); |
371 | 9.89k | } else { |
372 | 0 | memcpy(dest->w_buffer + dest_pos, |
373 | 0 | CONST_PTR_OFFSET(src->r_buffer, src_pos), copy_size); |
374 | 0 | } |
375 | 9.89k | } |
376 | | |
/* Append copy_size bytes from src (starting at src_pos) to the end of dest.
   copy_size is clamped to the data available in src. */
void buffer_append_buf(buffer_t *dest, const buffer_t *src,
		       size_t src_pos, size_t copy_size)
{
	buffer_copy(dest, dest->used, src, src_pos, copy_size);
}
382 | | |
/* Make sure `size` bytes at `pos` are writable and return a pointer to that
   position. "Unsafe": the returned pointer is invalidated by any later call
   that may reallocate the buffer. */
void *buffer_get_space_unsafe(buffer_t *_buf, size_t pos, size_t size)
{
	struct real_buffer *buf = container_of(_buf, struct real_buffer, buf);

	buffer_check_limits(buf, pos, size);
	return buf->w_buffer + pos;
}
390 | | |
/* Append `size` bytes of space and return a writable pointer to it.
   Same invalidation caveat as buffer_get_space_unsafe(). */
void *buffer_append_space_unsafe(buffer_t *buf, size_t size)
{
	/* NOTE: can't use buffer_check_append_limits() here because it doesn't
	   guarantee that the buffer is zero-filled. */
	return buffer_get_space_unsafe(buf, buf->used, size);
}
397 | | |
/* Return a writable pointer to the buffer's data, optionally storing the
   used size in *used_size_r. May return NULL only for empty buffers without
   writable backing memory (e.g. const-data buffers). */
void *buffer_get_modifiable_data(const buffer_t *_buf, size_t *used_size_r)
{
	const struct real_buffer *buf =
		container_of(_buf, const struct real_buffer, buf);

	if (used_size_r != NULL)
		*used_size_r = buf->used;
	/* a non-empty buffer must have writable memory */
	i_assert(buf->used == 0 || buf->w_buffer != NULL);
	return buf->w_buffer;
}
408 | | |
409 | | void buffer_set_used_size(buffer_t *_buf, size_t used_size) |
410 | 119k | { |
411 | 119k | struct real_buffer *buf = container_of(_buf, struct real_buffer, buf); |
412 | | |
413 | 119k | i_assert(used_size <= buf->alloc); |
414 | | |
415 | 119k | if (buf->used > buf->dirty) |
416 | 47.9k | buf->dirty = buf->used; |
417 | | |
418 | 119k | buf->used = used_size; |
419 | 119k | } |
420 | | |
/* Clear the buffer and securely wipe everything that was ever written
   (both used and dirty bytes) with safe_memset(), which cannot be
   optimized away. */
void buffer_clear_safe(buffer_t *_buf)
{
	struct real_buffer *buf = container_of(_buf, struct real_buffer, buf);

	/* Can be NULL with const data based buffers */
	if (buf->w_buffer != NULL)
		safe_memset(buf->w_buffer, 0, I_MAX(buf->used, buf->dirty));
	buffer_clear(_buf);
}
430 | | |
/* Return the buffer's current allocation size in bytes. */
size_t buffer_get_size(const buffer_t *_buf)
{
	const struct real_buffer *buf =
		container_of(_buf, const struct real_buffer, buf);

	return buf->alloc;
}
438 | | |
/* Return how many bytes can be written without triggering a reallocation. */
size_t buffer_get_writable_size(const buffer_t *_buf)
{
	const struct real_buffer *buf =
		container_of(_buf, const struct real_buffer, buf);

	/* Use buf->writable_size instead of buf->alloc to reserve +1 for
	   str_c() NUL in buffer_check_limits(). Otherwise the caller might
	   increase the buffer's alloc size unnecessarily when it just wants
	   to access the entire buffer. */
	return buf->writable_size;
}
450 | | |
451 | | size_t buffer_get_avail_size(const buffer_t *_buf) |
452 | 12.3k | { |
453 | 12.3k | const struct real_buffer *buf = |
454 | 12.3k | container_of(_buf, const struct real_buffer, buf); |
455 | | |
456 | 12.3k | i_assert(buf->alloc >= buf->used); |
457 | 12.3k | return ((buf->dynamic ? SIZE_MAX : buf->alloc) - buf->used); |
458 | 12.3k | } |
459 | | |
460 | | bool buffer_cmp(const buffer_t *buf1, const buffer_t *buf2) |
461 | 0 | { |
462 | 0 | if (buf1->used != buf2->used) |
463 | 0 | return FALSE; |
464 | 0 | if (buf1->used == 0) |
465 | 0 | return TRUE; |
466 | | |
467 | 0 | return memcmp(buf1->data, buf2->data, buf1->used) == 0; |
468 | 0 | } |
469 | | |
/* Debug helper for data stack based buffers: a same-size p_realloc() makes
   the data stack code verify the buffer belongs to the current stack frame
   without actually moving any memory. */
void buffer_verify_pool(buffer_t *_buf)
{
	const struct real_buffer *buf =
		container_of(_buf, struct real_buffer, buf);
	void *ret;

	if (buf->pool != NULL && buf->pool->datastack_pool && buf->alloc > 0) {
		/* this doesn't really do anything except verify the
		   stack frame */
		ret = p_realloc(buf->pool, buf->w_buffer,
				buf->alloc, buf->alloc);
		i_assert(ret == buf->w_buffer);
	}
}
484 | | |
/* Truncate the buffer to `bits` bits, right-shifting the content so the
   leading (most significant) bits of the original data are kept. No-op if
   the buffer already holds fewer than `bits` bits. */
void ATTR_NO_SANITIZE_IMPLICIT_CONVERSION
ATTR_NO_SANITIZE_INTEGER
buffer_truncate_rshift_bits(buffer_t *buf, size_t bits)
{
	/* no-op if it's shorter than bits in any case.. */
	if (buf->used * 8 < bits) return;

	if (bits > 0) {
		/* truncate it to closest byte boundary */
		size_t bytes = ((bits + 7) & ~(size_t)7) / 8;
		/* remaining bits */
		bits = bits % 8;
		buffer_set_used_size(buf, I_MIN(bytes, buf->used));
		/* bytes now holds the truncated used size */
		unsigned char *ptr = buffer_get_modifiable_data(buf, &bytes);
		/* right shift over byte array: each byte keeps its high
		   `bits` bits in the low position and pulls the low bits of
		   the previous byte in from the left */
		if (bits > 0) {
			for(size_t i=bytes-1;i>0;i--)
				ptr[i] = (ptr[i]>>(8-bits)) +
					((ptr[i-1]&(0xff>>(bits)))<<bits);
			ptr[0] = ptr[0]>>(8-bits);
		}
	} else {
		buffer_set_used_size(buf, 0);
	}
}
497 | 0 | buffer_set_used_size(buf, I_MIN(bytes, buf->used)); |
498 | 0 | unsigned char *ptr = buffer_get_modifiable_data(buf, &bytes); |
499 | | /* right shift over byte array */ |
500 | 0 | if (bits > 0) { |
501 | 0 | for(size_t i=bytes-1;i>0;i--) |
502 | 0 | ptr[i] = (ptr[i]>>(8-bits)) + |
503 | 0 | ((ptr[i-1]&(0xff>>(bits)))<<bits); |
504 | 0 | ptr[0] = ptr[0]>>(8-bits); |
505 | 0 | } |
506 | 0 | } else { |
507 | 0 | buffer_set_used_size(buf, 0); |
508 | 0 | } |
509 | 0 | } |