/src/haproxy/src/stats-file.c
Line | Count | Source |
1 | | #include <haproxy/stats-file.h> |
2 | | |
3 | | #include <errno.h> |
4 | | #include <fcntl.h> |
5 | | #include <stdio.h> |
6 | | #include <stdlib.h> |
7 | | #include <string.h> |
8 | | #include <sys/mman.h> |
9 | | #include <sys/stat.h> |
10 | | |
11 | | #include <import/ebmbtree.h> |
12 | | #include <import/ebsttree.h> |
13 | | #include <import/ist.h> |
14 | | #include <haproxy/api.h> |
15 | | #include <haproxy/atomic.h> |
16 | | #include <haproxy/buf.h> |
17 | | #include <haproxy/chunk.h> |
18 | | #include <haproxy/clock.h> |
19 | | #include <haproxy/errors.h> |
20 | | #include <haproxy/global.h> |
21 | | #include <haproxy/guid.h> |
22 | | #include <haproxy/intops.h> |
23 | | #include <haproxy/list.h> |
24 | | #include <haproxy/listener-t.h> |
25 | | #include <haproxy/obj_type.h> |
26 | | #include <haproxy/proxy-t.h> |
27 | | #include <haproxy/server-t.h> |
28 | | #include <haproxy/stats.h> |
29 | | #include <haproxy/task.h> |
30 | | #include <haproxy/time.h> |
31 | | #include <haproxy/tools.h> |
32 | | |
33 | | struct shm_stats_file_hdr *shm_stats_file_hdr = NULL; |
34 | | static int shm_stats_file_fd = -1; |
35 | | int shm_stats_file_slot = -1; |
36 | | int shm_stats_file_max_objects = -1; |
37 | | |
38 | | /* Dump all fields from <stats> into <out> for stats-file. */ |
39 | | int stats_dump_fields_file(struct buffer *out, |
40 | | const struct field *line, size_t stats_count, |
41 | | struct show_stat_ctx *ctx) |
42 | 0 | { |
43 | 0 | struct guid_node *guid; |
44 | 0 | struct listener *l; |
45 | 0 | int i; |
46 | |
|
47 | 0 | switch (ctx->px_st) { |
48 | 0 | case STAT_PX_ST_FE: |
49 | 0 | case STAT_PX_ST_BE: |
50 | 0 | guid = &__objt_proxy(ctx->obj1)->guid; |
51 | 0 | break; |
52 | | |
53 | 0 | case STAT_PX_ST_LI: |
54 | 0 | l = LIST_ELEM(ctx->obj2, struct listener *, by_fe); |
55 | 0 | guid = &l->guid; |
56 | 0 | break; |
57 | | |
58 | 0 | case STAT_PX_ST_SV: |
59 | 0 | guid = &__objt_server(ctx->obj2)->guid; |
60 | 0 | break; |
61 | | |
62 | 0 | default: |
63 | 0 | ABORT_NOW(); |
64 | 0 | return 1; |
65 | 0 | } |
66 | | |
67 | | /* Skip objects without GUID. */ |
68 | 0 | if (!guid->key) |
69 | 0 | return 1; |
70 | | |
71 | 0 | chunk_appendf(out, "%s,", (char *)guid->key); |
72 | |
|
73 | 0 | for (i = 0; i < stats_count; ++i) { |
74 | | /* Empty field for stats-file is used to skip its output, |
75 | | * including any separator. |
76 | | */ |
77 | 0 | if (field_format(line, i) == FF_EMPTY) |
78 | 0 | continue; |
79 | | |
80 | 0 | if (!stats_emit_raw_data_field(out, &line[i])) |
81 | 0 | return 0; |
82 | 0 | if (!chunk_strcat(out, ",")) |
83 | 0 | return 0; |
84 | 0 | } |
85 | | |
86 | 0 | chunk_strcat(out, "\n"); |
87 | 0 | return 1; |
88 | 0 | } |
89 | | |
90 | | void stats_dump_file_header(int type, struct buffer *out) |
91 | 0 | { |
92 | 0 | const struct stat_col *col; |
93 | 0 | int i; |
94 | | |
95 | | /* Caller must specified ither FE or BE. */ |
96 | 0 | BUG_ON(!(type & ((1 << STATS_TYPE_FE) | (1 << STATS_TYPE_BE)))); |
97 | |
|
98 | 0 | if (type & (1 << STATS_TYPE_FE)) { |
99 | 0 | chunk_strcat(out, "#fe guid,"); |
100 | 0 | for (i = 0; i < ST_I_PX_MAX; ++i) { |
101 | 0 | col = &stat_cols_px[i]; |
102 | 0 | if (stcol_is_generic(col) && |
103 | 0 | col->cap & (STATS_PX_CAP_FE|STATS_PX_CAP_LI)) { |
104 | 0 | chunk_appendf(out, "%s,", col->name); |
105 | 0 | } |
106 | 0 | } |
107 | 0 | } |
108 | 0 | else { |
109 | 0 | chunk_appendf(out, "#be guid,"); |
110 | 0 | for (i = 0; i < ST_I_PX_MAX; ++i) { |
111 | 0 | col = &stat_cols_px[i]; |
112 | 0 | if (stcol_is_generic(col) && |
113 | 0 | col->cap & (STATS_PX_CAP_BE|STATS_PX_CAP_SRV)) { |
114 | 0 | chunk_appendf(out, "%s,", col->name); |
115 | 0 | } |
116 | 0 | } |
117 | 0 | } |
118 | |
|
119 | 0 | chunk_strcat(out, "\n"); |
120 | 0 | } |
121 | | |
/* Parse an identified header line <header> starting with '#' character.
 *
 * If the section is recognized, <domain> will point to the current stats-file
 * scope. <cols> will be filled as a matrix to identify each stat_col position
 * using <st_tree> as prefilled proxy stats columns. If stats-file section is
 * unknown, only <domain> will be set to STFILE_DOMAIN_UNSET.
 *
 * Returns 0 on success. On fatal error, non-zero is returned and parsing should
 * be interrupted.
 */
static int parse_header_line(struct ist header, struct eb_root *st_tree,
                             enum stfile_domain *domain,
                             const struct stat_col *cols[])
{
	enum stfile_domain dom = STFILE_DOMAIN_UNSET;
	struct ist token;
	char last;
	int i;

	/* caller (apply_stats_file) only passes non-empty lines starting with
	 * '#', so reading the last character below is safe
	 */
	header = iststrip(header);
	last = istptr(header)[istlen(header) - 1];
	token = istsplit(&header, ' ');

	/* A header line is considered valid if:
	 * - a space delimiter is found and first token is several chars
	 * - last line character must be a comma separator
	 */
	if (!istlen(header) || istlen(token) == 1 || last != ',')
		goto err;

	if (isteq(token, ist("#fe")))
		dom = STFILE_DOMAIN_PX_FE;
	else if (isteq(token, ist("#be")))
		dom = STFILE_DOMAIN_PX_BE;

	/* Remove 'guid' field. */
	token = istsplit(&header, ',');
	if (!isteq(token, ist("guid"))) {
		/* Fatal error if FE/BE domain without guid token. */
		if (dom == STFILE_DOMAIN_PX_FE || dom == STFILE_DOMAIN_PX_BE)
			goto err;
	}

	/* Unknown domain. Following lines should be ignored until next header. */
	if (dom == STFILE_DOMAIN_UNSET)
		return 0;

	/* Generate matrix of stats column into cols[]. Unrecognized or
	 * mismatched columns leave a NULL hole so positions stay aligned
	 * with the header fields.
	 */
	memset(cols, 0, sizeof(void *) * STAT_FILE_MAX_COL_COUNT);

	i = 0;
	while (istlen(header) && i < STAT_FILE_MAX_COL_COUNT) {
		struct stcol_node *col_node;
		const struct stat_col *col;
		struct ebmb_node *node;

		/* Lookup column by its name into <st_tree>. */
		token = istsplit(&header, ',');
		node = ebst_lookup(st_tree, ist0(token));
		if (!node) {
			++i;
			continue;
		}

		col_node = ebmb_entry(node, struct stcol_node, name);
		col = col_node->col;

		/* Ignore column if its cap is not valid with current stats-file section. */
		if ((dom == STFILE_DOMAIN_PX_FE &&
		     !(col->cap & (STATS_PX_CAP_FE|STATS_PX_CAP_LI))) ||
		    (dom == STFILE_DOMAIN_PX_BE &&
		     !(col->cap & (STATS_PX_CAP_BE|STATS_PX_CAP_SRV)))) {
			++i;
			continue;
		}

		cols[i] = col;
		++i;
	}

	*domain = dom;
	return 0;

 err:
	*domain = STFILE_DOMAIN_UNSET;
	return 1;
}
209 | | |
/* Preload an individual counter instance stored at <counter> with <token>
 * value for the <col> stat column.
 *
 * Returns 0 on success else non-zero if counter was not updated.
 */
static int load_ctr(const struct stat_col *col, const struct ist token,
                    void* counter)
{
	const enum field_nature fn = stcol_nature(col);
	const enum field_format ff = stcol_format(col);
	const char *ptr = istptr(token);
	struct field value;

	/* First decode the raw numeric value according to the column format. */
	switch (ff) {
	case FF_U64:
		value.u.u64 = read_uint64(&ptr, istend(token));
		break;

	case FF_S32:
	case FF_U32:
		value.u.u32 = read_uint(&ptr, istend(token));
		break;

	default:
		/* Unsupported field format. */
		return 1;
	}

	/* Do not load value if non numeric characters present. */
	if (ptr != istend(token))
		return 1;

	/* Then store it according to the nature/format combination:
	 * plain counters are copied, rates seed a freq counter, and ages
	 * are converted back to an absolute timestamp relative to now.
	 */
	if (fn == FN_COUNTER && ff == FF_U64) {
		*(uint64_t *)counter = value.u.u64;
	}
	else if (fn == FN_RATE && ff == FF_U32) {
		preload_freq_ctr(counter, value.u.u32);
	}
	else if (fn == FN_AGE && (ff == FF_U32 || ff == FF_S32)) {
		*(uint32_t *)counter = ns_to_sec(now_ns) - value.u.u32;
	}
	else {
		/* Unsupported field format/nature combination. */
		return 1;
	}

	return 0;
}
258 | | |
/* Parse a non header stats-file line <line>. Specify current parsing <domain>
 * and <cols> stats column matrix derived from the last header line.
 *
 * The line's GUID is resolved to a live object; each following field is then
 * loaded at the matching column offset, either in the object's shared
 * (per-thread-group) counters or its private counters.
 *
 * Returns 0 on success else non-zero.
 */
static int parse_stat_line(struct ist line,
                           enum stfile_domain domain,
                           const struct stat_col *cols[])
{
	struct guid_node *node;
	struct listener *li;
	struct server *srv;
	struct proxy *px;
	struct ist token;
	char *base_off, *base_off_shared;
	char *guid;
	int i, off;

	token = istsplit(&line, ',');
	guid = ist0(token);
	if (!guid_is_valid_fmt(guid, NULL))
		goto err;

	node = guid_lookup(guid);
	if (!node) {
		/* Silently ignore unknown GUID. */
		return 0;
	}

	/* Compute <base_off>/<base_off_shared> (counter base addresses) and
	 * <off> (index into each column's metric.offset[] pair: 0=frontend
	 * side, 1=backend side) for the object type behind the GUID.
	 */
	switch (obj_type(node->obj_type)) {
	case OBJ_TYPE_PROXY:
		px = __objt_proxy(node->obj_type);

		if (domain == STFILE_DOMAIN_PX_FE) {
			if (!(px->cap & PR_CAP_FE))
				return 0; /* silently ignored fe/be mismatch */

			if (!px->fe_counters.shared.tg)
				return 0;

			base_off_shared = (char *)px->fe_counters.shared.tg[0];
			if (!base_off_shared)
				return 0; // not allocated

			base_off = (char *)&px->fe_counters;

			off = 0;
		}
		else if (domain == STFILE_DOMAIN_PX_BE) {
			if (!(px->cap & PR_CAP_BE))
				return 0; /* silently ignored fe/be mismatch */

			if (!px->be_counters.shared.tg)
				return 0;

			base_off_shared = (char *)px->be_counters.shared.tg[0];
			if (!base_off_shared)
				return 0; // not allocated

			base_off = (char *)&px->be_counters;

			off = 1;
		}
		else {
			goto err;
		}

		break;

	case OBJ_TYPE_LISTENER:
		if (domain != STFILE_DOMAIN_PX_FE)
			goto err;

		li = __objt_listener(node->obj_type);
		/* Listeners counters are not allocated if 'option socket-stats' unset. */
		if (!li->counters)
			return 0;

		if (!li->counters->shared.tg)
			return 0;

		base_off_shared = (char *)li->counters->shared.tg[0];
		if (!base_off_shared)
			return 0; // not allocated

		base_off = (char *)li->counters;

		off = 0;
		break;

	case OBJ_TYPE_SERVER:
		if (domain != STFILE_DOMAIN_PX_BE)
			goto err;

		srv = __objt_server(node->obj_type);
		if (!srv->counters.shared.tg)
			return 0;

		base_off_shared = (char *)srv->counters.shared.tg[0];
		if (!base_off_shared)
			return 0; // not allocated

		base_off = (char *)&srv->counters;

		off = 1;
		break;

	default:
		goto err;
	}

	/* Walk the remaining comma-separated values, loading each one into
	 * the counter slot described by the matching <cols> entry. Empty
	 * tokens and NULL columns (unknown header fields) are skipped.
	 */
	i = 0;
	while (istlen(line) && i < STAT_FILE_MAX_COL_COUNT) {
		const struct stat_col *col = cols[i++];

		token = istsplit(&line, ',');
		if (!istlen(token))
			continue;

		if (!col)
			continue;

		if (col->flags & STAT_COL_FL_SHARED)
			load_ctr(col, token, base_off_shared + col->metric.offset[off]);
		else
			load_ctr(col, token, base_off + col->metric.offset[off]);
	}

	return 0;

 err:
	return 1;
}
392 | | |
/* Parse a stats-file and preload haproxy internal counters.
 *
 * The file path comes from global.stats_file; errors are reported as
 * warnings and never abort startup.
 */
void apply_stats_file(void)
{
	const struct stat_col *cols[STAT_FILE_MAX_COL_COUNT];
	struct eb_root st_tree = EB_ROOT;
	enum stfile_domain domain;
	int valid_format = 0;
	FILE *file;
	struct ist istline;
	char *line = NULL;
	int linenum;

	if (!global.stats_file)
		return;

	file = fopen(global.stats_file, "r");
	if (!file) {
		ha_warning("config: Can't load stats-file '%s': cannot open file.\n", global.stats_file);
		return;
	}

	/* Generate stat columns map indexed by name. */
	if (generate_stat_tree(&st_tree, stat_cols_px)) {
		ha_warning("config: Can't load stats-file '%s': not enough memory.\n", global.stats_file);
		goto out;
	}

	line = malloc(sizeof(char) * LINESIZE);
	if (!line) {
		ha_warning("config: Can't load stats-file '%s': line alloc error.\n", global.stats_file);
		goto out;
	}

	linenum = 0;
	domain = STFILE_DOMAIN_UNSET;
	while (1) {
		if (!fgets(line, LINESIZE, file))
			break;

		++linenum;
		istline = iststrip(ist(line));
		if (!istlen(istline))
			continue;

		/* comment line starts by // */
		if (istmatch(istline, ist("//")) != 0)
			continue;

		if (*istptr(istline) == '#') {
			/* a bad header is fatal only when no valid header was
			 * seen yet; afterwards it is just skipped
			 */
			if (parse_header_line(istline, &st_tree, &domain, cols)) {
				if (!valid_format) {
					ha_warning("config: Invalid stats-file format in file '%s'.\n", global.stats_file);
					break;
				}

				ha_warning("config: Ignored stats-file header line '%d' in file '%s'.\n", linenum, global.stats_file);
			}

			valid_format = 1;
		}
		else if (domain != STFILE_DOMAIN_UNSET) {
			if (parse_stat_line(istline, domain, cols))
				ha_warning("config: Ignored stats-file line %d in file '%s'.\n", linenum, global.stats_file);
		}
		else {
			/* Stop parsing if first line is not a valid header.
			 * Allows to immediately stop reading garbage file.
			 */
			if (!valid_format) {
				ha_warning("config: Invalid stats-file format in file '%s'.\n", global.stats_file);
				break;
			}
		}
	}

 out:
	/* release the column name tree built by generate_stat_tree() */
	while (!eb_is_empty(&st_tree)) {
		struct ebmb_node *node = ebmb_first(&st_tree);
		struct stcol_node *snode = ebmb_entry(node, struct stcol_node, name);

		ebmb_delete(node);
		ha_free(&snode);
	}

	ha_free(&line);
	fclose(file);
}
480 | | |
481 | | /* returns 1 if <hdr> shm version is compatible with current version |
482 | | * defined in stats-file-t.h or 0 if it is not compatible. |
483 | | */ |
484 | | static int shm_stats_file_check_ver(struct shm_stats_file_hdr *hdr) |
485 | 0 | { |
486 | | /* for now we don't even support minor version difference but this may |
487 | | * change later |
488 | | */ |
489 | 0 | if (hdr->version.major != SHM_STATS_FILE_VER_MAJOR || |
490 | 0 | hdr->version.minor != SHM_STATS_FILE_VER_MINOR) |
491 | 0 | return 0; |
492 | 0 | return 1; |
493 | 0 | } |
494 | | |
495 | | static inline int shm_hb_is_stale(uint hb) |
496 | 0 | { |
497 | 0 | return (hb == TICK_ETERNITY || tick_is_expired(hb, now_ms)); |
498 | 0 | } |
499 | | |
500 | | /* returns 1 if the slot <id> is free in <hdr>, else 0 |
501 | | */ |
502 | | static int shm_stats_file_slot_isfree(struct shm_stats_file_hdr *hdr, int id) |
503 | 0 | { |
504 | 0 | uint hb; |
505 | |
|
506 | 0 | hb = HA_ATOMIC_LOAD(&hdr->slots[id].heartbeat); |
507 | 0 | return shm_hb_is_stale(hb); |
508 | 0 | } |
509 | | |
/* returns free slot id on success or -1 if no more slots are available
 * on success, the free slot is already reserved for the process pid
 */
int shm_stats_file_get_free_slot(struct shm_stats_file_hdr *hdr)
{
	int it = 0;
	uint hb;

	while (it < sizeof(hdr->slots) / sizeof(hdr->slots[0])) {
		hb = HA_ATOMIC_LOAD(&hdr->slots[it].heartbeat);
		/* try to own a stale entry */
		while (shm_hb_is_stale(hb)) {
			int new_hb = tick_add(now_ms, MS_TO_TICKS(SHM_STATS_FILE_HEARTBEAT_TIMEOUT * 1000));

			/* CAS reserves the slot atomically against other
			 * processes racing for the same stale entry; on
			 * failure <hb> is refreshed with the current value
			 * and the staleness check is performed again
			 */
			if (HA_ATOMIC_CAS(&hdr->slots[it].heartbeat, &hb, new_hb)) {
				shm_stats_file_hdr->slots[it].pid = getpid();
				return it;
			}
			/* another process was faster than us */
			__ha_cpu_relax();
		}
		it += 1;
	}
	return -1;
}
535 | | |
536 | | /* since shm file was opened using O_APPEND flag, let's grow |
537 | | * the file by <bytes> in an atomic manner (O_APPEND offers such guarantee), |
538 | | * so that even if multiple processes try to grow the file simultaneously, |
539 | | * the file can only grow bigger and never shrink |
540 | | * |
541 | | * We do this way because ftruncate() between multiple processes |
542 | | * could result in the file being shrunk if one of the process |
543 | | * is not aware that the file was already expanded in the meantime |
544 | | * |
545 | | * Returns 1 on success and 0 on failure |
546 | | */ |
547 | | static int shm_file_grow(unsigned int bytes) |
548 | 0 | { |
549 | 0 | char buf[1024] = {0}; |
550 | 0 | ssize_t ret; |
551 | |
|
552 | 0 | while (bytes) { |
553 | 0 | ret = write(shm_stats_file_fd, buf, MIN(sizeof(buf), bytes)); |
554 | 0 | if (ret <= 0) |
555 | 0 | return 0; |
556 | 0 | bytes -= ret; |
557 | 0 | } |
558 | 0 | return 1; |
559 | 0 | } |
560 | | |
/* returns NULL if no free object or pointer to existing object if
 * object can be reused
 *
 * An object is reusable when it has no user bit set, or when every
 * process owning a user bit holds a stale slot (crashed or gone).
 */
static struct shm_stats_file_object *shm_stats_file_reuse_object(void)
{
	int it = 0;
	int objects;
	struct shm_stats_file_object *free_obj;

	BUG_ON(!shm_stats_file_hdr);
	objects = HA_ATOMIC_LOAD(&shm_stats_file_hdr->objects);
	if (!objects)
		return NULL;
	while (it < objects) {
		uint64_t users;
		int free = 0;

		free_obj = SHM_STATS_FILE_OBJECT(shm_stats_file_hdr, it);
		users = HA_ATOMIC_LOAD(&free_obj->users);
		if (!users)
			free = 1; // no doubt, no user using this object
		else {
			int slot = 0;

			/* if one or multiple users crashed or forgot to remove their bit
			 * from obj->users but aren't making use of it anymore, we can detect
			 * it by checking if the process related to "used" users slot are still
			 * effectively active
			 */
			free = 1; // consider all users are inactive for now

			while (slot < sizeof(shm_stats_file_hdr->slots) / sizeof(shm_stats_file_hdr->slots[0])) {
				if ((users & (1ULL << slot)) &&
				    !shm_stats_file_slot_isfree(shm_stats_file_hdr, slot)) {
					/* user still alive, so supposedly making use of it */
					free = 0;
					break;
				}
				slot++;
			}
		}
		if (free) {
			uint64_t nusers = (1ULL << shm_stats_file_slot);

			/* we use CAS here because we want to make sure that we are the only
			 * process who exclusively owns the object as we are about to reset it.
			 * In case of failure, we also don't expect our bit to be set, so
			 * CAS is the best fit here. First we set the obj's users bits to 0
			 * to make sure no other process will try to preload it (it may hold
			 * garbage content) as we are about to reset it with our data, then
			 * we do another CAS to confirm we are the owner of the object
			 */
			if (HA_ATOMIC_CAS(&free_obj->users, &users, 0)) {
				/* we set obj tgid to 0 so it can't be looked up in
				 * shm_stats_file_preload (tgid 0 is invalid)
				 */
				HA_ATOMIC_STORE(&free_obj->tgid, 0);

				/* now we finally try to acquire the object */
				users = 0;
				if (HA_ATOMIC_CAS(&free_obj->users, &users, nusers))
					return free_obj;
			}
			/* failed to CAS because of concurrent access, give up on this one */
		}
		it += 1;
	}
	return NULL;
}
630 | | |
631 | | /* returns pointer to new object in case of success and NULL in case |
632 | | * of failure (if adding the maximum number of objects is already |
633 | | * reached) |
634 | | * |
635 | | * <errmsg> will be set in case of failure to give more hints about the |
636 | | * error, it must be freed accordingly |
637 | | */ |
638 | | struct shm_stats_file_object *shm_stats_file_add_object(char **errmsg) |
639 | 0 | { |
640 | 0 | struct shm_stats_file_object *new_obj; |
641 | 0 | uint64_t expected_users; |
642 | 0 | int objects, objects_slots; |
643 | 0 | static uint last_failed_attempt = TICK_ETERNITY; |
644 | | |
645 | | /* if previous object reuse failed, don't try a new opportunistic |
646 | | * reuse immediately because chances are high the new reuse attempt |
647 | | * will also fail, and repeated failed reuse attempts could be costly |
648 | | * with large number of objects |
649 | | */ |
650 | 0 | if (last_failed_attempt != TICK_ETERNITY && |
651 | 0 | !tick_is_expired(last_failed_attempt + MS_TO_TICKS(50), now_ms)) |
652 | 0 | goto add; |
653 | | |
654 | 0 | new_obj = shm_stats_file_reuse_object(); |
655 | 0 | if (new_obj) { |
656 | 0 | last_failed_attempt = TICK_ETERNITY; |
657 | 0 | return new_obj; |
658 | 0 | } |
659 | 0 | else |
660 | 0 | last_failed_attempt = now_ms; |
661 | | |
662 | 0 | add: |
663 | 0 | objects = HA_ATOMIC_LOAD(&shm_stats_file_hdr->objects); |
664 | |
|
665 | 0 | if (objects >= shm_stats_file_max_objects) { |
666 | 0 | memprintf(errmsg, "Cannot add additional object to '%s' file, maximum number already reached (%d). " |
667 | 0 | "Adjust \"shm-stats-file-max-objects\" directive if needed.", |
668 | 0 | global.shm_stats_file, shm_stats_file_max_objects / global.nbtgroups); |
669 | 0 | return NULL; |
670 | 0 | } |
671 | | |
672 | 0 | objects_slots = HA_ATOMIC_LOAD(&shm_stats_file_hdr->objects_slots); |
673 | | /* we increase objects slots by following half power of two curve to |
674 | | * reduce waste while ensuring we don't grow the shm file (costly) |
675 | | * too often |
676 | | */ |
677 | 0 | if (objects + 1 > objects_slots) { |
678 | 0 | int nobjects_slots; |
679 | |
|
680 | 0 | if (objects_slots < 2) |
681 | 0 | nobjects_slots = objects_slots + 1; |
682 | 0 | else if ((objects_slots & (objects_slots - 1)) == 0) |
683 | 0 | nobjects_slots = objects_slots + objects_slots / 2; |
684 | 0 | else |
685 | 0 | nobjects_slots = (objects_slots & (objects_slots - 1)) * 2; |
686 | |
|
687 | 0 | if (shm_file_grow((nobjects_slots - objects_slots) * sizeof(struct shm_stats_file_object)) == 0) { |
688 | 0 | memprintf(errmsg, "Error when trying to increase shm stats file size for '%s': %s", |
689 | 0 | global.shm_stats_file, strerror(errno)); |
690 | 0 | return NULL; |
691 | 0 | } |
692 | 0 | HA_ATOMIC_ADD(&shm_stats_file_hdr->objects_slots, nobjects_slots - objects_slots); |
693 | 0 | } |
694 | | |
695 | | /* try to use this new slot */ |
696 | 0 | new_obj = SHM_STATS_FILE_OBJECT(shm_stats_file_hdr, objects); |
697 | 0 | memset(new_obj, 0, sizeof(*new_obj)); // ensure object is reset before using it |
698 | |
|
699 | 0 | if (HA_ATOMIC_FETCH_ADD(&shm_stats_file_hdr->objects, 1) != objects) { |
700 | | /* a concurrent shm_stats_file_add_object stole our slot, retry */ |
701 | 0 | __ha_cpu_relax(); |
702 | 0 | goto add; |
703 | 0 | } |
704 | | |
705 | 0 | expected_users = 0; |
706 | 0 | if (!HA_ATOMIC_CAS(&new_obj->users, &expected_users, (1ULL << shm_stats_file_slot))) { |
707 | | /* a parallel reuse stole us the object, retry */ |
708 | 0 | __ha_cpu_relax(); |
709 | 0 | goto add; |
710 | 0 | } |
711 | | |
712 | 0 | return new_obj; |
713 | 0 | }; |
714 | | |
/* Periodic task refreshing this process' heartbeat tick in its shm slot,
 * so other processes see the slot as alive. Self-cancels when the process
 * is stopping or when the heartbeat was allowed to expire (the slot may
 * have been reclaimed by another process in the meantime).
 */
static struct task *shm_stats_file_hb(struct task *task, void *context, unsigned int state)
{
	if (stopping)
		return NULL;

	/* only update the heartbeat if it hasn't expired. Else it means the slot could have
	 * been reused and it isn't safe to use anymore.
	 * If this happens, raise a warning and stop using it
	 */
	if (tick_is_expired(HA_ATOMIC_LOAD(&shm_stats_file_hdr->slots[shm_stats_file_slot].heartbeat), now_ms)) {
		ha_warning("shm_stats_file: heartbeat for the current process slot already expired, it is not safe to use it anymore\n");
		task->expire = TICK_ETERNITY;
		return task;
	}
	HA_ATOMIC_STORE(&shm_stats_file_hdr->slots[shm_stats_file_slot].heartbeat,
	                tick_add(now_ms, MS_TO_TICKS(SHM_STATS_FILE_HEARTBEAT_TIMEOUT * 1000)));
	task->expire = tick_add(now_ms, 1000); // next update in 1 sec

	return task;
}
735 | | |
736 | | /* loads shm_stats_file content and tries to associate existing objects from |
737 | | * the shared memory (if any) to objects defined in current haproxy config |
738 | | * based on GUIDs |
739 | | */ |
740 | | static void shm_stats_file_preload(void) |
741 | 0 | { |
742 | 0 | int it = 0; |
743 | 0 | int objects; |
744 | 0 | struct shm_stats_file_object *curr_obj; |
745 | |
|
746 | 0 | BUG_ON(!shm_stats_file_hdr); |
747 | 0 | objects = HA_ATOMIC_LOAD(&shm_stats_file_hdr->objects); |
748 | 0 | if (!objects) |
749 | 0 | return; // nothing to do |
750 | | |
751 | 0 | while (it < objects) { |
752 | 0 | struct guid_node *node; |
753 | 0 | uint64_t users; |
754 | 0 | uint16_t obj_tgid; |
755 | |
|
756 | 0 | curr_obj = SHM_STATS_FILE_OBJECT(shm_stats_file_hdr, it); |
757 | |
|
758 | 0 | users = HA_ATOMIC_FETCH_OR(&curr_obj->users, (1ULL << shm_stats_file_slot)); |
759 | | |
760 | | /* ignore object if not used by anyone: when a process properly deinits, |
761 | | * it removes its user bit from the object, thus an object without any |
762 | | * bit should be considered as empty object |
763 | | */ |
764 | 0 | if (!users) |
765 | 0 | goto release; |
766 | | |
767 | 0 | obj_tgid = HA_ATOMIC_LOAD(&curr_obj->tgid); |
768 | | |
769 | | /* ignore object if greater than our max tgid */ |
770 | 0 | if (obj_tgid <= global.nbtgroups && |
771 | 0 | (node = guid_lookup(curr_obj->guid))) { |
772 | 0 | switch (*node->obj_type) { |
773 | 0 | case OBJ_TYPE_LISTENER: |
774 | 0 | { |
775 | 0 | struct listener *li; |
776 | |
|
777 | 0 | BUG_ON(curr_obj->type != SHM_STATS_FILE_OBJECT_TYPE_FE); |
778 | 0 | li = __objt_listener(node->obj_type); |
779 | | // counters are optional for listeners |
780 | 0 | if (li->counters) { |
781 | 0 | if (!li->counters->shared.tg) |
782 | 0 | li->counters->shared.tg = calloc(global.nbtgroups, sizeof(*li->counters->shared.tg)); |
783 | 0 | if (li->counters->shared.tg == NULL) |
784 | 0 | goto release; |
785 | 0 | li->counters->shared.tg[obj_tgid - 1] = &curr_obj->data.fe; |
786 | 0 | } |
787 | 0 | break; |
788 | 0 | } |
789 | 0 | case OBJ_TYPE_SERVER: |
790 | 0 | { |
791 | 0 | struct server *sv; |
792 | |
|
793 | 0 | BUG_ON(curr_obj->type != SHM_STATS_FILE_OBJECT_TYPE_BE); |
794 | 0 | sv = __objt_server(node->obj_type); |
795 | 0 | if (!sv->counters.shared.tg) |
796 | 0 | sv->counters.shared.tg = calloc(global.nbtgroups, sizeof(*sv->counters.shared.tg)); |
797 | 0 | if (sv->counters.shared.tg == NULL) |
798 | 0 | goto release; |
799 | 0 | sv->counters.shared.tg[obj_tgid - 1] = &curr_obj->data.be; |
800 | 0 | break; |
801 | 0 | } |
802 | 0 | case OBJ_TYPE_PROXY: |
803 | 0 | { |
804 | 0 | struct proxy *px; |
805 | |
|
806 | 0 | px = __objt_proxy(node->obj_type); |
807 | 0 | if (curr_obj->type == SHM_STATS_FILE_OBJECT_TYPE_FE) { |
808 | 0 | if (!px->fe_counters.shared.tg) |
809 | 0 | px->fe_counters.shared.tg = calloc(global.nbtgroups, sizeof(*px->fe_counters.shared.tg)); |
810 | 0 | if (px->fe_counters.shared.tg == NULL) |
811 | 0 | goto release; |
812 | 0 | px->fe_counters.shared.tg[obj_tgid - 1] = &curr_obj->data.fe; |
813 | 0 | } else if (curr_obj->type == SHM_STATS_FILE_OBJECT_TYPE_BE) { |
814 | 0 | if (!px->be_counters.shared.tg) |
815 | 0 | px->be_counters.shared.tg = calloc(global.nbtgroups, sizeof(*px->be_counters.shared.tg)); |
816 | 0 | if (px->fe_counters.shared.tg == NULL) |
817 | 0 | goto release; |
818 | 0 | px->be_counters.shared.tg[obj_tgid - 1] = &curr_obj->data.be; |
819 | 0 | } else |
820 | 0 | goto release; // not supported |
821 | 0 | break; |
822 | 0 | } |
823 | 0 | default: |
824 | | /* not supported */ |
825 | 0 | goto release; |
826 | 0 | } |
827 | | /* success */ |
828 | 0 | goto next; |
829 | 0 | } |
830 | | |
831 | 0 | release: |
832 | | /* we don't use this object, remove ourselves from object's users */ |
833 | 0 | HA_ATOMIC_AND(&curr_obj->users, ~(1ULL << shm_stats_file_slot)); |
834 | 0 | next: |
835 | 0 | it += 1; |
836 | 0 | } |
837 | 0 | } |
838 | | |
/* prepare and initialize shm stats memory file as needed */
840 | | int shm_stats_file_prepare(void) |
841 | 0 | { |
842 | 0 | struct task *heartbeat_task; |
843 | 0 | volatile ullong *local_global_now_ns; |
844 | 0 | volatile uint *local_global_now_ms; |
845 | 0 | int first = 0; // process responsible for initializing the shm memory |
846 | 0 | int slot; |
847 | 0 | int objects; |
848 | |
|
849 | 0 | BUG_ON(sizeof(struct shm_stats_file_hdr) != 672, "shm_stats_file_hdr struct size changed, " |
850 | 0 | "it is part of the exported API: ensure all precautions were taken (ie: shm_stats_file " |
851 | 0 | "version change) before adjusting this"); |
852 | 0 | BUG_ON(sizeof(struct shm_stats_file_object) != 552, "shm_stats_file_object struct size changed, " |
853 | 0 | "it is part of the exported API: ensure all precautions were taken (ie: shm_stats_file " |
854 | 0 | "version change) before adjusting this"); |
855 | | |
856 | | /* do nothing if master process or shm_stats_file not configured */ |
857 | 0 | if (master || !global.shm_stats_file) |
858 | 0 | return ERR_NONE; |
859 | | |
860 | | /* compute final shm_stats_file_max_objects value */ |
861 | 0 | if (shm_stats_file_max_objects == -1) |
862 | 0 | shm_stats_file_max_objects = SHM_STATS_FILE_MAX_OBJECTS * global.nbtgroups; |
863 | 0 | else |
864 | 0 | shm_stats_file_max_objects = shm_stats_file_max_objects * global.nbtgroups; |
865 | |
|
866 | 0 | shm_stats_file_fd = open(global.shm_stats_file, O_RDWR | O_APPEND | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); |
867 | 0 | if (shm_stats_file_fd == -1) { |
868 | 0 | shm_stats_file_fd = open(global.shm_stats_file, O_RDWR | O_APPEND, S_IRUSR | S_IWUSR); |
869 | 0 | if (shm_stats_file_fd == -1) { |
870 | 0 | ha_alert("config: cannot open shm stats file '%s': %s\n", global.shm_stats_file, strerror(errno)); |
871 | 0 | return ERR_ALERT | ERR_FATAL; |
872 | 0 | } |
873 | 0 | } |
874 | 0 | else { |
875 | 0 | first = 1; |
876 | 0 | if (shm_file_grow(sizeof(*shm_stats_file_hdr)) == 0) { |
877 | 0 | ha_alert("config: unable to resize shm stats file '%s'\n", global.shm_stats_file); |
878 | 0 | return ERR_ALERT | ERR_FATAL; |
879 | 0 | } |
880 | 0 | } |
881 | | /* mmap maximum contiguous address space for expected objects even if the backing shm is |
882 | | * smaller: it will allow for on the fly shm resizing without having to remap |
883 | | */ |
884 | 0 | shm_stats_file_hdr = mmap(NULL, |
885 | 0 | SHM_STATS_FILE_MAPPING_SIZE(shm_stats_file_max_objects), |
886 | 0 | PROT_READ | PROT_WRITE, MAP_SHARED, shm_stats_file_fd, 0); |
887 | 0 | if (shm_stats_file_hdr == MAP_FAILED || shm_stats_file_hdr == NULL) { |
888 | 0 | ha_alert("config: failed to map shm stats file '%s'\n", global.shm_stats_file); |
889 | 0 | return ERR_ALERT | ERR_FATAL; |
890 | 0 | } |
891 | | |
892 | 0 | if (first) { |
893 | | /* let's init some members */ |
894 | 0 | memset(shm_stats_file_hdr, 0, sizeof(*shm_stats_file_hdr)); |
895 | 0 | shm_stats_file_hdr->version.major = SHM_STATS_FILE_VER_MAJOR; |
896 | 0 | shm_stats_file_hdr->version.minor = SHM_STATS_FILE_VER_MINOR; |
897 | | |
898 | | /* set global clock for the first time */ |
899 | 0 | shm_stats_file_hdr->global_now_ms = *global_now_ms; |
900 | 0 | shm_stats_file_hdr->global_now_ns = *global_now_ns; |
901 | 0 | } |
902 | 0 | else if (!shm_stats_file_check_ver(shm_stats_file_hdr)) |
903 | 0 | goto err_version; |
904 | | |
905 | | /* from now on use the shared global time, but save local global time |
906 | | * in case reverting is required |
907 | | */ |
908 | 0 | local_global_now_ms = global_now_ms; |
909 | 0 | local_global_now_ns = global_now_ns; |
910 | 0 | global_now_ms = &shm_stats_file_hdr->global_now_ms; |
911 | 0 | global_now_ns = &shm_stats_file_hdr->global_now_ns; |
912 | |
|
913 | 0 | if (!first) { |
914 | 0 | llong new_offset, adjt_offset; |
915 | | |
916 | | /* Given the clock from the shared map and our current clock which is considered |
917 | | * up-to-date, we can now compute the now_offset that we will be using instead |
918 | | * of the default one in order to make our clock consistent with the shared one |
919 | | * |
920 | | * First we remove the original offset from now_ns to get pure now_ns |
921 | | * then we compare now_ns with the shared clock, which gives us the |
922 | | * relative offset we should be using to make our monotonic clock |
923 | | * coincide with the shared one. |
924 | | */ |
925 | 0 | new_offset = HA_ATOMIC_LOAD(global_now_ns) - (now_ns - clock_get_now_offset()); |
926 | | |
927 | | /* set adjusted offset which corresponds to the corrected offset |
928 | | * relative to the new offset we calculated instead or the default |
929 | | * one |
930 | | */ |
931 | 0 | adjt_offset = -clock_get_now_offset() + new_offset; |
932 | | |
933 | | /* we now rely on global_now_* from the shm, so the boot |
934 | | * offset that was initially applied in clock_init_process_date() |
935 | | * is no longer relevant. So we fix it by applying the one from the |
936 | | * initial process instead |
937 | | */ |
938 | 0 | now_ns = now_ns + adjt_offset; |
939 | 0 | start_time_ns = start_time_ns + adjt_offset; |
940 | 0 | clock_set_now_offset(new_offset); |
941 | | |
942 | | /* ensure global_now_* is consistent before continuing */ |
943 | 0 | clock_update_global_date(); |
944 | 0 | } |
945 | | |
946 | | /* sync local and global clocks, so all clocks are consistent */ |
947 | 0 | clock_update_date(0, 1); |
948 | | |
949 | | /* check if the map is outdated and must be reset: |
950 | | * let's consider the map is outdated unless we find an occupied slot |
951 | | */ |
952 | 0 | check_outdated: |
953 | 0 | if (first) |
954 | 0 | goto skip_check_outdated; // not needed |
955 | 0 | first = 1; |
956 | 0 | slot = 0; |
957 | 0 | objects = HA_ATOMIC_LOAD(&shm_stats_file_hdr->objects); |
958 | 0 | while (slot < sizeof(shm_stats_file_hdr->slots) / sizeof(shm_stats_file_hdr->slots[0])) { |
959 | 0 | if (!shm_stats_file_slot_isfree(shm_stats_file_hdr, slot)) { |
960 | 0 | first = 0; |
961 | 0 | break; |
962 | 0 | } |
963 | 0 | slot += 1; |
964 | 0 | } |
965 | 0 | if (first) { |
966 | | /* no more slots occupied, let's reset the map but take some precautions |
967 | | * to ensure another reset doesn't occur in parallel |
968 | | */ |
969 | 0 | if (!HA_ATOMIC_CAS(&shm_stats_file_hdr->objects, &objects, 0)) { |
970 | 0 | __ha_cpu_relax(); |
971 | 0 | goto check_outdated; |
972 | 0 | } |
973 | 0 | } |
974 | | |
975 | 0 | skip_check_outdated: |
976 | | |
977 | | /* reserve our slot */ |
978 | 0 | slot = shm_stats_file_get_free_slot(shm_stats_file_hdr); |
979 | 0 | if (slot == -1) { |
980 | 0 | ha_warning("config: failed to get shm stats file slot for '%s', all slots are occupied\n", global.shm_stats_file); |
981 | | /* stop using shared clock since we withdraw from the shared memory, |
982 | | * simply update the local clock and switch to using it instead |
983 | | */ |
984 | 0 | *local_global_now_ms = HA_ATOMIC_LOAD(global_now_ms); |
985 | 0 | *local_global_now_ns = HA_ATOMIC_LOAD(global_now_ns); |
986 | | |
987 | | /* shared memory mapping no longer needed */ |
988 | 0 | munmap(shm_stats_file_hdr, sizeof(*shm_stats_file_hdr)); |
989 | 0 | shm_stats_file_hdr = NULL; |
990 | |
|
991 | 0 | global_now_ms = local_global_now_ms; |
992 | 0 | global_now_ns = local_global_now_ns; |
993 | 0 | return ERR_WARN; |
994 | 0 | } |
995 | | |
996 | 0 | shm_stats_file_slot = slot; |
997 | | |
998 | | /* start the task responsible for updating the heartbeat */ |
999 | 0 | heartbeat_task = task_new_anywhere(); |
1000 | 0 | if (!heartbeat_task) { |
1001 | 0 | ha_alert("config: failed to create the heartbeat task for shm stats file '%s'\n", global.shm_stats_file); |
1002 | 0 | return ERR_ALERT | ERR_FATAL; |
1003 | 0 | } |
1004 | 0 | heartbeat_task->process = shm_stats_file_hb; |
1005 | 0 | task_schedule(heartbeat_task, tick_add(now_ms, 1000)); |
1006 | | |
1007 | | /* try to preload existing objects in the shm (if any) */ |
1008 | 0 | shm_stats_file_preload(); |
1009 | |
|
1010 | 0 | end: |
1011 | 0 | return ERR_NONE; |
1012 | | |
1013 | 0 | err_version: |
1014 | 0 | ha_warning("config: incompatible map shm stats file version '%s'\n", global.shm_stats_file); |
1015 | 0 | return ERR_WARN; |
1016 | 0 | } |
1017 | | |
1018 | | static void cleanup_shm_stats_file(void) |
1019 | 0 | { |
1020 | 0 | if (shm_stats_file_hdr) { |
1021 | | /* mark the process slot we occupied as unused */ |
1022 | 0 | HA_ATOMIC_STORE(&shm_stats_file_hdr->slots[shm_stats_file_slot].heartbeat, TICK_ETERNITY); |
1023 | 0 | shm_stats_file_hdr->slots[shm_stats_file_slot].pid = -1; |
1024 | |
|
1025 | 0 | munmap(shm_stats_file_hdr, SHM_STATS_FILE_MAPPING_SIZE(shm_stats_file_max_objects)); |
1026 | 0 | close(shm_stats_file_fd); |
1027 | 0 | } |
1028 | 0 | } |
1029 | | REGISTER_POST_DEINIT(cleanup_shm_stats_file); |