/src/fluent-bit/lib/librdkafka-2.10.1/src/rdkafka_cgrp.h
Line | Count | Source |
1 | | /* |
2 | | * librdkafka - Apache Kafka C library |
3 | | * |
4 | | * Copyright (c) 2012-2022, Magnus Edenhill |
5 | | * 2023, Confluent Inc. |
6 | | * All rights reserved. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions are met: |
10 | | * |
11 | | * 1. Redistributions of source code must retain the above copyright notice, |
12 | | * this list of conditions and the following disclaimer. |
13 | | * 2. Redistributions in binary form must reproduce the above copyright notice, |
14 | | * this list of conditions and the following disclaimer in the documentation |
15 | | * and/or other materials provided with the distribution. |
16 | | * |
17 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
18 | | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 | | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
21 | | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
22 | | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
23 | | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
24 | | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
25 | | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
26 | | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
27 | | * POSSIBILITY OF SUCH DAMAGE. |
28 | | */ |
29 | | #ifndef _RDKAFKA_CGRP_H_ |
30 | | #define _RDKAFKA_CGRP_H_ |
31 | | |
32 | | #include "rdinterval.h" |
33 | | |
34 | | #include "rdkafka_assignor.h" |
35 | | |
36 | | |
37 | | /** |
38 | | * Client groups implementation |
39 | | * |
40 | | * Handling of a single cgrp is assigned to a single |
41 | | * rd_kafka_broker_t object at any given time. |
42 | | * The main thread will call rd_kafka_cgrp_serve() to serve its cgrps. |
43 | | * |
44 | | * This means that the cgrp itself does not need to be locked since it |
45 | | * is only ever used from the main thread. |
46 | | * |
47 | | */ |
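| | /* |
| | * Illustrative sketch (an assumption, not actual librdkafka code): the |
| | * main thread drives the cgrp roughly like: |
| | * |
| | * rd_kafka_cgrp_t *rkcg = rd_kafka_cgrp_get(rk); |
| | * if (rkcg) |
| | * rd_kafka_cgrp_serve(rkcg); |
| | * |
| | * Both rd_kafka_cgrp_get() and rd_kafka_cgrp_serve() are declared |
| | * below; this single-threaded access is what makes locking unnecessary. |
| | */ |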
48 | | |
49 | | |
50 | | extern const char *rd_kafka_cgrp_join_state_names[]; |
51 | | |
52 | | /** |
53 | | * Client group |
54 | | */ |
55 | | typedef struct rd_kafka_cgrp_s { |
56 | | const rd_kafkap_str_t *rkcg_group_id; |
57 | | rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */ |
58 | | rd_kafkap_str_t *rkcg_group_instance_id; |
59 | | const rd_kafkap_str_t *rkcg_client_id; |
60 | | rd_kafkap_str_t *rkcg_client_rack; |
61 | | |
62 | | enum { |
63 | | /* Init state */ |
64 | | RD_KAFKA_CGRP_STATE_INIT, |
65 | | |
66 | | /* Cgrp has been stopped. This is a final state */ |
67 | | RD_KAFKA_CGRP_STATE_TERM, |
68 | | |
69 | | /* Query for group coordinator */ |
70 | | RD_KAFKA_CGRP_STATE_QUERY_COORD, |
71 | | |
72 | | /* Outstanding query, awaiting response */ |
73 | | RD_KAFKA_CGRP_STATE_WAIT_COORD, |
74 | | |
75 | | /* Wait ack from assigned cgrp manager broker thread */ |
76 | | RD_KAFKA_CGRP_STATE_WAIT_BROKER, |
77 | | |
78 | | /* Wait for manager broker thread to connect to broker */ |
79 | | RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT, |
80 | | |
81 | | /* Coordinator is up and manager is assigned. */ |
82 | | RD_KAFKA_CGRP_STATE_UP, |
83 | | } rkcg_state; |
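| | /* Illustrative (derived from the state comments above): a typical |
| | * startup progresses INIT -> QUERY_COORD -> WAIT_COORD -> |
| | * WAIT_BROKER -> WAIT_BROKER_TRANSPORT -> UP; TERM is entered |
| | * only on final shutdown. */ |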
84 | | rd_ts_t rkcg_ts_statechange; /* Timestamp of last |
85 | | * state change. */ |
86 | | |
87 | | |
88 | | enum { |
89 | | /* all: join or rejoin, possibly with an existing assignment. */ |
90 | | RD_KAFKA_CGRP_JOIN_STATE_INIT, |
91 | | |
92 | | /* all: JoinGroupRequest sent, awaiting response. */ |
93 | | RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN, |
94 | | |
95 | | /* all: MetadataRequest sent, awaiting response. |
96 | | * While metadata requests may be issued at any time, |
97 | | * this state is only set upon a proper (re)join. */ |
98 | | RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA, |
99 | | |
100 | | /* Follower: SyncGroupRequest sent, awaiting response. */ |
101 | | RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC, |
102 | | |
103 | | /* all: waiting for application to call *_assign() */ |
104 | | RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL, |
105 | | |
106 | | /* all: waiting for application to call *_unassign() */ |
107 | | RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL, |
108 | | |
109 | | /* all: waiting for full assignment to decommission */ |
110 | | RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE, |
111 | | |
112 | | /* all: waiting for partial assignment to decommission */ |
113 | | RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE, |
114 | | |
115 | | /* all: synchronized and assigned |
116 | | * may be an empty assignment. */ |
117 | | RD_KAFKA_CGRP_JOIN_STATE_STEADY, |
118 | | } rkcg_join_state; |
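| | /* Illustrative (derived from the join-state comments above): a |
| | * classic-protocol member typically moves INIT -> WAIT_JOIN -> |
| | * WAIT_METADATA -> WAIT_SYNC -> WAIT_ASSIGN_CALL -> STEADY, and |
| | * through the WAIT_UNASSIGN states when partitions are revoked. */ |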
119 | | |
120 | | /* State when group leader */ |
121 | | struct { |
122 | | rd_kafka_group_member_t *members; |
123 | | int member_cnt; |
124 | | } rkcg_group_leader; |
125 | | |
126 | | rd_kafka_q_t *rkcg_q; /* Application poll queue */ |
127 | | rd_kafka_q_t *rkcg_ops; /* Manager ops queue */ |
128 | | rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */ |
129 | | int rkcg_flags; |
130 | 0 | #define RD_KAFKA_CGRP_F_TERMINATE 0x1 /* Terminate cgrp (async) */ |
131 | | #define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE \ |
132 | 0 | 0x8 /* Send LeaveGroup when \ |
133 | | * unassign is done */ |
134 | | #define RD_KAFKA_CGRP_F_SUBSCRIPTION \ |
135 | 0 | 0x10 /* If set: \ |
136 | | * subscription \ |
137 | | * else: \ |
138 | | * static assignment */ |
139 | | #define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT \ |
140 | 0 | 0x20 /* A Heartbeat request \ |
141 | | * is in transit, don't \
142 | | * send a new one. */ |
143 | | #define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION \ |
144 | 0 | 0x40 /* Subscription contains \ |
145 | | * wildcards. */ |
146 | | #define RD_KAFKA_CGRP_F_WAIT_LEAVE \ |
147 | 0 | 0x80 /* Wait for LeaveGroup \ |
148 | | * to be sent. \ |
149 | | * This is used to stall \ |
150 | | * termination until \ |
151 | | * the LeaveGroupRequest \ |
152 | | * is responded to, \ |
153 | | * otherwise it risks \ |
154 | | * being dropped in the \ |
155 | | * output queue when \ |
156 | | * the broker is destroyed. \ |
157 | | */ |
158 | | #define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED \ |
159 | 0 | 0x100 /**< max.poll.interval.ms \ |
160 | | * was exceeded and we \ |
161 | | * left the group. \ |
162 | | * Do not rejoin until \ |
163 | | * the application has \ |
164 | | * polled again. */ |
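| | /* Illustrative: these flags are manipulated with plain bit operations |
| | * from the main thread, e.g.: |
| | * |
| | * if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT)) |
| | * ... send a Heartbeat and set the flag ... |
| | */ |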
165 | | |
166 | | rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl */ |
167 | | rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */ |
168 | | rd_kafka_timer_t rkcg_serve_timer; /* Timer for next serve. */ |
169 | | int rkcg_heartbeat_intvl_ms; /* KIP-848: received |
170 | | * heartbeat interval in |
171 | | * milliseconds */ |
172 | | rd_interval_t rkcg_join_intvl; /* JoinGroup interval */ |
173 | | rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */ |
174 | | |
175 | | rd_ts_t rkcg_ts_session_timeout; /**< Absolute session |
176 | | * timeout enforced by |
177 | | * the consumer, this |
178 | | * value is updated on |
179 | | * Heartbeat success, |
180 | | * etc. */ |
181 | | rd_kafka_resp_err_t rkcg_last_heartbeat_err; /**< Last Heartbeat error, |
182 | | * used for logging. */ |
183 | | |
184 | | TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics; /* Topics subscribed to */ |
185 | | |
186 | | rd_list_t rkcg_toppars; /* Toppars subscribed to */ |
187 | | |
188 | | int32_t rkcg_generation_id; /* Current generation id (classic) |
189 | | * or member epoch (consumer). */ |
190 | | |
191 | | rd_kafka_assignor_t *rkcg_assignor; /**< The current partition |
192 | | * assignor. Used by both |
193 | | * leader and members. */ |
194 | | void *rkcg_assignor_state; /**< current partition |
195 | | * assignor state */ |
196 | | |
197 | | int32_t rkcg_coord_id; /**< Current coordinator id, |
198 | | * or -1 if not known. */ |
199 | | |
200 | | rd_kafka_group_protocol_t |
201 | | rkcg_group_protocol; /**< Group protocol to use */ |
202 | | |
203 | | rd_kafkap_str_t *rkcg_group_remote_assignor; /**< Group remote |
204 | | * assignor to use */ |
205 | | |
206 | | rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator |
207 | | * broker handle, or NULL. |
208 | | * rkcg_coord's nodename is |
209 | | * updated to this broker's |
210 | | * nodename when there is a |
211 | | * coordinator change. */ |
212 | | rd_kafka_broker_t *rkcg_coord; /**< The dedicated coordinator |
213 | | * broker handle. |
214 | | * Will be updated when the |
215 | | * coordinator changes. */ |
216 | | |
217 | | int16_t rkcg_wait_resp; /**< Awaiting response for this |
218 | | * ApiKey. |
219 | | * Makes sure only one |
220 | | * JoinGroup or SyncGroup |
221 | | * request is outstanding. |
222 | | * Unset value is -1. */ |
223 | | |
224 | | /** Current subscription */ |
225 | | rd_kafka_topic_partition_list_t *rkcg_subscription; |
226 | | /** The actual topics subscribed (after metadata+wildcard matching). |
227 | | * Sorted. */ |
228 | | rd_list_t *rkcg_subscribed_topics; /**< (rd_kafka_topic_info_t *) */ |
229 | | /** Subscribed topics that are errored/not available. */ |
230 | | rd_kafka_topic_partition_list_t *rkcg_errored_topics; |
231 | | /** If a SUBSCRIBE op is received during a COOPERATIVE rebalance, |
232 | | * actioning this will be postponed until after the rebalance |
233 | | * completes. The waiting subscription is stored here. */ |
234 | | rd_kafka_topic_partition_list_t *rkcg_next_subscription; |
235 | | |
236 | | /** |
237 | | * Subscription regex pattern. All the provided regex patterns are |
238 | | * stored as a single string with each pattern separated by '|'. |
239 | | * |
240 | | * Only applicable for the consumer protocol introduced in KIP-848. |
241 | | * |
242 | | * rkcg_subscription = rkcg_subscription_topics + |
243 | | * rkcg_subscription_regex |
244 | | */ |
245 | | rd_kafkap_str_t *rkcg_subscription_regex; |
246 | | |
247 | | /** |
248 | | * Full topic names extracted out from the rkcg_subscription. |
249 | | * |
250 | | * Only applicable for the consumer protocol introduced in KIP-848. |
251 | | * |
252 | | * For the consumer protocol, this field doesn't include regex |
253 | | * subscriptions; for those, refer to `rkcg_subscription_regex`. |
254 | | * |
255 | | * rkcg_subscription = rkcg_subscription_topics + |
256 | | * rkcg_subscription_regex |
257 | | */ |
258 | | rd_kafka_topic_partition_list_t *rkcg_subscription_topics; |
259 | | |
260 | | /** If an (un)SUBSCRIBE op is received during a COOPERATIVE rebalance, |
261 | | * actioning this will be postponed until after the rebalance |
262 | | * completes. This flag is used to signal a waiting unsubscribe |
263 | | * operation. Mutually exclusive with rkcg_next_subscription. */ |
264 | | rd_bool_t rkcg_next_unsubscribe; |
265 | | |
266 | | /** Assignment considered lost */ |
267 | | rd_atomic32_t rkcg_assignment_lost; |
268 | | |
269 | | /** Current assignment of partitions from last SyncGroup response. |
270 | | * NULL means no assignment, else empty or non-empty assignment. |
271 | | * |
272 | | * This group assignment is the actual set of partitions that were |
273 | | * assigned to our consumer by the consumer group leader and should |
274 | | * not be confused with the rk_consumer.assignment which is the |
275 | | * partitions assigned by the application using assign() et al. |
276 | | * |
277 | | * The group assignment and the consumer assignment are typically |
278 | | * identical, but not necessarily since an application is free to |
279 | | * assign() any partition, not just the partitions it is handed |
280 | | * through the rebalance callback. |
281 | | * |
282 | | * Yes, this nomenclature is ambiguous but has historical reasons, |
283 | | * so for now just try to remember that: |
284 | | * - group assignment == consumer group assignment. |
285 | | * - assignment == actual used assignment, i.e., fetched partitions. |
286 | | * |
287 | | * @remark This list is always sorted. |
288 | | */ |
289 | | rd_kafka_topic_partition_list_t *rkcg_group_assignment; |
290 | | |
291 | | /** The partitions to incrementally assign following a |
292 | | * currently in-progress incremental unassign. */ |
293 | | rd_kafka_topic_partition_list_t *rkcg_rebalance_incr_assignment; |
294 | | |
295 | | /** Current acked assignment, starts as an empty list. */ |
296 | | rd_kafka_topic_partition_list_t *rkcg_current_assignment; |
297 | | |
298 | | /** Assignment that is currently being reconciled. |
299 | | * Can be NULL when no reconciliation is ongoing. */ |
300 | | rd_kafka_topic_partition_list_t *rkcg_target_assignment; |
301 | | |
302 | | /** Next assignment that will be reconciled once current |
303 | | * reconciliation finishes. Can be NULL. */ |
304 | | rd_kafka_topic_partition_list_t *rkcg_next_target_assignment; |
305 | | |
306 | | /** Number of backoff retries when expediting next heartbeat. */ |
307 | | int rkcg_expedite_heartbeat_retries; |
308 | | |
309 | | /** Flags for KIP-848 state machine. */ |
310 | | int rkcg_consumer_flags; |
311 | | /** Coordinator is waiting for an acknowledgement of the currently |
312 | | * reconciled target assignment. Cleared when a heartbeat succeeds |
313 | | * after reconciliation finishes. */ |
314 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK 0x1 |
315 | | /** Member is sending an acknowledgement for a reconciled assignment */ |
316 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK 0x2 |
317 | | /** A new subscription needs to be sent to the Coordinator. */ |
318 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION 0x4 |
319 | | /** A new subscription is being sent to the Coordinator. */ |
320 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION 0x8 |
321 | | /** Consumer has subscribed at least once. Until that happens the |
322 | | * rebalance protocol is still considered NONE; after it, the |
323 | | * protocol depends on the configured partition assignors. */ |
325 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE 0x10 |
326 | | /** Send a complete request in the next heartbeat */ |
327 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST 0x20 |
328 | | /** Member is fenced, need to rejoin */ |
329 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN 0x40 |
330 | | /** Member is fenced, rejoining */ |
331 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE 0x80 |
332 | | /** Serve pending assignments after heartbeat */ |
333 | 0 | #define RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING 0x100 |
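| | /* Illustrative sequence (derived from the flag comments above): while |
| | * the heartbeat acknowledging a reconciled assignment is in flight |
| | * _F_SENDING_ACK is set, and _F_WAIT_ACK remains set until a heartbeat |
| | * succeeds after reconciliation completes. */ |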
334 | | |
335 | | /** Rejoin the group following a currently in-progress |
336 | | * incremental unassign. */ |
337 | | rd_bool_t rkcg_rebalance_rejoin; |
338 | | |
339 | | rd_ts_t rkcg_ts_last_err; /* Timestamp of last error |
340 | | * propagated to application */ |
341 | | rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to |
342 | | * application. |
343 | | * This is used to avoid |
344 | | * repeating the same error. */ |
345 | | |
346 | | rd_kafka_timer_t rkcg_offset_commit_tmr; /* Offset commit timer */ |
347 | | rd_kafka_timer_t rkcg_max_poll_interval_tmr; /**< Enforce the max |
348 | | * poll interval. */ |
349 | | |
350 | | rd_kafka_t *rkcg_rk; |
351 | | |
352 | | rd_kafka_op_t *rkcg_reply_rko; /* Send reply for op |
353 | | * (OP_TERMINATE) |
354 | | * to this rko's queue. */ |
355 | | |
356 | | rd_ts_t rkcg_ts_terminate; /* Timestamp of when |
357 | | * cgrp termination was |
358 | | * initiated. */ |
359 | | |
360 | | rd_atomic32_t rkcg_terminated; /**< Consumer has been closed */ |
361 | | |
362 | | rd_atomic32_t rkcg_subscription_version; /**< Subscription version */ |
363 | | |
364 | | /* Protected by rd_kafka_*lock() */ |
365 | | struct { |
366 | | rd_ts_t ts_rebalance; /* Timestamp of |
367 | | * last rebalance */ |
368 | | int rebalance_cnt; /* Number of |
369 | | * rebalances */ |
370 | | char rebalance_reason[256]; /**< Last rebalance |
371 | | * reason */ |
372 | | int assignment_size; /* Partition count |
373 | | * of last rebalance |
374 | | * assignment */ |
375 | | } rkcg_c; |
376 | | |
377 | | /* Timestamp of last rebalance start */ |
378 | | rd_ts_t rkcg_ts_rebalance_start; |
379 | | |
380 | | } rd_kafka_cgrp_t; |
381 | | |
382 | | |
383 | | |
384 | | /* Check if broker is the coordinator */ |
385 | | #define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb) \ |
386 | | ((rkcg)->rkcg_coord_id != -1 && \ |
387 | | (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid) |
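| | /* Illustrative: e.g. a broker thread can test whether it currently is |
| | * the group's coordinator before acting on cgrp traffic: |
| | * |
| | * if (RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb)) |
| | * ... this broker is the coordinator ... |
| | */ |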
388 | | |
389 | | /** |
390 | | * @returns true if cgrp is using static group membership |
391 | | */ |
392 | | #define RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) \ |
393 | 0 | !RD_KAFKAP_STR_IS_NULL((rkcg)->rkcg_group_instance_id) |
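| | /* Illustrative sketch (an assumption, not librdkafka code): static group |
| | * membership (KIP-345) is enabled by configuring `group.instance.id`, |
| | * which populates rkcg_group_instance_id. */ |
| | static RD_UNUSED rd_bool_t |
| | rd_kafka_cgrp_is_static_member_example(const rd_kafka_cgrp_t *rkcg) { |
| |         /* True when a group.instance.id was configured. */ |
| |         return RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg); |
| | } |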
394 | | |
395 | | extern const char *rd_kafka_cgrp_state_names[]; |
396 | | extern const char *rd_kafka_cgrp_join_state_names[]; |
397 | | |
398 | | void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg); |
399 | | rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk, |
400 | | rd_kafka_group_protocol_t group_protocol, |
401 | | const rd_kafkap_str_t *group_id, |
402 | | const rd_kafkap_str_t *client_id); |
403 | | void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg); |
404 | | |
405 | | void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg, |
406 | | rd_kafka_toppar_t *rktp, |
407 | | rd_kafka_replyq_t replyq, |
408 | | rd_kafka_op_type_t type, |
409 | | rd_kafka_resp_err_t err); |
410 | | void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko); |
411 | | void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq); |
412 | | |
413 | | |
414 | | rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del(rd_kafka_cgrp_t *rkcg, |
415 | | const char *pattern); |
416 | | rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add(rd_kafka_cgrp_t *rkcg, |
417 | | const char *pattern); |
418 | | |
419 | | int rd_kafka_cgrp_topic_check(rd_kafka_cgrp_t *rkcg, const char *topic); |
420 | | |
421 | | void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id); |
422 | | |
423 | | void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state); |
424 | | |
425 | | rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg); |
426 | | void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason); |
427 | | void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg, |
428 | | rd_kafka_resp_err_t err, |
429 | | const char *reason); |
430 | | void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg, |
431 | | rd_bool_t do_join); |
432 | 0 | #define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp) |
433 | | |
434 | | #define rd_kafka_cgrp_same_subscription_version(rk_cgrp, \ |
435 | | cgrp_subscription_version) \ |
436 | 0 | ((rk_cgrp) && \ |
437 | 0 | (cgrp_subscription_version == -1 || \ |
438 | 0 | rd_atomic32_get(&(rk_cgrp)->rkcg_subscription_version) == \ |
439 | 0 | cgrp_subscription_version)) |
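| | /* Illustrative: ops carrying a subscription version of -1 match any |
| | * current version, e.g.: |
| | * |
| | * if (rd_kafka_cgrp_same_subscription_version(rkcg, version)) |
| | * ... the op is not stale ... |
| | */ |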
440 | | |
441 | | void rd_kafka_cgrp_assigned_offsets_commit( |
442 | | rd_kafka_cgrp_t *rkcg, |
443 | | const rd_kafka_topic_partition_list_t *offsets, |
444 | | rd_bool_t set_offsets, |
445 | | const char *reason); |
446 | | |
447 | | void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg); |
448 | | |
449 | | rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg); |
450 | | |
451 | | |
452 | | struct rd_kafka_consumer_group_metadata_s { |
453 | | char *group_id; |
454 | | int32_t generation_id; |
455 | | char *member_id; |
456 | | char *group_instance_id; /**< Optional (NULL) */ |
457 | | }; |
458 | | |
459 | | rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup( |
460 | | const rd_kafka_consumer_group_metadata_t *cgmetadata); |
461 | | |
462 | | static RD_UNUSED const char * |
463 | 0 | rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) { |
464 | 0 | switch (protocol) { |
465 | 0 | case RD_KAFKA_REBALANCE_PROTOCOL_EAGER: |
466 | 0 | return "EAGER"; |
467 | 0 | case RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE: |
468 | 0 | return "COOPERATIVE"; |
469 | 0 | default: |
470 | 0 | return "NONE"; |
471 | 0 | } |
472 | 0 | } |
| | (Coverage note: rd_kafka_rebalance_protocol2str was unexecuted in every instantiating translation unit; the per-file list is elided.) |
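| | /* Illustrative usage sketch (an assumption, not part of librdkafka): |
| | * rendering the protocol name for a debug line. Assumes <stdio.h> is |
| | * available in the translation unit. */ |
| | static RD_UNUSED void |
| | rd_kafka_rebalance_protocol_print_example( |
| |     rd_kafka_rebalance_protocol_t protocol) { |
| |         fprintf(stderr, "rebalance protocol: %s\n", |
| |                 rd_kafka_rebalance_protocol2str(protocol)); |
| | } |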
473 | | |
474 | | void rd_kafka_cgrp_consumer_expedite_next_heartbeat(rd_kafka_cgrp_t *rkcg, |
475 | | const char *reason); |
476 | | |
477 | | #endif /* _RDKAFKA_CGRP_H_ */ |