Line data Source code
1 : #include "fd_txsend_tile.h"
2 :
3 : #include "../../disco/topo/fd_topo.h"
4 : #include "../../disco/fd_txn_m.h"
5 : #include "../../disco/metrics/fd_metrics.h"
6 : #include "../../disco/keyguard/fd_keyguard.h"
7 : #include "../../disco/keyguard/fd_keyload.h"
8 : #include "../../discof/tower/fd_tower_tile.h"
9 : #include "generated/fd_txsend_tile_seccomp.h"
10 :
11 : #include "../../util/net/fd_net_headers.h"
12 : #include "../../waltz/quic/fd_quic.h"
13 :
/* Input link kinds, assigned per in-link in unprivileged_init and
   stored in ctx->in_kind[ in_idx ] to dispatch frag handling. */
#define IN_KIND_SIGN   (0UL) /* sign_txsend:  keyguard responses      */
#define IN_KIND_GOSSIP (1UL) /* gossip_out:   contact info updates    */
#define IN_KIND_EPOCH  (2UL) /* replay_epoch: leader schedule / stake */
#define IN_KIND_TOWER  (3UL) /* tower_out:    slot-done vote messages */
#define IN_KIND_NET    (4UL) /* net_txsend:   inbound QUIC packets    */
19 :
/* Compile-time QUIC limits for this client tile.  conn_cnt must stay
   in sync with the hard-coded 128UL connection cap checked in
   after_credit before calling fd_quic_connect. */
fd_quic_limits_t quic_limits = {
  .conn_cnt                    = 128UL,
  .handshake_cnt               = 128UL,
  .conn_id_cnt                 = FD_QUIC_MIN_CONN_ID_CNT,
  .inflight_frame_cnt          = 16UL * 128UL,
  .min_inflight_frame_cnt_conn = 4UL,
  .stream_id_cnt               = 64UL,
  .tx_buf_sz                   = FD_TXN_MTU,
  .stream_pool_cnt             = 128UL,
};
30 :
/* Chained hash map from validator identity pubkey to peer_entry_t,
   instantiated via the fd_map_chain template.  Elements live in the
   ctx->peers array (indexed by gossip contact-info table idx); the map
   only provides pubkey -> element lookup.  The hash mixes ctx->seed
   (gathered via fd_rng_secure in privileged_init) with the first 8
   bytes of the pubkey. */
#define MAP_NAME               peer_map
#define MAP_KEY                pubkey
#define MAP_ELE_T              peer_entry_t
#define MAP_KEY_T              fd_pubkey_t
#define MAP_PREV               map.prev
#define MAP_NEXT               map.next
#define MAP_KEY_EQ(k0,k1)      fd_pubkey_eq( k0, k1 )
#define MAP_KEY_HASH(key,seed) (seed^fd_ulong_load_8( (key)->uc ))
#define MAP_IMPL_STYLE         2
#include "../../util/tmpl/fd_map_chain.c"
41 :
42 : FD_FN_CONST static inline ulong
43 0 : scratch_align( void ) {
44 0 : return fd_ulong_max( 128UL, fd_quic_align() );
45 0 : }
46 :
/* Total scratch footprint for the tile.  The append order here must
   match the FD_SCRATCH_ALLOC_APPEND sequence in unprivileged_init
   exactly (tile ctx, then quic, then peer map). */
FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile FD_PARAM_UNUSED) {
  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof(fd_txsend_tile_t), sizeof(fd_txsend_tile_t) );
  l = FD_LAYOUT_APPEND( l, fd_quic_align(),           fd_quic_footprint( &quic_limits ) );
  l = FD_LAYOUT_APPEND( l, peer_map_align(),          peer_map_footprint( 2UL*FD_CONTACT_INFO_TABLE_SIZE ) );
  return FD_LAYOUT_FINI( l, scratch_align() );
}
55 :
/* Periodic housekeeping: drives the two-phase identity keyswitch
   protocol.  On UNHALT_PENDING, resume processing net frags and ack
   completion.  On SWITCH_PENDING, wait until all tower frags published
   before the switch request have been consumed, then halt net frags,
   swap the QUIC identity key, and ack completion. */
static void
during_housekeeping( fd_txsend_tile_t * ctx ) {
  if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_UNHALT_PENDING ) ) {
    FD_LOG_DEBUG(( "keyswitch: unhalting" ));
    ctx->halt_net_frags = 0;
    fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
  }

  if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
    FD_LOG_DEBUG(( "keyswitch: switching identity" ));
    ulong seq_must_complete = ctx->keyswitch->param;
    if( FD_UNLIKELY( fd_seq_lt( ctx->tower_in_expect_seq, seq_must_complete ) ) ) {
      /* See fd_keyswitch.h, we need to flush any in-flight unpublished
         votes from the tower link before switching key. */
      FD_LOG_WARNING(( "Flushing in-flight unpublished votes from tower, must reach seq %lu, currently at %lu ...", seq_must_complete, ctx->tower_in_expect_seq ));
      return;
    }

    /* Halt net frags to avoid potential quic callback in after_frag */
    ctx->halt_net_frags = 1;

    fd_quic_set_identity_public_key( ctx->quic, ctx->keyswitch->bytes );

    memcpy( ctx->identity_key, ctx->keyswitch->bytes, 32UL );
    fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
  }
}
83 :
/* Copy the embedded fd_quic instance's counters, gauges, and
   histograms into this tile's TXSEND metrics slots.  Called
   periodically by stem; pure metrics export, no side effects on the
   QUIC state. */
static void
metrics_write( fd_txsend_tile_t * ctx ) {
  /* Receive path */
  FD_MCNT_SET(       TXSEND, RECEIVED_BYTES,         ctx->quic->metrics.net_rx_byte_cnt );
  FD_MCNT_ENUM_COPY( TXSEND, RECEIVED_FRAMES,        ctx->quic->metrics.frame_rx_cnt );
  FD_MCNT_SET(       TXSEND, RECEIVED_PACKETS,       ctx->quic->metrics.net_rx_pkt_cnt );
  FD_MCNT_SET(       TXSEND, STREAM_RECEIVED_BYTES,  ctx->quic->metrics.stream_rx_byte_cnt );
  FD_MCNT_SET(       TXSEND, STREAM_RECEIVED_EVENTS, ctx->quic->metrics.stream_rx_event_cnt );

  /* Transmit path */
  FD_MCNT_SET(       TXSEND, SENT_PACKETS, ctx->quic->metrics.net_tx_pkt_cnt );
  FD_MCNT_SET(       TXSEND, SENT_BYTES,   ctx->quic->metrics.net_tx_byte_cnt );
  FD_MCNT_SET(       TXSEND, RETRY_SENT,   ctx->quic->metrics.retry_tx_cnt );
  FD_MCNT_ENUM_COPY( TXSEND, ACK_TX,       ctx->quic->metrics.ack_tx );

  /* Connection lifecycle */
  FD_MGAUGE_ENUM_COPY( TXSEND, CONNECTIONS_STATE,           ctx->quic->metrics.conn_state_cnt );
  FD_MGAUGE_SET(       TXSEND, CONNECTIONS_ALLOC,           ctx->quic->metrics.conn_alloc_cnt );
  FD_MCNT_SET(         TXSEND, CONNECTIONS_CREATED,         ctx->quic->metrics.conn_created_cnt );
  FD_MCNT_SET(         TXSEND, CONNECTIONS_CLOSED,          ctx->quic->metrics.conn_closed_cnt );
  FD_MCNT_SET(         TXSEND, CONNECTIONS_ABORTED,         ctx->quic->metrics.conn_aborted_cnt );
  FD_MCNT_SET(         TXSEND, CONNECTIONS_TIMED_OUT,       ctx->quic->metrics.conn_timeout_cnt );
  FD_MCNT_SET(         TXSEND, CONNECTIONS_RETRIED,         ctx->quic->metrics.conn_retry_cnt );
  FD_MCNT_SET(         TXSEND, CONNECTION_ERROR_NO_SLOTS,   ctx->quic->metrics.conn_err_no_slots_cnt );
  FD_MCNT_SET(         TXSEND, CONNECTION_ERROR_RETRY_FAIL, ctx->quic->metrics.conn_err_retry_fail_cnt );

  /* Packet-level errors and events */
  FD_MCNT_ENUM_COPY( TXSEND, PKT_CRYPTO_FAILED,       ctx->quic->metrics.pkt_decrypt_fail_cnt );
  FD_MCNT_ENUM_COPY( TXSEND, PKT_NO_KEY,              ctx->quic->metrics.pkt_no_key_cnt );
  FD_MCNT_ENUM_COPY( TXSEND, PKT_NO_CONN,             ctx->quic->metrics.pkt_no_conn_cnt );
  FD_MCNT_ENUM_COPY( TXSEND, FRAME_TX_ALLOC,          ctx->quic->metrics.frame_tx_alloc_cnt );
  FD_MCNT_SET(       TXSEND, PKT_NET_HEADER_INVALID,  ctx->quic->metrics.pkt_net_hdr_err_cnt );
  FD_MCNT_SET(       TXSEND, PKT_QUIC_HEADER_INVALID, ctx->quic->metrics.pkt_quic_hdr_err_cnt );
  FD_MCNT_SET(       TXSEND, PKT_UNDERSZ,             ctx->quic->metrics.pkt_undersz_cnt );
  FD_MCNT_SET(       TXSEND, PKT_OVERSZ,              ctx->quic->metrics.pkt_oversz_cnt );
  FD_MCNT_SET(       TXSEND, PKT_VERNEG,              ctx->quic->metrics.pkt_verneg_cnt );
  FD_MCNT_ENUM_COPY( TXSEND, PKT_RETRANSMISSIONS,     ctx->quic->metrics.pkt_retransmissions_cnt );

  /* Handshakes */
  FD_MCNT_SET( TXSEND, HANDSHAKES_CREATED,         ctx->quic->metrics.hs_created_cnt );
  FD_MCNT_SET( TXSEND, HANDSHAKE_ERROR_ALLOC_FAIL, ctx->quic->metrics.hs_err_alloc_fail_cnt );
  FD_MCNT_SET( TXSEND, HANDSHAKE_EVICTED,          ctx->quic->metrics.hs_evicted_cnt );

  FD_MCNT_SET( TXSEND, FRAME_FAIL_PARSE, ctx->quic->metrics.frame_rx_err_cnt );

  /* Timing histograms */
  FD_MHIST_COPY( TXSEND, SERVICE_DURATION_SECONDS, ctx->quic->metrics.service_duration );
  FD_MHIST_COPY( TXSEND, RECEIVE_DURATION_SECONDS, ctx->quic->metrics.receive_duration );
}
127 :
/* TLS CertificateVerify signing callback for fd_quic.  Forwards the
   fixed 130 byte payload to the keyguard tile for an Ed25519
   signature; the private key never enters this tile.  Registered as
   ctx->quic->config.sign in unprivileged_init. */
static void
quic_tls_cv_sign( void *      signer_ctx,
                  uchar       signature[ static 64 ],
                  uchar const payload[ static 130 ] ) {
  fd_txsend_tile_t * ctx = signer_ctx;

  fd_keyguard_client_sign( ctx->keyguard_client, signature, payload, 130UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );
}
136 :
/* Assemble an Ethernet frame (cached eth header + caller supplied
   IP/UDP headers + payload) in the net_out dcache and publish it to
   the net tile.  ip4_hdr may carry IP options (length taken from its
   IHL field).  Caller is responsible for correct header checksums and
   length fields; this function only concatenates and publishes. */
static void
send_to_net( fd_txsend_tile_t *   ctx,
             fd_ip4_hdr_t const * ip4_hdr,
             fd_udp_hdr_t const * udp_hdr,
             uchar const *        payload,
             ulong                payload_sz,
             long                 now ) {
  uint const  ip_dst = FD_LOAD( uint, ip4_hdr->daddr_c );
  ulong const ip_sz  = FD_IP4_GET_LEN( *ip4_hdr ); /* IHL-derived, includes options */

  fd_txsend_out_t * net_out_link = ctx->net_out;
  uchar * packet_l2 = fd_chunk_to_laddr( net_out_link->mem, net_out_link->chunk );
  uchar * packet_l3 = packet_l2 + sizeof(fd_eth_hdr_t);
  uchar * packet_l4 = packet_l3 + ip_sz;
  uchar * packet_l5 = packet_l4 + sizeof(fd_udp_hdr_t);

  fd_memcpy( packet_l2, ctx->packet_hdr->eth, sizeof(fd_eth_hdr_t) );
  fd_memcpy( packet_l3, ip4_hdr,              ip_sz );
  fd_memcpy( packet_l4, udp_hdr,              sizeof(fd_udp_hdr_t) );
  fd_memcpy( packet_l5, payload,              payload_sz );

  ulong sig   = fd_disco_netmux_sig( ip_dst, 0U, ip_dst, DST_PROTO_OUTGOING, FD_NETMUX_SIG_MIN_HDR_SZ );
  ulong sz_l2 = sizeof(fd_eth_hdr_t) + ip_sz + sizeof(fd_udp_hdr_t) + payload_sz;

  ulong tspub = (ulong)fd_frag_meta_ts_comp( now );
  fd_stem_publish( ctx->stem, net_out_link->idx, sig, net_out_link->chunk, sz_l2, 0UL, 0, tspub );
  net_out_link->chunk = fd_dcache_compact_next( net_out_link->chunk, sz_l2, net_out_link->chunk0, net_out_link->wmark );
}
165 :
166 : static int
167 : quic_tx_aio_send( void * _ctx,
168 : fd_aio_pkt_info_t const * batch,
169 : ulong batch_cnt,
170 : ulong * opt_batch_idx,
171 0 : int flush FD_PARAM_UNUSED ) {
172 0 : fd_txsend_tile_t * ctx = _ctx;
173 :
174 0 : long now = fd_log_wallclock();
175 :
176 0 : for( ulong i=0; i<batch_cnt; i++ ) {
177 0 : if( FD_UNLIKELY( batch[ i ].buf_sz<FD_NETMUX_SIG_MIN_HDR_SZ ) ) continue;
178 0 : uchar * buf = batch[ i ].buf;
179 0 : fd_ip4_hdr_t * ip4_hdr = fd_type_pun( buf );
180 0 : ulong const ip4_len = FD_IP4_GET_LEN( *ip4_hdr );
181 0 : fd_udp_hdr_t * udp_hdr = fd_type_pun( buf + ip4_len );
182 0 : uchar * payload = buf + ip4_len + sizeof(fd_udp_hdr_t);
183 0 : FD_TEST( batch[ i ].buf_sz >= ip4_len + sizeof(fd_udp_hdr_t) );
184 0 : ulong payload_sz = batch[ i ].buf_sz - ip4_len - sizeof(fd_udp_hdr_t);
185 0 : send_to_net( ctx, ip4_hdr, udp_hdr, payload, payload_sz, now );
186 0 : }
187 :
188 0 : if( FD_LIKELY( opt_batch_idx ) ) {
189 0 : *opt_batch_idx = batch_cnt;
190 0 : }
191 :
192 0 : return FD_AIO_SUCCESS;
193 0 : }
194 :
/* fd_quic conn_final callback: a connection has been fully torn down.
   Remove it from the tile's flat conns[] array (swap-with-last delete)
   and clear any peer_entry_t slot that still references it.  The
   connection must be present in conns[]; an unknown connection
   indicates state corruption and aborts the tile. */
static void
quic_conn_final( fd_quic_conn_t * conn,
                 void *           ctx ) {
  fd_txsend_tile_t * tile = ctx;

  for( ulong i=0UL; i<tile->conns_len; i++ ) {
    if( FD_UNLIKELY( tile->conns[ i ].conn==conn ) ) {
      peer_entry_t * peer = peer_map_ele_query( tile->peer_map, &tile->conns[ i ].pubkey, NULL, tile->peers );
      if( FD_LIKELY( peer ) ) {
        for( ulong j=0UL; j<2UL; j++ ) {
          if( peer->quic_conns[ j ]==conn ) peer->quic_conns[ j ] = NULL;
        }
      }
      /* Swap-with-last removal; order of conns[] is not significant. */
      if( FD_UNLIKELY( i!=tile->conns_len-1UL ) ) tile->conns[ i ] = tile->conns[ tile->conns_len-1UL ];
      tile->conns_len--;
      return;
    }
  }

  FD_LOG_ERR(( "unknown connection finalized" ));
}
216 :
217 : /* This QUIC servicing is very precarious. Recall a few facts,
218 :
219 : 1) QUIC needs to be serviced periodically to make progress
220 : 2) QUIC servicing may produce outgoing packets that need to be sent
221 : to the network
   3) Elsewhere, the tile publishes frags to the verify tile to
223 : send our own votes into our leader pipeline
224 :
225 : You could service QUIC in before_credit, as the QUIC tile does, but
226 : this has a problem. If you publish frags in before_credit, you might
227 : overrun the downstream consumer. For net tile, this is OK because it
228 : expects that (as does verify). But the credit counting mechanism
229 : doesn't expect this behavior and will underflow. (That's also not
230 : ideal, in case some plugin wanted to listen reliably on quic->verify
231 : they could not, if it got underflowed). Here though, we want to
232 : avoid dropping outgoing votes to verify, since they might be needed
233 : for liveness of a small cluster.
234 :
235 : We thus take the trade of servicing QUIC in after_credit, which means
236 : it could theoretically get backpressured by verify, however this
237 : isn't realistic in practice, as verify polls round robin and there's
238 : only one vote per slot. */
239 :
240 : static inline void
241 : after_credit( fd_txsend_tile_t * ctx,
242 : fd_stem_context_t * stem,
243 : int * opt_poll_in,
244 0 : int * charge_busy ) {
245 0 : ctx->stem = stem;
246 :
247 0 : *charge_busy = fd_quic_service( ctx->quic, fd_log_wallclock() );
248 0 : *opt_poll_in = !*charge_busy; /* refetch credits to prevent above documented situation */
249 :
250 0 : if( FD_UNLIKELY( ctx->leader_schedules<2UL ) ) return;
251 0 : if( FD_UNLIKELY( ctx->voted_slot==ULONG_MAX ) ) return;
252 :
253 0 : fd_pubkey_t const * leaders[ 7UL ];
254 :
255 0 : for( ulong i=0UL; i<7UL; i++ ) {
256 0 : ulong target_slot = ctx->voted_slot+1UL + i*FD_EPOCH_SLOTS_PER_ROTATION;
257 0 : leaders[ i ] = fd_multi_epoch_leaders_get_leader_for_slot( ctx->mleaders, target_slot );
258 0 : FD_TEST( leaders[ i ] );
259 0 : }
260 :
261 : /* Disconnect any QUIC connection to a leader that does not have a
262 : rotation coming up in the next 7 slots. */
263 0 : for( ulong i=0UL; i<ctx->conns_len; i++ ) {
264 0 : int keep_conn = 0;
265 0 : for( ulong j=0UL; j<7UL; j++ ) {
266 0 : if( fd_pubkey_eq( &ctx->conns[ i ].pubkey, leaders[ j ] ) ) {
267 0 : keep_conn = 1;
268 0 : break;
269 0 : }
270 0 : }
271 :
272 0 : if( FD_UNLIKELY( !keep_conn ) ) fd_quic_conn_close( ctx->conns[ i ].conn, 0U );
273 0 : }
274 :
275 : /* Connect to any leader that does not have a connection yet. */
276 0 : for( ulong i=0UL; i<7UL; i++ ) {
277 0 : fd_pubkey_t const * leader = leaders[ i ];
278 0 : peer_entry_t * peer = peer_map_ele_query( ctx->peer_map, leader, NULL, ctx->peers );
279 0 : if( FD_UNLIKELY( !peer ) ) continue; /* no contact info */
280 :
281 0 : for( ulong j=0UL; j<2UL; j++ ) {
282 0 : if( FD_UNLIKELY( ctx->conns_len==128UL ) ) break; /* connection limit reached */
283 0 : if( FD_LIKELY( peer->quic_conns[ j ] ) ) continue; /* already connected */
284 0 : if( FD_UNLIKELY( !peer->quic_ip_addrs[ j ] || !peer->quic_ports[ j ] ) ) continue;
285 :
286 : /* Don't try to reconnect more than once every two seconds ...
287 : Basically Agave limits us to 8 connections per minute, so if we
288 : keep trying to reconnect rapidly it's much less effective than
289 : waiting a little bit to ensure we stay under the threshold.
290 :
291 : We should probably make this a bit more sophisticated, with a
292 : simple model that considers past connection attempts, and
293 : future leader slots (e.g. we might still want to burn an
294 : attempt if a leader slot is imminent, even if we recently tried
295 : to connect). For now the dumb logic seems to work well enough. */
296 0 : long now = fd_log_wallclock();
297 0 : if( FD_UNLIKELY( peer->quic_last_connected[ j ]+2e9L>now ) ) continue;
298 :
299 0 : fd_quic_conn_t * conn = fd_quic_connect( ctx->quic,
300 0 : peer->quic_ip_addrs[ j ],
301 0 : peer->quic_ports[ j ],
302 0 : ctx->src_ip_addr,
303 0 : ctx->src_port,
304 0 : now );
305 0 : FD_TEST( conn ); /* never out of connection objects, per above check */
306 0 : ctx->conns[ ctx->conns_len ].conn = conn;
307 0 : ctx->conns[ ctx->conns_len ].pubkey = *leader;
308 0 : peer->quic_conns[ j ] = conn;
309 0 : peer->quic_last_connected[ j ] = now;
310 0 : ctx->conns_len++;
311 0 : }
312 0 : }
313 0 : }
314 :
/* Send a signed vote transaction to one leader over every available
   path: plain UDP to the leader's TPU vote / TPU ports, and QUIC over
   any already-established connections.  Silently does nothing if we
   have no contact info for the pubkey.  Non-static: declared in the
   tile header for use elsewhere. */
void
send_vote_to_leader( fd_txsend_tile_t *  ctx,
                     fd_pubkey_t const * leader_pubkey,
                     uchar const *       vote_payload,
                     ulong               vote_payload_sz ) {
  peer_entry_t const * peer = peer_map_ele_query_const( ctx->peer_map, leader_pubkey, NULL, ctx->peers );
  if( FD_UNLIKELY( !peer ) ) return; /* no known contact info */

  /* UDP paths.  (Bitwise | rather than || — evaluates both operands
     branchlessly; same truth value here.) */
  for( ulong i=0UL; i<2UL; i++ ) {
    if( FD_UNLIKELY( !peer->udp_ip_addrs[ i ] | !peer->udp_ports[ i ] ) ) continue;

    /* Patch destination, lengths, id and checksum into the tile's
       cached IP/UDP header template, then ship it. */
    fd_ip4_hdr_t * ip4_hdr = ctx->packet_hdr->ip4;
    fd_udp_hdr_t * udp_hdr = ctx->packet_hdr->udp;

    ip4_hdr->daddr       = peer->udp_ip_addrs[ i ];
    ip4_hdr->net_tot_len = fd_ushort_bswap( (ushort)(vote_payload_sz+sizeof(fd_ip4_hdr_t)+sizeof(fd_udp_hdr_t)) );
    ip4_hdr->net_id      = fd_ushort_bswap( ctx->net_id++ );
    ip4_hdr->check       = 0; /* must be zero while computing the checksum */
    ip4_hdr->check       = fd_ip4_hdr_check_fast( ip4_hdr );

    udp_hdr->net_dport = fd_ushort_bswap( peer->udp_ports[ i ] );
    udp_hdr->net_len   = fd_ushort_bswap( (ushort)( vote_payload_sz+sizeof(fd_udp_hdr_t) ) );
    send_to_net( ctx, ip4_hdr, udp_hdr, vote_payload, vote_payload_sz, fd_log_wallclock() );
  }

  /* QUIC paths: best effort, skipped if no connection or no stream
     object is available right now. */
  for( ulong i=0UL; i<2UL; i++ ) {
    fd_quic_conn_t * conn = peer->quic_conns[ i ];
    if( FD_UNLIKELY( !conn ) ) continue;

    fd_quic_stream_t * stream = fd_quic_conn_new_stream( conn );
    if( FD_UNLIKELY( !stream ) ) continue;

    fd_quic_stream_send( stream, vote_payload, vote_payload_sz, 1 );
  }
}
350 :
/* Apply a gossip contact-info update to the peer table.  The entry
   slot is addressed by the gossip table idx carried in the message;
   the pubkey keys the lookup map.  A tombstoned slot (previously
   removed) is fully reset before reuse.  Zero addresses/ports in the
   update are ignored so a possibly-still-valid older endpoint is kept. */
static inline void
handle_contact_info_update( fd_txsend_tile_t *                 ctx,
                            fd_gossip_update_message_t const * msg ) {
  peer_entry_t * entry = &ctx->peers[ msg->contact_info->idx ];
  if( FD_UNLIKELY( entry->tombstoned ) ) {
    /* Slot was marked removed: drop it from the map and wipe all
       cached endpoints / backoff state before repopulating. */
    FD_TEST( peer_map_ele_remove( ctx->peer_map, &entry->pubkey, NULL, ctx->peers ) );
    entry->quic_last_connected[ 0 ] = 0L;
    entry->quic_last_connected[ 1 ] = 0L;
    for( ulong i=0UL; i<2UL; i++ ) {
      entry->quic_ip_addrs[ i ] = 0U;
      entry->quic_ports   [ i ] = 0U;
      entry->udp_ip_addrs [ i ] = 0U;
      entry->udp_ports    [ i ] = 0U;
    }
  }

  entry->tombstoned = 0;
  fd_memcpy( entry->pubkey.uc, msg->origin, 32UL );

  /* The new pubkey might already exist in the map via a stale
     tombstoned entry at a different idx, if a validator's pubkey
     migrated between contact info table slots. Evict the stale entry
     to prevent duplicate keys in the map.

     NOTE(review): this branch resets ENTRY's endpoint fields (not
     stale's) and clears stale->tombstoned — presumably intentional so
     the reused slot starts clean, but confirm the reset target; also
     confirm gossip guarantees a remove (tombstone) before an idx is
     re-keyed, since a non-tombstoned in-map entry whose pubkey was
     just overwritten by the memcpy above would leave the map chain
     keyed under the old hash. */
  peer_entry_t * stale = peer_map_ele_query( ctx->peer_map, &entry->pubkey, NULL, ctx->peers );
  if( FD_UNLIKELY( stale ) ) {
    peer_map_ele_remove( ctx->peer_map, &stale->pubkey, NULL, ctx->peers );
    entry->quic_last_connected[ 0 ] = 0L;
    entry->quic_last_connected[ 1 ] = 0L;
    for( ulong i=0UL; i<2UL; i++ ) {
      entry->quic_ip_addrs[ i ] = 0U;
      entry->quic_ports   [ i ] = 0U;
      entry->udp_ip_addrs [ i ] = 0U;
      entry->udp_ports    [ i ] = 0U;
    }
    stale->tombstoned = 0;
  }

  /* Socket table indices probed for each of the two endpoint slots:
     prefer the dedicated vote port, fall back to the general TPU port. */
  static ulong const quic_socket_idx[ 2UL ] = {
    FD_GOSSIP_CONTACT_INFO_SOCKET_TPU_VOTE_QUIC,
    FD_GOSSIP_CONTACT_INFO_SOCKET_TPU_QUIC,
  };

  static ulong const udp_socket_idx[ 2UL ] = {
    FD_GOSSIP_CONTACT_INFO_SOCKET_TPU_VOTE,
    FD_GOSSIP_CONTACT_INFO_SOCKET_TPU,
  };

  /* If an IP address or port is updated via. gossip to be 0, it's no
     longer reachable and we just ignore the update, since there's a
     chance the old one is still valid. */

  for( ulong i=0UL; i<2UL; i++ ) {
    if( FD_LIKELY( !msg->contact_info->value->sockets[ quic_socket_idx[ i ] ].is_ipv6 && msg->contact_info->value->sockets[ quic_socket_idx[ i ] ].ip4 ) ) {
      entry->quic_ip_addrs[ i ] = msg->contact_info->value->sockets[ quic_socket_idx[ i ] ].ip4;
    }
    if( FD_LIKELY( fd_ushort_bswap( msg->contact_info->value->sockets[ quic_socket_idx[ i ] ].port ) ) ) {
      entry->quic_ports   [ i ] = fd_ushort_bswap( msg->contact_info->value->sockets[ quic_socket_idx[ i ] ].port );
    }
  }

  for( ulong i=0UL; i<2UL; i++ ) {
    if( FD_LIKELY( !msg->contact_info->value->sockets[ udp_socket_idx[ i ] ].is_ipv6 && msg->contact_info->value->sockets[ udp_socket_idx[ i ] ].ip4 ) ) {
      entry->udp_ip_addrs[ i ] = msg->contact_info->value->sockets[ udp_socket_idx[ i ] ].ip4;
    }
    if( FD_LIKELY( fd_ushort_bswap( msg->contact_info->value->sockets[ udp_socket_idx[ i ] ].port ) ) ) {
      entry->udp_ports   [ i ] = fd_ushort_bswap( msg->contact_info->value->sockets[ udp_socket_idx[ i ] ].port );
    }
  }

  FD_TEST( peer_map_ele_insert( ctx->peer_map, entry, ctx->peers ) );
}
422 :
/* Mark a peer slot as removed.  The actual map eviction and field
   reset are deferred to the next contact-info update that reuses this
   idx (see handle_contact_info_update). */
static inline void
handle_contact_info_remove( fd_txsend_tile_t *                 ctx,
                            fd_gossip_update_message_t const * msg ) {
  peer_entry_t * entry = &ctx->peers[ msg->contact_info_remove->idx ];
  entry->tombstoned = 1;
}
429 :
/* Handle a tower slot-done message carrying an unsigned vote
   transaction: build an fd_txn_m in the txsend_out dcache, have the
   keyguard sign it, blast it at the next 3 upcoming leader rotations,
   and finally publish it to our own verify pipeline so the vote also
   enters our own leader path. */
static void
handle_vote_msg( fd_txsend_tile_t *           ctx,
                 fd_stem_context_t *          stem,
                 fd_tower_slot_done_t const * slot_done ) {
  if( FD_UNLIKELY( slot_done->vote_slot==ULONG_MAX ) ) return; /* nothing voted */
  if( FD_UNLIKELY( !slot_done->has_vote_txn ) ) return;        /* no txn attached */

  ctx->voted_slot = slot_done->vote_slot; /* anchors after_credit's leader window */

  /* Stage the txn message directly in the outgoing dcache. */
  fd_txn_m_t * txnm = fd_chunk_to_laddr( ctx->txsend_out->mem, ctx->txsend_out->chunk );
  FD_TEST( slot_done->vote_txn_sz<=FD_TXN_MTU );
  txnm->payload_sz              = (ushort)slot_done->vote_txn_sz;
  txnm->source_ipv4             = ctx->src_ip_addr;
  txnm->source_tpu              = FD_TXN_M_TPU_SOURCE_TXSEND;
  txnm->block_engine.bundle_id  = 0UL; /* not a bundle */
  fd_memcpy( fd_txn_m_payload( txnm ), slot_done->vote_txn, slot_done->vote_txn_sz );

  txnm->txn_t_sz = (ushort)fd_txn_parse( slot_done->vote_txn, slot_done->vote_txn_sz, fd_txn_m_txn_t( txnm ), NULL );
  FD_TEST( txnm->txn_t_sz ); /* tower must hand us a parseable txn */

  uchar *          payload = fd_txn_m_payload( txnm );
  fd_txn_t const * txn     = fd_txn_m_txn_t_const( txnm );

  /* Sign the message portion in place via the keyguard; the signature
     bytes are written directly into the staged payload. */
  uchar *       signatures = payload + txn->signature_off;
  uchar const * message    = payload + txn->message_off;
  ulong         message_sz = slot_done->vote_txn_sz - txn->message_off;
  fd_keyguard_client_vote_txn_sign( ctx->keyguard_client, signatures, slot_done->authority_idx, message, message_sz );

  /* Send to the leaders of the next 3 rotations after the voted slot. */
  for( ulong i=0UL; i<3UL; i++ ) {
    ulong target_slot = slot_done->vote_slot+1UL + i*FD_EPOCH_SLOTS_PER_ROTATION;
    fd_pubkey_t const * leader = fd_multi_epoch_leaders_get_leader_for_slot( ctx->mleaders, target_slot );
    FD_TEST( leader );
    send_vote_to_leader( ctx, leader, payload, slot_done->vote_txn_sz );
  }

  /* Publish the signed txn to our own verify pipeline. */
  ulong msg_sz = fd_txn_m_realized_footprint( txnm, 0, 0 );
  fd_stem_publish( stem, ctx->txsend_out->idx, 1UL, ctx->txsend_out->chunk, msg_sz, 0UL, 0, 0 );
  ctx->txsend_out->chunk = fd_dcache_compact_next( ctx->txsend_out->chunk, msg_sz, ctx->txsend_out->chunk0, ctx->txsend_out->wmark );
}
469 :
470 :
471 : static inline int
472 : before_frag( fd_txsend_tile_t * ctx,
473 : ulong in_idx,
474 : ulong seq,
475 0 : ulong sig ) {
476 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_TOWER ) ) ctx->tower_in_expect_seq = seq+1UL;
477 0 : if( FD_UNLIKELY( ctx->halt_net_frags && ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) return -1;
478 :
479 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
480 0 : return sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO && sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE;
481 0 : } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_TOWER ) ) {
482 0 : return sig!=FD_TOWER_SIG_SLOT_DONE;
483 0 : }
484 :
485 0 : return 0;
486 0 : }
487 :
/* Speculative copy phase.  Records the chunk for after_frag, bounds
   checks it against the producing link, and for net frags copies the
   packet into ctx->quic_buf so it survives a potential overrun.  Epoch
   frags skip the sz-vs-mtu check because their size is validated via
   staked_cnt instead (the frag_meta sz field is too small to carry
   it). */
static void
during_frag( fd_txsend_tile_t * ctx,
             ulong              in_idx,
             ulong              seq,
             ulong              sig,
             ulong              chunk,
             ulong              sz,
             ulong              ctl ) {
  (void)seq; (void)sig;

  ctx->chunk = chunk; /* stashed for after_frag's laddr translation */

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark, ctx->in[ in_idx ].mtu ));

    fd_epoch_info_msg_t const * msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
    FD_TEST( msg->staked_cnt<=MAX_COMPRESSED_STAKE_WEIGHTS ); /* implicit sz verification since sz field on frag_meta too small */
  } else {
    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) )
      FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu,%lu]", chunk, sz, ctx->in[in_idx].chunk0, ctx->in[in_idx].wmark, ctx->in[ in_idx ].mtu ));
  }

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
    void const * src = fd_net_rx_translate_frag( &ctx->net_in_bounds[ in_idx ], chunk, ctl, sz );
    fd_memcpy( ctx->quic_buf, src, sz );
  }
}
516 :
/* Commit phase: dispatch the frag by link kind.  Net frags feed the
   local QUIC instance (Ethernet header stripped; the payload was
   copied to quic_buf in during_frag).  Gossip frags update/remove
   peer contact info, tower frags trigger vote sending, epoch frags
   load a new leader schedule. */
static void
after_frag( fd_txsend_tile_t *  ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               tspub,
            fd_stem_context_t * stem ) {
  (void)seq; (void)sig; (void)tsorig; (void)tspub;
  /* (the (void)sig cast above is vestigial — sig IS used in the gossip
     branch below; harmless.) */

  if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
    uchar * ip_packet    = ctx->quic_buf+sizeof(fd_eth_hdr_t);
    ulong   ip_packet_sz = sz-sizeof(fd_eth_hdr_t);
    fd_quic_process_packet( ctx->quic, ip_packet, ip_packet_sz, fd_log_wallclock() );
  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    if( FD_LIKELY( sig==FD_GOSSIP_UPDATE_TAG_CONTACT_INFO ) ) handle_contact_info_update( ctx, fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, ctx->chunk ) );
    else                                                      handle_contact_info_remove( ctx, fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, ctx->chunk ) );
  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_TOWER ) ) {
    handle_vote_msg( ctx, stem, fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, ctx->chunk ) );
  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
    fd_multi_epoch_leaders_epoch_msg_init( ctx->mleaders, fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, ctx->chunk ) );
    fd_multi_epoch_leaders_stake_msg_fini( ctx->mleaders );
    ctx->leader_schedules++; /* after_credit waits for >=2 before managing conns */
  } else {
    FD_LOG_ERR(( "unknown in_kind %d on link %lu", ctx->in_kind[ in_idx ], in_idx ));
  }
}
545 :
/* privileged_init runs before the tile is sandboxed: it loads the
   identity public key from disk and gathers secure randomness for the
   peer map hash seed.  The private key never enters this tile (pubkey
   only load); signing goes through the keyguard. */
static void
privileged_init( fd_topo_t *      topo,
                 fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_txsend_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_txsend_tile_t), sizeof(fd_txsend_tile_t) );

  if( FD_UNLIKELY( !strcmp( tile->txsend.identity_key_path, "" ) ) )
    FD_LOG_ERR(( "identity_key_path not set" ));

  ctx->identity_key[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->txsend.identity_key_path, /* pubkey only: */ 1 ) );

  FD_TEST( fd_rng_secure( &ctx->seed, sizeof(ctx->seed) ) );
}
561 :
562 : static inline fd_txsend_out_t
563 : out1( fd_topo_t const * topo,
564 : fd_topo_tile_t const * tile,
565 0 : char const * name ) {
566 0 : ulong idx = ULONG_MAX;
567 :
568 0 : for( ulong i=0UL; i<tile->out_cnt; i++ ) {
569 0 : fd_topo_link_t const * link = &topo->links[ tile->out_link_id[ i ] ];
570 0 : if( !strcmp( link->name, name ) ) {
571 0 : if( FD_UNLIKELY( idx!=ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had multiple output links named %s but expected one", tile->name, tile->kind_id, name ));
572 0 : idx = i;
573 0 : }
574 0 : }
575 :
576 0 : if( FD_UNLIKELY( idx==ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had no output link named %s", tile->name, tile->kind_id, name ));
577 :
578 0 : void * mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ idx ] ].dcache_obj_id ].wksp_id ].wksp;
579 0 : ulong chunk0 = fd_dcache_compact_chunk0( mem, topo->links[ tile->out_link_id[ idx ] ].dcache );
580 0 : ulong wmark = fd_dcache_compact_wmark ( mem, topo->links[ tile->out_link_id[ idx ] ].dcache, topo->links[ tile->out_link_id[ idx ] ].mtu );
581 :
582 0 : return (fd_txsend_out_t){ .idx = idx, .mem = mem, .chunk0 = chunk0, .wmark = wmark, .chunk = chunk0 };
583 0 : }
584 :
/* unprivileged_init: carve the scratch region (must mirror
   scratch_footprint exactly), construct and configure the QUIC client,
   the multi-epoch leader tracker, and the peer map, wire up input and
   output links, the keyguard client, and the keyswitch object.  Aborts
   on any inconsistency. */
static void
unprivileged_init( fd_topo_t *      topo,
                   fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_txsend_tile_t * ctx       = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_txsend_tile_t), sizeof(fd_txsend_tile_t) );
  void *             _quic     = FD_SCRATCH_ALLOC_APPEND( l, fd_quic_align(),           fd_quic_footprint( &quic_limits ) );
  void *             _peer_map = FD_SCRATCH_ALLOC_APPEND( l, peer_map_align(),          peer_map_footprint( 2UL*FD_CONTACT_INFO_TABLE_SIZE ) );

  ctx->quic = fd_quic_join( fd_quic_new( _quic, &quic_limits ) );
  FD_TEST( ctx->quic );

  ctx->leader_schedules = 0UL;

  ctx->mleaders = fd_multi_epoch_leaders_join( fd_multi_epoch_leaders_new( ctx->mleaders_mem ) );
  FD_TEST( ctx->mleaders );

  /* ctx->seed was gathered via fd_rng_secure in privileged_init. */
  ctx->peer_map = peer_map_join( peer_map_new( _peer_map, 2UL*FD_CONTACT_INFO_TABLE_SIZE, ctx->seed ) );
  FD_TEST( ctx->peer_map );

  /* Route QUIC's outgoing packets through quic_tx_aio_send -> net. */
  fd_aio_t * quic_tx_aio = fd_aio_join( fd_aio_new( ctx->quic_tx_aio, ctx, quic_tx_aio_send ) );
  FD_TEST( quic_tx_aio );
  fd_quic_set_aio_net_tx( ctx->quic, quic_tx_aio );

  /* Client-only QUIC config; TLS CV signing goes through the keyguard. */
  ctx->quic->config.role         = FD_QUIC_ROLE_CLIENT;
  ctx->quic->config.idle_timeout = 30e9L; /* ns */
  ctx->quic->config.ack_delay    = 25e6L; /* ns */
  ctx->quic->config.keep_alive   = 1;
  ctx->quic->config.sign         = quic_tls_cv_sign;
  ctx->quic->config.sign_ctx     = ctx;
  fd_memcpy( ctx->quic->config.identity_public_key, ctx->identity_key, sizeof(ctx->identity_key) );

  ctx->quic->cb.conn_final = quic_conn_final;
  ctx->quic->cb.quic_ctx   = ctx;

  FD_TEST( fd_quic_init( ctx->quic ));

  /* Zero out the whole peer table; entries are populated lazily from
     gossip contact-info updates. */
  for( ulong i=0UL; i<FD_CONTACT_INFO_TABLE_SIZE; i++ ) {
    ctx->peers[ i ].tombstoned = 0;
    for( ulong j=0UL; j<2UL; j++ ) {
      ctx->peers[ i ].quic_ip_addrs[ j ]        = 0;
      ctx->peers[ i ].quic_ports   [ j ]        = 0;
      ctx->peers[ i ].udp_ip_addrs [ j ]        = 0;
      ctx->peers[ i ].udp_ports    [ j ]        = 0;
      ctx->peers[ i ].quic_last_connected[ j ]  = 0L;
      ctx->peers[ i ].quic_conns[ j ]           = NULL;
    }
  }

  ctx->conns_len  = 0UL;
  ctx->voted_slot = ULONG_MAX; /* "no vote yet" sentinel checked in after_credit */
  ctx->net_id     = 0;

  ctx->src_ip_addr = tile->txsend.ip_addr;
  ctx->src_port    = tile->txsend.txsend_src_port;
  fd_ip4_udp_hdr_init( ctx->packet_hdr, FD_TXN_MTU, ctx->src_ip_addr, ctx->src_port );

  /* Classify every input link by name into in_kind[]. */
  FD_TEST( tile->in_cnt<sizeof(ctx->in_kind)/sizeof(ctx->in_kind[ 0 ]) );
  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    fd_topo_link_t * link      = &topo->links[ tile->in_link_id[ i ] ];
    fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];

    ctx->in[ i ].mem    = link_wksp->wksp;
    ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
    ctx->in[ i ].wmark  = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
    ctx->in[ i ].mtu    = link->mtu;

    if( !strcmp( link->name, "net_txsend" ) ) {
      fd_net_rx_bounds_init( &ctx->net_in_bounds[ i ], link->dcache );
      ctx->in_kind[ i ] = IN_KIND_NET;
    } else if( !strcmp( link->name, "gossip_out"   ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP;
    else   if( !strcmp( link->name, "replay_epoch" ) ) ctx->in_kind[ i ] = IN_KIND_EPOCH;
    else   if( !strcmp( link->name, "tower_out"    ) ) ctx->in_kind[ i ] = IN_KIND_TOWER;
    else   if( !strcmp( link->name, "sign_txsend"  ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
    else FD_LOG_ERR(( "unexpected input link name %s", link->name ));
  }

  *ctx->txsend_out = out1( topo, tile, "txsend_out" );
  *ctx->net_out    = out1( topo, tile, "txsend_net" );

  /* Keyguard client over the sign_txsend / txsend_sign link pair. */
  ulong sign_in_idx  = fd_topo_find_tile_in_link ( topo, tile, "sign_txsend", tile->kind_id );
  ulong sign_out_idx = fd_topo_find_tile_out_link( topo, tile, "txsend_sign", tile->kind_id );
  FD_TEST( sign_in_idx!=ULONG_MAX );
  fd_topo_link_t * sign_in  = &topo->links[ tile->in_link_id [ sign_in_idx  ] ];
  fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ sign_out_idx ] ];
  if( FD_UNLIKELY( !fd_keyguard_client_join( fd_keyguard_client_new( ctx->keyguard_client,
                                                                     sign_out->mcache,
                                                                     sign_out->dcache,
                                                                     sign_in->mcache,
                                                                     sign_in->dcache,
                                                                     sign_out->mtu ) ) ) ) {
    FD_LOG_ERR(( "failed to construct keyguard" ));
  }

  ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->id_keyswitch_obj_id ) );
  FD_TEST( ctx->keyswitch );

  ctx->tower_in_expect_seq = 0UL;
  ctx->halt_net_frags      = 0;

  fd_histf_join( fd_histf_new( ctx->quic->metrics.service_duration, FD_MHIST_SECONDS_MIN( TXSEND, SERVICE_DURATION_SECONDS ),
                                                                    FD_MHIST_SECONDS_MAX( TXSEND, SERVICE_DURATION_SECONDS ) ) );
  fd_histf_join( fd_histf_new( ctx->quic->metrics.receive_duration, FD_MHIST_SECONDS_MIN( TXSEND, RECEIVE_DURATION_SECONDS ),
                                                                    FD_MHIST_SECONDS_MAX( TXSEND, RECEIVE_DURATION_SECONDS ) ) );

  /* Paranoia: detect drift between scratch_footprint and the appends
     above. */
  ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, scratch_align() );
  if( FD_UNLIKELY( scratch_top != (ulong)scratch + scratch_footprint( tile ) ) ) {
    FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
  }
}
695 :
/* Install this tile's generated seccomp filter (allows writes to the
   log fd only beyond the baseline policy).  Returns the instruction
   count of the installed program. */
static ulong
populate_allowed_seccomp( fd_topo_t const *      topo FD_PARAM_UNUSED,
                          fd_topo_tile_t const * tile FD_PARAM_UNUSED,
                          ulong                  out_cnt,
                          struct sock_filter *   out ) {

  populate_sock_filter_policy_fd_txsend_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
  return sock_filter_policy_fd_txsend_tile_instr_cnt;
}
705 :
706 : static ulong
707 : populate_allowed_fds( fd_topo_t const * topo FD_PARAM_UNUSED,
708 : fd_topo_tile_t const * tile FD_PARAM_UNUSED,
709 : ulong out_fds_cnt,
710 0 : int * out_fds ) {
711 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
712 :
713 0 : ulong out_cnt = 0;
714 0 : out_fds[ out_cnt++ ] = 2UL; /* stderr */
715 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
716 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
717 0 : return out_cnt;
718 0 : }
719 :
/* stem run-loop configuration.

   NOTE(review): STEM_BURST is 1, but a single tower frag can trigger
   multiple fd_stem_publish calls in one after_frag (several
   send_to_net publishes plus the txsend_out publish) — the long
   comment above after_credit suggests overrunning net/verify is
   accepted here, but confirm this against stem's burst accounting. */
#define STEM_BURST 1UL
#define STEM_LAZY  (128L*3000L)

#define STEM_CALLBACK_CONTEXT_TYPE  fd_txsend_tile_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_txsend_tile_t)

#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
#define STEM_CALLBACK_METRICS_WRITE       metrics_write
#define STEM_CALLBACK_AFTER_CREDIT        after_credit
#define STEM_CALLBACK_BEFORE_FRAG         before_frag
#define STEM_CALLBACK_DURING_FRAG         during_frag
#define STEM_CALLBACK_AFTER_FRAG          after_frag

#include "../../disco/stem/fd_stem.c"
734 :
/* Tile descriptor registered with the topology runner. */
fd_topo_run_tile_t fd_tile_txsend = {
  .name                     = "txsend",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};
|