LCOV - code coverage report
Current view: top level - disco/shred - fd_shred_tile.c (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 831 0.0 %
Date: 2026-03-19 18:19:27 Functions: 0 16 0.0 %

          Line data    Source code
       1             : #include "../tiles.h"
       2             : 
       3             : #include "generated/fd_shred_tile_seccomp.h"
       4             : #include "../../util/pod/fd_pod_format.h"
       5             : #include "fd_shredder.h"
       6             : #include "fd_shred_batch.h"
       7             : #include "fd_shred_dest.h"
       8             : #include "fd_fec_resolver.h"
       9             : #include "fd_stake_ci.h"
      10             : #include "fd_rnonce_ss.h"
      11             : #include "../store/fd_store.h"
      12             : #include "../keyguard/fd_keyload.h"
      13             : #include "../keyguard/fd_keyguard.h"
      14             : #include "../keyguard/fd_keyswitch.h"
      15             : #include "../fd_disco.h"
      16             : #include "../net/fd_net_tile.h"
      17             : #include "../../flamenco/leaders/fd_leaders.h"
      18             : #include "../../util/net/fd_net_headers.h"
      19             : #include "../../flamenco/gossip/fd_gossip_message.h"
      20             : #include "../../flamenco/types/fd_types.h"
      21             : #include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
      22             : #include "../../discof/tower/fd_tower_slot_rooted.h"
      23             : 
      24             : /* The shred tile handles shreds from two data sources: shreds generated
      25             :    from microblocks from the leader pipeline, and shreds retransmitted
      26             :    from the network.
      27             : 
      28             :    They have rather different semantics, but at the end of the day, they
      29             :    both result in a bunch of shreds and FEC sets that need to be sent to
      30             :    the blockstore and on the network, which is why one tile handles
      31             :    both.
      32             : 
      33             :    We segment the memory for the two types of shreds into two halves of
      34             :    a dcache because they follow somewhat different flow control
      35             :    patterns. For flow control, the normal guarantee we want to provide
      36             :    is that the dcache entry is not overwritten unless the mcache entry
      37             :    has also been overwritten.  The normal way to do this when using both
      38             :    cyclically and with a 1-to-1 mapping is to make the dcache at least
      39             :    `burst` entries bigger than the mcache.
      40             : 
      41             :    In this tile, we use one output mcache (of depth d) with one output
      42             :    dcache (which is logically partitioned into two) for the two sources
      43             :    of data.  The worst case for flow control is when we're only sending
      44             :    with one of the dcache partitions at a time though, so we can
      45             :    consider them separately.
      46             : 
      47             :    Leader pipeline: Every entry triggers s FEC sets to be created, where
      48             :    s is in [0, FD_SHRED_BATCH_FEC_SETS_MAX].  Each FEC set corresponds
      49             :    to 1 dcache entry and 1 mcache entry.  This means we can have d FEC
      50             :    sets exposed while producing FD_SHRED_BATCH_FEC_SETS_MAX more FEC
      51             :    sets, so the leader pipeline section of the dcache needs at least
      52             :    d+FD_SHRED_BATCH_FEC_SETS_MAX entries.
      53             : 
      54             :    From the network: The FEC resolver doesn't use a cyclic order, but it
      55             :    does promise that once it returns an FEC set, it will return at least
      56             :    complete_depth FEC sets before returning it again.  This means we
      57             :    want at most complete_depth-1 FEC sets exposed, so
      58             :    complete_depth=d+1 FEC sets.  The FEC resolver has the
      59             :    ability to keep individual shreds for partial_depth calls, but
      60             :    because in this version of the shred tile, we send each shred to all
      61             :    its destinations as soon as we get it, we don't need that
      62             :    functionality, so we set partial_depth=1.
      63             : 
      64             :    Adding these up and plugging in the current value of
      65             :    BATCH_FEC_SETS_MAX, we get 2*d+6+fec_resolver_depth FEC sets.  The
      66             :    topology code doesn't allow specifying mcache depth and dcache depth
      67             :    independently.  That means we have to lie about the MTU and burst.
      68             :    We say the MTU is double what it actually is, and then the burst is
      69             :    4+fec_resolver_depth/2.  That means we get
      70             :    2*d+2*(4+fec_resolver_depth/2) >= 2*d+6+fec_resolver_depth FEC sets.
      71             : 
      72             :    A note on parallelization.  From the network, shreds are distributed
      73             :    to tiles based on a validator-specific seeded hash of (slot, FEC set
      74             :    index) so all the shreds for a given FEC set (and any equivocating
      75             :    FEC set) are processed by the same tile.  From the leader pipeline,
      76             :    the original implementation used to parallelize by batch of
      77             :    microblocks (so within a block, batches were distributed to different
      78             :    tiles).  To support chained merkle shreds, the current implementation
      79             :    processes all the batches on tile 0 -- this should be a temporary
      80             :    state while Solana moves to a newer shred format that support better
      81             :    parallelization. */
      82             : 
#define FD_SHRED_TILE_SCRATCH_ALIGN 128UL

/* Input link kind tags.  One of these is recorded per input link in
   ctx->in_kind[] so the frag callbacks can dispatch on what kind of
   producer feeds each link. */
#define IN_KIND_CONTACT ( 0UL)
#define IN_KIND_EPOCH   ( 1UL) /* Firedancer */
#define IN_KIND_STAKE   ( 2UL) /* Frankendancer */
#define IN_KIND_POH     ( 3UL)
#define IN_KIND_NET     ( 4UL)
#define IN_KIND_SIGN    ( 5UL)
#define IN_KIND_REPAIR  ( 6UL)
#define IN_KIND_IPECHO  ( 7UL)
#define IN_KIND_GOSSIP  ( 8UL)
#define IN_KIND_ROOTED  ( 9UL)
#define IN_KIND_ROOTEDH (10UL)

/* Fixed indices of the net and sign output links.  NOTE(review):
   assumed to match the order the topology wires this tile's outputs;
   confirm against the topo setup code. */
#define NET_OUT_IDX     1
#define SIGN_OUT_IDX    2

FD_STATIC_ASSERT( sizeof(fd_entry_batch_meta_t)==56UL,      poh_shred_mtu   );
FD_STATIC_ASSERT( sizeof(fd_fec_set_t)==FD_SHRED_STORE_MTU, shred_store_mtu );

/* Extra result buckets beyond the FEC resolver's own return values;
   used to size the shred_processing_result metrics array in
   fd_shred_ctx_t. */
#define FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT 2

/* Number of entries in the block_ids table. Each entry is 32 byte.
   This table is used to keep track of block ids that we create
   when we're leader, so that we can access them whenever we need
   a *parent* block id for a new block. Larger table allows to
   retrieve older parent block ids. Currently it's set for worst
   case parent offset of USHORT_MAX (max allowed in a shred),
   making the total table 2MiB.
   See also comment on chained_merkle_root. */
#define BLOCK_IDS_TABLE_CNT USHORT_MAX

/* See note on parallelization above. Currently we process all batches in tile 0. */
#if 1
#define SHOULD_PROCESS_THESE_SHREDS ( ctx->round_robin_id==0 )
#else
#define SHOULD_PROCESS_THESE_SHREDS ( ctx->batch_cnt%ctx->round_robin_cnt==ctx->round_robin_id )
#endif

/* The behavior of the shred tile is slightly different for
   Frankendancer vs Firedancer.  For example, Frankendancer produces
   chained merkle shreds, while Firedancer doesn't yet.  We can check
   at runtime the difference by inspecting the topology. The simplest
   way is to test if ctx->store is initialized.

   FIXME don't assume only frank vs. fire */
#define IS_FIREDANCER ( ctx->store!=NULL )
     130             : 
/* Per-input-link context.  Non-net links use the (mem, chunk0, wmark)
   dcache bounds to translate and validate incoming chunks; net links
   presumably use the overlaid fd_net_rx_bounds_t view instead
   (NOTE(review): net-path usage is not visible in this chunk --
   confirm).  The two are overlaid in a union since any given link is
   only ever one kind. */
typedef union {
  struct {
    fd_wksp_t * mem;
    ulong       chunk0;
    ulong       wmark;
  };
  fd_net_rx_bounds_t net_rx;
} fd_shred_in_ctx_t;
     139             : 
/* Shred tile context: all per-tile state, including the shredding and
   FEC reassembly machinery, key material, stake/contact information,
   per-input-link state, output link chunk cursors, metrics, and the
   pending leader batch being accumulated from PoH. */
typedef struct {
  fd_shredder_t      * shredder;        /* produces shreds from leader-pipeline batches */
  fd_fec_resolver_t  * resolver;        /* reassembles FEC sets from network shreds */
  fd_pubkey_t          identity_key[1]; /* Just the public key */

  ulong                round_robin_id;  /* this tile's index among the shred tiles */
  ulong                round_robin_cnt; /* total number of shred tiles */
  /* Number of batches shredded from PoH during the current slot.
     This should be the same for all the shred tiles. */
  ulong                batch_cnt;
  /* Slot of the most recent microblock we've seen from PoH,
     or 0 if we haven't seen one yet */
  ulong                slot;

  fd_rnonce_ss_t       repair_nonce_ss[1];

  fd_keyswitch_t *     keyswitch;       /* identity hot-swap handshake, see during_housekeeping */
  fd_keyguard_client_t keyguard_client[1];

  fd_fec_set_t       * fec_sets;

  fd_stake_ci_t      * stake_ci;
  /* These are used in between during_frag and after_frag */
  fd_shred_dest_weighted_t * new_dest_ptr;
  ulong                      new_dest_cnt;
  ulong                      shredded_txn_cnt;
  ulong                      new_root;

  /* Next sequence number expected from the PoH input; advanced in
     before_frag and compared against keyswitch->param when flushing
     in-flight shreds before an identity switch. */
  ulong poh_in_expect_seq;

  /* NOTE(review): presumably the IP ID counter for outgoing shred
     packets -- confirm against the packet-send path. */
  ushort net_id;

  /* Set to 0 at the top of during_frag; NOTE(review): presumably set
     nonzero to tell after_frag to ignore the frag -- confirm. */
  int skip_frag;

  /* Additional (configured) destinations appended when leader /
     retransmitting. */
  ulong                    adtl_dests_leader_cnt;
  fd_shred_dest_weighted_t adtl_dests_leader    [ FD_TOPO_ADTL_DESTS_MAX ];
  ulong                    adtl_dests_retransmit_cnt;
  fd_shred_dest_weighted_t adtl_dests_retransmit[ FD_TOPO_ADTL_DESTS_MAX ];

  /* Pre-built IP4/UDP header templates for outgoing data and parity
     shred packets. */
  fd_ip4_udp_hdrs_t data_shred_net_hdr  [1];
  fd_ip4_udp_hdrs_t parity_shred_net_hdr[1];

  ulong shredder_fec_set_idx;     /* In [0, shredder_max_fec_set_idx) */
  ulong shredder_max_fec_set_idx; /* exclusive */

  uchar shredder_merkle_root[32];

  ulong send_fec_set_idx[ FD_SHRED_BATCH_FEC_SETS_MAX ];
  ulong send_fec_set_cnt;
  ulong tsorig;  /* timestamp of the last packet in compressed form */

  /* Includes Ethernet, IP, UDP headers */
  ulong shred_buffer_sz;
  uchar shred_buffer[ FD_NET_MTU ];

  /* resolver_seed gets generated in privileged_init but used in
     unprivileged_init, so we store it here in between. */
  ulong resolver_seed;

  /* Per-input-link state and link-kind tags (IN_KIND_*), indexed by
     in_idx. */
  fd_shred_in_ctx_t in[ 32 ];
  int               in_kind[ 32 ];

  /* Output link chunk cursors: workspace, chunk bounds, and current
     chunk position for each output. */
  fd_wksp_t * net_out_mem;
  ulong       net_out_chunk0;
  ulong       net_out_wmark;
  ulong       net_out_chunk;

  ulong       store_out_idx;
  fd_wksp_t * store_out_mem;
  ulong       store_out_chunk0;
  ulong       store_out_wmark;
  ulong       store_out_chunk;

  /* This is the output link for shreds that is currently consumed by
     the repair and replay tile. */
  ulong       shred_out_idx;
  fd_wksp_t * shred_out_mem;
  ulong       shred_out_chunk0;
  ulong       shred_out_wmark;
  ulong       shred_out_chunk;

  /* Non-NULL only on Firedancer; see IS_FIREDANCER. */
  fd_store_t * store;

  /* Staging buffer for gossip update messages copied in during_frag. */
  fd_gossip_update_message_t gossip_upd_buf[1];

  /* Locally accumulated counters/histograms, published by
     metrics_write. */
  struct {
    fd_histf_t contact_info_cnt[ 1 ];
    fd_histf_t batch_sz[ 1 ];
    fd_histf_t batch_microblock_cnt[ 1 ];
    fd_histf_t shredding_timing[ 1 ];
    fd_histf_t add_shred_timing[ 1 ];
    ulong shred_processing_result[ FD_FEC_RESOLVER_ADD_SHRED_RETVAL_CNT+FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT ];
    ulong invalid_block_id_cnt;
    ulong shred_rejected_unchained_cnt;
    ulong repair_rcv_cnt;
    ulong repair_rcv_bytes;
    ulong turbine_rcv_cnt;
    ulong turbine_rcv_bytes;
    ulong bad_nonce;
  } metrics[ 1 ];

  /* Raw entry batch being accumulated from PoH before shredding. */
  struct {
    ulong txn_cnt;
    ulong pos; /* in payload, range [0, FD_SHRED_BATCH_RAW_BUF_SZ-8UL) */
    ulong slot; /* set to 0 when pos==0 */
    union {
      struct {
        ulong microblock_cnt;
        uchar payload[ FD_SHRED_BATCH_RAW_BUF_SZ - 8UL ];
      };
      uchar raw[ FD_SHRED_BATCH_RAW_BUF_SZ ];
    };
  } pending_batch;

  fd_epoch_schedule_t            epoch_schedule[1];
  /* Feature activation slots, pre-converted to first-effective-slot
     form; see fd_shred_get_feature_activation_slot0. */
  fd_shred_features_activation_t features_activation[1];
  /* too large to be left in the stack */
  fd_shred_dest_idx_t scratchpad_dests[ FD_SHRED_DEST_MAX_FANOUT*(FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX) ];

  uchar * chained_merkle_root;
  fd_bmtree_node_t out_merkle_roots[ FD_SHRED_BATCH_FEC_SETS_MAX ];
  /* Ring of block ids produced while leader, indexed by slot; see
     comment on BLOCK_IDS_TABLE_CNT. */
  uchar block_ids[ BLOCK_IDS_TABLE_CNT ][ FD_SHRED_MERKLE_ROOT_SZ ];
} fd_shred_ctx_t;
     263             : 
     264             : /* shred features are generally considered active at the epoch *following*
     265             :    the epoch in which the feature gate is activated.
     266             : 
     267             :    As an optimization, when the activation slot is received, it is converted
     268             :    into the first slot of the subsequent epoch.  This allows for a more
     269             :    efficient check (shred_slot >= feature_slot) and avoids the overhead of
     270             :    repeatedly converting slots into epochs for comparison.
     271             : 
     272             :    This function is only for Firedancer, while Frankendancer already receives
     273             :    the final activation slot from POH tile.
     274             : 
     275             :    In Agave, this is done with check_feature_activation():
     276             :    https://github.com/anza-xyz/agave/blob/v3.1.4/turbine/src/cluster_nodes.rs#L771
     277             :    https://github.com/anza-xyz/agave/blob/v3.1.4/core/src/shred_fetch_stage.rs#L456 */
     278             : static inline ulong
     279           0 : fd_shred_get_feature_activation_slot0( ulong feature_slot, fd_shred_ctx_t * ctx ) {
     280             :   /* if the feature does not have an activation slot yet, return ULONG_MAX */
     281           0 :   if( FD_UNLIKELY( feature_slot==ULONG_MAX ) ) {
     282           0 :     return ULONG_MAX;
     283           0 :   }
     284             :   /* if we don't have an epoch schedule yet, return ULONG_MAX */
     285           0 :   if( FD_UNLIKELY( ctx->epoch_schedule->slots_per_epoch==0 ) ) {
     286           0 :     return ULONG_MAX;
     287           0 :   }
     288             :   /* compute the activation epoch, add one, return the first slot. */
     289           0 :   ulong feature_epoch = 1 + fd_slot_to_epoch( ctx->epoch_schedule, feature_slot, NULL );
     290           0 :   return fd_epoch_slot0( ctx->epoch_schedule, feature_epoch );
     291           0 : }
     292             : 
     293             : FD_FN_CONST static inline ulong
     294           0 : scratch_align( void ) {
     295           0 :   return 128UL;
     296           0 : }
     297             : 
     298             : FD_FN_PURE static inline ulong
     299           0 : scratch_footprint( fd_topo_tile_t const * tile ) {
     300             : 
     301           0 :   ulong fec_resolver_footprint = fd_fec_resolver_footprint( tile->shred.fec_resolver_depth, 1UL, tile->shred.depth+1UL,
     302           0 :                                                             128UL * tile->shred.fec_resolver_depth );
     303           0 :   ulong l = FD_LAYOUT_INIT;
     304           0 :   l = FD_LAYOUT_APPEND( l, alignof(fd_shred_ctx_t),          sizeof(fd_shred_ctx_t)                  );
     305           0 :   l = FD_LAYOUT_APPEND( l, fd_stake_ci_align(),              fd_stake_ci_footprint()                 );
     306           0 :   l = FD_LAYOUT_APPEND( l, fd_fec_resolver_align(),          fec_resolver_footprint                  );
     307           0 :   l = FD_LAYOUT_APPEND( l, fd_shredder_align(),              fd_shredder_footprint()                 );
     308           0 :   return FD_LAYOUT_FINI( l, scratch_align() );
     309           0 : }
     310             : 
     311             : static inline void
     312           0 : during_housekeeping( fd_shred_ctx_t * ctx ) {
     313           0 :   if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
     314           0 :     ulong seq_must_complete = ctx->keyswitch->param;
     315             : 
     316           0 :     if( FD_UNLIKELY( fd_seq_lt( ctx->poh_in_expect_seq, seq_must_complete ) ) ) {
     317             :       /* See fd_keyswitch.h, we need to flush any in-flight shreds from
     318             :          the leader pipeline before switching key. */
     319           0 :       FD_LOG_WARNING(( "Flushing in-flight unpublished shreds, must reach seq %lu, currently at %lu ...", seq_must_complete, ctx->poh_in_expect_seq ));
     320           0 :       return;
     321           0 :     }
     322             : 
     323           0 :     memcpy( ctx->identity_key->uc, ctx->keyswitch->bytes, 32UL );
     324           0 :     fd_stake_ci_set_identity( ctx->stake_ci, ctx->identity_key );
     325           0 :     fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
     326           0 :   }
     327           0 : }
     328             : 
/* metrics_write: copy this tile's locally-accumulated histograms and
   counters (ctx->metrics) into the shared metrics region. */
static inline void
metrics_write( fd_shred_ctx_t * ctx ) {
  FD_MHIST_COPY( SHRED, CLUSTER_CONTACT_INFO_CNT,   ctx->metrics->contact_info_cnt             );
  FD_MHIST_COPY( SHRED, BATCH_SZ,                   ctx->metrics->batch_sz                     );
  FD_MHIST_COPY( SHRED, BATCH_MICROBLOCK_CNT,       ctx->metrics->batch_microblock_cnt         );
  FD_MHIST_COPY( SHRED, SHREDDING_DURATION_SECONDS, ctx->metrics->shredding_timing             );
  FD_MHIST_COPY( SHRED, ADD_SHRED_DURATION_SECONDS, ctx->metrics->add_shred_timing             );
  FD_MCNT_SET  ( SHRED, SHRED_REPAIR_RCV,           ctx->metrics->repair_rcv_cnt               );
  FD_MCNT_SET  ( SHRED, SHRED_REPAIR_RCV_BYTES,     ctx->metrics->repair_rcv_bytes             );
  FD_MCNT_SET  ( SHRED, SHRED_TURBINE_RCV,          ctx->metrics->turbine_rcv_cnt              );
  FD_MCNT_SET  ( SHRED, SHRED_TURBINE_RCV_BYTES,    ctx->metrics->turbine_rcv_bytes            );
  FD_MCNT_SET  ( SHRED, BAD_NONCE,                  ctx->metrics->bad_nonce                    );

  FD_MCNT_SET  ( SHRED, INVALID_BLOCK_ID,           ctx->metrics->invalid_block_id_cnt         );
  FD_MCNT_SET  ( SHRED, SHRED_REJECTED_UNCHAINED,   ctx->metrics->shred_rejected_unchained_cnt );

  FD_MCNT_ENUM_COPY( SHRED, SHRED_PROCESSED, ctx->metrics->shred_processing_result             );
}
     347             : 
     348             : static inline void
     349             : handle_new_cluster_contact_info( fd_shred_ctx_t * ctx,
     350           0 :                                  uchar const    * buf ) {
     351           0 :   ulong const * header = (ulong const *)fd_type_pun_const( buf );
     352             : 
     353           0 :   ulong dest_cnt = header[ 0 ];
     354           0 :   fd_histf_sample( ctx->metrics->contact_info_cnt, dest_cnt );
     355             : 
     356           0 :   if( dest_cnt >= MAX_SHRED_DESTS )
     357           0 :     FD_LOG_ERR(( "Cluster nodes had %lu destinations, which was more than the max of %lu", dest_cnt, MAX_SHRED_DESTS ));
     358             : 
     359           0 :   fd_shred_dest_wire_t const * in_dests = fd_type_pun_const( header+1UL );
     360           0 :   fd_shred_dest_weighted_t * dests = fd_stake_ci_dest_add_init( ctx->stake_ci );
     361             : 
     362           0 :   ctx->new_dest_ptr = dests;
     363           0 :   ctx->new_dest_cnt = dest_cnt;
     364             : 
     365           0 :   for( ulong i=0UL; i<dest_cnt; i++ ) {
     366           0 :     memcpy( dests[i].pubkey.uc, in_dests[i].pubkey, 32UL );
     367           0 :     dests[i].ip4  = in_dests[i].ip4_addr;
     368           0 :     dests[i].port = in_dests[i].udp_port;
     369           0 :   }
     370           0 : }
     371             : 
/* finalize_new_cluster_contact_info: commit the destination list staged
   by handle_new_cluster_contact_info into the stake contact info. */
static inline void
finalize_new_cluster_contact_info( fd_shred_ctx_t * ctx ) {
  fd_stake_ci_dest_add_fini( ctx->stake_ci, ctx->new_dest_cnt );
}
     376             : 
     377             : static inline int
     378             : before_frag( fd_shred_ctx_t * ctx,
     379             :              ulong            in_idx,
     380             :              ulong            seq,
     381           0 :              ulong            sig ) {
     382           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_IPECHO ) ) {
     383           0 :     FD_TEST( sig!=0UL && sig<=USHORT_MAX );
     384           0 :     fd_shredder_set_shred_version    ( ctx->shredder, (ushort)sig );
     385           0 :     fd_fec_resolver_set_shred_version( ctx->resolver, (ushort)sig );
     386           0 :     return 1;
     387           0 :   }
     388             : 
     389           0 :   if( FD_UNLIKELY( !ctx->shredder->shred_version ) ) return -1;
     390             : 
     391           0 :   if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_POH ) ) {
     392           0 :     ctx->poh_in_expect_seq = seq+1UL;
     393           0 :     return (int)(fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_MICROBLOCK) & (int)(fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_FEAT_ACT_SLOT);
     394           0 :   }
     395           0 :   if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
     396           0 :     return (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_SHRED) & (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_REPAIR);
     397           0 :   }
     398           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ){
     399           0 :     return sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO &&
     400           0 :            sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE;
     401           0 :   }
     402           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTEDH ) ) {
     403           0 :     return sig!=0UL; /* only care about rooted banks, not completed blockhash */
     404           0 :   }
     405           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTED ) ) {
     406           0 :     return sig!=FD_TOWER_SIG_SLOT_ROOTED; /* only care about slot_confirmed messages */
     407           0 :   }
     408           0 :   return 0;
     409           0 : }
     410             : 
     411             : static void
     412             : during_frag( fd_shred_ctx_t * ctx,
     413             :              ulong            in_idx,
     414             :              ulong            seq FD_PARAM_UNUSED,
     415             :              ulong            sig,
     416             :              ulong            chunk,
     417             :              ulong            sz,
     418           0 :              ulong            ctl ) {
     419             : 
     420           0 :   ctx->skip_frag = 0;
     421             : 
     422           0 :   ctx->tsorig = fd_frag_meta_ts_comp( fd_tickcount() );
     423             : 
     424           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPAIR ) ) {
     425           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_NET_MTU ) )
     426           0 :     FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     427           0 :                 ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     428             : 
     429           0 :     uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     430           0 :     fd_memcpy( ctx->shred_buffer, dcache_entry, sz );
     431           0 :     return;
     432           0 :   }
     433             : 
     434           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_CONTACT ) ) {
     435           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
     436           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     437           0 :                    ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     438             : 
     439           0 :     uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     440           0 :     handle_new_cluster_contact_info( ctx, dcache_entry );
     441           0 :     return;
     442           0 :   }
     443             : 
     444           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
     445           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>sizeof(fd_gossip_update_message_t) ) )
     446           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     447           0 :                    ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     448           0 :     uchar const * gossip_upd_msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     449           0 :     fd_memcpy( ctx->gossip_upd_buf, gossip_upd_msg, sz );
     450           0 :     return;
     451           0 :   }
     452             : 
     453             :   /* Firedancer only */
     454           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTED ) ) {
     455           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
     456           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     457           0 :                    ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     458           0 :     fd_tower_slot_rooted_t const * rooted_msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     459           0 :     ctx->new_root = rooted_msg->slot;
     460           0 :     return;
     461           0 :   }
     462             : 
     463             :   /* Frankendancer only */
     464           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTEDH ) ) {
     465           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
     466           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     467           0 :                    ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     468             :     /* The message format is a pointer to the bank (which is in the
     469             :        agave address space, so we couldn't access it even if we wanted
     470             :        to) followed by the rooted slot. */
     471           0 :     ulong const * replay_msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     472           0 :     ctx->new_root = replay_msg[ 1 ];
     473           0 :     return;
     474           0 :   }
     475             : 
     476             :   /* Firedancer only */
     477           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
     478           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
     479           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     480           0 :                    ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     481             : 
     482           0 :     uchar const *               dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     483           0 :     fd_epoch_info_msg_t const * epoch_msg    = fd_type_pun_const( dcache_entry );
     484             : 
     485           0 :     fd_stake_ci_epoch_msg_init( ctx->stake_ci, epoch_msg );
     486             : 
     487           0 :     *ctx->epoch_schedule                                = epoch_msg->epoch_schedule;
     488           0 :     ctx->features_activation->enforce_fixed_fec_set     = fd_shred_get_feature_activation_slot0(
     489           0 :       epoch_msg->features.enforce_fixed_fec_set, ctx );
     490           0 :     ctx->features_activation->switch_to_chacha8_turbine = fd_shred_get_feature_activation_slot0(
     491           0 :       epoch_msg->features.switch_to_chacha8_turbine, ctx );
     492           0 :     ctx->features_activation->discard_unexpected_data_complete_shreds = fd_shred_get_feature_activation_slot0(
     493           0 :       epoch_msg->features.discard_unexpected_data_complete_shreds, ctx );
     494             : 
     495           0 :     fd_fec_resolver_set_discard_unexpected_data_complete_shreds( ctx->resolver,
     496           0 :       ctx->features_activation->discard_unexpected_data_complete_shreds );
     497             : 
     498           0 :     return;
     499           0 :   }
     500             : 
     501           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
     502           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
     503           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     504           0 :                    ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     505             : 
     506           0 :     uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     507           0 :     fd_stake_ci_stake_msg_init( ctx->stake_ci, fd_type_pun_const( dcache_entry ) );
     508           0 :     return;
     509           0 :   }
     510             : 
     511           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_POH ) ) {
     512           0 :     ctx->send_fec_set_cnt = 0UL;
     513             : 
     514           0 :     if( FD_UNLIKELY( (fd_disco_poh_sig_pkt_type( sig )==POH_PKT_TYPE_FEAT_ACT_SLOT) ) ) {
     515             :       /* There is a subset of FD_SHRED_FEATURES_ACTIVATION_... slots that
     516             :           the shred tile needs to be aware of.  Since this requires the
     517             :           bank, we are forced (so far) to receive them from the poh tile
     518             :           (as a POH_PKT_TYPE_FEAT_ACT_SLOT). */
     519           0 :       uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     520           0 :       if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=(sizeof(fd_shred_features_activation_t)) ) )
     521           0 :         FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     522           0 :               ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     523             : 
     524           0 :       fd_shred_features_activation_t const * act_data = (fd_shred_features_activation_t const *)dcache_entry;
     525           0 :       memcpy( ctx->features_activation, act_data, sizeof(fd_shred_features_activation_t) );
     526             : 
     527           0 :       fd_fec_resolver_set_discard_unexpected_data_complete_shreds( ctx->resolver,
     528           0 :         ctx->features_activation->discard_unexpected_data_complete_shreds );
     529           0 :     }
     530           0 :     else { /* (fd_disco_poh_sig_pkt_type( sig )==POH_PKT_TYPE_MICROBLOCK) */
     531             :       /* This is a frag from the PoH tile.  We'll copy it to our pending
     532             :         microblock batch and shred it if necessary (last in block or
     533             :         above watermark).  We just go ahead and shred it here, even
     534             :         though we may get overrun.  If we do end up getting overrun, we
     535             :         just won't send these shreds out and we'll reuse the FEC set for
     536             :         the next one.  From a higher level though, if we do get overrun,
     537             :         a bunch of shreds will never be transmitted, and we'll end up
     538             :         producing a block that never lands on chain. */
     539             : 
     540           0 :       uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
     541           0 :       if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_POH_SHRED_MTU ||
     542           0 :           sz<(sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t)) ) )
     543           0 :         FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
     544           0 :               ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
     545             : 
     546           0 :       fd_entry_batch_meta_t const * entry_meta = (fd_entry_batch_meta_t const *)dcache_entry;
     547           0 :       uchar const *                 entry      = dcache_entry + sizeof(fd_entry_batch_meta_t);
     548           0 :       ulong                         entry_sz   = sz           - sizeof(fd_entry_batch_meta_t);
     549             : 
     550           0 :       fd_entry_batch_header_t const * microblock = (fd_entry_batch_header_t const *)entry;
     551             : 
     552             :       /* It should never be possible for this to fail, but we check it
     553             :         anyway. */
     554           0 :       FD_TEST( entry_sz + ctx->pending_batch.pos <= sizeof(ctx->pending_batch.payload) );
     555             : 
     556           0 :       ulong target_slot = fd_disco_poh_sig_slot( sig );
     557           0 :       if( FD_UNLIKELY( (ctx->pending_batch.microblock_cnt>0) & (ctx->pending_batch.slot!=target_slot) ) ) {
     558             :         /* TODO: The Agave client sends a dummy entry batch with only 1
     559             :           byte and the block-complete bit set.  This helps other
     560             :           validators know that the block is dead and they should not try
     561             :           to continue building a fork on it.  We probably want a similar
     562             :           approach eventually. */
     563           0 :         FD_LOG_WARNING(( "Abandoning %lu microblocks for slot %lu and switching to slot %lu",
     564           0 :               ctx->pending_batch.microblock_cnt, ctx->pending_batch.slot, target_slot ));
     565           0 :         ctx->pending_batch.slot           = 0UL;
     566           0 :         ctx->pending_batch.pos            = 0UL;
     567           0 :         ctx->pending_batch.microblock_cnt = 0UL;
     568           0 :         ctx->pending_batch.txn_cnt        = 0UL;
     569           0 :         ctx->batch_cnt                    = 0UL;
     570             : 
     571           0 :         FD_MCNT_INC( SHRED, MICROBLOCKS_ABANDONED, 1UL );
     572           0 :       }
     573             : 
     574           0 :       ctx->pending_batch.slot = target_slot;
     575           0 :       if( FD_UNLIKELY( target_slot!=ctx->slot )) {
     576             :         /* Reset batch count if we are in a new slot */
     577           0 :         ctx->batch_cnt = 0UL;
     578           0 :         ctx->slot      = target_slot;
     579             : 
     580             :         /* At the beginning of a new slot, prepare chained_merkle_root.
     581             :            chained_merkle_root is initialized at the block_id of the parent
     582             :            block, there's two cases:
     583             : 
     584             :            1. block_id is passed in by the poh tile:
     585             :               - it's always passed when parent block had a different leader
     586             :               - it may be passed when we were leader for parent block (there
     587             :                 are race conditions when it's not passed)
     588             : 
     589             :            2. block_id is taken from block_ids table if we were the leader
     590             :               for the parent block (when we were NOT the leader, because of
     591             :               equivocation, we can't store block_id in the table)
     592             : 
     593             :            chained_merkle_root is stored in block_ids table at target_slot
     594             :            and it's progressively updated as more microblocks are received.
     595             :            As a result, when we move to a new slot, the block_ids table at
     596             :            the old slot will contain the block_id.
     597             : 
     598             :            The block_ids table is designed to protect against the race condition
     599             :            case in 1., therefore the table may not be set in some cases, e.g. if
     600             :            a validator (re)starts, but in those cases we don't expect the race
     601             :            condition to apply. */
     602           0 :         ctx->chained_merkle_root = ctx->block_ids[ target_slot % BLOCK_IDS_TABLE_CNT ];
     603           0 :         if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
     604           0 :           if( FD_LIKELY( entry_meta->parent_block_id_valid ) ) {
     605             :             /* 1. Initialize chained_merkle_root sent from poh tile */
     606           0 :             memcpy( ctx->chained_merkle_root, entry_meta->parent_block_id, FD_SHRED_MERKLE_ROOT_SZ );
     607           0 :           } else {
     608           0 :             ulong parent_slot = target_slot - entry_meta->parent_offset;
     609           0 :             fd_epoch_leaders_t const * lsched = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, parent_slot );
     610           0 :             fd_pubkey_t const * slot_leader = fd_epoch_leaders_get( lsched, parent_slot );
     611             : 
     612           0 :             if( lsched && slot_leader && fd_memeq( slot_leader, ctx->identity_key, sizeof(fd_pubkey_t) ) ) {
     613             :               /* 2. Initialize chained_merkle_root from block_ids table, if we were the leader */
     614           0 :               memcpy( ctx->chained_merkle_root, ctx->block_ids[ parent_slot % BLOCK_IDS_TABLE_CNT ], FD_SHRED_MERKLE_ROOT_SZ );
     615           0 :             } else {
     616             :               /* This should never happen, log a metric and set chained_merkle_root to 0 */
     617           0 :               ctx->metrics->invalid_block_id_cnt++;
     618           0 :               memset( ctx->chained_merkle_root, 0, FD_SHRED_MERKLE_ROOT_SZ );
     619           0 :             }
     620           0 :           }
     621           0 :         }
     622           0 :       }
     623             : 
     624           0 :       if( FD_LIKELY( !SHOULD_PROCESS_THESE_SHREDS ) ) {
     625             :         /* If we are not processing this batch, filter in after_frag. */
     626           0 :         ctx->skip_frag = 1;
     627           0 :       }
     628             : 
     629           0 :       ulong   pending_batch_wmark = FD_SHRED_BATCH_WMARK_CHAINED;
     630           0 :       uchar * chained_merkle_root = ctx->chained_merkle_root;
     631           0 :       ulong   load_for_32_shreds  = FD_SHREDDER_CHAINED_FEC_SET_PAYLOAD_SZ;
     632             :       /* All fec sets in the last batch of a block need to be resigned.
     633             :          This needs to match Agave's behavior - as a reference, see:
     634             :          https://github.com/anza-xyz/agave/blob/v2.3/ledger/src/shred/merkle.rs#L1040 */
     635           0 :       if( FD_UNLIKELY( entry_meta->block_complete ) ) {
     636           0 :         pending_batch_wmark = FD_SHRED_BATCH_WMARK_RESIGNED;
     637             :         /* chained_merkle_root also applies to resigned FEC sets. */
     638           0 :         load_for_32_shreds = FD_SHREDDER_RESIGNED_FEC_SET_PAYLOAD_SZ;
     639           0 :       }
     640             : 
     641             :       /* If this microblock completes the block, the batch is then
     642             :          finalized here.  Otherwise, we check whether the new entry
     643             :          would exceed the pending_batch_wmark.  If true, then the
     644             :          batch is closed now, shredded, and a new batch is started
     645             :          with the incoming microblock.  If false, no shredding takes
     646             :          place, and the microblock is added to the current batch. */
     647           0 :       int batch_would_exceed_wmark = ( ctx->pending_batch.pos + entry_sz ) > pending_batch_wmark;
     648           0 :       int include_in_current_batch = entry_meta->block_complete | ( !batch_would_exceed_wmark );
     649           0 :       int process_current_batch    = entry_meta->block_complete | batch_would_exceed_wmark;
     650           0 :       int init_new_batch           = !include_in_current_batch;
     651             : 
     652           0 :       if( FD_LIKELY( include_in_current_batch ) ) {
     653           0 :         if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
     654             :           /* Ugh, yet another memcpy */
     655           0 :           fd_memcpy( ctx->pending_batch.payload + ctx->pending_batch.pos, entry, entry_sz );
     656           0 :         }
     657           0 :         ctx->pending_batch.pos            += entry_sz;
     658           0 :         ctx->pending_batch.microblock_cnt += 1UL;
     659           0 :         ctx->pending_batch.txn_cnt        += microblock->txn_cnt;
     660           0 :       }
     661             : 
     662           0 :       if( FD_LIKELY( process_current_batch )) {
     663             :         /* Batch and padding size calculation. */
     664           0 :         ulong batch_sz        = sizeof(ulong) + ctx->pending_batch.pos; /* without padding */
     665           0 :         ulong batch_sz_padded = load_for_32_shreds * ( ( batch_sz + load_for_32_shreds - 1UL ) / load_for_32_shreds );
     666           0 :         ulong padding_sz      = batch_sz_padded - batch_sz;
     667             : 
     668           0 :         if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
     669             :           /* If it's our turn, shred this batch. FD_UNLIKELY because shred
     670             :              tile cnt generally >= 2 */
     671             : 
     672           0 :           long shredding_timing = -fd_tickcount();
     673             : 
     674           0 :           fd_memset( ctx->pending_batch.payload + ctx->pending_batch.pos, 0, padding_sz );
     675             : 
     676           0 :           ctx->send_fec_set_cnt = 0UL; /* verbose */
     677           0 :           ctx->shredded_txn_cnt = ctx->pending_batch.txn_cnt;
     678             : 
     679           0 :           fd_shredder_init_batch( ctx->shredder, ctx->pending_batch.raw, batch_sz_padded, target_slot, entry_meta );
     680             : 
     681           0 :           ulong pend_sz  = batch_sz_padded;
     682           0 :           ulong pend_idx = 0;
     683           0 :           while( pend_sz > 0UL ) {
     684             : 
     685           0 :             fd_fec_set_t * out = ctx->fec_sets + ctx->shredder_fec_set_idx;
     686             : 
     687           0 :             FD_TEST( fd_shredder_next_fec_set( ctx->shredder, out, chained_merkle_root ) );
     688           0 :             memcpy( ctx->out_merkle_roots[pend_idx].hash, chained_merkle_root, 32UL );
     689             : 
     690           0 :             out->data_shred_rcvd   = 0U;
     691           0 :             out->parity_shred_rcvd = 0U;
     692             : 
     693           0 :             ctx->send_fec_set_idx[ ctx->send_fec_set_cnt ] = ctx->shredder_fec_set_idx;
     694           0 :             ctx->send_fec_set_cnt += 1UL;
     695           0 :             ctx->shredder_fec_set_idx = (ctx->shredder_fec_set_idx+1UL)%ctx->shredder_max_fec_set_idx;
     696             : 
     697           0 :             pend_sz -= load_for_32_shreds;
     698           0 :             pend_idx++;
     699           0 :           }
     700             : 
     701           0 :           fd_shredder_fini_batch( ctx->shredder );
     702           0 :           shredding_timing += fd_tickcount();
     703             : 
     704             :           /* Update metrics */
     705           0 :           fd_histf_sample( ctx->metrics->batch_sz,             batch_sz /* without padding */    );
     706           0 :           fd_histf_sample( ctx->metrics->batch_microblock_cnt, ctx->pending_batch.microblock_cnt );
     707           0 :           fd_histf_sample( ctx->metrics->shredding_timing,     (ulong)shredding_timing           );
     708           0 :         } else {
     709           0 :           ctx->send_fec_set_cnt = 0UL; /* verbose */
     710             : 
     711           0 :           fd_shredder_skip_batch( ctx->shredder, batch_sz_padded, target_slot, entry_meta->block_complete );
     712           0 :         }
     713             : 
     714           0 :         ctx->pending_batch.slot           = 0UL;
     715           0 :         ctx->pending_batch.pos            = 0UL;
     716           0 :         ctx->pending_batch.microblock_cnt = 0UL;
     717           0 :         ctx->pending_batch.txn_cnt        = 0UL;
     718           0 :         ctx->batch_cnt++;
     719           0 :       }
     720             : 
     721           0 :       if( FD_UNLIKELY( init_new_batch ) ) {
     722             :         /* TODO: this assumes that SHOULD_PROCESS_THESE_SHREDS is
     723             :            constant across batches.  Otherwise, the condition may
     724             :            need to be removed (or adjusted). */
     725           0 :         if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
     726             :           /* Ugh, yet another memcpy */
     727           0 :           fd_memcpy( ctx->pending_batch.payload + 0UL /* verbose */, entry, entry_sz );
     728           0 :         }
     729           0 :         ctx->pending_batch.slot           = target_slot;
     730           0 :         ctx->pending_batch.pos            = entry_sz;
     731           0 :         ctx->pending_batch.microblock_cnt = 1UL;
     732           0 :         ctx->pending_batch.txn_cnt        = microblock->txn_cnt;
     733           0 :       }
     734           0 :     }
     735           0 :   } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
     736             :     /* The common case, from the net tile.  The FEC resolver API does
     737             :        not present a prepare/commit model. If we get overrun between
     738             :        when the FEC resolver verifies the signature and when it stores
     739             :        the local copy, we could end up storing and retransmitting
     740             :        garbage.  Instead we copy it locally, sadly, and only give it to
     741             :        the FEC resolver when we know it won't be overrun anymore. */
     742           0 :     uchar const * dcache_entry = fd_net_rx_translate_frag( &ctx->in[ in_idx ].net_rx, chunk, ctl, sz );
     743           0 :     ulong hdr_sz = fd_disco_netmux_sig_hdr_sz( sig );
     744           0 :     FD_TEST( hdr_sz <= sz ); /* Should be ensured by the net tile */
     745           0 :     fd_shred_t const * shred = fd_shred_parse( dcache_entry+hdr_sz, sz-hdr_sz );
     746           0 :     if( FD_UNLIKELY( !shred ) ) {
     747           0 :       ctx->skip_frag = 1;
     748           0 :       return;
     749           0 :     };
     750             : 
     751           0 :     if( FD_UNLIKELY( fd_disco_netmux_sig_proto( sig )==DST_PROTO_REPAIR ) ) {
     752           0 :       ctx->metrics->repair_rcv_cnt++;
     753           0 :       ctx->metrics->repair_rcv_bytes += sz;
     754           0 :     } else {
     755           0 :       ctx->metrics->turbine_rcv_cnt++;
     756           0 :       ctx->metrics->turbine_rcv_bytes += sz;
     757           0 :     }
     758             : 
     759             :     /* Drop unchained merkle shreds */
     760           0 :     int is_unchained = !fd_shred_is_chained( fd_shred_type( shred->variant ) );
     761           0 :     if( FD_UNLIKELY( is_unchained ) ) {
     762           0 :       ctx->metrics->shred_rejected_unchained_cnt++;
     763           0 :       ctx->skip_frag = 1;
     764           0 :       return;
     765           0 :     };
     766             : 
     767             :     /* all shreds in the same FEC set will have the same signature
     768             :        so we can round-robin shreds between the shred tiles based on
     769             :        just the signature without splitting individual FEC sets. */
     770           0 :     ulong sig = fd_ulong_load_8( shred->signature );
     771           0 :     if( FD_LIKELY( sig%ctx->round_robin_cnt!=ctx->round_robin_id ) ) {
     772           0 :       ctx->skip_frag = 1;
     773           0 :       return;
     774           0 :     }
     775           0 :     fd_memcpy( ctx->shred_buffer, dcache_entry+hdr_sz, sz-hdr_sz );
     776           0 :     ctx->shred_buffer_sz = sz-hdr_sz;
     777           0 :   }
     778           0 : }
     779             : 
     780             : static inline void
     781             : send_shred( fd_shred_ctx_t                 * ctx,
     782             :             fd_stem_context_t              * stem,
     783             :             fd_shred_t const               * shred,
     784             :             fd_shred_dest_weighted_t const * dest,
     785           0 :             ulong                            tsorig ) {
     786             : 
     787           0 :   if( FD_UNLIKELY( !dest->ip4 ) ) return;
     788             : 
     789           0 :   uchar * packet = fd_chunk_to_laddr( ctx->net_out_mem, ctx->net_out_chunk );
     790             : 
     791           0 :   int is_data = fd_shred_is_data( fd_shred_type( shred->variant ) );
     792           0 :   fd_ip4_udp_hdrs_t * hdr  = (fd_ip4_udp_hdrs_t *)packet;
     793           0 :   *hdr = *( is_data ? ctx->data_shred_net_hdr : ctx->parity_shred_net_hdr );
     794             : 
     795           0 :   fd_ip4_hdr_t * ip4 = hdr->ip4;
     796           0 :   ip4->daddr  = dest->ip4;
     797           0 :   ip4->net_id = fd_ushort_bswap( ctx->net_id++ );
     798           0 :   ip4->check  = 0U;
     799           0 :   ip4->check  = fd_ip4_hdr_check_fast( ip4 );
     800             : 
     801           0 :   hdr->udp->net_dport = fd_ushort_bswap( dest->port );
     802             : 
     803           0 :   ulong shred_sz = fd_ulong_if( is_data, FD_SHRED_MIN_SZ, FD_SHRED_MAX_SZ );
     804           0 : #if FD_HAS_AVX
     805             :   /* We're going to copy this shred potentially a bunch of times without
     806             :      reading it again, and we'd rather not thrash our cache, so we want
     807             :      to use non-temporal writes here.  We need to make sure we don't
     808             :      touch the cache line containing the network headers that we just
     809             :      wrote to though.  We know the destination is 64 byte aligned.  */
     810           0 :   FD_STATIC_ASSERT( sizeof(*hdr)<64UL, non_temporal );
     811             :   /* src[0:sizeof(hdrs)] is invalid, but now we want to copy
     812             :      dest[i]=src[i] for i>=sizeof(hdrs), so it simplifies the code. */
     813           0 :   uchar const * src = (uchar const *)((ulong)shred - sizeof(fd_ip4_udp_hdrs_t));
     814           0 :   memcpy( packet+sizeof(fd_ip4_udp_hdrs_t), src+sizeof(fd_ip4_udp_hdrs_t), 64UL-sizeof(fd_ip4_udp_hdrs_t) );
     815             : 
     816           0 :   ulong end_offset = shred_sz + sizeof(fd_ip4_udp_hdrs_t);
     817           0 :   ulong i;
     818           0 :   for( i=64UL; end_offset-i<64UL; i+=64UL ) {
     819           0 : #  if FD_HAS_AVX512
     820           0 :     _mm512_stream_si512( (void *)(packet+i     ), _mm512_loadu_si512( (void const *)(src+i     ) ) );
     821             : #  else
     822             :     _mm256_stream_si256( (void *)(packet+i     ), _mm256_loadu_si256( (void const *)(src+i     ) ) );
     823             :     _mm256_stream_si256( (void *)(packet+i+32UL), _mm256_loadu_si256( (void const *)(src+i+32UL) ) );
     824             : #  endif
     825           0 :   }
     826           0 :   _mm_sfence();
     827           0 :   fd_memcpy( packet+i, src+i, end_offset-i ); /* Copy the last partial cache line */
     828             : 
     829             : #else
     830             :   fd_memcpy( packet+sizeof(fd_ip4_udp_hdrs_t), shred, shred_sz );
     831             : #endif
     832             : 
     833           0 :   ulong pkt_sz = shred_sz + sizeof(fd_ip4_udp_hdrs_t);
     834           0 :   ulong tspub  = fd_frag_meta_ts_comp( fd_tickcount() );
     835           0 :   ulong sig    = fd_disco_netmux_sig( dest->ip4, dest->port, dest->ip4, DST_PROTO_OUTGOING, sizeof(fd_ip4_udp_hdrs_t) );
     836           0 :   ulong const chunk = ctx->net_out_chunk;
     837           0 :   fd_stem_publish( stem, NET_OUT_IDX, sig, chunk, pkt_sz, 0UL, tsorig, tspub );
     838           0 :   ctx->net_out_chunk = fd_dcache_compact_next( chunk, pkt_sz, ctx->net_out_chunk0, ctx->net_out_wmark );
     839           0 : }
     840             : 
     841             : static void
     842             : after_frag( fd_shred_ctx_t *    ctx,
     843             :             ulong               in_idx,
     844             :             ulong               seq,
     845             :             ulong               sig,
     846             :             ulong               sz,
     847             :             ulong               tsorig,
     848             :             ulong               _tspub,
     849           0 :             fd_stem_context_t * stem ) {
     850           0 :   (void)seq;
     851           0 :   (void)sz;
     852           0 :   (void)tsorig;
     853           0 :   (void)_tspub;
     854             : 
     855           0 :   if( FD_UNLIKELY( ctx->skip_frag ) ) return;
     856             : 
     857           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_CONTACT ) ) {
     858           0 :     finalize_new_cluster_contact_info( ctx );
     859           0 :     return;
     860           0 :   }
     861             : 
     862           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
     863           0 :     fd_stake_ci_epoch_msg_fini( ctx->stake_ci );
     864           0 :     return;
     865           0 :   }
     866             : 
     867           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
     868           0 :     fd_stake_ci_stake_msg_fini( ctx->stake_ci );
     869           0 :     return;
     870           0 :   }
     871             : 
     872           0 :   if( FD_UNLIKELY( (ctx->in_kind[ in_idx ]==IN_KIND_ROOTED) | (ctx->in_kind[ in_idx ]==IN_KIND_ROOTEDH) ) ) {
     873           0 :     if( FD_LIKELY( (ctx->new_root > 0UL) & (ctx->new_root<ULONG_MAX) ) ) fd_fec_resolver_advance_slot_old( ctx->resolver, ctx->new_root );
     874           0 :     return;
     875           0 :   }
     876             : 
     877           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
     878           0 :     if( ctx->gossip_upd_buf->tag==FD_GOSSIP_UPDATE_TAG_CONTACT_INFO ) {
     879           0 :       fd_gossip_contact_info_t const * ci = ctx->gossip_upd_buf->contact_info->value;
     880           0 :       fd_ip4_port_t tvu_addr;
     881           0 :       tvu_addr.addr = ci->sockets[ FD_GOSSIP_CONTACT_INFO_SOCKET_TVU ].is_ipv6 ? 0U : ci->sockets[ FD_GOSSIP_CONTACT_INFO_SOCKET_TVU ].ip4;
     882           0 :       tvu_addr.port = ci->sockets[ FD_GOSSIP_CONTACT_INFO_SOCKET_TVU ].port;
     883           0 :       if( !tvu_addr.l ){
     884           0 :         fd_stake_ci_dest_remove( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ) );
     885           0 :       } else {
     886           0 :         fd_stake_ci_dest_update( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ), tvu_addr.addr, fd_ushort_bswap( tvu_addr.port ) );
     887           0 :       }
     888           0 :     } else if( ctx->gossip_upd_buf->tag==FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE ) {
     889           0 :       if( FD_UNLIKELY( !memcmp( ctx->identity_key->uc, ctx->gossip_upd_buf->origin, 32UL ) ) ) {
     890             :         /* If our own contact info was dropped, we update with dummy IP
     891             :            instead of removing since stake_ci expects our contact info
     892             :            in the sdests table all the time. fd_stake_ci_new initializes
     893             :            both ei->sdests with our contact info so this should always
     894             :            update (and not append). */
     895           0 :         fd_stake_ci_dest_update( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ), 1U, 0U );
     896           0 :       } else {
     897           0 :         fd_stake_ci_dest_remove( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ) );
     898           0 :       }
     899           0 :     }
     900           0 :     return;
     901           0 :   }
     902             : 
     903           0 :   if( FD_UNLIKELY( (ctx->in_kind[ in_idx ]==IN_KIND_POH) & (ctx->send_fec_set_cnt==0UL) ) ) {
     904             :     /* Entry from PoH that didn't trigger a new FEC set to be made */
     905           0 :     return;
     906           0 :   }
     907           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPAIR ) ) {
     908           0 :     return;
     909           0 :   }
     910             : 
     911           0 :   ulong fanout = 200UL; /* Default Agave's DATA_PLANE_FANOUT = 200UL */
     912             : 
     913           0 :   if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
     914           0 :     uchar * shred_buffer    = ctx->shred_buffer;
     915           0 :     ulong   shred_buffer_sz = ctx->shred_buffer_sz;
     916             : 
     917           0 :     fd_shred_t const * shred = fd_shred_parse( shred_buffer, shred_buffer_sz );
     918             : 
     919           0 :     if( FD_UNLIKELY( !shred       ) ) { ctx->metrics->shred_processing_result[ 1 ]++; return; }
     920             : 
     921           0 :     fd_epoch_leaders_t const * lsched = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, shred->slot );
     922           0 :     if( FD_UNLIKELY( !lsched      ) ) { ctx->metrics->shred_processing_result[ 0 ]++; return; }
     923             : 
     924           0 :     fd_pubkey_t const * slot_leader = fd_epoch_leaders_get( lsched, shred->slot );
     925           0 :     if( FD_UNLIKELY( !slot_leader ) ) { ctx->metrics->shred_processing_result[ 0 ]++; return; } /* Count this as bad slot too */
     926             : 
     927           0 :     fd_fec_set_t const * out_fec_set[1];
     928           0 :     fd_shred_t const   * out_shred[1];
     929           0 :     fd_fec_resolver_spilled_t spilled_fec = { 0 };
     930           0 :     int from_repair = 0;
     931             : 
     932           0 :     uint nonce = UINT_MAX;
     933           0 :     ulong shred_sz = fd_shred_sz( shred );
     934           0 :     if( FD_UNLIKELY( (fd_disco_netmux_sig_proto( sig )==DST_PROTO_REPAIR) & (shred_buffer_sz>=shred_sz+sizeof(uint)) ) ) {
     935           0 :       nonce = FD_LOAD(uint, shred_buffer + shred_sz );
     936           0 :       long est_now_ns = fd_log_wallclock(); /* TODO: switch to fd_clock for performance */
     937           0 :       int nonce_okay = fd_rnonce_ss_verify( ctx->repair_nonce_ss, nonce, shred->slot, shred->idx, est_now_ns );
     938           0 :       ctx->metrics->bad_nonce += (ulong)(!nonce_okay);
     939           0 :       from_repair = nonce_okay;
     940           0 :     }
     941             : 
     942           0 :     long add_shred_timing  = -fd_tickcount();
     943           0 :     int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz, from_repair, slot_leader->uc, out_fec_set, out_shred, &ctx->out_merkle_roots[0], &spilled_fec );
     944           0 :     add_shred_timing      +=  fd_tickcount();
     945             : 
     946           0 :     fd_histf_sample( ctx->metrics->add_shred_timing, (ulong)add_shred_timing );
     947           0 :     ctx->metrics->shred_processing_result[ rv + FD_FEC_RESOLVER_ADD_SHRED_RETVAL_OFF+FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT ]++;
     948             : 
     949           0 :     if( FD_UNLIKELY( ctx->shred_out_idx!=ULONG_MAX &&  /* Only send to repair in full Firedancer */
     950           0 :                      spilled_fec.slot!=0 ) ) {
     951             :       /* We've spilled an in-progress FEC set in the fec_resolver. We
      952             :          need to let repair know to clear out its cached info for that
     953             :          fec set and re-repair those shreds. */
     954           0 :       ulong sig_  = fd_disco_shred_out_shred_sig( 0, spilled_fec.slot, spilled_fec.fec_set_idx, FD_FEC_SHRED_CNT-1U );
     955           0 :       ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
     956           0 :       fd_stem_publish( stem, ctx->shred_out_idx, sig_, ctx->shred_out_chunk, 0, 0, ctx->tsorig, tspub );
     957           0 :     }
     958             : 
     959           0 :     if( (rv==FD_FEC_RESOLVER_SHRED_OKAY) | (rv==FD_FEC_RESOLVER_SHRED_COMPLETES) | (rv==FD_FEC_RESOLVER_SHRED_DUPLICATE) ) {
     960           0 :       if( FD_LIKELY( fd_disco_netmux_sig_proto( sig ) != DST_PROTO_REPAIR && rv!=FD_FEC_RESOLVER_SHRED_DUPLICATE ) ) {
     961             :         /* Relay this shred */
     962           0 :         ulong max_dest_cnt[1];
     963           0 :         do {
     964             :           /* If we've validated the shred and it COMPLETES but we can't
     965             :             compute the destination for whatever reason, don't forward
     966             :             the shred, but still send it to the blockstore. */
     967           0 :           fd_shred_dest_t * sdest = fd_stake_ci_get_sdest_for_slot( ctx->stake_ci, shred->slot );
     968           0 :           if( FD_UNLIKELY( !sdest ) ) break;
     969           0 :           int use_chacha8 = ( shred->slot >= ctx->features_activation->switch_to_chacha8_turbine );
     970           0 :           fd_shred_dest_idx_t * dests = fd_shred_dest_compute_children( sdest, &shred, 1UL, ctx->scratchpad_dests, 1UL, fanout, fanout, max_dest_cnt, use_chacha8 );
     971           0 :           if( FD_UNLIKELY( !dests ) ) break;
     972             : 
     973           0 :           for( ulong i=0UL; i<ctx->adtl_dests_retransmit_cnt; i++ ) send_shred( ctx, stem, *out_shred, ctx->adtl_dests_retransmit+i, ctx->tsorig );
     974           0 :           for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, stem, *out_shred, fd_shred_dest_idx_to_dest( sdest, dests[ j ] ), ctx->tsorig );
     975           0 :         } while( 0 );
     976           0 :       }
     977             : 
     978           0 :       if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* Only send to repair/replay in full Firedancer */
     979             : 
     980             :         /* Construct the sig from the shred. */
     981             : 
     982           0 :         int  is_code               = fd_shred_is_code( fd_shred_type( shred->variant ) );
     983           0 :         uint shred_idx_or_data_cnt = shred->idx;
     984           0 :         if( FD_LIKELY( is_code ) ) shred_idx_or_data_cnt = shred->code.data_cnt;  /* optimize for code_cnt >= data_cnt */
     985           0 :         ulong _sig = fd_disco_shred_out_shred_sig( fd_disco_netmux_sig_proto(sig)==DST_PROTO_SHRED, shred->slot, shred->fec_set_idx, shred_idx_or_data_cnt );
     986             : 
     987             :         /* Copy the shred header into the frag and publish. */
     988             : 
     989           0 :         ulong sz = fd_shred_header_sz( shred->variant );
     990           0 :         fd_memcpy( fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk ), shred, sz );
     991             : 
     992           0 :         fd_memcpy( (uchar *)fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk ) + sz, &ctx->out_merkle_roots[0], FD_SHRED_MERKLE_ROOT_SZ );
     993           0 :         sz += FD_SHRED_MERKLE_ROOT_SZ;
     994             : 
     995           0 :         fd_memcpy( (uchar *)fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk ) + sz, (uchar *)shred + fd_shred_chain_off( shred->variant ), FD_SHRED_MERKLE_ROOT_SZ );
     996           0 :         sz += FD_SHRED_MERKLE_ROOT_SZ;
     997             : 
     998           0 :         FD_STORE(uint, fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk ) + sz, nonce );
     999           0 :         sz += 4UL;
    1000             : 
    1001           0 :         ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
    1002           0 :         fd_stem_publish( stem, ctx->shred_out_idx, _sig, ctx->shred_out_chunk, sz, 0UL, ctx->tsorig, tspub );
    1003           0 :         ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, sz, ctx->shred_out_chunk0, ctx->shred_out_wmark );
    1004           0 :       }
    1005           0 :     }
    1006           0 :     if( FD_LIKELY( rv!=FD_FEC_RESOLVER_SHRED_COMPLETES ) ) return;
    1007             : 
    1008           0 :     FD_TEST( ctx->fec_sets <= *out_fec_set );
    1009           0 :     ctx->send_fec_set_idx[ 0UL ] = (ulong)(*out_fec_set - ctx->fec_sets);
    1010           0 :     ctx->send_fec_set_cnt = 1UL;
    1011           0 :     ctx->shredded_txn_cnt = 0UL;
    1012           0 :   }
    1013             : 
    1014           0 :   if( FD_UNLIKELY( ctx->send_fec_set_cnt==0UL ) ) return;
    1015             : 
    1016             :   /* Try to distribute shredded txn count across the fec sets.
    1017             :      This is an approximation, but it is acceptable. */
    1018           0 :   ulong shredded_txn_cnt_per_fec_set  = ctx->shredded_txn_cnt / ctx->send_fec_set_cnt;
    1019           0 :   ulong shredded_txn_cnt_remain       = ctx->shredded_txn_cnt - shredded_txn_cnt_per_fec_set * ctx->send_fec_set_cnt;
    1020           0 :   ulong shredded_txn_cnt_last_fec_set = shredded_txn_cnt_per_fec_set + shredded_txn_cnt_remain;
    1021             : 
    1022             :   /* If this shred completes a FEC set or is part of a microblock from
    1023             :     pack (ie. we're leader), we now have a full FEC set: so we notify
    1024             :     repair and insert into the blockstore, as well as retransmit. */
    1025             : 
    1026           0 :   for( ulong fset_k=0; fset_k<ctx->send_fec_set_cnt; fset_k++ ) {
    1027             : 
    1028           0 :     fd_fec_set_t * set = ctx->fec_sets + ctx->send_fec_set_idx[ fset_k ];
    1029             : 
    1030           0 :     fd_shred_t const * last = set->data_shreds[ FD_FEC_SHRED_CNT - 1 ].s;
    1031             : 
    1032             :     /* Compute merkle root and chained merkle root. */
    1033             : 
    1034           0 :     if( FD_LIKELY( ctx->store ) ) { /* firedancer-only */
    1035             : 
    1036             :       /* Insert shreds into the store. We do this regardless of whether
    1037             :          we are leader. */
    1038             : 
    1039           0 :       fd_store_fec_t * fec = fd_store_insert( ctx->store, ctx->round_robin_id, (fd_hash_t *)fd_type_pun( &ctx->out_merkle_roots[fset_k] ) );
    1040             : 
    1041             :       /* Firedancer is configured such that the store never fills up, as
    1042             :          the reasm is responsible for also evicting from store (based on
    1043             :          its eviction policy, see fd_reasm.h). fec is only NULL when the
    1044             :          store is full, so this is either a bug or misconfiguration. */
    1045             : 
    1046           0 :       if( FD_UNLIKELY( !fec ) ) FD_LOG_CRIT(( "store full" ));
    1047             : 
    1048             :       /* It's safe to memcpy the FEC payload outside of the shared lock,
    1049             :          because the store ele is guaranteed to remain valid here.  It
    1050             :          is not possible for a fd_store_remove to interleave, because
    1051             :          remove is only called by replay_tile, which (crucially) is only
    1052             :          sent this FEC via stem publish after we have finished copying.
    1053             : 
    1054             :          Copying outside the shared lock scope also means that we can
    1055             :          lower the duration for which the shared lock is held, and
    1056             :          enables replay to acquire the exclusive lock for removes
    1057             :          without getting starved. */
    1058             : 
    1059           0 :       if( FD_LIKELY( !fec->data_sz ) ) {
    1060             :         /* if data_sz is non-zero, we've already inserted this FEC set into the store */
    1061           0 :         for( ulong i=0UL; i<FD_FEC_SHRED_CNT; i++ ) {
    1062           0 :           fd_shred_t * data_shred = set->data_shreds[i].s;
    1063           0 :           ulong        payload_sz = fd_shred_payload_sz( data_shred );
    1064           0 :           if( FD_UNLIKELY( fec->data_sz + payload_sz > FD_STORE_DATA_MAX ) ) {
    1065             : 
    1066             :             /* This code is only reachable if shred tile has completed the
    1067             :                FEC set, which implies it was able to validate it, yet
    1068             :                somehow the total payload sz of this FEC set exceeds the
    1069             :                maximum payload sz.  This indicates either a serious bug or
    1070             :                shred tile is compromised so FD_LOG_CRIT. */
    1071             : 
    1072           0 :             FD_LOG_CRIT(( "Shred tile %lu: completed FEC set %lu %u data_sz: %lu exceeds FD_STORE_DATA_MAX: %lu. Ignoring FEC set.", ctx->round_robin_id, data_shred->slot, data_shred->fec_set_idx, fec->data_sz + payload_sz, FD_STORE_DATA_MAX ));
    1073           0 :           }
    1074           0 :           fd_memcpy( fec->data + fec->data_sz, fd_shred_data_payload( data_shred ), payload_sz );
    1075           0 :           fec->data_sz += payload_sz;
    1076           0 :           if( FD_LIKELY( i<32UL ) ) fec->block_offs[ i ] = (uint)payload_sz +  (i==0UL ? 0U : fec->block_offs[ i-1UL ]);
    1077           0 :         }
    1078           0 :       }
    1079           0 :     }
    1080             : 
    1081           0 :     if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
    1082             : 
    1083             :       /* Additionally, publish a frag to notify repair and replay that
    1084             :          the FEC set is complete.  Note the ordering wrt store shred
    1085             :          insertion above is intentional: shreds are inserted into the
    1086             :          store before notifying repair and replay.  This is because the
    1087             :          replay tile assumes the shreds are already in the store when
    1088             :          replay gets a notification from the shred tile that the FEC is
     1089             :          complete.  We don't know whether shred will finish inserting
    1090             :          into store first or repair will finish validating the FEC set
    1091             :          first.  The header and merkle root of the last shred in the FEC
    1092             :          set are sent as part of this frag.
    1093             : 
    1094             :          This message, the shred msg, and the FEC evict msg constitute
    1095             :          the max 3 possible messages to repair/replay per after_frag.
    1096             :          In reality, it is only possible to publish all 3 in the case
    1097             :          where we receive a coding shred first for a FEC set where
    1098             :          (N=1,K=18), which allows for the FEC set to be instantly
    1099             :          completed by the singular coding shred, and that also happens
    1100             :          to evict a FEC set from the curr_map.  When fix-32 arrives, the
    1101             :          link burst value can be lowered to 2. */
    1102             : 
    1103           0 :       int is_leader_fec = ctx->in_kind[ in_idx ]==IN_KIND_POH;
    1104             : 
    1105           0 :       ulong   sig   = fd_disco_shred_out_fec_sig( last->slot, last->fec_set_idx, (uint)FD_FEC_SHRED_CNT, last->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE );
    1106           0 :       uchar * chunk = fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk );
    1107           0 :       memcpy( chunk,                                                         last,                                                FD_SHRED_DATA_HEADER_SZ );
    1108           0 :       memcpy( chunk+FD_SHRED_DATA_HEADER_SZ,                                 ctx->out_merkle_roots[fset_k].hash,                  FD_SHRED_MERKLE_ROOT_SZ );
    1109           0 :       memcpy( chunk+FD_SHRED_DATA_HEADER_SZ +  FD_SHRED_MERKLE_ROOT_SZ,      (uchar *)last + fd_shred_chain_off( last->variant ), FD_SHRED_MERKLE_ROOT_SZ );
    1110           0 :       memcpy( chunk+FD_SHRED_DATA_HEADER_SZ + (FD_SHRED_MERKLE_ROOT_SZ*2UL), &is_leader_fec,                                      sizeof(int));
    1111             : 
    1112           0 :       ulong sz    = FD_SHRED_DATA_HEADER_SZ + FD_SHRED_MERKLE_ROOT_SZ * 2 + sizeof(int);
    1113           0 :       ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
    1114           0 :       fd_stem_publish( stem, ctx->shred_out_idx, sig, ctx->shred_out_chunk, sz, 0UL, ctx->tsorig, tspub );
    1115           0 :       ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, sz, ctx->shred_out_chunk0, ctx->shred_out_wmark );
    1116             : 
    1117           0 :     } else if( FD_UNLIKELY( ctx->store_out_idx != ULONG_MAX ) ) { /* frankendancer-only */
    1118             : 
    1119             :       /* Send to the blockstore */
    1120             : 
    1121           0 :       ulong txn_cnt = fd_ulong_if( fset_k==ctx->send_fec_set_cnt-1UL, shredded_txn_cnt_last_fec_set, shredded_txn_cnt_per_fec_set );
    1122             :       /* If the low 32 bits of sig are 0, the store tile will do extra
    1123             :          checks */
    1124           0 :       ulong new_sig = txn_cnt<<32 | (ulong)(ctx->in_kind[ in_idx ]!=IN_KIND_NET);
    1125           0 :       ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
    1126             :       /* The size is actually slightly larger than USHORT_MAX, but the store tile
    1127             :          knows to use sizeof(fd_fec_set_t) instead of the sz field.  Put
    1128             :          USHORT_MAX so that monitoring tools are at least close. */
    1129           0 :       ulong sz = fd_ulong_min( sizeof(fd_fec_set_t), USHORT_MAX );
    1130           0 :       fd_stem_publish( stem, 0UL, new_sig, fd_laddr_to_chunk( ctx->store_out_mem, set ), sz, 0UL, ctx->tsorig, tspub );
    1131           0 :     }
    1132             : 
    1133             :     /* Compute all the destinations for all the new shreds */
    1134             : 
    1135           0 :     fd_shred_t const * new_shreds[ FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX ];
    1136           0 :     ulong k=0UL;
    1137           0 :     for( ulong i=0UL; i<FD_FEC_SHRED_CNT; i++ )
    1138           0 :       if( !(set->data_shred_rcvd   & (1U<<i)) ) new_shreds[ k++ ] = set->data_shreds  [ i ].s;
    1139           0 :     for( ulong i=0UL; i<FD_FEC_SHRED_CNT; i++ )
    1140           0 :       if( !(set->parity_shred_rcvd & (1U<<i)) ) new_shreds[ k++ ] = set->parity_shreds[ i ].s;
    1141             : 
    1142           0 :     if( FD_UNLIKELY( !k ) ) return;
    1143           0 :     fd_shred_dest_t * sdest = fd_stake_ci_get_sdest_for_slot( ctx->stake_ci, new_shreds[ 0 ]->slot );
    1144           0 :     if( FD_UNLIKELY( !sdest ) ) return;
    1145           0 :     int use_chacha8 = ( new_shreds[ 0 ]->slot >= ctx->features_activation->switch_to_chacha8_turbine );
    1146             : 
    1147           0 :     ulong out_stride;
    1148           0 :     ulong max_dest_cnt[1];
    1149           0 :     fd_shred_dest_idx_t * dests;
    1150           0 :     if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
    1151           0 :       for( ulong i=0UL; i<k; i++ ) {
    1152           0 :         for( ulong j=0UL; j<ctx->adtl_dests_retransmit_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], ctx->adtl_dests_retransmit+j, ctx->tsorig );
    1153           0 :       }
    1154           0 :       out_stride = k;
    1155             :       /* In the case of feature activation, the fanout used below is
    1156             :           the same as the one calculated/modified previously at the
    1157             :           beginning of after_frag() for IN_KIND_NET in this slot. */
    1158           0 :       dests = fd_shred_dest_compute_children( sdest, new_shreds, k, ctx->scratchpad_dests, k, fanout, fanout, max_dest_cnt, use_chacha8 );
    1159           0 :     } else {
    1160           0 :       for( ulong i=0UL; i<k; i++ ) {
    1161           0 :         for( ulong j=0UL; j<ctx->adtl_dests_leader_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], ctx->adtl_dests_leader+j, ctx->tsorig );
    1162           0 :       }
    1163           0 :       out_stride = 1UL;
    1164           0 :       *max_dest_cnt = 1UL;
    1165           0 :       dests = fd_shred_dest_compute_first   ( sdest, new_shreds, k, ctx->scratchpad_dests, use_chacha8 );
    1166           0 :     }
    1167           0 :     if( FD_UNLIKELY( !dests ) ) return;
    1168             : 
    1169             :     /* Send only the ones we didn't receive. */
    1170           0 :     for( ulong i=0UL; i<k; i++ ) {
    1171           0 :       for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], fd_shred_dest_idx_to_dest( sdest, dests[ j*out_stride+i ]), ctx->tsorig );
    1172           0 :     }
    1173           0 :   }
    1174           0 : }
    1175             : 
    1176             : static void
    1177             : privileged_init( fd_topo_t *      topo,
    1178           0 :                  fd_topo_tile_t * tile ) {
    1179           0 :   void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
    1180           0 :   FD_TEST( scratch!=NULL );
    1181             : 
    1182           0 :   FD_SCRATCH_ALLOC_INIT( l, scratch );
    1183           0 :   fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_shred_ctx_t ), sizeof( fd_shred_ctx_t ) );
    1184             : 
    1185           0 :   if( FD_UNLIKELY( !strcmp( tile->shred.identity_key_path, "" ) ) )
    1186           0 :     FD_LOG_ERR(( "identity_key_path not set" ));
    1187             : 
    1188           0 :   ctx->identity_key[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->shred.identity_key_path, /* pubkey only: */ 1 ) );
    1189             : 
    1190           0 :   if( FD_UNLIKELY( !fd_rng_secure( &(ctx->resolver_seed), sizeof(ulong) ) ) ) {
    1191           0 :     FD_LOG_CRIT(( "fd_rng_secure failed" ));
    1192           0 :   }
    1193             :   /* This is only needed in frankendancer, but we'll overwrite it with
    1194             :      the value the repair tile generated in full firedancer. */
    1195           0 :   if( FD_UNLIKELY( !fd_rng_secure( ctx->repair_nonce_ss->bytes, sizeof(fd_rnonce_ss_t) ) ) ) {
    1196           0 :     FD_LOG_CRIT(( "fd_rng_secure failed" ));
    1197           0 :   }
    1198           0 : }
    1199             : 
    1200             : static void
    1201             : fd_shred_signer( void *        signer_ctx,
    1202             :                  uchar         signature[ static 64 ],
    1203           0 :                  uchar const   merkle_root[ static 32 ] ) {
    1204           0 :   fd_keyguard_client_sign( signer_ctx, signature, merkle_root, 32UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );
    1205           0 : }
    1206             : 
    1207             : static void
    1208             : unprivileged_init( fd_topo_t *      topo,
    1209           0 :                    fd_topo_tile_t * tile ) {
    1210             : 
    1211           0 :   FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ NET_OUT_IDX   ]].name, "shred_net"   ) );
    1212           0 :   FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ SIGN_OUT_IDX  ]].name, "shred_sign"  ) );
    1213             : 
    1214           0 :   if( FD_UNLIKELY( !tile->out_cnt ) )
    1215           0 :     FD_LOG_ERR(( "shred tile has no primary output link" ));
    1216             : 
    1217           0 :   ulong shred_store_mcache_depth = tile->shred.depth;
    1218           0 :   if( topo->links[ tile->out_link_id[ 0 ] ].depth != shred_store_mcache_depth )
    1219           0 :     FD_LOG_ERR(( "shred tile out depths are not equal %lu %lu",
    1220           0 :                  topo->links[ tile->out_link_id[ 0 ] ].depth, shred_store_mcache_depth ));
    1221             : 
    1222           0 :   void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
    1223           0 :   FD_TEST( scratch!=NULL );
    1224             : 
    1225           0 :   FD_SCRATCH_ALLOC_INIT( l, scratch );
    1226           0 :   fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_shred_ctx_t ), sizeof( fd_shred_ctx_t ) );
    1227             : 
    1228           0 :   ctx->round_robin_cnt = fd_topo_tile_name_cnt( topo, tile->name );
    1229           0 :   ctx->round_robin_id  = tile->kind_id;
    1230           0 :   ctx->batch_cnt       = 0UL;
    1231           0 :   ctx->slot            = ULONG_MAX;
    1232             : 
    1233             :   /* If the default partial_depth is ever changed, correspondingly
    1234             :      change the size of the fd_fec_intra_pool in fd_fec_repair. */
    1235           0 :   ulong fec_resolver_footprint = fd_fec_resolver_footprint( tile->shred.fec_resolver_depth, 1UL, shred_store_mcache_depth + 1UL,
    1236           0 :                                                             128UL * tile->shred.fec_resolver_depth );
    1237             :   /* See long comment at the top of this file for the computation of
    1238             :      fec_set_cnt. */
    1239           0 :   ulong fec_set_cnt            = 2UL*shred_store_mcache_depth + tile->shred.fec_resolver_depth + FD_SHRED_BATCH_FEC_SETS_MAX + 2UL;
    1240           0 :   ulong fec_sets_required_sz   = fec_set_cnt*sizeof(fd_fec_set_t);
    1241             : 
    1242           0 :   void * fec_sets_shmem = NULL;
    1243           0 :   ctx->shred_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_out", ctx->round_robin_id );
    1244           0 :   ctx->store_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_store",  ctx->round_robin_id );
    1245           0 :   if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
    1246           0 :     fd_topo_link_t * shred_out = &topo->links[ tile->out_link_id[ ctx->shred_out_idx ] ];
    1247           0 :     ctx->shred_out_mem    = topo->workspaces[ topo->objs[ shred_out->dcache_obj_id ].wksp_id ].wksp;
    1248           0 :     ctx->shred_out_chunk0 = fd_dcache_compact_chunk0( ctx->shred_out_mem, shred_out->dcache );
    1249           0 :     ctx->shred_out_wmark  = fd_dcache_compact_wmark ( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu );
    1250           0 :     ctx->shred_out_chunk  = ctx->shred_out_chunk0;
    1251           0 :     FD_TEST( fd_dcache_compact_is_safe( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu, shred_out->depth ) );
    1252           0 :     ulong fec_sets_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "fec_sets" );
    1253           0 :     if( FD_UNLIKELY( fec_sets_obj_id == ULONG_MAX ) ) FD_LOG_ERR(( "invalid firedancer topo" ));
    1254           0 :     fd_topo_obj_t const * obj = &topo->objs[ fec_sets_obj_id ];
    1255           0 :     if( FD_UNLIKELY( obj->footprint<(fec_sets_required_sz*ctx->round_robin_cnt) ) ) {
    1256           0 :       FD_LOG_ERR(( "fec_sets wksp obj too small. It is %lu bytes but must be at least %lu bytes. ",
    1257           0 :                    obj->footprint,
    1258           0 :                    fec_sets_required_sz ));
    1259           0 :     }
    1260           0 :     fec_sets_shmem = (uchar *)fd_topo_obj_laddr( topo, fec_sets_obj_id ) + (ctx->round_robin_id * fec_sets_required_sz);
    1261             : 
    1262             :     /* Initialize the rnonce.  The repair tile sets it, so we can only
    1263             :        do this in firedancer mode.  In frankendancer mode, we initialize
    1264             :        it randomly in privileged_init just so that an attacker can't
    1265             :        guess it. */
    1266           0 :     FD_LOG_DEBUG(( "Loading rnonce_ss" ));
    1267           0 :     ulong rnonce_ss_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "rnonce_ss" );
    1268           0 :     FD_TEST( rnonce_ss_id!=ULONG_MAX );
    1269           0 :     void const * shared_rnonce = fd_topo_obj_laddr( topo, rnonce_ss_id );
    1270           0 :     ulong * nonce_initialized = (ulong *)(sizeof(fd_rnonce_ss_t)+(uchar const *)shared_rnonce);
    1271           0 :     while( !FD_VOLATILE_CONST( *nonce_initialized ) ) FD_SPIN_PAUSE();
    1272           0 :     FD_COMPILER_MFENCE();
    1273           0 :     memcpy( ctx->repair_nonce_ss, shared_rnonce, sizeof(fd_rnonce_ss_t) );
    1274           0 :     FD_LOG_DEBUG(( "Loaded rnonce_ss" ));
    1275             : 
    1276           0 :   } else if ( FD_LIKELY( ctx->store_out_idx!=ULONG_MAX ) ) { /* frankendancer-only */
    1277           0 :     FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ ctx->store_out_idx ]].name, "shred_store" ) );
    1278           0 :     fec_sets_shmem = topo->links[ tile->out_link_id[ ctx->store_out_idx ] ].dcache;
    1279           0 :     if( FD_UNLIKELY( fd_dcache_data_sz( fec_sets_shmem )<fec_sets_required_sz ) ) {
    1280           0 :       FD_LOG_ERR(( "shred_store dcache too small. It is %lu bytes but must be at least %lu bytes. ",
    1281           0 :                   fd_dcache_data_sz( fec_sets_shmem ),
    1282           0 :                   fec_sets_required_sz ));
    1283           0 :     }
    1284           0 :   }
    1285             : 
    1286           0 :   if( FD_UNLIKELY( !tile->shred.fec_resolver_depth ) ) FD_LOG_ERR(( "fec_resolver_depth not set" ));
    1287           0 :   if( FD_UNLIKELY( !tile->shred.shred_listen_port  ) ) FD_LOG_ERR(( "shred_listen_port not set" ));
    1288             : 
    1289           0 :   void * _stake_ci = FD_SCRATCH_ALLOC_APPEND( l, fd_stake_ci_align(),              fd_stake_ci_footprint()            );
    1290           0 :   void * _resolver = FD_SCRATCH_ALLOC_APPEND( l, fd_fec_resolver_align(),          fec_resolver_footprint             );
    1291           0 :   void * _shredder = FD_SCRATCH_ALLOC_APPEND( l, fd_shredder_align(),              fd_shredder_footprint()            );
    1292             : 
    1293           0 :   fd_fec_set_t * fec_sets  = (fd_fec_set_t *)fec_sets_shmem;
    1294             : 
    1295           0 : #define NONNULL( x ) (__extension__({                                        \
    1296           0 :       __typeof__((x)) __x = (x);                                             \
    1297           0 :       if( FD_UNLIKELY( !__x ) ) FD_LOG_ERR(( #x " was unexpectedly NULL" )); \
    1298           0 :       __x; }))
    1299             : 
    1300           0 :   int has_ipecho_in = fd_topo_find_tile_in_link( topo, tile, "ipecho_out", 0UL )!=ULONG_MAX;
    1301           0 :   ushort expected_shred_version = tile->shred.expected_shred_version;
    1302           0 :   if( FD_UNLIKELY( !has_ipecho_in && !expected_shred_version ) ) {
    1303           0 :     ulong busy_obj_id = fd_pod_query_ulong( topo->props, "pohh_shred", ULONG_MAX );
    1304           0 :     FD_TEST( busy_obj_id!=ULONG_MAX );
    1305           0 :     ulong * gossip_shred_version = fd_fseq_join( fd_topo_obj_laddr( topo, busy_obj_id ) );
    1306           0 :     FD_LOG_INFO(( "Waiting for shred version to be determined via gossip." ));
    1307           0 :     ulong _expected_shred_version = ULONG_MAX;
    1308           0 :     do {
    1309           0 :       _expected_shred_version = FD_VOLATILE_CONST( *gossip_shred_version );
    1310           0 :     } while( _expected_shred_version==ULONG_MAX );
    1311             : 
    1312           0 :     if( FD_UNLIKELY( _expected_shred_version>USHORT_MAX ) ) FD_LOG_ERR(( "invalid shred version %lu", _expected_shred_version ));
    1313           0 :     FD_LOG_INFO(( "Using shred version %hu", (ushort)_expected_shred_version ));
    1314           0 :     expected_shred_version = (ushort)_expected_shred_version;
    1315           0 :   }
    1316             : 
    1317           0 :   ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->id_keyswitch_obj_id ) );
    1318           0 :   FD_TEST( ctx->keyswitch );
    1319             : 
    1320             :   /* populate ctx */
    1321           0 :   ulong sign_in_idx = fd_topo_find_tile_in_link( topo, tile, "sign_shred", tile->kind_id );
    1322           0 :   FD_TEST( sign_in_idx!=ULONG_MAX );
    1323           0 :   fd_topo_link_t * sign_in = &topo->links[ tile->in_link_id[ sign_in_idx ] ];
    1324           0 :   fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ SIGN_OUT_IDX ] ];
    1325           0 :   NONNULL( fd_keyguard_client_join( fd_keyguard_client_new( ctx->keyguard_client,
    1326           0 :                                                             sign_out->mcache,
    1327           0 :                                                             sign_out->dcache,
    1328           0 :                                                             sign_in->mcache,
    1329           0 :                                                             sign_in->dcache,
    1330           0 :                                                             sign_out->mtu ) ) );
    1331             : 
    1332           0 :   ulong shred_limit = fd_ulong_if( tile->shred.larger_shred_limits_per_block, 32UL*32UL*1024UL, 32UL*1024UL );
    1333           0 :   fd_fec_set_t * resolver_sets = fec_sets + shred_store_mcache_depth + FD_SHRED_BATCH_FEC_SETS_MAX;
    1334           0 :   ctx->shredder = NONNULL( fd_shredder_join     ( fd_shredder_new     ( _shredder, fd_shred_signer, ctx->keyguard_client ) ) );
    1335           0 :   ctx->resolver = NONNULL( fd_fec_resolver_join ( fd_fec_resolver_new ( _resolver,
    1336           0 :                                                                         fd_shred_signer, ctx->keyguard_client,
    1337           0 :                                                                         tile->shred.fec_resolver_depth, 1UL,
    1338           0 :                                                                         shred_store_mcache_depth+1UL,
    1339           0 :                                                                         128UL * tile->shred.fec_resolver_depth, resolver_sets,
    1340           0 :                                                                         shred_limit,
    1341           0 :                                                                         ctx->resolver_seed ) ) );
    1342             : 
    1343           0 :   if( FD_LIKELY( !!expected_shred_version ) ) {
    1344           0 :     fd_shredder_set_shred_version    ( ctx->shredder, expected_shred_version );
    1345           0 :     fd_fec_resolver_set_shred_version( ctx->resolver, expected_shred_version );
    1346           0 :   }
    1347             : 
    1348           0 :   ctx->fec_sets = fec_sets;
    1349             : 
    1350           0 :   ctx->stake_ci = fd_stake_ci_join( fd_stake_ci_new( _stake_ci, ctx->identity_key ) );
    1351             : 
    1352           0 :   ctx->net_id   = (ushort)0;
    1353             : 
    1354           0 :   fd_ip4_udp_hdr_init( ctx->data_shred_net_hdr,   FD_SHRED_MIN_SZ, 0, tile->shred.shred_listen_port );
    1355           0 :   fd_ip4_udp_hdr_init( ctx->parity_shred_net_hdr, FD_SHRED_MAX_SZ, 0, tile->shred.shred_listen_port );
    1356             : 
    1357           0 :   ctx->adtl_dests_retransmit_cnt = tile->shred.adtl_dests_retransmit_cnt;
    1358           0 :   for( ulong i=0UL; i<ctx->adtl_dests_retransmit_cnt; i++) {
    1359           0 :     ctx->adtl_dests_retransmit[ i ].ip4 = tile->shred.adtl_dests_retransmit[ i ].ip;
    1360           0 :     ctx->adtl_dests_retransmit[ i ].port = tile->shred.adtl_dests_retransmit[ i ].port;
    1361           0 :   }
    1362           0 :   ctx->adtl_dests_leader_cnt = tile->shred.adtl_dests_leader_cnt;
    1363           0 :   for( ulong i=0UL; i<ctx->adtl_dests_leader_cnt; i++) {
    1364           0 :     ctx->adtl_dests_leader[i].ip4  = tile->shred.adtl_dests_leader[i].ip;
    1365           0 :     ctx->adtl_dests_leader[i].port = tile->shred.adtl_dests_leader[i].port;
    1366           0 :   }
    1367             : 
    1368           0 :   uchar has_contact_info_in = 0;
    1369           0 :   for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    1370           0 :     fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
    1371           0 :     fd_topo_wksp_t const * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
    1372             : 
    1373           0 :     if( FD_LIKELY(      !strcmp( link->name, "net_shred"    ) ) ) {
    1374           0 :       ctx->in_kind[ i ] = IN_KIND_NET;
    1375           0 :       fd_net_rx_bounds_init( &ctx->in[ i ].net_rx, link->dcache );
    1376           0 :       continue; /* only net_rx needs to be set in this case. */
    1377           0 :     }
    1378           0 :     else if( FD_LIKELY( !strcmp( link->name, "poh_shred"    ) ) )   ctx->in_kind[ i ] = IN_KIND_POH;   /* Firedancer */
    1379           0 :     else if( FD_LIKELY( !strcmp( link->name, "pohh_shred"   ) ) )   ctx->in_kind[ i ] = IN_KIND_POH;   /* Frankendancer */
    1380           0 :     else if( FD_LIKELY( !strcmp( link->name, "stake_out"    ) ) )   ctx->in_kind[ i ] = IN_KIND_STAKE; /* Frankendancer */
    1381           0 :     else if( FD_LIKELY( !strcmp( link->name, "replay_epoch" ) ) )   ctx->in_kind[ i ] = IN_KIND_EPOCH; /* Firedancer */
    1382           0 :     else if( FD_LIKELY( !strcmp( link->name, "sign_shred"   ) ) )   ctx->in_kind[ i ] = IN_KIND_SIGN;
    1383           0 :     else if( FD_LIKELY( !strcmp( link->name, "ipecho_out"   ) ) )   ctx->in_kind[ i ] = IN_KIND_IPECHO;
    1384           0 :     else if( FD_LIKELY( !strcmp( link->name, "tower_out"    ) ) )   ctx->in_kind[ i ] = IN_KIND_ROOTED;
    1385           0 :     else if( FD_LIKELY( !strcmp( link->name, "replay_resol" ) ) )   ctx->in_kind[ i ] = IN_KIND_ROOTEDH;
    1386           0 :     else if( FD_LIKELY( !strcmp( link->name, "crds_shred"   ) ) ) { ctx->in_kind[ i ] = IN_KIND_CONTACT;
    1387           0 :       if( FD_UNLIKELY( has_contact_info_in ) ) FD_LOG_ERR(( "shred tile has multiple contact info in link types, can only be either gossip_out or crds_shred" ));
    1388           0 :       has_contact_info_in = 1;
    1389           0 :     }
    1390           0 :     else if( FD_LIKELY( !strcmp( link->name, "gossip_out"   ) ) ) { ctx->in_kind[ i ] = IN_KIND_GOSSIP;
    1391           0 :       if( FD_UNLIKELY( has_contact_info_in ) ) FD_LOG_ERR(( "shred tile has multiple contact info in link types, can only be either gossip_out or crds_shred" ));
    1392           0 :       has_contact_info_in = 1;
    1393           0 :     }
    1394             : 
    1395           0 :     else FD_LOG_ERR(( "shred tile has unexpected input link %lu %s", i, link->name ));
    1396             : 
    1397           0 :     if( FD_LIKELY( !!link->mtu ) ) {
    1398           0 :       ctx->in[ i ].mem    = link_wksp->wksp;
    1399           0 :       ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
    1400           0 :       ctx->in[ i ].wmark  = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
    1401           0 :     }
    1402           0 :   }
    1403             : 
    1404           0 :   fd_topo_link_t * net_out = &topo->links[ tile->out_link_id[ NET_OUT_IDX ] ];
    1405             : 
    1406           0 :   ctx->net_out_chunk0 = fd_dcache_compact_chunk0( fd_wksp_containing( net_out->dcache ), net_out->dcache );
    1407           0 :   ctx->net_out_mem    = topo->workspaces[ topo->objs[ net_out->dcache_obj_id ].wksp_id ].wksp;
    1408           0 :   ctx->net_out_wmark  = fd_dcache_compact_wmark ( ctx->net_out_mem, net_out->dcache, net_out->mtu );
    1409           0 :   ctx->net_out_chunk  = ctx->net_out_chunk0;
    1410             : 
    1411           0 :   ctx->store = NULL;
    1412           0 :   ulong store_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "store" );
    1413           0 :   if( FD_LIKELY( store_obj_id!=ULONG_MAX ) ) { /* firedancer-only */
    1414           0 :     ctx->store = fd_store_join( fd_topo_obj_laddr( topo, store_obj_id ) );
    1415           0 :     FD_TEST( ctx->store->magic==FD_STORE_MAGIC );
    1416           0 :     FD_TEST( ctx->store->part_cnt==ctx->round_robin_cnt ); /* single-writer (shred tile) per store part */
    1417           0 :     FD_TEST( !fd_store_verify( ctx->store ) );
    1418           0 :   }
    1419             : 
    1420           0 :   if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
    1421           0 :     fd_topo_link_t * shred_out = &topo->links[ tile->out_link_id[ ctx->shred_out_idx ] ];
    1422           0 :     ctx->shred_out_mem         = topo->workspaces[ topo->objs[ shred_out->dcache_obj_id ].wksp_id ].wksp;
    1423           0 :     ctx->shred_out_chunk0      = fd_dcache_compact_chunk0( ctx->shred_out_mem, shred_out->dcache );
    1424           0 :     ctx->shred_out_wmark       = fd_dcache_compact_wmark ( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu );
    1425           0 :     ctx->shred_out_chunk       = ctx->shred_out_chunk0;
    1426           0 :     FD_TEST( fd_dcache_compact_is_safe( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu, shred_out->depth ) );
    1427           0 :   }
    1428             : 
    1429           0 :   if( FD_LIKELY( ctx->store_out_idx!=ULONG_MAX ) ) { /* frankendancer-only */
    1430           0 :     fd_topo_link_t * store_out = &topo->links[ tile->out_link_id[ ctx->store_out_idx ] ];
    1431           0 :     ctx->store_out_mem         = topo->workspaces[ topo->objs[ store_out->dcache_obj_id ].wksp_id ].wksp;
    1432           0 :     ctx->store_out_chunk0      = fd_dcache_compact_chunk0( ctx->store_out_mem, store_out->dcache );
    1433           0 :     ctx->store_out_wmark       = fd_dcache_compact_wmark ( ctx->store_out_mem, store_out->dcache, store_out->mtu );
    1434           0 :     ctx->store_out_chunk       = ctx->store_out_chunk0;
    1435           0 :     FD_TEST( fd_dcache_compact_is_safe( ctx->store_out_mem, store_out->dcache, store_out->mtu, store_out->depth ) );
    1436           0 :   }
    1437             : 
    1438           0 :   ctx->poh_in_expect_seq = 0UL;
    1439             : 
    1440           0 :   ctx->shredder_fec_set_idx = 0UL;
    1441           0 :   ctx->shredder_max_fec_set_idx = shred_store_mcache_depth + FD_SHRED_BATCH_FEC_SETS_MAX;
    1442             : 
    1443           0 :   ctx->chained_merkle_root = NULL;
    1444           0 :   memset( ctx->out_merkle_roots, 0, sizeof(ctx->out_merkle_roots) );
    1445             : 
    1446           0 :   for( ulong i=0UL; i<FD_SHRED_BATCH_FEC_SETS_MAX; i++ ) { ctx->send_fec_set_idx[ i ] = ULONG_MAX; }
    1447           0 :   ctx->send_fec_set_cnt = 0UL;
    1448             : 
    1449           0 :   ctx->shred_buffer_sz  = 0UL;
    1450           0 :   memset( ctx->shred_buffer, 0xFF, FD_NET_MTU );
    1451             : 
    1452           0 :   fd_histf_join( fd_histf_new( ctx->metrics->contact_info_cnt,     FD_MHIST_MIN(         SHRED, CLUSTER_CONTACT_INFO_CNT   ),
    1453           0 :                                                                    FD_MHIST_MAX(         SHRED, CLUSTER_CONTACT_INFO_CNT   ) ) );
    1454           0 :   fd_histf_join( fd_histf_new( ctx->metrics->batch_sz,             FD_MHIST_MIN(         SHRED, BATCH_SZ                   ),
    1455           0 :                                                                    FD_MHIST_MAX(         SHRED, BATCH_SZ                   ) ) );
    1456           0 :   fd_histf_join( fd_histf_new( ctx->metrics->batch_microblock_cnt, FD_MHIST_MIN(         SHRED, BATCH_MICROBLOCK_CNT       ),
    1457           0 :                                                                    FD_MHIST_MAX(         SHRED, BATCH_MICROBLOCK_CNT       ) ) );
    1458           0 :   fd_histf_join( fd_histf_new( ctx->metrics->shredding_timing,     FD_MHIST_SECONDS_MIN( SHRED, SHREDDING_DURATION_SECONDS ),
    1459           0 :                                                                    FD_MHIST_SECONDS_MAX( SHRED, SHREDDING_DURATION_SECONDS ) ) );
    1460           0 :   fd_histf_join( fd_histf_new( ctx->metrics->add_shred_timing,     FD_MHIST_SECONDS_MIN( SHRED, ADD_SHRED_DURATION_SECONDS ),
    1461           0 :                                                                    FD_MHIST_SECONDS_MAX( SHRED, ADD_SHRED_DURATION_SECONDS ) ) );
    1462           0 :   memset( ctx->metrics->shred_processing_result, '\0', sizeof(ctx->metrics->shred_processing_result) );
    1463           0 :   ctx->metrics->invalid_block_id_cnt         = 0UL;
    1464           0 :   ctx->metrics->shred_rejected_unchained_cnt = 0UL;
    1465           0 :   ctx->metrics->repair_rcv_cnt               = 0UL;
    1466           0 :   ctx->metrics->repair_rcv_bytes             = 0UL;
    1467           0 :   ctx->metrics->turbine_rcv_cnt              = 0UL;
    1468           0 :   ctx->metrics->turbine_rcv_bytes            = 0UL;
    1469           0 :   ctx->metrics->bad_nonce                    = 0UL;
    1470             : 
    1471           0 :   ctx->pending_batch.microblock_cnt = 0UL;
    1472           0 :   ctx->pending_batch.txn_cnt        = 0UL;
    1473           0 :   ctx->pending_batch.pos            = 0UL;
    1474           0 :   ctx->pending_batch.slot           = 0UL;
    1475           0 :   memset( ctx->pending_batch.payload, 0, sizeof(ctx->pending_batch.payload) );
    1476             : 
    1477           0 :   memset( ctx->epoch_schedule, 0, sizeof(ctx->epoch_schedule) );
    1478           0 :   for( ulong i=0UL; i<FD_SHRED_FEATURES_ACTIVATION_SLOT_CNT; i++ ) {
    1479           0 :     ctx->features_activation->slots[i] = FD_SHRED_FEATURES_ACTIVATION_SLOT_DISABLED;
    1480           0 :   }
    1481             : 
    1482           0 :   ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
    1483           0 :   if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    1484           0 :     FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
    1485             : 
    1486           0 :   memset( ctx->block_ids, 0, sizeof(ctx->block_ids) );
    1487           0 : }
    1488             : 
    1489             : static ulong
    1490             : populate_allowed_seccomp( fd_topo_t const *      topo,
    1491             :                           fd_topo_tile_t const * tile,
    1492             :                           ulong                  out_cnt,
    1493           0 :                           struct sock_filter *   out ) {
    1494           0 :   (void)topo;
    1495           0 :   (void)tile;
    1496             : 
    1497           0 :   populate_sock_filter_policy_fd_shred_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
    1498           0 :   return sock_filter_policy_fd_shred_tile_instr_cnt;
    1499           0 : }
    1500             : 
    1501             : static ulong
    1502             : populate_allowed_fds( fd_topo_t const *      topo,
    1503             :                       fd_topo_tile_t const * tile,
    1504             :                       ulong                  out_fds_cnt,
    1505           0 :                       int *                  out_fds ) {
    1506           0 :   (void)topo;
    1507           0 :   (void)tile;
    1508             : 
    1509           0 :   if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
    1510             : 
    1511           0 :   ulong out_cnt = 0UL;
    1512           0 :   out_fds[ out_cnt++ ] = 2; /* stderr */
    1513           0 :   if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
    1514           0 :     out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
    1515           0 :   return out_cnt;
    1516           0 : }
    1517             : 
/* Excluding net_out (where the link is unreliable), STEM_BURST needs
   to guarantee enough credits for the worst case. There are 4 cases
   to consider: (IN_KIND_NET/IN_KIND_POH) x (Frankendancer/Firedancer)
   In the IN_KIND_NET case:  (Frankendancer) sends 1 frag to
   store;  (Firedancer) that is one frag for the shred to repair, and
   then another frag to repair for the FEC set.
   In the IN_KIND_POH case:  (Frankendancer) there might be
   FD_SHRED_BATCH_FEC_SETS_MAX FEC sets;  (Firedancer) that is
   FD_SHRED_BATCH_FEC_SETS_MAX frags to repair (one per FEC set).
   Therefore, the worst case is IN_KIND_POH for Frankendancer. */
#define STEM_BURST (FD_SHRED_BATCH_FEC_SETS_MAX)

/* See explanation in fd_pack */
#define STEM_LAZY  (128L*3000L)

/* Per-tile context type threaded through the stem callbacks below. */
#define STEM_CALLBACK_CONTEXT_TYPE  fd_shred_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_shred_ctx_t)

/* Wire this tile's callbacks (defined earlier in this file) into the
   generic stem run loop instantiated by the include below. */
#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
#define STEM_CALLBACK_METRICS_WRITE       metrics_write
#define STEM_CALLBACK_BEFORE_FRAG         before_frag
#define STEM_CALLBACK_DURING_FRAG         during_frag
#define STEM_CALLBACK_AFTER_FRAG          after_frag

#include "../stem/fd_stem.c"
    1543             : 
/* Run-tile descriptor registering the shred tile with the topology
   runner: sandbox policy hooks, scratch sizing, the two-phase init
   (privileged, then unprivileged), and the stem_run main loop
   instantiated by the fd_stem.c include above. */
fd_topo_run_tile_t fd_tile_shred = {
  .name                     = "shred",
  .populate_allowed_seccomp = populate_allowed_seccomp, /* seccomp-BPF filter (defined above) */
  .populate_allowed_fds     = populate_allowed_fds,     /* fds kept open in the sandbox */
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,          /* runs before sandboxing (e.g. key loading) */
  .unprivileged_init        = unprivileged_init,        /* runs after privileges are dropped */
  .run                      = stem_run,                 /* generic stem event loop */
};

Generated by: LCOV version 1.14