LCOV - code coverage report
Current view: top level - discof/repair - fd_policy.h (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 47 0.0 %
Date: 2026-03-19 18:19:27 Functions: 0 35 0.0 %

          Line data    Source code
       1             : #ifndef HEADER_fd_src_discof_repair_fd_policy_h
       2             : #define HEADER_fd_src_discof_repair_fd_policy_h
       3             : 
       4             : /* fd_policy implements the policy of the Repair agent.  It determines
       5             :    what next repair request the validator should make. It also
       6             :    determines which peer(s) the validator should make the request to.
       7             : 
       8             :    The default policy implementation is to prioritize discovering
       9             :    ancestry for orphaned slots first (making an orphan request), and
      10             :    then making forward progress on the main ancestry tree (making a
      11             :    regular request) when there are no orphan requests to make.
      12             : 
      13             :    Regular shred requests are made round-robin BFS with time-based
      14             :    dedup: round-robin through all the repair peers we know about, and
      15             :    BFS down the repair forest (see fd_forest.h).
      16             : 
       17             :    This policy dedups identical repair requests that occur within a
       18             :    specified time window of each other. */
      19             : 
      20             : #include "../../flamenco/types/fd_types_custom.h"
      21             : #include "../forest/fd_forest.h"
      22             : #include "../../util/net/fd_net_headers.h"
      23             : #include "fd_repair.h"
      24             : #include "../../disco/shred/fd_rnonce_ss.h"
      25             : 
      26             : /* fd_policy_dedup implements a dedup cache for already sent Repair
      27             :    requests.  It is backed by a map and linked list, in which the least
      28             :    recently used (oldest Repair request) in the map is evicted when the
      29             :    map is full. */
      30             : 
typedef struct fd_policy_dedup fd_policy_dedup_t; /* forward decl */

/* fd_policy_dedup_ele describes an element in the dedup cache.  The key
   compactly encodes an fd_repair_req_t.

   | kind (2 bits)       | slot (32 bits)  | shred_idx (15 bits) |
   | 0x0 (SHRED)         | slot            | shred_idx           |
   | 0x1 (HIGHEST_SHRED) | slot            | >=shred_idx         |
   | 0x2 (ORPHAN)        | orphan slot     | N/A                 |

   The fields pack into the high bits of a ulong: bits 62-63 kind, bits
   30-61 slot, bits 15-29 shred_idx (the low 15 bits are unused — see
   fd_policy_dedup_key and its accessors below).

   Note the common header (sig, from, to, ts, nonce) is not included. */

struct fd_policy_dedup_ele {
  ulong key;      /* compact encoding of fd_repair_req_t detailed above */
  ulong prev;     /* pool index of prev element, reserved by lru dlist */
  ulong next;     /* pool index of next element, reserved by lru dlist */
  ulong hash;     /* reserved by pool (free list next) and map_chain (chain next) */
  long  req_ts;   /* timestamp when the request was sent (compared against FD_POLICY_DEDUP_TIMEOUT) */
};
typedef struct fd_policy_dedup_ele fd_policy_dedup_ele_t;
      51             : 
      52             : FD_FN_CONST static inline ulong
      53           0 : fd_policy_dedup_key( uint kind, ulong slot, uint shred_idx ) {
      54           0 :   return (ulong)kind << 62 | slot << 30 | shred_idx << 15;
      55           0 : }
      56             : 
      57           0 : FD_FN_CONST static inline uint  fd_policy_dedup_key_kind     ( ulong key ) { return (uint)fd_ulong_extract( key, 62, 63 ); }
      58           0 : FD_FN_CONST static inline ulong fd_policy_dedup_key_slot     ( ulong key ) { return       fd_ulong_extract( key, 30, 61 ); }
      59           0 : FD_FN_CONST static inline uint  fd_policy_dedup_key_shred_idx( ulong key ) { return (uint)fd_ulong_extract( key, 15, 29  ); }
      60             : 
/* Element pool backing the dedup cache.  The pool free list reuses the
   hash field, which map_chain below also uses as its chain link (an
   element is either free in the pool or linked in the map, never
   both). */
#define POOL_NAME fd_policy_dedup_pool
#define POOL_T    fd_policy_dedup_ele_t
#define POOL_NEXT hash
#include "../../util/tmpl/fd_pool.c"

/* Chained map over dedup elements, keyed by the compact request key. */
#define MAP_NAME  fd_policy_dedup_map
#define MAP_ELE_T fd_policy_dedup_ele_t
#define MAP_NEXT  hash
#include "../../util/tmpl/fd_map_chain.c"

/* Doubly-linked list (next/prev) ordering dedup elements for LRU
   eviction. */
#define DLIST_NAME   fd_policy_dedup_lru
#define DLIST_ELE_T  fd_policy_dedup_ele_t
#define DLIST_NEXT   next
#define DLIST_PREV   prev
#include "../../util/tmpl/fd_dlist.c"
/* fd_policy_dedup is the dedup cache of already-sent repair requests:
   a chained map for lookup plus a doubly-linked list for least-
   recently-used eviction, both backed by a shared element pool. */
struct fd_policy_dedup {
  fd_policy_dedup_map_t * map;  /* map of dedup elements keyed by compact request key */
  fd_policy_dedup_ele_t * pool; /* memory pool of dedup elements */
  fd_policy_dedup_lru_t * lru;  /* doubly-linked list of dedup elements by insertion order (oldest evicted first) */
};
      81             : 
/* fd_policy_peer_t describes a peer validator that serves repairs.
   Peers are discovered through gossip, via a "ContactInfo" message that
   shares the validator's ip and repair server port. */

struct fd_policy_peer {
  fd_pubkey_t key;     /* map key, pubkey of the validator */
  ulong       next;    /* reserved for map_chain, pool */
  uint        ip4;     /* ip4 addr of the peer */
  ushort      port;    /* repair server port of the peer */
  ulong       req_cnt; /* count of requests we've sent to this peer */
  ulong       res_cnt; /* count of responses we've received from this peer */

  /* Links for membership in one of the latency-bucket dlists (fast /
     slow, see fd_policy_peers). */
  struct {
    ulong next;
    ulong prev;
  } dlist;

  /* below are for measuring bandwidth usage */
  long  first_req_ts;  /* timestamp of the first request sent to this peer */
  long  last_req_ts;   /* timestamp of the most recent request sent */

  long  first_resp_ts; /* timestamp of the first response received */
  long  last_resp_ts;  /* timestamp of the most recent response received */

  long  total_lat; /* total RTT over all responses in ns (averaged via res_cnt for bucketing) */
  ulong stake;     /* peer's stake (units presumably lamports — TODO confirm) */

  uint ping;  /* whether this peer currently has a ping in our sign queue */
};
typedef struct fd_policy_peer fd_policy_peer_t;
     112             : 
/* Chained map of peers keyed by validator pubkey.  The hash reads the
   first 8 bytes of the pubkey XORed with the seed (pubkeys are
   effectively uniformly distributed). */
#define MAP_NAME                 fd_policy_peer_map
#define MAP_ELE_T                fd_policy_peer_t
#define MAP_KEY_T                fd_pubkey_t
#define MAP_KEY_EQ(k0,k1)        (!memcmp( (k0)->uc, (k1)->uc, 32UL ))
#define MAP_KEY_HASH(key,seed)   (seed^fd_ulong_load_8( (key)->uc ))
#include "../../util/tmpl/fd_map_chain.c"

/* Element pool backing the peer map and latency dlists. */
#define POOL_NAME fd_policy_peer_pool
#define POOL_T    fd_policy_peer_t
#include "../../util/tmpl/fd_pool.c"

/* Doubly-linked list type used for the fast / slow latency buckets. */
#define DLIST_NAME  fd_policy_peer_dlist
#define DLIST_ELE_T fd_policy_peer_t
#define DLIST_NEXT  dlist.next
#define DLIST_PREV  dlist.prev
#include "../../util/tmpl/fd_dlist.c"
     129             : 
/* fd_policy_peers implements the data structures and bookkeeping for
   selecting repair peers via round-robin. */

struct fd_policy_peers {
  fd_policy_peer_t * pool;        /* memory pool of peers */
  fd_policy_peer_dlist_t * fast;  /* [0, FD_POLICY_LATENCY_THRESH]   ms latency group FD_POLICY_LATENCY_FAST */
  fd_policy_peer_dlist_t * slow;  /* (FD_POLICY_LATENCY_THRESH, inf) ms latency group FD_POLICY_LATENCY_SLOW */
  fd_policy_peer_map_t * map;     /* map keyed by pubkey to peer data */

  /* Round-robin selection cursor: which stage of bucket_stages we are
     in and the next peer within that stage's bucket. */
  struct {
     uint stage;                         /* < sizeof(bucket_stages).  NOTE(review): sizeof yields 28 (bytes), not the element count 7 — confirm intended bound */
     fd_policy_peer_dlist_iter_t iter;   /* round-robin index of next peer */
  } select;
};
typedef struct fd_policy_peers fd_policy_peers_t;
     144             : 
/* Latency group tags, used in bucket_stages below. */
#define FD_POLICY_LATENCY_FAST 1
#define FD_POLICY_LATENCY_SLOW 3

/* Policy parameters start */
#define FD_POLICY_LATENCY_THRESH 80e6L /* less than this is a BEST peer, otherwise a WORST peer.  NOTE(review): 80e6L is a long double literal (80 ms in ns); comparisons against integer ns promote to long double — confirm an integer constant was not intended */
#define FD_POLICY_DEDUP_TIMEOUT  80e6L /* how long wait to request the same shred (ns; same literal-type caveat as above) */
     151             : 
/* Round robins through ALL the worst peers once, then round robins
   through ALL the best peers once, then round robins through ALL the
   best peers again, etc. All peers are initially added to the worst
   bucket, and moved once round trip times have been recorded.

   NOTE(review): static const at file scope in a header gives each
   translation unit its own copy of this 28-byte table — presumably
   acceptable here. */

static const uint bucket_stages[7] = {
   FD_POLICY_LATENCY_SLOW, /* do a cycle through worst peers 1/7 times to see if any improvements are made */
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
};
/* Policy parameters end */
     167             : 
/* fd_policy is the top-level repair policy state: the dedup cache of
   outstanding requests, the peer selection bookkeeping, and traversal
   timing state. */
struct fd_policy {
  fd_policy_dedup_t dedup; /* dedup cache of already sent requests */
  fd_policy_peers_t peers; /* repair peers (strategy & data) */
  long              tsmax; /* maximum time for an iteration before resetting the DFS to root */
  long              tsref; /* reference timestamp for resetting DFS */

  fd_rnonce_ss_t    rnonce_ss[1]; /* repair nonce state, copied in by fd_policy_new */

  ulong turbine_slot0; /* set via fd_policy_set_turbine_slot0; presumably the first slot observed via turbine — confirm against caller */
};
typedef struct fd_policy fd_policy_t;
     179             : 
/* Constructors */

/* fd_policy_{align,footprint} return the required alignment and
   footprint of a memory region suitable for use as a policy with up to
   dedup_max dedup cache entries and peer_max peers. */

FD_FN_CONST static inline ulong
fd_policy_align( void ) {
  return 128UL; /* also used as the alignment of every appended sub-region in fd_policy_footprint's FD_LAYOUT_FINI */
}
     190             : 
/* fd_policy_footprint returns the number of bytes needed for a policy
   supporting up to dedup_max dedup entries and peer_max peers.  The
   region lays out, in order: the fd_policy_t struct itself, the dedup
   map / pool / lru, the peer map (chain count estimated from
   peer_max), the peer pool, and the two latency-bucket dlists (fast
   then slow).  The append order here must match the carving order in
   fd_policy_new / fd_policy_join. */
FD_FN_CONST static inline ulong
fd_policy_footprint( ulong dedup_max, ulong peer_max ) {
  ulong peer_chain_cnt = fd_policy_peer_map_chain_cnt_est( peer_max );
  return FD_LAYOUT_FINI(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_INIT,
      fd_policy_align(),            sizeof(fd_policy_t)                             ),
      fd_policy_dedup_map_align(),  fd_policy_dedup_map_footprint ( dedup_max )     ),
      fd_policy_dedup_pool_align(), fd_policy_dedup_pool_footprint( dedup_max )     ),
      fd_policy_dedup_lru_align(),  fd_policy_dedup_lru_footprint()                 ),
      fd_policy_peer_map_align(),   fd_policy_peer_map_footprint ( peer_chain_cnt ) ),
      fd_policy_peer_pool_align(),  fd_policy_peer_pool_footprint( peer_max )       ),
      fd_policy_peer_dlist_align(), fd_policy_peer_dlist_footprint()                ),
      fd_policy_peer_dlist_align(), fd_policy_peer_dlist_footprint()                ),
    fd_policy_align() );
}
     214             : 
/* fd_policy_new formats an unused memory region for use as a policy.
   shmem is a non-NULL pointer to this region in the local address
   space with the required footprint and alignment.  dedup_max and
   peer_max size the dedup cache and peer table; seed seeds the map
   hashes.  rnonce_ss is copied locally, so the read interest is not
   retained after this function returns. */

void *
fd_policy_new( void * shmem, ulong dedup_max, ulong peer_max, ulong seed, fd_rnonce_ss_t const * rnonce_ss );

/* fd_policy_join joins the caller to the policy.  policy points to the
   first byte of the memory region backing the policy in the caller's
   address space.  Returns a pointer in the local address space to
   policy on success. */

fd_policy_t *
fd_policy_join( void * policy );

/* fd_policy_leave leaves a current local join.  Returns a pointer to
   the underlying shared memory region on success and NULL on failure
   (logs details).  Reasons for failure include policy is NULL. */

void *
fd_policy_leave( fd_policy_t const * policy );

/* fd_policy_delete unformats a memory region used as a policy.  Assumes
   nobody is joined to the region.  Returns a pointer to the underlying
   shared memory region or NULL if used obviously in error (e.g. policy
   is obviously not a policy ... logs details).  The ownership of the
   memory region is transferred to the caller. */

void *
fd_policy_delete( void * policy );

/* fd_policy_next returns the next repair request that should be made.
   Currently implements the default round-robin DFS strategy.
   NOTE(review): the file header describes the traversal as round-robin
   BFS while this comment (and the tsmax/tsref field comments) say DFS
   — confirm which is accurate. */

fd_repair_msg_t const *
fd_policy_next( fd_policy_t * policy, fd_forest_t * forest, fd_repair_t * repair, long now, ulong highest_known_slot, int * charge_busy );

/* fd_policy_peer_upsert upserts a peer into the policy.  If the peer
   does not exist, it is created.  If the peer already exists, it is
   updated.  Returns a pointer to the peer if a new peer was created,
   otherwise NULL (including on updates). */
fd_policy_peer_t const *
fd_policy_peer_upsert( fd_policy_t * policy, fd_pubkey_t const * key, fd_ip4_port_t const * addr );

/* fd_policy_peer_query returns a mutable pointer to the peer with
   pubkey key, or NULL if no such peer is known. */
fd_policy_peer_t *
fd_policy_peer_query( fd_policy_t * policy, fd_pubkey_t const * key );

/* fd_policy_peer_remove removes the peer with pubkey key.  The int
   return presumably signals success / failure — confirm against the
   implementation. */
int
fd_policy_peer_remove( fd_policy_t * policy, fd_pubkey_t const * key );

/* fd_policy_peer_select returns the pubkey of the next peer to target
   with a repair request, following the round-robin bucket_stages
   schedule. */
fd_pubkey_t const *
fd_policy_peer_select( fd_policy_t * policy );

/* fd_policy_peer_request_update records that a request was sent to
   peer to (presumably updates req_cnt / request timestamps — confirm
   against the implementation). */
void
fd_policy_peer_request_update( fd_policy_t * policy, fd_pubkey_t const * to );
     272             : 
     273             : static inline fd_policy_peer_dlist_t *
     274           0 : fd_policy_peer_latency_bucket( fd_policy_t * policy, long total_rtt /* ns */, ulong res_cnt ) {
     275           0 :    if( res_cnt == 0 || (long)(total_rtt / (long)res_cnt) > FD_POLICY_LATENCY_THRESH ) return policy->peers.slow;
     276           0 :    return policy->peers.fast;
     277           0 : }
     278             : 
/* fd_policy_peer_response_update records a response received from peer
   to with round trip time rtt (ns); presumably updates res_cnt /
   total_lat and the peer's latency bucket — confirm against the
   implementation. */
void
fd_policy_peer_response_update( fd_policy_t * policy, fd_pubkey_t const * to, long rtt );

/* fd_policy_set_turbine_slot0 records slot as the policy's
   turbine_slot0 reference (see fd_policy struct). */
void
fd_policy_set_turbine_slot0( fd_policy_t * policy, ulong slot );
     284             : 
      285             : #endif /* HEADER_fd_src_discof_repair_fd_policy_h */

Generated by: LCOV version 1.14