LCOV - code coverage report
Current view: top level - tango/dcache - fd_dcache.c (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 40 163 24.5 %
Date: 2026-03-19 18:19:27 Functions: 5 12 41.7 %

          Line data    Source code
       1             : #include "fd_dcache_private.h"
       2             : 
       3             : ulong
       4             : fd_dcache_req_data_sz( ulong mtu,
       5             :                        ulong depth,
       6             :                        ulong burst,
       7           0 :                        int   compact ) {
       8             : 
       9           0 :   if( FD_UNLIKELY( !mtu   ) ) return 0UL; /* zero mtu (technically unnecessary) */
      10           0 :   if( FD_UNLIKELY( !depth ) ) return 0UL; /* zero depth */
      11           0 :   if( FD_UNLIKELY( !burst ) ) return 0UL; /* zero burst */
      12             : 
      13           0 :   ulong slot_footprint = FD_DCACHE_SLOT_FOOTPRINT( mtu );
      14           0 :   if( FD_UNLIKELY( !slot_footprint ) ) return 0UL; /* overflow */
      15             : 
      16           0 :   ulong slot_cnt = depth + burst;  
      17           0 :   if( FD_UNLIKELY( slot_cnt<depth ) ) return 0UL; /* overflow */
      18           0 :   slot_cnt += (ulong)!!compact;
      19           0 :   if( FD_UNLIKELY( !slot_cnt ) ) return 0UL; /* overflow (technically unnecessary) */
      20           0 :   if( FD_UNLIKELY( slot_cnt>(ULONG_MAX/slot_footprint) ) ) return 0UL; /* overflow */
      21             : 
      22           0 :   return slot_footprint*slot_cnt;
      23           0 : }
      24             : 
      25             : ulong
      26           3 : fd_dcache_align( void ) {
      27           3 :   return FD_DCACHE_ALIGN;
      28           3 : }
      29             : 
      30             : ulong
      31             : fd_dcache_footprint( ulong data_sz,
      32           1 :                      ulong app_sz ) {
      33             : 
      34           1 :   ulong data_footprint = fd_ulong_align_up( data_sz, FD_DCACHE_ALIGN );
      35           1 :   if( FD_UNLIKELY( data_footprint<data_sz ) ) return 0UL; /* overflow */
      36             : 
      37           1 :   ulong app_footprint  = fd_ulong_align_up( app_sz,  FD_DCACHE_ALIGN );
      38           1 :   if( FD_UNLIKELY( app_footprint<app_sz ) ) return 0UL; /* overflow */
      39             : 
      40           1 :   ulong footprint = data_footprint + app_footprint; /* data and app */
      41           1 :   if( FD_UNLIKELY( footprint<data_footprint ) ) return 0UL; /* overflow */
      42             : 
      43           1 :   footprint += sizeof(fd_dcache_private_hdr_t); /* header and guard */
      44           1 :   if( FD_UNLIKELY( footprint<sizeof(fd_dcache_private_hdr_t) ) ) return 0UL; /* overflow */
      45             : 
      46           1 :   return footprint;
      47           1 : }
      48             : 
      49             : void *
      50             : fd_dcache_new( void * shmem,
      51             :                ulong  data_sz,
      52           1 :                ulong  app_sz ) {
      53             : 
      54           1 :   if( FD_UNLIKELY( !shmem ) ) {
      55           0 :     FD_LOG_WARNING(( "NULL shmem" ));
      56           0 :     return NULL;
      57           0 :   }
      58             : 
      59           1 :   if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shmem, fd_dcache_align() ) ) ) {
      60           0 :     FD_LOG_WARNING(( "misaligned shmem" ));
      61           0 :     return NULL;
      62           0 :   }
      63             : 
      64           1 :   ulong footprint = fd_dcache_footprint( data_sz, app_sz );
      65           1 :   if( FD_UNLIKELY( !footprint ) ) {
      66           0 :     FD_LOG_WARNING(( "bad data_sz (%lu) or app_sz (%lu)", data_sz, app_sz ));
      67           0 :     return NULL;
      68           0 :   }
      69             : 
      70           1 :   fd_memset( shmem, 0, sizeof(fd_dcache_private_hdr_t ) );
      71             : 
      72           1 :   fd_dcache_private_hdr_t * hdr = (fd_dcache_private_hdr_t *)shmem;
      73             : 
      74           1 :   hdr->data_sz = data_sz;
      75           1 :   hdr->app_sz  = app_sz;
      76           1 :   hdr->app_off = sizeof(fd_dcache_private_hdr_t) + fd_ulong_align_up( data_sz, FD_DCACHE_ALIGN );
      77             : 
      78           1 :   fd_memset( (uchar*)shmem+hdr->app_off, 0, app_sz );
      79             : 
      80           1 :   FD_COMPILER_MFENCE();
      81           1 :   FD_VOLATILE( hdr->magic ) = FD_DCACHE_MAGIC;
      82           1 :   FD_COMPILER_MFENCE();
      83             : 
      84           1 :   return shmem;
      85           1 : }
      86             : 
      87             : uchar *
      88           1 : fd_dcache_join( void * shdcache ) {
      89             : 
      90           1 :   if( FD_UNLIKELY( !shdcache ) ) {
      91           0 :     FD_LOG_WARNING(( "NULL shdcache" ));
      92           0 :     return NULL;
      93           0 :   }
      94             : 
      95           1 :   if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shdcache, fd_dcache_align() ) ) ) {
      96           0 :     FD_LOG_WARNING(( "misaligned shdcache" ));
      97           0 :     return NULL;
      98           0 :   }
      99             : 
     100           1 :   fd_dcache_private_hdr_t * hdr = (fd_dcache_private_hdr_t *)shdcache;
     101           1 :   if( FD_UNLIKELY( hdr->magic!=FD_DCACHE_MAGIC ) ) {
     102           0 :     FD_LOG_WARNING(( "bad magic" ));
     103           0 :     return NULL;
     104           0 :   }
     105             : 
     106           1 :   return fd_dcache_private_dcache( hdr );
     107           1 : }
     108             : 
     109             : void *
     110           0 : fd_dcache_leave( uchar const * dcache ) {
     111             : 
     112           0 :   if( FD_UNLIKELY( !dcache ) ) {
     113           0 :     FD_LOG_WARNING(( "NULL dcache" ));
     114           0 :     return NULL;
     115           0 :   }
     116             : 
     117           0 :   return (void *)fd_dcache_private_hdr_const( dcache ); /* Kinda ugly const cast */
     118           0 : }
     119             : 
     120             : void *
     121           0 : fd_dcache_delete( void * shdcache ) {
     122             : 
     123           0 :   if( FD_UNLIKELY( !shdcache ) ) {
     124           0 :     FD_LOG_WARNING(( "NULL shdcache" ));
     125           0 :     return NULL;
     126           0 :   }
     127             : 
     128           0 :   if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)shdcache, fd_dcache_align() ) ) ) {
     129           0 :     FD_LOG_WARNING(( "misaligned shdcache" ));
     130           0 :     return NULL;
     131           0 :   }
     132             : 
     133           0 :   fd_dcache_private_hdr_t * hdr = (fd_dcache_private_hdr_t *)shdcache;
     134           0 :   if( FD_UNLIKELY( hdr->magic != FD_DCACHE_MAGIC ) ) {
     135           0 :     FD_LOG_WARNING(( "bad magic" ));
     136           0 :     return NULL;
     137           0 :   }
     138             : 
     139           0 :   FD_COMPILER_MFENCE();
     140           0 :   FD_VOLATILE( hdr->magic ) = 0UL;
     141           0 :   FD_COMPILER_MFENCE();
     142             : 
     143           0 :   return shdcache;
     144           0 : }
     145             : 
     146             : ulong
     147           1 : fd_dcache_data_sz( uchar const * dcache ) {
     148           1 :   return fd_dcache_private_hdr_const( dcache )->data_sz;
     149           1 : }
     150             : 
     151             : ulong
     152           0 : fd_dcache_app_sz( uchar const * dcache ) {
     153           0 :   return fd_dcache_private_hdr_const( dcache )->app_sz;
     154           0 : }
     155             : 
     156             : uchar const *
     157           0 : fd_dcache_app_laddr_const( uchar const * dcache ) {
     158           0 :   fd_dcache_private_hdr_t const * hdr = fd_dcache_private_hdr_const( dcache );
     159           0 :   return (uchar const *)(((ulong)hdr) + hdr->app_off);
     160           0 : }
     161             : 
     162             : uchar *
     163           0 : fd_dcache_app_laddr( uchar * dcache ) {
     164           0 :   fd_dcache_private_hdr_t * hdr = fd_dcache_private_hdr( dcache );
     165           0 :   return (uchar *)(((ulong)hdr) + hdr->app_off);
     166           0 : }
     167             : 
/* fd_dcache_compact_is_safe checks whether a dcache at dcache, whose
   chunk indices are measured relative to base, is large and aligned
   enough to be used compactly with frags up to mtu bytes and depth
   frags in flight.  Returns 1 if safe, 0 (with a logged warning)
   otherwise.  All arithmetic below is carefully ordered to be overflow
   safe; do not reorder. */

int
fd_dcache_compact_is_safe( void const * base,
                           void const * dcache,
                           ulong        mtu,
                           ulong        depth ) {

  /* Validate base */

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)base, 2UL*FD_CHUNK_SZ ) ) ) {
    FD_LOG_WARNING(( "base is not double chunk aligned" ));
    return 0;
  }

  /* dcache must not precede base or chunk indices would be negative */

  if( FD_UNLIKELY( (ulong)dcache < (ulong)base ) ) {
    FD_LOG_WARNING(( "dcache before base" ));
    return 0;
  }

  /* Validate dcache */

  if( FD_UNLIKELY( !dcache ) ) {
    FD_LOG_WARNING(( "NULL dcache" ));
    return 0;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)dcache, 2UL*FD_CHUNK_SZ ) ) ) { /* Should be impossible if valid join */
    FD_LOG_WARNING(( "bad dcache (alignment)" ));
    return 0;
  }

  /* dcache end address must not wrap the address space */

  ulong data_sz = fd_dcache_data_sz( (uchar const *)dcache );
  if( FD_UNLIKELY( ((ulong)dcache + (ulong)data_sz) < (ulong)dcache ) ) { /* Should be impossible if valid join */
    FD_LOG_WARNING(( "bad dcache (data_sz)" ));
    return 0;
  }

  /* Chunk indices of the dcache's first and one-past-last chunk,
     relative to base (no overflow given the checks above) */

  ulong chunk0 = ((ulong)dcache - (ulong)base) >> FD_CHUNK_LG_SZ; /* No overflow */
  ulong chunk1 = ((ulong)dcache + data_sz - (ulong)base) >> FD_CHUNK_LG_SZ; /* No overflow */

  /* Chunk indices must be 32-bit compressible for frag metadata */

  if( FD_UNLIKELY( chunk1>(ulong)UINT_MAX ) ) {
    FD_LOG_WARNING(( "base to dcache address space span too large" ));
    return 0;
  }

  /* At this point, complete chunks in dcache cover [chunk0,chunk1)
     relative to the base address and any range of chunks in the dcache
     can be losslessly compressed into two 32-bit values. */

  /* Validate mtu */

  if( FD_UNLIKELY( !mtu ) ) {
    FD_LOG_WARNING(( "zero mtu" ));
    return 0;
  }

  /* Round mtu up to a double chunk multiple, watching for wrap */

  ulong mtu_up = mtu + (2UL*FD_CHUNK_SZ-1UL);

  if( FD_UNLIKELY( mtu_up < mtu ) ) {
    FD_LOG_WARNING(( "too large mtu" ));
    return 0;
  }

  /* chunk_mtu: even chunk count covering an mtu frag */

  ulong chunk_mtu = (mtu_up >> (1+FD_CHUNK_LG_SZ)) << 1; /* >0 */

  /* At this point, mtu is non-zero, chunk_mtu is non-zero and a
     sufficient number of chunks to cover an mtu frag.  Further, the
     fd_dcache_chunk_next calculation is guaranteed overflow safe for
     any size in [0,mtu]. */

  /* Validate depth */

  if( FD_UNLIKELY( !depth ) ) {
    FD_LOG_WARNING(( "zero depth" ));
    return 0;
  }

  /* Required chunk count is (depth+2)*chunk_mtu-1; bound depth first
     so the multiply below cannot overflow */

  ulong overhead  = 2UL*chunk_mtu-1UL; /* no overflow chunk_sz >> 1, chunk_mtu << ULONG_MAX/2 */
  ulong depth_max = (ULONG_MAX-overhead) / chunk_mtu; /* no overflow as overhead < ULONG_MAX */

  if( FD_UNLIKELY( depth > depth_max ) ) {
    FD_LOG_WARNING(( "too large depth" ));
    return 0;
  }

  ulong chunk_req = depth*chunk_mtu + overhead; /* (depth+2)*chunk_mtu-1, no overflow */

  /* The dcache's chunk span must cover the required chunk count */

  if( FD_UNLIKELY( (chunk1-chunk0) < chunk_req ) ) {
    FD_LOG_WARNING(( "too small dcache" ));
    return 0;
  }

  return 1;
}
     261             : 

Generated by: LCOV version 1.14