LCOV - code coverage report
Current view: top level - ballet/sha256 - fd_sha256_batch_avx.c (source / functions)
Test: cov.lcov
Date: 2025-08-05 05:04:49
Coverage:   Lines:     180 hit / 180 total (100.0 %)
            Functions:   1 hit /   1 total (100.0 %)

          Line data    Source code
       1             : #define FD_SHA256_BATCH_IMPL 1
       2             : 
       3             : #include "fd_sha256.h"
       4             : #include "fd_sha256_constants.h"
       5             : #include "../../util/simd/fd_avx.h"
       6             : 
       7             : FD_STATIC_ASSERT( FD_SHA256_BATCH_MAX==8UL, compat );
       8             : 
       9             : void
      10             : fd_sha256_private_batch_avx( ulong          batch_cnt,
      11             :                              void const *   _batch_data,
      12             :                              ulong const *  batch_sz,
      13    21138411 :                              void * const * _batch_hash ) {
      14             : 
      15             :   /* If the batch is too small, it's faster to run each part of the
       16             :      batch sequentially.  When SHA-NI instructions are available, the
       17             :      sequential implementation is itself faster, so a larger batch is
       18             :      needed to justify using the batched implementation. */
      19             : 
      20     1594061 : # if FD_HAS_SHANI
      21     1594061 : # define MIN_BATCH_CNT (6UL)
      22             : # else
      23    19544350 : # define MIN_BATCH_CNT (2UL)
      24    19544350 : # endif
      25             : 
      26    21138411 :   if( FD_UNLIKELY( batch_cnt<MIN_BATCH_CNT ) ) {
      27     4354111 :     void const * const * batch_data = (void const * const *)_batch_data;
      28     9313107 :     for( ulong batch_idx=0UL; batch_idx<batch_cnt; batch_idx++ )
      29     4958996 :       fd_sha256_hash( batch_data[ batch_idx ], batch_sz[ batch_idx ], _batch_hash[ batch_idx ] );
      30     4354111 :     return;
      31     4354111 :   }
      32             : 
      33    16784300 : # undef MIN_BATCH_CNT
      34             : 
      35             :   /* SHA appends to the end of each message 9 bytes of additional data
       36             :      (a message terminator byte and the big endian ulong with the
      37             :      message size in bits) and enough zero padding to make the message
      38             :      an integer number of blocks long.  We compute the 1 or 2 tail
      39             :      blocks of each message here.  We then process complete blocks of
      40             :      the original messages in place, switching to processing these tail
      41             :      blocks in the same pass toward the end.  TODO: This code could
       42             :      probably be SIMD optimized slightly more (this is where all of the
       43             :      performance-suboptimal parts of the SHA design live so it is
      44             :      just inherently gross).  The main optimization would probably be to
      45             :      allow tail reading to use a faster memcpy and then maybe some
      46             :      vectorization of the bswap. */
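
   /* Illustrative tail sizing (assuming FD_SHA256_PRIVATE_BUF_MAX is the
      usual 64 byte SHA-256 block size):

        sz = 100 :  tail_data_sz = 100 & 63 = 36
                    tail_sz      = align_up( 36+9, 64 ) =  64  ->  1 tail block
        sz = 120 :  tail_data_sz = 120 & 63 = 56
                    tail_sz      = align_up( 56+9, 64 ) = 128  ->  2 tail blocks

      That is, a second tail block is needed exactly when fewer than 9
      free bytes remain in the last partially filled block. */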
      47             : 
      48    16784300 :   ulong const * batch_data = (ulong const *)_batch_data;
      49             : 
      50    16784300 :   ulong batch_tail_data[ FD_SHA256_BATCH_MAX ] __attribute__((aligned(32)));
      51    16784300 :   ulong batch_tail_rem [ FD_SHA256_BATCH_MAX ] __attribute__((aligned(32)));
      52             : 
      53    16784300 :   uchar scratch[ FD_SHA256_BATCH_MAX*2UL*FD_SHA256_PRIVATE_BUF_MAX ] __attribute__((aligned(128)));
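
   /* Sizing note: each of the (at most FD_SHA256_BATCH_MAX==8) messages
      contributes at most 2 tail blocks of FD_SHA256_PRIVATE_BUF_MAX bytes
      each, so this scratch region covers the worst case (1 KiB if the
      buffer size is the usual 64 bytes). */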
      54    16784300 :   do {
      55    16784300 :     ulong scratch_free = (ulong)scratch;
      56             : 
      57    16784300 :     wv_t zero = wv_zero();
      58             : 
      59   142824604 :     for( ulong batch_idx=0UL; batch_idx<batch_cnt; batch_idx++ ) {
      60             : 
      61             :       /* Allocate the tail blocks for this message */
      62             : 
      63   126040304 :       ulong data = batch_data[ batch_idx ];
      64   126040304 :       ulong sz   = batch_sz  [ batch_idx ];
      65             : 
      66   126040304 :       ulong tail_data     = scratch_free;
      67   126040304 :       ulong tail_data_sz  = sz & (FD_SHA256_PRIVATE_BUF_MAX-1UL);
      68   126040304 :       ulong tail_data_off = fd_ulong_align_dn( sz,               FD_SHA256_PRIVATE_BUF_MAX );
      69   126040304 :       ulong tail_sz       = fd_ulong_align_up( tail_data_sz+9UL, FD_SHA256_PRIVATE_BUF_MAX );
      70             : 
      71   126040304 :       batch_tail_data[ batch_idx ] = tail_data;
      72   126040304 :       batch_tail_rem [ batch_idx ] = tail_sz >> FD_SHA256_PRIVATE_LG_BUF_MAX;
      73             : 
      74   126040304 :       scratch_free += tail_sz;
      75             : 
      76             :       /* Populate the tail blocks.  We first clear the blocks (note that
       77             :          it is okay to clobber bytes 64:127 if tail_sz is only 64, saving a
      78             :          nasty branch).  Then we copy any straggler data bytes into the
      79             :          tail, terminate the message, and finally record the size of the
      80             :          message in bits at the end as a big endian ulong.  */
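
   /* Concrete illustration, assuming 64 byte blocks: a 70 byte message
      has its first 64 bytes processed in place; its single tail block
      holds the 6 straggler bytes at offsets 0..5, the 0x80 terminator at
      offset 6, zero padding through offset 55, and the big endian bit
      count 70*8 = 560 in bytes 56..63. */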
      81             : 
      82   126040304 :       wv_st( (ulong *) tail_data,     zero );
      83   126040304 :       wv_st( (ulong *)(tail_data+32), zero );
      84   126040304 :       wv_st( (ulong *)(tail_data+64), zero );
      85   126040304 :       wv_st( (ulong *)(tail_data+96), zero );
      86             : 
      87   126040304 : #     if 1
       88             :       /* Quick experiments found that, once again, straight memcpy is
       89             :          much slower than fd_memcpy, which in turn is slightly slower
       90             :          than a site-optimized handrolled memcpy (though fd_memcpy would
       91             :          have a smaller L1I cache footprint).  They also found that doing
       92             :          the below in a branchless way is slightly worse and an ILP optimized
      93             :          version of the conditional calculation is about the same.  They
      94             :          also found that vectorizing the overall loop and/or Duffing the
      95             :          vectorized loop did not provide noticeable performance
      96             :          improvements under various styles of memcpy. */
      97   126040304 :       ulong src = data + tail_data_off;
      98   126040304 :       ulong dst = tail_data;
      99   126040304 :       ulong rem = tail_data_sz;
     100   146367152 :       while( rem>=32UL ) { wv_st( (ulong *)dst, wv_ldu( (ulong const *)src ) ); dst += 32UL; src += 32UL; rem -= 32UL; }
     101   263186794 :       while( rem>= 8UL ) { *(ulong  *)dst = FD_LOAD( ulong,  src );             dst +=  8UL; src +=  8UL; rem -=  8UL; }
     102   126040304 :       if   ( rem>= 4UL ) { *(uint   *)dst = FD_LOAD( uint,   src );             dst +=  4UL; src +=  4UL; rem -=  4UL; }
     103   126040304 :       if   ( rem>= 2UL ) { *(ushort *)dst = FD_LOAD( ushort, src );             dst +=  2UL; src +=  2UL; rem -=  2UL; }
     104   126040304 :       if   ( rem       ) { *(uchar  *)dst = FD_LOAD( uchar,  src );             dst++;                                 }
     105   126040304 :       *(uchar *)dst = (uchar)0x80;
     106             : #     else
     107             :       fd_memcpy( (void *)tail_data, (void const *)(data + tail_data_off), tail_data_sz );
     108             :       *((uchar *)(tail_data+tail_data_sz)) = (uchar)0x80;
     109             : #     endif
     110             : 
     111   126040304 :       *((ulong *)(tail_data+tail_sz-8UL )) = fd_ulong_bswap( sz<<3 );
     112   126040304 :     }
     113    16784300 :   } while(0);
     114             : 
     115    16784300 :   wu_t s0 = wu_bcast( FD_SHA256_INITIAL_A );
     116    16784300 :   wu_t s1 = wu_bcast( FD_SHA256_INITIAL_B );
     117    16784300 :   wu_t s2 = wu_bcast( FD_SHA256_INITIAL_C );
     118    16784300 :   wu_t s3 = wu_bcast( FD_SHA256_INITIAL_D );
     119    16784300 :   wu_t s4 = wu_bcast( FD_SHA256_INITIAL_E );
     120    16784300 :   wu_t s5 = wu_bcast( FD_SHA256_INITIAL_F );
     121    16784300 :   wu_t s6 = wu_bcast( FD_SHA256_INITIAL_G );
     122    16784300 :   wu_t s7 = wu_bcast( FD_SHA256_INITIAL_H );
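
   /* FD_SHA256_INITIAL_A..H are expected to be the standard FIPS 180-4
      SHA-256 initial hash values (0x6a09e667 .. 0x5be0cd19); each
      wu_bcast replicates one state word across all 8 lanes so every
      message in the batch starts from the same IV. */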
     123             : 
     124    16784300 :   wv_t wv_64        = wv_bcast( FD_SHA256_PRIVATE_BUF_MAX );
     125    16784300 :   wv_t W_sentinel   = wv_bcast( (ulong)scratch );
     126    16784300 :   wc_t batch_lane   = wc_unpack( (1<<batch_cnt)-1 );
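
   /* Example: batch_cnt==6 gives (1<<6)-1 = 0x3f, so batch_lane marks
      lanes 0..5 as active and lanes 6..7 as inactive for the rest of the
      pass. */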
     127             : 
     128    16784300 :   wv_t tail_lo      = wv_ld( batch_tail_data   );
     129    16784300 :   wv_t tail_hi      = wv_ld( batch_tail_data+4 );
     130             : 
     131    16784300 :   wv_t tail_rem_lo  = wv_ld( batch_tail_rem    );
     132    16784300 :   wv_t tail_rem_hi  = wv_ld( batch_tail_rem+4  );
     133             : 
     134    16784300 :   wv_t W_lo         = wv_ld( batch_data        );
     135    16784300 :   wv_t W_hi         = wv_ld( batch_data+4      );
     136             : 
     137    16784300 :   wv_t block_rem_lo = wv_notczero( wc_expand( batch_lane, 0 ),
     138    16784300 :                         wv_add( wv_shr( wv_ld( batch_sz   ), FD_SHA256_PRIVATE_LG_BUF_MAX ), tail_rem_lo ) );
     139    16784300 :   wv_t block_rem_hi = wv_notczero( wc_expand( batch_lane, 1 ),
     140    16784300 :                         wv_add( wv_shr( wv_ld( batch_sz+4 ), FD_SHA256_PRIVATE_LG_BUF_MAX ), tail_rem_hi ) );
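
   /* Per lane, block_rem is floor( sz / block_sz ) in-place blocks plus
      the 1 or 2 tail blocks sized above, forced to zero for lanes past
      batch_cnt.  E.g. with 64 byte blocks, a 100 byte message starts
      with 1 + 1 = 2 blocks remaining. */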
     141   252547758 :   for(;;) {
     142   252547758 :     wc_t active_lane_lo = wv_to_wc( block_rem_lo );
     143   252547758 :     wc_t active_lane_hi = wv_to_wc( block_rem_hi );
     144   252547758 :     if( FD_UNLIKELY( !wc_any( wc_or( active_lane_lo, active_lane_hi ) ) ) ) break;
     145             : 
     146             :     /* Switch lanes that have hit the end of their in-place bulk
     147             :        processing to their out-of-place scratch tail regions as
     148             :        necessary. */
     149             : 
     150   235763458 :     W_lo = wv_if( wv_eq( block_rem_lo, tail_rem_lo ), tail_lo, W_lo );
     151   235763458 :     W_hi = wv_if( wv_eq( block_rem_hi, tail_rem_hi ), tail_hi, W_hi );
     152             : 
     153             :     /* At this point, we have at least 1 block in this message segment
     154             :        pass that has not been processed.  Load the next 64 bytes of
     155             :        each unprocessed block.  Inactive lanes (e.g. message segments
     156             :        in this pass for which we've already processed all the blocks)
     157             :        will load garbage from a sentinel location (and the result of
     158             :        the state computations for the inactive lane will be ignored). */
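
   /* Note that W_sentinel points at the local scratch region (valid,
      readable memory), so the unconditional unaligned loads below never
      touch out-of-bounds addresses for lanes with no blocks left. */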
     159             : 
     160   235763458 :     wv_t W03 = wv_if( active_lane_lo, W_lo, W_sentinel );
     161   235763458 :     uchar const * W0 = (uchar const *)wv_extract( W03, 0 );
     162   235763458 :     uchar const * W1 = (uchar const *)wv_extract( W03, 1 );
     163   235763458 :     uchar const * W2 = (uchar const *)wv_extract( W03, 2 );
     164   235763458 :     uchar const * W3 = (uchar const *)wv_extract( W03, 3 );
     165             : 
     166   235763458 :     wv_t W47 = wv_if( active_lane_hi, W_hi, W_sentinel );
     167   235763458 :     uchar const * W4 = (uchar const *)wv_extract( W47, 0 );
     168   235763458 :     uchar const * W5 = (uchar const *)wv_extract( W47, 1 );
     169   235763458 :     uchar const * W6 = (uchar const *)wv_extract( W47, 2 );
     170   235763458 :     uchar const * W7 = (uchar const *)wv_extract( W47, 3 );
     171             : 
     172   235763458 :     wu_t x0; wu_t x1; wu_t x2; wu_t x3; wu_t x4; wu_t x5; wu_t x6; wu_t x7;
     173   235763458 :     wu_transpose_8x8( wu_bswap( wu_ldu(W0   ) ), wu_bswap( wu_ldu(W1   ) ), wu_bswap( wu_ldu(W2   ) ), wu_bswap( wu_ldu(W3   ) ),
     174   235763458 :                       wu_bswap( wu_ldu(W4   ) ), wu_bswap( wu_ldu(W5   ) ), wu_bswap( wu_ldu(W6   ) ), wu_bswap( wu_ldu(W7   ) ),
     175   235763458 :                       x0, x1, x2, x3, x4, x5, x6, x7 );
     176             : 
     177   235763458 :     wu_t x8; wu_t x9; wu_t xa; wu_t xb; wu_t xc; wu_t xd; wu_t xe; wu_t xf;
     178   235763458 :     wu_transpose_8x8( wu_bswap( wu_ldu(W0+32) ), wu_bswap( wu_ldu(W1+32) ), wu_bswap( wu_ldu(W2+32) ), wu_bswap( wu_ldu(W3+32) ),
     179   235763458 :                       wu_bswap( wu_ldu(W4+32) ), wu_bswap( wu_ldu(W5+32) ), wu_bswap( wu_ldu(W6+32) ), wu_bswap( wu_ldu(W7+32) ),
     180   235763458 :                       x8, x9, xa, xb, xc, xd, xe, xf );
     181             : 
     182             :     /* Compute the SHA-256 state updates */
     183             : 
     184   235763458 :     wu_t a = s0; wu_t b = s1; wu_t c = s2; wu_t d = s3; wu_t e = s4; wu_t f = s5; wu_t g = s6; wu_t h = s7;
     185             : 
     186   235763458 : #   define Sigma0(x)  wu_xor( wu_rol(x,30), wu_xor( wu_rol(x,19), wu_rol(x,10) ) )
     187   235763458 : #   define Sigma1(x)  wu_xor( wu_rol(x,26), wu_xor( wu_rol(x,21), wu_rol(x, 7) ) )
     188   235763458 : #   define sigma0(x)  wu_xor( wu_rol(x,25), wu_xor( wu_rol(x,14), wu_shr(x, 3) ) )
     189   235763458 : #   define sigma1(x)  wu_xor( wu_rol(x,15), wu_xor( wu_rol(x,13), wu_shr(x,10) ) )
     190   235763458 : #   define Ch(x,y,z)  wu_xor( wu_and(x,y), wu_andnot(x,z) )
     191   235763458 : #   define Maj(x,y,z) wu_xor( wu_and(x,y), wu_xor( wu_and(x,z), wu_and(y,z) ) )
     192   235763458 : #   define SHA_CORE(xi,ki)                                                       \
     193 15088861312 :     T1 = wu_add( wu_add(xi,ki), wu_add( wu_add( h, Sigma1(e) ), Ch(e, f, g) ) ); \
     194 15088861312 :     T2 = wu_add( Sigma0(a), Maj(a, b, c) );                                      \
     195 15088861312 :     h = g;                                                                       \
     196 15088861312 :     g = f;                                                                       \
     197 15088861312 :     f = e;                                                                       \
     198 15088861312 :     e = wu_add( d, T1 );                                                         \
     199 15088861312 :     d = c;                                                                       \
     200 15088861312 :     c = b;                                                                       \
     201 15088861312 :     b = a;                                                                       \
     202 15088861312 :     a = wu_add( T1, T2 )
     203             : 
     204   235763458 :     wu_t T1;
     205   235763458 :     wu_t T2;
     206             : 
     207   235763458 :     SHA_CORE( x0, wu_bcast( fd_sha256_K[ 0] ) );
     208   235763458 :     SHA_CORE( x1, wu_bcast( fd_sha256_K[ 1] ) );
     209   235763458 :     SHA_CORE( x2, wu_bcast( fd_sha256_K[ 2] ) );
     210   235763458 :     SHA_CORE( x3, wu_bcast( fd_sha256_K[ 3] ) );
     211   235763458 :     SHA_CORE( x4, wu_bcast( fd_sha256_K[ 4] ) );
     212   235763458 :     SHA_CORE( x5, wu_bcast( fd_sha256_K[ 5] ) );
     213   235763458 :     SHA_CORE( x6, wu_bcast( fd_sha256_K[ 6] ) );
     214   235763458 :     SHA_CORE( x7, wu_bcast( fd_sha256_K[ 7] ) );
     215   235763458 :     SHA_CORE( x8, wu_bcast( fd_sha256_K[ 8] ) );
     216   235763458 :     SHA_CORE( x9, wu_bcast( fd_sha256_K[ 9] ) );
     217   235763458 :     SHA_CORE( xa, wu_bcast( fd_sha256_K[10] ) );
     218   235763458 :     SHA_CORE( xb, wu_bcast( fd_sha256_K[11] ) );
     219   235763458 :     SHA_CORE( xc, wu_bcast( fd_sha256_K[12] ) );
     220   235763458 :     SHA_CORE( xd, wu_bcast( fd_sha256_K[13] ) );
     221   235763458 :     SHA_CORE( xe, wu_bcast( fd_sha256_K[14] ) );
     222   235763458 :     SHA_CORE( xf, wu_bcast( fd_sha256_K[15] ) );
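
   /* The loop below covers rounds 16..63: each pass extends the message
      schedule 16 words at a time via the standard SHA-256 recurrence
      W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16], with
      each wu_t holding that schedule word for all 8 lanes at once. */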
     223   943053832 :     for( ulong i=16UL; i<64UL; i+=16UL ) {
     224   707290374 :       x0 = wu_add( wu_add( x0, sigma0(x1) ), wu_add( sigma1(xe), x9 ) ); SHA_CORE( x0, wu_bcast( fd_sha256_K[i     ] ) );
     225   707290374 :       x1 = wu_add( wu_add( x1, sigma0(x2) ), wu_add( sigma1(xf), xa ) ); SHA_CORE( x1, wu_bcast( fd_sha256_K[i+ 1UL] ) );
     226   707290374 :       x2 = wu_add( wu_add( x2, sigma0(x3) ), wu_add( sigma1(x0), xb ) ); SHA_CORE( x2, wu_bcast( fd_sha256_K[i+ 2UL] ) );
     227   707290374 :       x3 = wu_add( wu_add( x3, sigma0(x4) ), wu_add( sigma1(x1), xc ) ); SHA_CORE( x3, wu_bcast( fd_sha256_K[i+ 3UL] ) );
     228   707290374 :       x4 = wu_add( wu_add( x4, sigma0(x5) ), wu_add( sigma1(x2), xd ) ); SHA_CORE( x4, wu_bcast( fd_sha256_K[i+ 4UL] ) );
     229   707290374 :       x5 = wu_add( wu_add( x5, sigma0(x6) ), wu_add( sigma1(x3), xe ) ); SHA_CORE( x5, wu_bcast( fd_sha256_K[i+ 5UL] ) );
     230   707290374 :       x6 = wu_add( wu_add( x6, sigma0(x7) ), wu_add( sigma1(x4), xf ) ); SHA_CORE( x6, wu_bcast( fd_sha256_K[i+ 6UL] ) );
     231   707290374 :       x7 = wu_add( wu_add( x7, sigma0(x8) ), wu_add( sigma1(x5), x0 ) ); SHA_CORE( x7, wu_bcast( fd_sha256_K[i+ 7UL] ) );
     232   707290374 :       x8 = wu_add( wu_add( x8, sigma0(x9) ), wu_add( sigma1(x6), x1 ) ); SHA_CORE( x8, wu_bcast( fd_sha256_K[i+ 8UL] ) );
     233   707290374 :       x9 = wu_add( wu_add( x9, sigma0(xa) ), wu_add( sigma1(x7), x2 ) ); SHA_CORE( x9, wu_bcast( fd_sha256_K[i+ 9UL] ) );
     234   707290374 :       xa = wu_add( wu_add( xa, sigma0(xb) ), wu_add( sigma1(x8), x3 ) ); SHA_CORE( xa, wu_bcast( fd_sha256_K[i+10UL] ) );
     235   707290374 :       xb = wu_add( wu_add( xb, sigma0(xc) ), wu_add( sigma1(x9), x4 ) ); SHA_CORE( xb, wu_bcast( fd_sha256_K[i+11UL] ) );
     236   707290374 :       xc = wu_add( wu_add( xc, sigma0(xd) ), wu_add( sigma1(xa), x5 ) ); SHA_CORE( xc, wu_bcast( fd_sha256_K[i+12UL] ) );
     237   707290374 :       xd = wu_add( wu_add( xd, sigma0(xe) ), wu_add( sigma1(xb), x6 ) ); SHA_CORE( xd, wu_bcast( fd_sha256_K[i+13UL] ) );
     238   707290374 :       xe = wu_add( wu_add( xe, sigma0(xf) ), wu_add( sigma1(xc), x7 ) ); SHA_CORE( xe, wu_bcast( fd_sha256_K[i+14UL] ) );
     239   707290374 :       xf = wu_add( wu_add( xf, sigma0(x0) ), wu_add( sigma1(xd), x8 ) ); SHA_CORE( xf, wu_bcast( fd_sha256_K[i+15UL] ) );
     240   707290374 :     }
     241             : 
     242   235763458 : #   undef SHA_CORE
     243   235763458 : #   undef Sigma0
     244   235763458 : #   undef Sigma1
     245   235763458 : #   undef sigma0
     246   235763458 : #   undef sigma1
     247   235763458 : #   undef Ch
     248   235763458 : #   undef Maj
     249             : 
     250             :     /* Apply the state updates to the active lanes */
     251             : 
     252   235763458 :     wc_t active_lane = wc_narrow( active_lane_lo, active_lane_hi );
     253   235763458 :     s0 = wu_add( s0, wu_notczero( active_lane, a ) );
     254   235763458 :     s1 = wu_add( s1, wu_notczero( active_lane, b ) );
     255   235763458 :     s2 = wu_add( s2, wu_notczero( active_lane, c ) );
     256   235763458 :     s3 = wu_add( s3, wu_notczero( active_lane, d ) );
     257   235763458 :     s4 = wu_add( s4, wu_notczero( active_lane, e ) );
     258   235763458 :     s5 = wu_add( s5, wu_notczero( active_lane, f ) );
     259   235763458 :     s6 = wu_add( s6, wu_notczero( active_lane, g ) );
     260   235763458 :     s7 = wu_add( s7, wu_notczero( active_lane, h ) );
     261             : 
     262             :     /* Advance to the next message segment blocks.  In pseudo code,
     263             :        the below is:
     264             : 
     265             :          W += 64; if( block_rem ) block_rem--;
     266             : 
     267             :        Since wc_to_wv_raw(false/true) is 0UL/~0UL, we can use wv_add /
     268             :        wc_to_wv_raw instead of wv_sub / wc_to_wv to save some ops.
     269             :        (Consider conditional increment / decrement operations?)
     270             : 
     271             :        Also since we do not load anything at W(lane) above unless
     272             :        block_rem(lane) is non-zero, we can omit vector conditional
     273             :        operations for W(lane) below to save some additional ops. */
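
   /* A scalar sketch of that trick for one lane (active_lane is ~0UL
      while blocks remain and 0UL once the lane is done):

        ulong mask = block_rem ? ~0UL : 0UL;   // wc_to_wv_raw( active_lane )
        block_rem += mask;                     // decrements iff block_rem>0
   */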
     274             : 
     275   235763458 :     W_lo = wv_add( W_lo, wv_64 );
     276   235763458 :     W_hi = wv_add( W_hi, wv_64 );
     277             : 
     278   235763458 :     block_rem_lo = wv_add( block_rem_lo, wc_to_wv_raw( active_lane_lo ) );
     279   235763458 :     block_rem_hi = wv_add( block_rem_hi, wc_to_wv_raw( active_lane_hi ) );
     280   235763458 :   }
     281             : 
     282             :   /* Store the results.  FIXME: Probably could optimize the transpose
     283             :      further by taking into account needed stores (and then maybe go
     284             :      direct into memory ... would need a family of such transposed
     285             :      stores). */
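
   /* wu_bswap converts each 32-bit state word back to the big endian
      byte order mandated by the SHA-256 spec, and wu_stu is an unaligned
      store, so the caller's hash buffers need no particular alignment. */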
     286             : 
     287    16784300 :   wu_transpose_8x8( s0,s1,s2,s3,s4,s5,s6,s7, s0,s1,s2,s3,s4,s5,s6,s7 );
     288             : 
     289    16784300 :   uint * const * batch_hash = (uint * const *)_batch_hash;
     290    16784300 :   switch( batch_cnt ) { /* application dependent prob */
     291    14458758 :   case 8UL: wu_stu( batch_hash[7], wu_bswap( s7 ) ); __attribute__((fallthrough));
     292    14785384 :   case 7UL: wu_stu( batch_hash[6], wu_bswap( s6 ) ); __attribute__((fallthrough));
     293    15228628 :   case 6UL: wu_stu( batch_hash[5], wu_bswap( s5 ) ); __attribute__((fallthrough));
     294    15594212 :   case 5UL: wu_stu( batch_hash[4], wu_bswap( s4 ) ); __attribute__((fallthrough));
     295    16035998 :   case 4UL: wu_stu( batch_hash[3], wu_bswap( s3 ) ); __attribute__((fallthrough));
     296    16368724 :   case 3UL: wu_stu( batch_hash[2], wu_bswap( s2 ) ); __attribute__((fallthrough));
     297    16784300 :   case 2UL: wu_stu( batch_hash[1], wu_bswap( s1 ) ); __attribute__((fallthrough));
     298    16784300 :   case 1UL: wu_stu( batch_hash[0], wu_bswap( s0 ) ); __attribute__((fallthrough));
     299    16784300 :   default: break;
     300    16784300 :   }
     301    16784300 : }
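
A minimal, hypothetical self-check sketch (not part of the covered source above; it assumes the usual Firedancer build environment and that the prototype below matches the definition in this file). It drives fd_sha256_private_batch_avx directly with 32-byte aligned, FD_SHA256_BATCH_MAX-entry argument arrays (the aligned vector loads of batch_data and batch_sz above require full-size arrays even for smaller batches) and compares every lane against the sequential fd_sha256_hash reference:

  #include <string.h>
  #include "fd_sha256.h"

  /* Prototype of the function defined above (only needed if the public
     header does not already expose it). */
  void
  fd_sha256_private_batch_avx( ulong batch_cnt, void const * batch_data,
                               ulong const * batch_sz, void * const * batch_hash );

  static int
  check_batch_matches_sequential( void ) {
    uchar        msg[ FD_SHA256_BATCH_MAX ][ 96 ];
    void const * data[ FD_SHA256_BATCH_MAX ] __attribute__((aligned(32)));
    ulong        sz  [ FD_SHA256_BATCH_MAX ] __attribute__((aligned(32)));
    void *       out [ FD_SHA256_BATCH_MAX ];
    uchar        hash_batch[ FD_SHA256_BATCH_MAX ][ 32 ];
    uchar        hash_ref[ 32 ];

    for( ulong i=0UL; i<FD_SHA256_BATCH_MAX; i++ ) {
      for( ulong b=0UL; b<96UL; b++ ) msg[ i ][ b ] = (uchar)(i+b);
      data[ i ] = msg[ i ];
      sz  [ i ] = 1UL + 11UL*i;      /* assorted sizes, some spanning 2 blocks */
      out [ i ] = hash_batch[ i ];
    }

    fd_sha256_private_batch_avx( FD_SHA256_BATCH_MAX, data, sz, out );

    for( ulong i=0UL; i<FD_SHA256_BATCH_MAX; i++ ) {
      fd_sha256_hash( data[ i ], sz[ i ], hash_ref );
      if( memcmp( hash_batch[ i ], hash_ref, 32UL ) ) return 0; /* mismatch */
    }
    return 1; /* every lane matches the sequential reference */
  }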

Generated by: LCOV version 1.14