LCOV - code coverage report
Current view: top level - ballet/bmtree - fd_bmtree.c (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 226 227 99.6 %
Date: 2024-11-13 11:58:15 Functions: 13 13 100.0 %

          Line data    Source code
       1             : /* This file declares a family of functions for different widths of
       2             :    binary Merkle trees based on the SHA-256 hash function.  It can be
       3             :    included multiple times to get different widths.  Example:
       4             : 
       5             :      #define BMTREE_NAME    bmt
       6             :      #define BMTREE_HASH_SZ 20
       7             :      #include "fd_bmtree_tmpl.c"
       8             : 
       9             :    will declare in the current compile unit a header only library
      10             :    with the following APIs:
      11             : 
      12             :      // Public node API
      13             : 
      14             :      struct __attribute__((aligned(32))) bmt_node {
      15             :        uchar hash[ 32 ]; // Only first 20 bytes are meaningful
      16             :      };
      17             : 
      18             :      typedef struct bmt_node bmt_node_t;
      19             : 
      20             :      bmt_node_t * bmtree_hash_leaf( bmt_node_t * node, void const * data, ulong data_sz );
      21             : 
      22             :      // Public commit API
      23             : 
      24             :      struct bmt_commit;
      25             :      typedef struct bmt_commit bmt_commit_t;
      26             : 
      27             :      ulong          bmt_commit_align    ( void );
      28             :      ulong          bmt_commit_footprint( void );
      29             :      bmt_commit_t * bmt_commit_init     ( void * mem );
      30             :      ulong          bmt_commit_leaf_cnt ( bmt_commit_t const * bmt );
      31             :      bmt_commit_t * bmt_commit_append   ( bmt_commit_t * bmt, bmt_node_t const * leaf, ulong leaf_cnt );
      32             :      uchar *        bmt_commit_fini     ( bmt_commit_t * bmt );
      33             : 
      34             :    See comments below for more details.
      35             : 
      36             :    Widths 20 and 32 are used in the Solana protocol.  Specification:
      37             : 
      38             :    https://github.com/solana-foundation/specs/blob/main/core/merkle-tree.md */
      39             : #include "fd_bmtree.h"
      40             : #include "../sha256/fd_sha256.h"
      41             : 
      42             : #define SET_NAME ipfset
      43             : #include "../../util/tmpl/fd_smallset.c"
      44             : 
      45             : #if FD_HAS_AVX
      46             : #include <x86intrin.h>
      47             : #endif
      48             : 
      49             : 
      50             : 
      51             : fd_bmtree_node_t *
      52             : fd_bmtree_hash_leaf( fd_bmtree_node_t * node,
      53             :                     void const *       data,
      54             :                     ulong              data_sz,
      55        6108 :                     ulong              prefix_sz ) {
      56             : 
      57             :   /* FIXME: Ideally we'd use the streamlined SHA-256 variant here but it
      58             :      is pretty wonky from a usability perspective to require users to
      59             :      allow us this API prepend a zero to their data region.  See note
      60             :      below for other nasty performance drags here in the implementation
      61             :      details (the algorithm conceptually is very clever and sound but
      62             :      the implementation requirements did not take into any consideration
      63             :      how real world computers and hardware actually work). */
      64             : 
      65        6108 :   fd_sha256_t sha[1];
      66        6108 :   fd_sha256_fini( fd_sha256_append( fd_sha256_append( fd_sha256_init( sha ), fd_bmtree_leaf_prefix, prefix_sz ), data, data_sz ), node->hash );
      67        6108 :   return node;
      68        6108 : }
      69             : 
      70             : /* bmtree_merge computes `SHA-256(prefix|a->hash|b->hash)` and writes
      71             :    the full hash into node->hash (which can then be truncated as
      72             :    necessary).  prefix is the first prefix_sz bytes of
      73             :    fd_bmtree_node_prefix and is typically FD_BMTREE_LONG_PREFIX_SZ or
      74             :    FD_BMTREE_SHORT_PREFIX_SZ.  In-place operation fine.  Returns node.
      75             :    */
      76             : 
      77             : static inline fd_bmtree_node_t *
      78             : fd_bmtree_private_merge( fd_bmtree_node_t       * node,
      79             :                          fd_bmtree_node_t const * a,
      80             :                          fd_bmtree_node_t const * b,
      81             :                          ulong                    hash_sz,
      82   112951788 :                          ulong                    prefix_sz ) {
      83             : 
      84             :   /* FIXME: As can be seen from the below, if we actually wanted to be
      85             :      fast, we'd not bother with 20 byte variant as we actually have to
      86             :      do more work for this given the SHA algorithm and the hardware work
      87             :      at a much coarser granularity (and it doesn't save any space in
      88             :      packets because you could just compute the 32 byte variant and then
      89             :      truncate the result to 20 bytes ... it'd be both faster and more
      90             :      secure).
      91             : 
      92             :      Further, we'd use a sane prefix (or maybe a suffix) length instead
      93             :      of a single byte (fine grained memory accesses are the death knell
      94             :      of real world performance ... it's actually more work for the CPU
      95             :      and hardware).
      96             : 
      97             :      And then, if we really cared, we'd probably replace the stock
      98             :      SHA256 implementation with a block level parallel SHA256 variant
      99             :      here and above.  This would have equivalent strength but be
     100             :      dramatically higher performance on real world software and
     101             :      hardware.
     102             : 
     103             :      And then, we could bake into the leaf / branch prefixes into the
     104             :      parallel block calcs to further reduce comp load and alignment
     105             :      swizzling.  This would make the calculation faster still in
     106             :      software and less area in hardware while preserving security.
     107             : 
     108             :      The net result would be a dramatically faster and significant more
     109             :      secure and less code in software and a lot easier to accelerate in
     110             :      hardware.
     111             : 
     112             :      In the meantime, we write abominations like the below to get some
     113             :      extra mileage out of commodity CPUs.  Practically helps speed this
     114             :      up tree construction low tens of percent in the large number of
     115             :      small leaves limit). */
     116             : 
     117   112951788 : # if FD_HAS_AVX
     118             : 
     119   112951788 :   __m256i avx_pre = _mm256_load_si256 ( (__m256i const *)fd_bmtree_node_prefix );
     120   112951788 :   __m256i avx_a   = _mm256_loadu_si256( (__m256i const *)a           );
     121   112951788 :   __m256i avx_b   = _mm256_loadu_si256( (__m256i const *)b           );
     122             : 
     123   112951788 :   uchar mem[96] __attribute__((aligned(32)));
     124             : 
     125   112951788 :   _mm256_store_si256(  (__m256i *)(mem),                     avx_pre );
     126   112951788 :   _mm256_storeu_si256( (__m256i *)(mem+prefix_sz),           avx_a   );
     127   112951788 :   _mm256_storeu_si256( (__m256i *)(mem+prefix_sz+hash_sz),   avx_b   );
     128             : 
     129   112951788 :   fd_sha256_hash( mem, prefix_sz+2UL*hash_sz, node );
     130             : 
     131             :   /* Consider FD_HAS_SSE only variant? */
     132             : 
     133             : # else
     134             : 
     135             :   fd_sha256_t sha[1];
     136             :   fd_sha256_fini( fd_sha256_append( fd_sha256_append( fd_sha256_append( fd_sha256_init( sha ),
     137             :                   fd_bmtree_node_prefix, prefix_sz ), a->hash, hash_sz ), b->hash, hash_sz ), node->hash );
     138             : 
     139             : # endif
     140             : 
     141   112951788 :   return node;
     142   112951788 : }
     143             : 
     144             : /* bmtree_depth returns the number of layers in a binary Merkle tree. */
     145             : 
     146             : FD_FN_CONST ulong
     147    54354294 : fd_bmtree_depth( ulong leaf_cnt ) {
     148    54354294 :   return fd_ulong_if(
     149    54354294 :     /* if */   leaf_cnt<=1UL,
     150    54354294 :     /* then */ leaf_cnt,
     151    54354294 :     /* else */ (ulong)fd_ulong_find_msb_w_default( leaf_cnt-1UL, -1 /*irrelevant*/ ) + 2UL
     152    54354294 :   );
     153    54354294 : }
     154             : 
     155             : FD_FN_CONST ulong
     156    30000000 : fd_bmtree_node_cnt( ulong leaf_cnt ) {
     157             :   /* Compute the number of nodes in a tree with inclusion_proof_leaf_cnt
     158             :      leaves. Based on the proposition that layer l having N_l nodes
     159             :      implies the above layer has floor((N_l+1)/2) nodes, we know that
     160             :      the kth layer above has floor(((N_l+2^(k-1)+2^(k-2)+...+1)/2^k)
     161             :      nodes, which is floor((N_l+2^k - 1)/2^k) = 1+floor((N_l-1)/2^k)
     162             :      nodes.  We stop when we get to 1 node though.  It seems like there
     163             :      should be a bit-twiddling way to calculate this faster, especially
     164             :      given that you can go all the way to 64 and correct with a value
     165             :      that comes from the MSB, but I couldn't find it.  */
     166    30000000 :   if( FD_UNLIKELY( leaf_cnt==0UL ) ) return 0UL;
     167    29999997 :   ulong cnt = 0UL;
     168    29999997 :   leaf_cnt--;
     169  1949999805 :   for( int i=0; i<64; i++ ) {
     170  1919999808 :     ulong term = leaf_cnt>>i;
     171  1919999808 :     cnt += term;
     172  1919999808 :   }
     173    29999997 :   cnt += (ulong)(2+fd_ulong_find_msb_w_default(leaf_cnt, -1));
     174    29999997 :   return cnt;
     175    30000000 : }
     176             : 
     177             : /* bmtree_commit_{footprint,align} return the alignment and footprint
     178             :    required for a memory region to be used as a bmtree_commit_t. */
     179           3 : FD_FN_CONST ulong fd_bmtree_commit_align    ( void ) { return FD_BMTREE_COMMIT_ALIGN; }
     180             : 
     181             : FD_FN_CONST ulong
     182         900 : fd_bmtree_commit_footprint( ulong inclusion_proof_layer_cnt ) {
     183             :   /* A complete binary tree with n layers has (2^n)-1 nodes.  We keep 1
     184             :      extra bmtree_node_t (included in sizeof(fd_bmtree_commit_t)) to
     185             :      avoid branches when appending commits. */
     186         900 :   return fd_ulong_align_up( sizeof(fd_bmtree_commit_t) +
     187         900 :     ( (1UL<<inclusion_proof_layer_cnt)-1UL       )*sizeof(fd_bmtree_node_t) +
     188         900 :     (((1UL<<inclusion_proof_layer_cnt)+63UL)/64UL)*sizeof(ulong),
     189         900 :     fd_bmtree_commit_align() );
     190         900 : }
     191             : 
     192             : 
     193             : /* bmtree_commit_init starts a vector commitment calculation */
     194             : 
     195             : fd_bmtree_commit_t *    /* Returns mem as a bmtree_commit_t *, commit will be in a calc */
     196             : fd_bmtree_commit_init( void * mem,     /* Assumed unused with required alignment and footprint */
     197             :                        ulong hash_sz,
     198             :                        ulong prefix_sz,
     199     1483062 :                        ulong inclusion_proof_layer_cnt ) {
     200     1483062 :   fd_bmtree_commit_t * state = (fd_bmtree_commit_t *) mem;
     201     1483062 :   ulong inclusion_proof_sz  = (1UL<<inclusion_proof_layer_cnt) - 1UL;
     202     1483062 :   state->leaf_cnt           = 0UL;
     203     1483062 :   state->hash_sz            = hash_sz;
     204     1483062 :   state->prefix_sz          = prefix_sz;
     205     1483062 :   state->inclusion_proof_sz = inclusion_proof_sz;
     206     1483062 :   state->inclusion_proofs_valid = (ulong*)(state->inclusion_proofs + inclusion_proof_sz);
     207     1483062 :   fd_memset( state->inclusion_proofs_valid, 0, sizeof(ulong)*(1UL + inclusion_proof_sz/ipfset_MAX) );
     208     1483062 :   return state;
     209     1483062 : }
     210             : 
     211             : 
     212             : /* bmtree_commit_append appends a range of leaf nodes.  Assumes that
     213             :    leaf_cnt + new_leaf_cnt << 2^63 (which, unless planning on running
     214             :    for millennia, is always true). */
     215             : 
     216             : fd_bmtree_commit_t *                                            /* Returns state */
     217             : fd_bmtree_commit_append( fd_bmtree_commit_t *                 state,           /* Assumed valid and in a calc */
     218             :                          fd_bmtree_node_t const * FD_RESTRICT new_leaf,        /* Indexed [0,new_leaf_cnt) */
     219     7580016 :                          ulong                                new_leaf_cnt ) {
     220     7580016 :   ulong                          leaf_cnt = state->leaf_cnt;
     221     7580016 :   fd_bmtree_node_t * FD_RESTRICT node_buf = state->node_buf;
     222             : 
     223   115122516 :   for( ulong new_leaf_idx=0UL; new_leaf_idx<new_leaf_cnt; new_leaf_idx++ ) {
     224             : 
     225             :     /* Accumulates a single leaf node into the tree.
     226             : 
     227             :        Maintains the invariant that the left node of the last node pair
     228             :        for each layer is copied to `state->node_buf`.
     229             : 
     230             :        This serves to allow the algorithm to derive a new parent branch
     231             :        node for any pair of children, once the (previously missing)
     232             :        right node becomes available. */
     233             : 
     234   107542500 :     fd_bmtree_node_t tmp[1];
     235   107542500 :     *tmp = new_leaf[ new_leaf_idx ];
     236             : 
     237             :     /* Walk the tree upwards from the bottom layer.
     238             : 
     239             :        `tmp` contains a previously missing right node which is used to
     240             :        derive a branch node, together with the previously buffered value
     241             :        in `node_buf`.
     242             : 
     243             :        Each iteration, merges that pair of nodes into a new branch node.
     244             :        Terminates if the new branch node is the left node of a pair. */
     245             : 
     246   107542500 :     ulong layer   = 0UL;           /* `layer` starts at 0 (leaf nodes) and increments each iteration. */
     247   107542500 :     ulong inc_idx = 2UL*leaf_cnt;  /* `inc_idx` is the index of the current node in the inclusion proof array */
     248   107542500 :     ulong cursor  = ++leaf_cnt;    /* `cursor` is the number of known nodes in the current layer. */
     249   212879703 :     while( !(cursor & 1UL) ) {     /* Continue while the right node in the last pair is available. */
     250   105337203 :       state->inclusion_proofs[ fd_ulong_min( inc_idx, state->inclusion_proof_sz ) ] = *tmp;
     251   105337203 :       fd_bmtree_private_merge( tmp, node_buf + layer, tmp, state->hash_sz, state->prefix_sz );
     252   105337203 :       inc_idx -= 1UL<<layer; layer++; cursor>>=1;      /* Move up one layer. */
     253   105337203 :     }
     254             : 
     255             :     /* Note on correctness of the above loop: The termination condition
     256             :        is that bit zero (LSB) of `cursor` is 1.  Because `cursor` shifts
     257             :        right every iteration, the loop terminates as long as any bit in
     258             :        `cursor` is set to 1. (i.e. `cursor!=0UL`) */
     259             : 
     260             :     /* Emplace left node (could be root node) into buffer.  FIXME:
     261             :        Consider computing this location upfront and doing this inplace
     262             :        instead of copying at end? (Probably a wash.) */
     263             : 
     264   107542500 :     node_buf[ layer ] = *tmp;
     265   107542500 :     state->inclusion_proofs[ fd_ulong_min( inc_idx, state->inclusion_proof_sz ) ] = *tmp;
     266   107542500 :   }
     267             : 
     268     7580016 :   state->leaf_cnt = leaf_cnt;
     269     7580016 :   return state;
     270     7580016 : }
     271             : 
     272             : /* bmtree_commit_fini seals the commitment calculation by deriving the
     273             :    root node.  Assumes state is valid, in calc on entry with at least
     274             :    one leaf in the tree.  The state will be valid but no longer in a
     275             :    calc on return.  Returns a pointer in the caller's address space to
     276             :    the first byte of a memory region of BMTREE_HASH_SZ with to the root
     277             :    hash on success.  The lifetime of the returned pointer is that of the
     278             :    state or until the memory used for state gets initialized for a new
     279             :    calc. */
     280             : 
     281             : uchar *
     282     1482066 : fd_bmtree_commit_fini( fd_bmtree_commit_t * state ) {
     283     1482066 :   ulong             leaf_cnt = state->leaf_cnt;
     284     1482066 :   fd_bmtree_node_t * node_buf = state->node_buf;
     285             : 
     286             :   /* Pointer to root node. */
     287     1482066 :   fd_bmtree_node_t * root = node_buf + (fd_bmtree_depth( leaf_cnt ) - 1UL);
     288             : 
     289             :   /* Further hashing required if leaf count is not a power of two. */
     290     1482066 :   if( FD_LIKELY( !fd_ulong_is_pow2( leaf_cnt ) ) ) {
     291             : 
     292             :     /* Start at the first layer where number of nodes is odd. */
     293      289236 :     ulong layer     = (ulong)fd_ulong_find_lsb( leaf_cnt );
     294      289236 :     ulong layer_cnt = leaf_cnt >> layer; /* number of nodes in this layer */
     295      289236 :     ulong inc_idx   = (layer_cnt<<(layer+1UL)) - (1UL<<layer) - 1UL;
     296             : 
     297             :     /* Allocate temporary node. */
     298      289236 :     fd_bmtree_node_t tmp[1];
     299      289236 :     *tmp = node_buf[layer];
     300             : 
     301             :     /* Ascend until we reach the root node.  Calculate branch nodes
     302             :        along the way.  We use the fd_ulong_if to encourage inlining of
     303             :        merge and unnecessary branch elimination by cmov. */
     304     1767048 :     while( layer_cnt>1UL ) {
     305     1477812 :       fd_bmtree_node_t const * tmp2 = fd_ptr_if( layer_cnt & 1UL, &tmp[0] /* 1 child */, node_buf+layer /* 2 children */ ); /* cmov */
     306     1477812 :       fd_bmtree_private_merge( tmp, tmp2, tmp, state->hash_sz, state->prefix_sz );
     307             : 
     308     1477812 :       layer++; layer_cnt = (layer_cnt+1UL) >> 1;
     309             : 
     310     1477812 :       inc_idx   = (layer_cnt<<(layer+1UL)) - (1UL<<layer) - 1UL;
     311     1477812 :       state->inclusion_proofs[ fd_ulong_min( inc_idx, state->inclusion_proof_sz ) ] = *tmp;
     312     1477812 :     }
     313             : 
     314             :     /* Fix up root node. */
     315      289236 :     *root = *tmp;
     316      289236 :   }
     317             : 
     318     1482066 :   return root->hash;
     319     1482066 : }
     320             : 
     321             : int
     322             : fd_bmtree_get_proof( fd_bmtree_commit_t * state,
     323             :                      uchar *              dest,
     324   101545275 :                      ulong                leaf_idx ) {
     325             : 
     326   101545275 :   ulong leaf_cnt = state->leaf_cnt;
     327   101545275 :   ulong hash_sz  = state->hash_sz;
     328             : 
     329   101545275 :   if( FD_UNLIKELY( leaf_idx >= leaf_cnt ) ) return 0UL;
     330             : 
     331   101545275 :   ulong inc_idx   = leaf_idx * 2UL;
     332   101545275 :   ulong layer     = 0UL;
     333   101545275 :   ulong layer_cnt = state->leaf_cnt;
     334             : 
     335   735067170 :   while( layer_cnt>1UL ) {
     336   633521895 :     ulong sibling_idx = inc_idx ^ (1UL<<(layer+1UL));
     337   633521895 :     ulong max_idx_for_layer = fd_ulong_insert_lsb( (leaf_cnt - 1UL)<<1, 1+(int)layer, (1UL<<layer)-1UL );
     338   633521895 :     sibling_idx = fd_ulong_if( sibling_idx>max_idx_for_layer, inc_idx /* Double link */, sibling_idx );
     339             : 
     340   633521895 :     if( FD_UNLIKELY( sibling_idx>=state->inclusion_proof_sz ) ) return -1;
     341   633521895 :     fd_memcpy( dest + layer*hash_sz, state->inclusion_proofs + sibling_idx, hash_sz );
     342             : 
     343   633521895 :     layer++; layer_cnt = (layer_cnt+1UL)>>1;
     344   633521895 :     inc_idx = fd_ulong_insert_lsb( inc_idx, (int)layer+1, (1UL<<layer)-1UL );
     345   633521895 :   }
     346             : 
     347   101545275 :   return (int)layer;
     348   101545275 : }
     349             : 
     350             : fd_bmtree_node_t *
     351             : fd_bmtree_from_proof( fd_bmtree_node_t const * leaf,
     352             :                                     ulong                    leaf_idx,
     353             :                                     fd_bmtree_node_t *       root,
     354             :                                     uchar const *            proof,
     355             :                                     ulong                    proof_depth,
     356             :                                     ulong                    hash_sz,
     357      395517 :                                     ulong                    prefix_sz ) {
     358      395517 :   fd_bmtree_node_t tmp[2]; /* 0 stores the generated node, 1 stores the node from the proof */
     359      395517 :   fd_bmtree_node_t * tmp_l;
     360      395517 :   fd_bmtree_node_t * tmp_r;
     361             : 
     362      395517 :   tmp[0] = *leaf;
     363             : 
     364      395517 :   if( FD_UNLIKELY( proof_depth < fd_bmtree_depth( leaf_idx+1UL )-1UL ) ) return NULL;
     365             : 
     366      394749 :   ulong inc_idx   = leaf_idx * 2UL;
     367     3420165 :   for( ulong layer=0UL; layer<proof_depth; layer++ ) {
     368     3025416 :     fd_memcpy( tmp+1, proof + layer*hash_sz, hash_sz );
     369             : 
     370     3025416 :     tmp_l = fd_ptr_if( 0UL==(inc_idx & (1UL<<(layer+1UL))), tmp+0, tmp+1 );
     371     3025416 :     tmp_r = fd_ptr_if( 0UL==(inc_idx & (1UL<<(layer+1UL))), tmp+1, tmp+0 );
     372             : 
     373     3025416 :     fd_bmtree_private_merge( tmp, tmp_l, tmp_r, hash_sz, prefix_sz );
     374             : 
     375     3025416 :     inc_idx = fd_ulong_insert_lsb( inc_idx, (int)layer+2, (2UL<<layer)-1UL );
     376     3025416 :   }
     377      394749 :   return fd_memcpy( root, tmp, 32UL );
     378      395517 : }
     379             : 
     380             : 
     381             : /* TODO: Make robust */
     382    13616769 : #define HAS(inc_idx) (ipfset_test( state->inclusion_proofs_valid[(inc_idx)/64UL], (inc_idx)%64UL ) )
     383             : 
     384             : int
     385             : fd_bmtree_commitp_insert_with_proof( fd_bmtree_commit_t *     state,
     386             :                                      ulong                    idx,
     387             :                                      fd_bmtree_node_t const * new_leaf,
     388             :                                      uchar            const * proof,
     389             :                                      ulong                    proof_depth,
     390     3301974 :                                      fd_bmtree_node_t       * opt_root ) {
     391     3301974 :   ulong inc_idx = 2UL * idx;
     392     3301974 :   ulong inclusion_proof_sz = state->inclusion_proof_sz;
     393     3301974 :   ulong hash_sz = state->hash_sz;
     394             : 
     395     3301974 :   if( FD_UNLIKELY( inc_idx >= inclusion_proof_sz ) ) return 0;
     396             :   /* We want to bail if proof_depth>=inclusion_proof_layer_cnt, but we
     397             :      only have inclusion_proof_size, which is
     398             :      (1<<inclusion_proof_layer_cnt)-1.  This is a monotonic increasing
     399             :      function for inclusion_proof_layer_cnt in [0, 63], so we just apply
     400             :      it to both sides of the inequality. */
     401     3301974 :   if( FD_UNLIKELY( (proof_depth>63UL) | (((1UL<<proof_depth)-1UL)>=inclusion_proof_sz) ) ) return 0;
     402             : 
     403     3301974 :   state->node_buf[ 0 ] = *new_leaf;
     404             : 
     405     3301974 :   ulong layer=0UL;
     406     4076766 :   for( ; layer<proof_depth; layer++ ) {
     407      972165 :     ulong sibling_idx = inc_idx ^ (2UL<<layer);
     408      972165 :     if( FD_UNLIKELY( HAS(sibling_idx) && !fd_memeq( proof+hash_sz*layer, state->inclusion_proofs[sibling_idx].hash, hash_sz ) ) )
     409       98685 :       return 0;
     410      873480 :     if( FD_UNLIKELY( HAS(inc_idx) && !fd_memeq( state->node_buf[layer].hash, state->inclusion_proofs[ inc_idx ].hash, hash_sz ) ) )
     411       98688 :       return 0;
     412             : 
     413      774792 :     ulong parent_idx = fd_ulong_insert_lsb( inc_idx, (int)layer+2, (2UL<<layer)-1UL );
     414             : 
     415      774792 :     if( HAS(sibling_idx) & HAS(inc_idx) ) state->node_buf[ layer+1UL ] = state->inclusion_proofs[ parent_idx ];
     416      104121 :     else {
     417      104121 :       fd_bmtree_node_t sibling;
     418      104121 :       fd_memcpy( sibling.hash, proof+hash_sz*layer, hash_sz );
     419             : 
     420      104121 :       fd_bmtree_node_t * tmp_l = fd_ptr_if( 0UL==(inc_idx & (2UL<<layer)), state->node_buf+layer, &sibling );
     421      104121 :       fd_bmtree_node_t * tmp_r = fd_ptr_if( 0UL==(inc_idx & (2UL<<layer)), &sibling, state->node_buf+layer );
     422             : 
     423      104121 :       fd_bmtree_private_merge( state->node_buf+layer+1UL, tmp_l, tmp_r, state->hash_sz, state->prefix_sz );
     424      104121 :     }
     425             : 
     426      774792 :     inc_idx = parent_idx;
     427      774792 :   }
     428             : 
     429     6107082 :   for( ; layer<63UL; layer++ ) {
     430     6107082 :     if( (inc_idx|(2UL<<layer)) >= inclusion_proof_sz    ) break; /* Sibling out of bounds => At root */
     431     6033156 :     if( HAS( inc_idx ) | !HAS( inc_idx ^ (2UL<<layer) ) ) break; /* Not able to derive any more */
     432             : 
     433     3002481 :     fd_bmtree_node_t * sibling = state->inclusion_proofs + (inc_idx ^ (2UL<<layer));
     434     3002481 :     fd_bmtree_node_t * tmp_l = fd_ptr_if( 0UL==(inc_idx & (2UL<<layer)), state->node_buf+layer, sibling );
     435     3002481 :     fd_bmtree_node_t * tmp_r = fd_ptr_if( 0UL==(inc_idx & (2UL<<layer)), sibling, state->node_buf+layer );
     436     3002481 :     fd_bmtree_private_merge( state->node_buf+layer+1UL, tmp_l, tmp_r, state->hash_sz, state->prefix_sz );
     437             : 
     438     3002481 :     inc_idx = fd_ulong_insert_lsb( inc_idx, (int)layer+2, (2UL<<layer)-1UL );
     439     3002481 :   }
     440             :   /* TODO: Prove inc_idx < inclusion_proof_sz at this point */
     441     3104601 :   if( FD_UNLIKELY( HAS(inc_idx) &&
     442     3104601 :         !fd_memeq( state->node_buf[layer].hash, state->inclusion_proofs[ inc_idx ].hash, state->hash_sz ) ) )
     443           3 :     return 0;
     444             : 
     445             :   /* Cache the nodes from the main branch */
     446     3104598 :   inc_idx = 2UL * idx;
     447     9986469 :   for( ulong i=0UL; i<=layer; i++ ) {
     448     6881871 :     state->inclusion_proofs[ inc_idx ] = state->node_buf[ i ];
     449     6881871 :     state->inclusion_proofs_valid[inc_idx/64UL] |= ipfset_ele( inc_idx%64UL );
     450     6881871 :     inc_idx = fd_ulong_insert_lsb( inc_idx, (int)i+2, (2UL<<i)-1UL );
     451     6881871 :   }
     452             : 
     453             :   /* Cache the inclusion proof */
     454     3104598 :   inc_idx = 2UL * idx;
     455     3879390 :   for( ulong i=0UL; i<proof_depth; i++ ) {
     456      774792 :     ulong sibling_idx = inc_idx ^ (2UL<<i);
     457      774792 :     fd_memcpy( state->inclusion_proofs[ sibling_idx ].hash, proof+hash_sz*i, hash_sz );
     458      774792 :     state->inclusion_proofs_valid[sibling_idx/64UL] |= ipfset_ele( sibling_idx%64UL );
     459      774792 :     inc_idx = fd_ulong_insert_lsb( inc_idx, (int)i+2, (2UL<<i)-1UL );
     460      774792 :   }
     461             : 
     462     3104598 :   if( FD_UNLIKELY( opt_root != NULL ) ) *opt_root = state->node_buf[ layer ];
     463             : 
     464     3104598 :   return 1;
     465     3104601 : }
     466             : 
     467             : uchar *
     468         873 : fd_bmtree_commitp_fini( fd_bmtree_commit_t * state, ulong leaf_cnt ) {
     469         873 :   ulong inclusion_proof_sz = state->inclusion_proof_sz;
     470         873 :   ulong hash_sz = state->hash_sz;
     471         873 :   fd_bmtree_node_t * node_buf = state->node_buf;
     472             : 
     473         873 :   if( FD_UNLIKELY( leaf_cnt==0UL ) ) return NULL;
     474             : 
     475             :   /* Further hashing required if leaf count is not a power of two. */
     476         873 :   if( FD_LIKELY( !fd_ulong_is_pow2( leaf_cnt ) ) ) {
     477             : 
     478             :     /* Start at the first layer where number of nodes is odd. */
     479         771 :     ulong layer     = (ulong)fd_ulong_find_lsb( leaf_cnt );
     480         771 :     ulong layer_cnt = leaf_cnt >> layer; /* number of nodes in this layer */
     481         771 :     ulong inc_idx   = (layer_cnt<<(layer+1UL)) - (1UL<<layer) - 1UL;
     482             : 
     483             :     /* When you go up and left in the tree, the index decreases.  If you
     484             :        are the left child of the parent (the only way you can go up and
     485             :        right), then bit 1<<(l+1) is unset, and going up and right will
     486             :        not change that.  This means that if you start at a leaf node in
     487             :        the right half of the tree (which is always the case for the last
     488             :        leaf node), then going up will never go past the next power of 2
     489             :        beyond the current one.  Since inclusion_proof_sz is a power of
     490             :        2, that means it suffices to check this once and not every time
     491             :        we go up the tree. */
     492             :     /* TODO: Make this argument more formal */
     493         771 :     if( FD_UNLIKELY( inc_idx >= inclusion_proof_sz ) ) return NULL;
     494             : 
     495         771 :     if( FD_UNLIKELY( !HAS(inc_idx) ) ) return NULL;
     496         771 :     node_buf[layer] = state->inclusion_proofs[inc_idx];
     497             : 
     498             :     /* Ascend until we reach the root node.  Calculate branch nodes
     499             :        along the way.  We use the fd_ulong_if to encourage inlining of
     500             :        merge and unnecessary branch elimination by cmov. */
     501        5526 :     while( layer_cnt>1UL ) {
     502             :       /* If this is a 2-child parent, make sure we have the sibling. */
     503        4755 :       if( FD_UNLIKELY( !(layer_cnt&1UL) & !HAS(inc_idx^(2UL<<layer)) ) ) return NULL;
     504             : 
     505        4755 :       fd_bmtree_node_t const * tmp_l = fd_ptr_if( layer_cnt & 1UL, node_buf+layer /* 1 child */, state->inclusion_proofs + (inc_idx^(2UL<<layer))/* 2 children */ ); /* cmov */
     506             : 
     507        4755 :       fd_bmtree_private_merge( node_buf+layer+1UL, tmp_l, node_buf+layer, hash_sz, state->prefix_sz );
     508             : 
     509        4755 :       layer++; layer_cnt = (layer_cnt+1UL) >> 1;
     510             : 
     511        4755 :       inc_idx   = (layer_cnt<<(layer+1UL)) - (1UL<<layer) - 1UL;
     512             : 
     513        4755 :       if( FD_UNLIKELY( HAS( inc_idx ) && !fd_memeq( node_buf[layer].hash, state->inclusion_proofs[inc_idx].hash, hash_sz ) ) )
     514           0 :         return NULL;
     515        4755 :     }
     516             : 
     517             :     /* Cache that path */
     518         771 :     layer     = (ulong)fd_ulong_find_lsb( leaf_cnt );
     519         771 :     layer_cnt = leaf_cnt >> layer; /* number of nodes in this layer */
     520         771 :     inc_idx   = (layer_cnt<<(layer+1UL)) - (1UL<<layer) - 1UL;
     521        5526 :     while( layer_cnt>1UL ) {
     522        4755 :       layer++; layer_cnt = (layer_cnt+1UL) >> 1;
     523        4755 :       inc_idx   = (layer_cnt<<(layer+1UL)) - (1UL<<layer) - 1UL;
     524             : 
     525        4755 :       state->inclusion_proofs[inc_idx] = node_buf[layer];
     526        4755 :       state->inclusion_proofs_valid[inc_idx/64UL] |= ipfset_ele( inc_idx%64UL );
     527        4755 :     }
     528         771 :   }
     529             : 
     530             :   /* Now check to make sure we have all the nodes we should */
     531         873 :   ulong root_idx = fd_ulong_pow2_up( leaf_cnt ) - 1UL;
     532             :   /* We should definitely have all nodes <= root_idx */
     533         873 :   ulong i=0UL;
     534       52134 :   for( ; i<(root_idx+1UL)/64UL; i++ ) if( FD_UNLIKELY( !ipfset_is_full( state->inclusion_proofs_valid[i] ) ) ) return NULL;
     535             : 
     536        6876 :   for( ulong layer=0UL; (1UL<<layer)-1UL < root_idx; layer++ ) {
     537             :     /* Loop over indices s.t. 64*( (root_idx+1)/64 ) <= index <=  that match the bit
     538             :        pattern 01..1 with `layer` 1s */
     539        6003 :     ulong min_idx_for_layer = fd_ulong_insert_lsb( 64UL*((root_idx+1UL)/64UL), 1+(int)layer, (1UL<<layer)-1UL );
     540        6003 :     ulong max_idx_for_layer = fd_ulong_insert_lsb( (leaf_cnt - 1UL)<<1,        1+(int)layer, (1UL<<layer)-1UL );
     541     2935467 :     for( ulong inc_idx=min_idx_for_layer; inc_idx<=max_idx_for_layer; inc_idx += 2UL<<layer ) {
     542     2929464 :       if( FD_UNLIKELY( !HAS(inc_idx) ) ) return NULL;
     543     2929464 :     }
     544        6003 :   }
     545             :   /* If the root idx is less than 63, the previous loop doesn't check
     546             :      it. */
     547         873 :   if( !HAS( root_idx ) ) return NULL;
     548             : 
     549         873 :   state->leaf_cnt = leaf_cnt;
     550         873 :   return state->inclusion_proofs[root_idx].hash;
     551         873 : }

Generated by: LCOV version 1.14