Line data Source code
1 : #define FD_SHA512_BATCH_IMPL 2
2 :
3 : #include "fd_sha512.h"
4 : #include "../../util/simd/fd_avx512.h"
5 :
6 : FD_STATIC_ASSERT( FD_SHA512_BATCH_MAX==8UL, compat );
7 :
8 : void
9 : fd_sha512_private_batch_avx( ulong batch_cnt,
10 : void const * batch_data,
11 : ulong const * batch_sz,
12 : void * const * batch_hash );
13 :
14 : void
15 : fd_sha512_private_batch_avx512( ulong batch_cnt,
16 : void const * _batch_data,
17 : ulong const * batch_sz,
18 1137255 : void * const * _batch_hash ) {
19 :
20 1137255 : if( FD_UNLIKELY( batch_cnt<5UL ) ) {
21 303679 : fd_sha512_private_batch_avx( batch_cnt, _batch_data, batch_sz, _batch_hash );
22 303679 : return;
23 303679 : }
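  /* (With fewer than 5 messages, more than half of the 8 AVX-512 lanes
     used below would sit idle, so the narrower AVX batch path above
     tends to be faster; the crossover at 5 is a tuning choice.) */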
24 :
25 : /* SHA appends to the end of each message 17 bytes of additional data
26 : (a message terminator byte and the big endian uint128 with the
27 : message size in bits) and enough zero padding to make the message
28 : an integer number of blocks long. We compute the 1 or 2 tail
29 : blocks of each message here. We then process complete blocks of
30 : the original messages in place, switching to processing these tail
31 : blocks in the same pass toward the end. TODO: This code could
32 : probably be SIMD optimized slightly more (this is where all the
33 : really performance-suboptimal parts of the SHA design live so it is
34 : just inherently gross). The main optimization would probably be to
35 : allow tail reading to use a faster memcpy and then maybe some
36 : vectorization of the bswap. */
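  /* Worked example of the tail sizing below (FD_SHA512_PRIVATE_BUF_MAX
     is the 128 byte SHA-512 block size): sz==300 gives tail_data_sz==44,
     tail_data_off==256 and tail_sz==128 (1 tail block), while sz==115
     gives tail_data_sz==115, tail_data_off==0 and tail_sz==256 (2 tail
     blocks, since 115+17 > 128 the padding spills into a second block). */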
37 :
38 833576 : ulong const * batch_data = (ulong const *)_batch_data;
39 :
40 833576 : ulong batch_tail_data[ FD_SHA512_BATCH_MAX ] __attribute__((aligned(64)));
41 833576 : ulong batch_tail_rem [ FD_SHA512_BATCH_MAX ] __attribute__((aligned(64)));
42 :
43 833576 : uchar scratch[ FD_SHA512_BATCH_MAX*2UL*FD_SHA512_PRIVATE_BUF_MAX ] __attribute__((aligned(128)));
44 833576 : do {
45 833576 : ulong scratch_free = (ulong)scratch;
46 :
47 833576 : wwv_t zero = wwv_zero();
48 :
49 7046678 : for( ulong batch_idx=0UL; batch_idx<batch_cnt; batch_idx++ ) {
50 :
51 : /* Allocate the tail blocks for this message */
52 :
53 6213102 : ulong data = batch_data[ batch_idx ];
54 6213102 : ulong sz = batch_sz [ batch_idx ];
55 :
56 6213102 : ulong tail_data = scratch_free;
57 6213102 : ulong tail_data_sz = sz & (FD_SHA512_PRIVATE_BUF_MAX-1UL);
58 6213102 : ulong tail_data_off = fd_ulong_align_dn( sz, FD_SHA512_PRIVATE_BUF_MAX );
59 6213102 : ulong tail_sz = fd_ulong_align_up( tail_data_sz+17UL, FD_SHA512_PRIVATE_BUF_MAX );
60 :
61 6213102 : batch_tail_data[ batch_idx ] = tail_data;
62 6213102 : batch_tail_rem [ batch_idx ] = tail_sz >> FD_SHA512_PRIVATE_LG_BUF_MAX;
63 :
64 6213102 : scratch_free += tail_sz;
65 :
66 : /* Populate the tail blocks. We first clear the blocks (note that
67 : it is okay to clobber bytes 128:255 if tail_sz is only 128, saving
68 : a nasty branch). Then we copy any straggler data bytes into
69 : the tail, terminate the message, and finally record the size of
70 : the message in bits at the end as a big endian uint128. */
71 :
72 6213102 : wwv_st( (ulong *) tail_data, zero );
73 6213102 : wwv_st( (ulong *)(tail_data+ 64UL), zero );
74 6213102 : wwv_st( (ulong *)(tail_data+128UL), zero );
75 6213102 : wwv_st( (ulong *)(tail_data+192UL), zero );
76 :
77 6213102 : # if 1 /* See notes in fd_sha256_batch_avx.c for more details here */
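  /* The descending width copies below move the 0..127 straggler bytes
     without calling memcpy on a small variable size; the #else branch
     is the straightforward equivalent. */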
78 6213102 : ulong src = data + tail_data_off;
79 6213102 : ulong dst = tail_data;
80 6213102 : ulong rem = tail_data_sz;
81 9269790 : while( rem>=64UL ) { wwv_st( (ulong *)dst, wwv_ldu( (ulong const *)src ) ); dst += 64UL; src += 64UL; rem -= 64UL; }
82 11614765 : while( rem>= 8UL ) { *(ulong *)dst = FD_LOAD( ulong, src ); dst += 8UL; src += 8UL; rem -= 8UL; }
83 6213102 : if ( rem>= 4UL ) { *(uint *)dst = FD_LOAD( uint, src ); dst += 4UL; src += 4UL; rem -= 4UL; }
84 6213102 : if ( rem>= 2UL ) { *(ushort *)dst = FD_LOAD( ushort, src ); dst += 2UL; src += 2UL; rem -= 2UL; }
85 6213102 : if ( rem ) { *(uchar *)dst = FD_LOAD( uchar, src ); dst++; }
86 6213102 : *(uchar *)dst = (uchar)0x80;
87 : # else
88 : fd_memcpy( (void *)tail_data, (void const *)(data + tail_data_off), tail_data_sz );
89 : *((uchar *)(tail_data+tail_data_sz)) = (uchar)0x80;
90 : # endif
91 :
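  /* The two stores below record the message bit length as a big endian
     uint128: sz<<3 is the bit count and sz>>61 carries its upper bits. */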
92 6213102 : *((ulong *)(tail_data+tail_sz-16UL )) = fd_ulong_bswap( sz>>61 );
93 6213102 : *((ulong *)(tail_data+tail_sz- 8UL )) = fd_ulong_bswap( sz<< 3 );
94 6213102 : }
95 833576 : } while(0);
96 :
97 833576 : wwv_t s0 = wwv_bcast( 0x6a09e667f3bcc908UL );
98 833576 : wwv_t s1 = wwv_bcast( 0xbb67ae8584caa73bUL );
99 833576 : wwv_t s2 = wwv_bcast( 0x3c6ef372fe94f82bUL );
100 833576 : wwv_t s3 = wwv_bcast( 0xa54ff53a5f1d36f1UL );
101 833576 : wwv_t s4 = wwv_bcast( 0x510e527fade682d1UL );
102 833576 : wwv_t s5 = wwv_bcast( 0x9b05688c2b3e6c1fUL );
103 833576 : wwv_t s6 = wwv_bcast( 0x1f83d9abfb41bd6bUL );
104 833576 : wwv_t s7 = wwv_bcast( 0x5be0cd19137e2179UL );
105 :
106 833576 : wwv_t zero = wwv_zero();
107 833576 : wwv_t one = wwv_one();
108 833576 : wwv_t wwv_128 = wwv_bcast( FD_SHA512_PRIVATE_BUF_MAX );
109 833576 : wwv_t W_sentinel = wwv_bcast( (ulong)scratch );
110 :
111 833576 : wwv_t tail = wwv_ld( batch_tail_data );
112 833576 : wwv_t tail_rem = wwv_ld( batch_tail_rem );
113 833576 : wwv_t W = wwv_ld( batch_data );
114 833576 : wwv_t block_rem = wwv_if( (1<<batch_cnt)-1,
115 833576 : wwv_add( wwv_shr( wwv_ld( batch_sz ), FD_SHA512_PRIVATE_LG_BUF_MAX ), tail_rem ), zero );
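  /* block_rem(lane) is how many 128 byte blocks that lane still has to
     process: the whole blocks of the original message plus its 1 or 2
     tail blocks.  Lanes at or above batch_cnt are masked to zero so
     they start (and stay) inactive. */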
116 5896745 : for(;;) {
117 5896745 : int active_lane = wwv_ne( block_rem, zero );
118 5896745 : if( FD_UNLIKELY( !active_lane ) ) break;
119 :
120 : /* Switch lanes that have hit the end of their in-place bulk
121 : processing to their out-of-place scratch tail regions as
122 : necessary. */
123 :
124 5063169 : W = wwv_if( wwv_eq( block_rem, tail_rem ), tail, W );
125 :
126 : /* At this point, we have at least 1 block in this message segment
127 : pass that has not been processed. Load the next 128 bytes of
128 : each unprocessed block. Inactive lanes (e.g. message segments
129 : in this pass for which we've already processed all the blocks)
130 : will load garbage from a sentinel location (and the result of
131 : the state computations for the inactive lane will be ignored). */
132 :
133 5063169 : ulong _W0; ulong _W1; ulong _W2; ulong _W3; ulong _W4; ulong _W5; ulong _W6; ulong _W7;
134 5063169 : wwv_unpack( wwv_if( active_lane, W, W_sentinel ), _W0,_W1,_W2,_W3,_W4,_W5,_W6,_W7 );
135 5063169 : uchar const * W0 = (uchar const *)_W0; uchar const * W1 = (uchar const *)_W1;
136 5063169 : uchar const * W2 = (uchar const *)_W2; uchar const * W3 = (uchar const *)_W3;
137 5063169 : uchar const * W4 = (uchar const *)_W4; uchar const * W5 = (uchar const *)_W5;
138 5063169 : uchar const * W6 = (uchar const *)_W6; uchar const * W7 = (uchar const *)_W7;
139 :
140 5063169 : wwv_t x0; wwv_t x1; wwv_t x2; wwv_t x3; wwv_t x4; wwv_t x5; wwv_t x6; wwv_t x7;
141 5063169 : wwv_transpose_8x8( wwv_bswap( wwv_ldu( W0 ) ), wwv_bswap( wwv_ldu( W1 ) ),
142 5063169 : wwv_bswap( wwv_ldu( W2 ) ), wwv_bswap( wwv_ldu( W3 ) ),
143 5063169 : wwv_bswap( wwv_ldu( W4 ) ), wwv_bswap( wwv_ldu( W5 ) ),
144 5063169 : wwv_bswap( wwv_ldu( W6 ) ), wwv_bswap( wwv_ldu( W7 ) ), x0, x1, x2, x3, x4, x5, x6, x7 );
145 :
146 5063169 : wwv_t x8; wwv_t x9; wwv_t xa; wwv_t xb; wwv_t xc; wwv_t xd; wwv_t xe; wwv_t xf;
147 5063169 : wwv_transpose_8x8( wwv_bswap( wwv_ldu( W0+64 ) ), wwv_bswap( wwv_ldu( W1+64 ) ),
148 5063169 : wwv_bswap( wwv_ldu( W2+64 ) ), wwv_bswap( wwv_ldu( W3+64 ) ),
149 5063169 : wwv_bswap( wwv_ldu( W4+64 ) ), wwv_bswap( wwv_ldu( W5+64 ) ),
150 5063169 : wwv_bswap( wwv_ldu( W6+64 ) ), wwv_bswap( wwv_ldu( W7+64 ) ), x8, x9, xa, xb, xc, xd, xe, xf );
151 :
152 : /* Compute the SHA-512 state updates */
153 :
154 5063169 : wwv_t a = s0; wwv_t b = s1; wwv_t c = s2; wwv_t d = s3; wwv_t e = s4; wwv_t f = s5; wwv_t g = s6; wwv_t h = s7;
155 :
156 5063169 : static ulong const K[80] = { /* FIXME: Reuse with other functions */
157 5063169 : 0x428a2f98d728ae22UL, 0x7137449123ef65cdUL, 0xb5c0fbcfec4d3b2fUL, 0xe9b5dba58189dbbcUL,
158 5063169 : 0x3956c25bf348b538UL, 0x59f111f1b605d019UL, 0x923f82a4af194f9bUL, 0xab1c5ed5da6d8118UL,
159 5063169 : 0xd807aa98a3030242UL, 0x12835b0145706fbeUL, 0x243185be4ee4b28cUL, 0x550c7dc3d5ffb4e2UL,
160 5063169 : 0x72be5d74f27b896fUL, 0x80deb1fe3b1696b1UL, 0x9bdc06a725c71235UL, 0xc19bf174cf692694UL,
161 5063169 : 0xe49b69c19ef14ad2UL, 0xefbe4786384f25e3UL, 0x0fc19dc68b8cd5b5UL, 0x240ca1cc77ac9c65UL,
162 5063169 : 0x2de92c6f592b0275UL, 0x4a7484aa6ea6e483UL, 0x5cb0a9dcbd41fbd4UL, 0x76f988da831153b5UL,
163 5063169 : 0x983e5152ee66dfabUL, 0xa831c66d2db43210UL, 0xb00327c898fb213fUL, 0xbf597fc7beef0ee4UL,
164 5063169 : 0xc6e00bf33da88fc2UL, 0xd5a79147930aa725UL, 0x06ca6351e003826fUL, 0x142929670a0e6e70UL,
165 5063169 : 0x27b70a8546d22ffcUL, 0x2e1b21385c26c926UL, 0x4d2c6dfc5ac42aedUL, 0x53380d139d95b3dfUL,
166 5063169 : 0x650a73548baf63deUL, 0x766a0abb3c77b2a8UL, 0x81c2c92e47edaee6UL, 0x92722c851482353bUL,
167 5063169 : 0xa2bfe8a14cf10364UL, 0xa81a664bbc423001UL, 0xc24b8b70d0f89791UL, 0xc76c51a30654be30UL,
168 5063169 : 0xd192e819d6ef5218UL, 0xd69906245565a910UL, 0xf40e35855771202aUL, 0x106aa07032bbd1b8UL,
169 5063169 : 0x19a4c116b8d2d0c8UL, 0x1e376c085141ab53UL, 0x2748774cdf8eeb99UL, 0x34b0bcb5e19b48a8UL,
170 5063169 : 0x391c0cb3c5c95a63UL, 0x4ed8aa4ae3418acbUL, 0x5b9cca4f7763e373UL, 0x682e6ff3d6b2b8a3UL,
171 5063169 : 0x748f82ee5defb2fcUL, 0x78a5636f43172f60UL, 0x84c87814a1f0ab72UL, 0x8cc702081a6439ecUL,
172 5063169 : 0x90befffa23631e28UL, 0xa4506cebde82bde9UL, 0xbef9a3f7b2c67915UL, 0xc67178f2e372532bUL,
173 5063169 : 0xca273eceea26619cUL, 0xd186b8c721c0c207UL, 0xeada7dd6cde0eb1eUL, 0xf57d4f7fee6ed178UL,
174 5063169 : 0x06f067aa72176fbaUL, 0x0a637dc5a2c898a6UL, 0x113f9804bef90daeUL, 0x1b710b35131c471bUL,
175 5063169 : 0x28db77f523047d84UL, 0x32caab7b40c72493UL, 0x3c9ebe0a15c9bebcUL, 0x431d67c49c100d4cUL,
176 5063169 : 0x4cc5d4becb3e42b6UL, 0x597f299cfc657e2aUL, 0x5fcb6fab3ad6faecUL, 0x6c44198c4a475817UL
177 5063169 : };
178 :
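  /* Standard FIPS 180-4 SHA-512 round functions (Sigma0/Sigma1 for the
     working state, sigma0/sigma1 for the message schedule, Ch and Maj),
     evaluated lane-wise across all 8 messages at once. */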
179 5063169 : # define Sigma0(x) wwv_xor( wwv_ror(x,28), wwv_xor( wwv_ror(x,34), wwv_ror(x,39) ) )
180 5063169 : # define Sigma1(x) wwv_xor( wwv_ror(x,14), wwv_xor( wwv_ror(x,18), wwv_ror(x,41) ) )
181 5063169 : # define sigma0(x) wwv_xor( wwv_ror(x, 1), wwv_xor( wwv_ror(x, 8), wwv_shr(x, 7) ) )
182 5063169 : # define sigma1(x) wwv_xor( wwv_ror(x,19), wwv_xor( wwv_ror(x,61), wwv_shr(x, 6) ) )
183 5063169 : # define Ch(x,y,z) wwv_xor( wwv_and(x,y), wwv_andnot(x,z) )
184 5063169 : # define Maj(x,y,z) wwv_xor( wwv_and(x,y), wwv_xor( wwv_and(x,z), wwv_and(y,z) ) )
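  /* SHA_CORE applies one SHA-512 round to every lane: T1 and T2 are
     computed from the round's schedule word xi and constant ki, then the
     working variables a..h are rotated. */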
185 5063169 : # define SHA_CORE(xi,ki) \
186 405053520 : T1 = wwv_add( wwv_add(xi,ki), wwv_add( wwv_add( h, Sigma1(e) ), Ch(e, f, g) ) ); \
187 405053520 : T2 = wwv_add( Sigma0(a), Maj(a, b, c) ); \
188 405053520 : h = g; \
189 405053520 : g = f; \
190 405053520 : f = e; \
191 405053520 : e = wwv_add( d, T1 ); \
192 405053520 : d = c; \
193 405053520 : c = b; \
194 405053520 : b = a; \
195 405053520 : a = wwv_add( T1, T2 )
196 :
197 5063169 : wwv_t T1;
198 5063169 : wwv_t T2;
199 :
200 5063169 : SHA_CORE( x0, wwv_bcast( K[ 0] ) );
201 5063169 : SHA_CORE( x1, wwv_bcast( K[ 1] ) );
202 5063169 : SHA_CORE( x2, wwv_bcast( K[ 2] ) );
203 5063169 : SHA_CORE( x3, wwv_bcast( K[ 3] ) );
204 5063169 : SHA_CORE( x4, wwv_bcast( K[ 4] ) );
205 5063169 : SHA_CORE( x5, wwv_bcast( K[ 5] ) );
206 5063169 : SHA_CORE( x6, wwv_bcast( K[ 6] ) );
207 5063169 : SHA_CORE( x7, wwv_bcast( K[ 7] ) );
208 5063169 : SHA_CORE( x8, wwv_bcast( K[ 8] ) );
209 5063169 : SHA_CORE( x9, wwv_bcast( K[ 9] ) );
210 5063169 : SHA_CORE( xa, wwv_bcast( K[10] ) );
211 5063169 : SHA_CORE( xb, wwv_bcast( K[11] ) );
212 5063169 : SHA_CORE( xc, wwv_bcast( K[12] ) );
213 5063169 : SHA_CORE( xd, wwv_bcast( K[13] ) );
214 5063169 : SHA_CORE( xe, wwv_bcast( K[14] ) );
215 5063169 : SHA_CORE( xf, wwv_bcast( K[15] ) );
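  /* Rounds 16..79: x0..xf form a rolling 16 entry window of the message
     schedule, expanded in place with the SHA-512 recurrence
     W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */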
216 25315845 : for( ulong i=16UL; i<80UL; i+=16UL ) {
217 20252676 : x0 = wwv_add( wwv_add( x0, sigma0(x1) ), wwv_add( sigma1(xe), x9 ) ); SHA_CORE( x0, wwv_bcast( K[i ] ) );
218 20252676 : x1 = wwv_add( wwv_add( x1, sigma0(x2) ), wwv_add( sigma1(xf), xa ) ); SHA_CORE( x1, wwv_bcast( K[i+ 1UL] ) );
219 20252676 : x2 = wwv_add( wwv_add( x2, sigma0(x3) ), wwv_add( sigma1(x0), xb ) ); SHA_CORE( x2, wwv_bcast( K[i+ 2UL] ) );
220 20252676 : x3 = wwv_add( wwv_add( x3, sigma0(x4) ), wwv_add( sigma1(x1), xc ) ); SHA_CORE( x3, wwv_bcast( K[i+ 3UL] ) );
221 20252676 : x4 = wwv_add( wwv_add( x4, sigma0(x5) ), wwv_add( sigma1(x2), xd ) ); SHA_CORE( x4, wwv_bcast( K[i+ 4UL] ) );
222 20252676 : x5 = wwv_add( wwv_add( x5, sigma0(x6) ), wwv_add( sigma1(x3), xe ) ); SHA_CORE( x5, wwv_bcast( K[i+ 5UL] ) );
223 20252676 : x6 = wwv_add( wwv_add( x6, sigma0(x7) ), wwv_add( sigma1(x4), xf ) ); SHA_CORE( x6, wwv_bcast( K[i+ 6UL] ) );
224 20252676 : x7 = wwv_add( wwv_add( x7, sigma0(x8) ), wwv_add( sigma1(x5), x0 ) ); SHA_CORE( x7, wwv_bcast( K[i+ 7UL] ) );
225 20252676 : x8 = wwv_add( wwv_add( x8, sigma0(x9) ), wwv_add( sigma1(x6), x1 ) ); SHA_CORE( x8, wwv_bcast( K[i+ 8UL] ) );
226 20252676 : x9 = wwv_add( wwv_add( x9, sigma0(xa) ), wwv_add( sigma1(x7), x2 ) ); SHA_CORE( x9, wwv_bcast( K[i+ 9UL] ) );
227 20252676 : xa = wwv_add( wwv_add( xa, sigma0(xb) ), wwv_add( sigma1(x8), x3 ) ); SHA_CORE( xa, wwv_bcast( K[i+10UL] ) );
228 20252676 : xb = wwv_add( wwv_add( xb, sigma0(xc) ), wwv_add( sigma1(x9), x4 ) ); SHA_CORE( xb, wwv_bcast( K[i+11UL] ) );
229 20252676 : xc = wwv_add( wwv_add( xc, sigma0(xd) ), wwv_add( sigma1(xa), x5 ) ); SHA_CORE( xc, wwv_bcast( K[i+12UL] ) );
230 20252676 : xd = wwv_add( wwv_add( xd, sigma0(xe) ), wwv_add( sigma1(xb), x6 ) ); SHA_CORE( xd, wwv_bcast( K[i+13UL] ) );
231 20252676 : xe = wwv_add( wwv_add( xe, sigma0(xf) ), wwv_add( sigma1(xc), x7 ) ); SHA_CORE( xe, wwv_bcast( K[i+14UL] ) );
232 20252676 : xf = wwv_add( wwv_add( xf, sigma0(x0) ), wwv_add( sigma1(xd), x8 ) ); SHA_CORE( xf, wwv_bcast( K[i+15UL] ) );
233 20252676 : }
234 :
235 : /* Apply the state updates to the active lanes */
236 :
237 5063169 : s0 = wwv_add_if( active_lane, s0, a, s0 );
238 5063169 : s1 = wwv_add_if( active_lane, s1, b, s1 );
239 5063169 : s2 = wwv_add_if( active_lane, s2, c, s2 );
240 5063169 : s3 = wwv_add_if( active_lane, s3, d, s3 );
241 5063169 : s4 = wwv_add_if( active_lane, s4, e, s4 );
242 5063169 : s5 = wwv_add_if( active_lane, s5, f, s5 );
243 5063169 : s6 = wwv_add_if( active_lane, s6, g, s6 );
244 5063169 : s7 = wwv_add_if( active_lane, s7, h, s7 );
245 :
246 : /* Advance to the next message segment blocks. In pseudo code,
247 : the below is:
248 :
249 : W += 128; if( block_rem ) block_rem--;
250 :
251 : Since we do not load anything at W(lane) above unless
252 : block_rem(lane) is non-zero, we can omit vector conditional
253 : operations for W(lane) below. */
254 :
255 5063169 : W = wwv_add( W, wwv_128 );
256 5063169 : block_rem = wwv_sub_if( active_lane, block_rem, one, block_rem );
257 :
258 5063169 : # undef SHA_CORE
259 5063169 : # undef Sigma0
260 5063169 : # undef Sigma1
261 5063169 : # undef sigma0
262 5063169 : # undef sigma1
263 5063169 : # undef Ch
264 5063169 : # undef Maj
265 :
266 5063169 : }
267 :
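  /* s0..s7 currently hold one state word per message across lanes;
     transpose so that si holds the full 8 word state of message i,
     ready to be byte swapped to big endian and stored below. */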
268 833576 : wwv_transpose_8x8( s0,s1,s2,s3,s4,s5,s6,s7, s0,s1,s2,s3,s4,s5,s6,s7 );
269 :
270 833576 : ulong * const * batch_hash = (ulong * const *)_batch_hash;
271 833576 : switch( batch_cnt ) { /* application dependent probabilities */
272 605937 : case 8UL: wwv_stu( batch_hash[7], wwv_bswap( s7 ) ); __attribute__((fallthrough));
273 681753 : case 7UL: wwv_stu( batch_hash[6], wwv_bswap( s6 ) ); __attribute__((fallthrough));
274 757532 : case 6UL: wwv_stu( batch_hash[5], wwv_bswap( s5 ) ); __attribute__((fallthrough));
275 833576 : case 5UL: wwv_stu( batch_hash[4], wwv_bswap( s4 ) ); __attribute__((fallthrough));
276 833576 : case 4UL: wwv_stu( batch_hash[3], wwv_bswap( s3 ) ); __attribute__((fallthrough));
277 833576 : case 3UL: wwv_stu( batch_hash[2], wwv_bswap( s2 ) ); __attribute__((fallthrough));
278 833576 : case 2UL: wwv_stu( batch_hash[1], wwv_bswap( s1 ) ); __attribute__((fallthrough));
279 833576 : case 1UL: wwv_stu( batch_hash[0], wwv_bswap( s0 ) ); __attribute__((fallthrough));
280 833576 : default: break;
281 833576 : }
282 833576 : }
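/* Hypothetical call sketch (illustration only, not part of this file),
   assuming the caller packs one pointer, size and 64 byte output buffer
   per message, mirroring how batch_data / batch_sz / _batch_hash are
   indexed above:

     uchar const * msg [ 8 ];                      // message pointers
     ulong         sz  [ 8 ];                      // message sizes in bytes
     uchar         out [ 8 ][ 64 ];                // one 64 byte digest per message
     void *        hash[ 8 ];
     for( ulong i=0UL; i<8UL; i++ ) hash[ i ] = out[ i ];
     ... fill msg[ i ] / sz[ i ] ...
     fd_sha512_private_batch_avx512( 8UL, msg, sz, hash );
*/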