/* Note: This file is auto generated. */
#include "fd_reedsol_ppt.h"
#include "fd_reedsol_fderiv.h"
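
/* Editorial summary (inferred from the code below; not authoritative
   project documentation): fd_reedsol_private_recover_var_16 recovers
   erased shreds for a Reed-Solomon code with data_shred_cnt<=16.  The
   data shreds are treated as evaluations of a polynomial of degree
   < data_shred_cnt.  The function reconstructs that polynomial from
   exactly data_shred_cnt un-erased shreds via a pi-scaled
   IFFT / formal-derivative / FFT pipeline, re-evaluates it at every
   shred position, stores the result at erased positions, and compares
   it at un-erased positions to detect corruption.  It returns
   FD_REEDSOL_SUCCESS on success, FD_REEDSOL_ERR_PARTIAL if fewer than
   data_shred_cnt un-erased shreds are available, and
   FD_REEDSOL_ERR_CORRUPT if a regenerated value disagrees with a
   stored one.

   Hypothetical call for illustration only (sizes and names here are
   assumptions; real callers normally go through the public fd_reedsol
   recover API rather than this private helper):

     uchar * shreds[ 32 ];  // data_shred_cnt+parity_shred_cnt pointers
     uchar   erased[ 32 ];  // 1 if shreds[ i ] is missing, else 0
     // ... point each shreds[ i ] at a writable shred_sz-byte buffer ...
     int err = fd_reedsol_private_recover_var_16( shred_sz, shreds,
                                                  16UL, 16UL, erased ); */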

FD_FN_UNSANITIZED int
fd_reedsol_private_recover_var_16( ulong           shred_sz,
                                   uchar * const * shred,
                                   ulong           data_shred_cnt,
                                   ulong           parity_shred_cnt,
                                   uchar const *   erased ) {
  uchar _erased[ 16 ] W_ATTR;
  uchar pi[ 16 ] W_ATTR;
  ulong shred_cnt = data_shred_cnt + parity_shred_cnt;
  ulong loaded_cnt = 0UL;
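
  /* Select exactly data_shred_cnt un-erased shreds as inputs and mark
     every other position in _erased.  Un-erased shreds beyond the
     first data_shred_cnt are deliberately treated as erased here: they
     are not needed to pin down the polynomial, but they will be
     regenerated and compared against the stored bytes below, which is
     how corrupt shreds get detected. */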
  for( ulong i=0UL; i<16UL; i++ ) {
    int load_shred = ((i<shred_cnt) & (loaded_cnt<data_shred_cnt)) && ( erased[ i ]==0 );
    _erased[ i ] = !load_shred;
    loaded_cnt += (ulong)load_shred;
  }
  if( FD_UNLIKELY( loaded_cnt<data_shred_cnt ) ) return FD_REEDSOL_ERR_PARTIAL;

  fd_reedsol_private_gen_pi_16( _erased, pi );
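
  /* pi[ i ] now holds the per-position scaling factor derived from the
     erasure pattern.  (Editorial note: in the Lin-Chung-Han style
     construction this code follows, these factors come from the error
     locator polynomial and its formal derivative;
     fd_reedsol_private_gen_pi_16 is the authoritative definition.) */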

  /* Accumulate the difference for each shred that was regenerated.  It
     must be 0.  Otherwise there's a corrupt shred. */
  gf_t diff = gf_zero();

  for( ulong shred_pos=0UL; shred_pos<shred_sz; /* advanced manually at end of loop */ ) {
    /* Load exactly data_shred_cnt un-erased input shreds into their
       respective vectors.  Fill the erased vectors with 0. */
    gf_t in00 = _erased[  0 ] ? gf_zero() : gf_ldu( shred[  0 ] + shred_pos );
    gf_t in01 = _erased[  1 ] ? gf_zero() : gf_ldu( shred[  1 ] + shred_pos );
    gf_t in02 = _erased[  2 ] ? gf_zero() : gf_ldu( shred[  2 ] + shred_pos );
    gf_t in03 = _erased[  3 ] ? gf_zero() : gf_ldu( shred[  3 ] + shred_pos );
    gf_t in04 = _erased[  4 ] ? gf_zero() : gf_ldu( shred[  4 ] + shred_pos );
    gf_t in05 = _erased[  5 ] ? gf_zero() : gf_ldu( shred[  5 ] + shred_pos );
    gf_t in06 = _erased[  6 ] ? gf_zero() : gf_ldu( shred[  6 ] + shred_pos );
    gf_t in07 = _erased[  7 ] ? gf_zero() : gf_ldu( shred[  7 ] + shred_pos );
    gf_t in08 = _erased[  8 ] ? gf_zero() : gf_ldu( shred[  8 ] + shred_pos );
    gf_t in09 = _erased[  9 ] ? gf_zero() : gf_ldu( shred[  9 ] + shred_pos );
    gf_t in10 = _erased[ 10 ] ? gf_zero() : gf_ldu( shred[ 10 ] + shred_pos );
    gf_t in11 = _erased[ 11 ] ? gf_zero() : gf_ldu( shred[ 11 ] + shred_pos );
    gf_t in12 = _erased[ 12 ] ? gf_zero() : gf_ldu( shred[ 12 ] + shred_pos );
    gf_t in13 = _erased[ 13 ] ? gf_zero() : gf_ldu( shred[ 13 ] + shred_pos );
    gf_t in14 = _erased[ 14 ] ? gf_zero() : gf_ldu( shred[ 14 ] + shred_pos );
    gf_t in15 = _erased[ 15 ] ? gf_zero() : gf_ldu( shred[ 15 ] + shred_pos );
    /* Technically, we only need to multiply the non-erased ones, since
       the erased ones are 0, but we know at least half of them are
       non-erased, and the branch is going to be just as costly as the
       multiply. */
    in00 = GF_MUL_VAR( in00, pi[  0 ] );
    in01 = GF_MUL_VAR( in01, pi[  1 ] );
    in02 = GF_MUL_VAR( in02, pi[  2 ] );
    in03 = GF_MUL_VAR( in03, pi[  3 ] );
    in04 = GF_MUL_VAR( in04, pi[  4 ] );
    in05 = GF_MUL_VAR( in05, pi[  5 ] );
    in06 = GF_MUL_VAR( in06, pi[  6 ] );
    in07 = GF_MUL_VAR( in07, pi[  7 ] );
    in08 = GF_MUL_VAR( in08, pi[  8 ] );
    in09 = GF_MUL_VAR( in09, pi[  9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
#define ALL_VARS in00, in01, in02, in03, in04, in05, in06, in07, in08, in09, in10, in11, in12, in13, in14, in15
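
    /* Recover the polynomial and re-evaluate it (editorial summary
       inferred from the generator names, not authoritative
       documentation): the IFFT converts the pi-scaled evaluations to
       coefficients in the novel polynomial basis, the formal
       derivative applies the erasure-decoding identity of the
       Lin-Chung-Han style construction, and the FFT evaluates the
       result at positions [0,16).  The
       FD_REEDSOL_GENERATE_{IFFT,FDERIV,FFT} implementations are
       definitive. */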
    FD_REEDSOL_GENERATE_IFFT( 16, 0, ALL_VARS );

    FD_REEDSOL_GENERATE_FDERIV( 16, ALL_VARS );

    FD_REEDSOL_GENERATE_FFT( 16, 0, ALL_VARS );

    /* Again, we only need to multiply the erased ones, since we don't
       use the value of the non-erased ones anymore, but I'll take
       multiplies over branches most days. */
    in00 = GF_MUL_VAR( in00, pi[  0 ] );
    in01 = GF_MUL_VAR( in01, pi[  1 ] );
    in02 = GF_MUL_VAR( in02, pi[  2 ] );
    in03 = GF_MUL_VAR( in03, pi[  3 ] );
    in04 = GF_MUL_VAR( in04, pi[  4 ] );
    in05 = GF_MUL_VAR( in05, pi[  5 ] );
    in06 = GF_MUL_VAR( in06, pi[  6 ] );
    in07 = GF_MUL_VAR( in07, pi[  7 ] );
    in08 = GF_MUL_VAR( in08, pi[  8 ] );
    in09 = GF_MUL_VAR( in09, pi[  9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
    /* There are a few cases we have to handle:
       - If i<shred_cnt and erased[ i ], it's an actual erasure, so we
         need to store the generated value.
       - If i<shred_cnt and _erased[ i ] but not erased[ i ], it was a
         value that we ignored to ensure the data lies on a polynomial
         of the right degree, so we need to compare the value we
         generated to the one that was there.
       - If i<shred_cnt and !_erased[ i ], then this is a value we
         actually used in the computation, but we destroyed it, so we
         need to reload the actual value of the shred in order to use
         the IFFT in the next step.
       - If i>=shred_cnt, do nothing, which will keep the value of the
         shred if it existed in the variable. */
#define STORE_COMPARE_RELOAD( n, var ) do{                                                           \
    if(      erased[ n ] )  gf_stu( shred[ n ] + shred_pos, var );                                   \
    else if( _erased[ n ] ) diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) );   \
    else                    var = gf_ldu( shred[ n ] + shred_pos );                                  \
  } while( 0 )
#define STORE_COMPARE( n, var ) do{                                                                  \
    if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var );                                         \
    else              diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) );         \
  } while( 0 )
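
    /* Editorial note: the plain STORE_COMPARE (no reload) suffices for
       shred positions >= 16 below.  Those values are produced directly
       by re-evaluating the recovered polynomial, so after the store or
       compare the variables already hold the true codeword values and
       can feed the next window's IFFT without a reload. */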
    switch( fd_ulong_min( shred_cnt, 16UL ) ) {
      case 16UL: STORE_COMPARE_RELOAD( 15, in15 ); FALLTHRU
      case 15UL: STORE_COMPARE_RELOAD( 14, in14 ); FALLTHRU
      case 14UL: STORE_COMPARE_RELOAD( 13, in13 ); FALLTHRU
      case 13UL: STORE_COMPARE_RELOAD( 12, in12 ); FALLTHRU
      case 12UL: STORE_COMPARE_RELOAD( 11, in11 ); FALLTHRU
      case 11UL: STORE_COMPARE_RELOAD( 10, in10 ); FALLTHRU
      case 10UL: STORE_COMPARE_RELOAD(  9, in09 ); FALLTHRU
      case  9UL: STORE_COMPARE_RELOAD(  8, in08 ); FALLTHRU
      case  8UL: STORE_COMPARE_RELOAD(  7, in07 ); FALLTHRU
      case  7UL: STORE_COMPARE_RELOAD(  6, in06 ); FALLTHRU
      case  6UL: STORE_COMPARE_RELOAD(  5, in05 ); FALLTHRU
      case  5UL: STORE_COMPARE_RELOAD(  4, in04 ); FALLTHRU
      case  4UL: STORE_COMPARE_RELOAD(  3, in03 ); FALLTHRU
      case  3UL: STORE_COMPARE_RELOAD(  2, in02 ); FALLTHRU
      case  2UL: STORE_COMPARE_RELOAD(  1, in01 ); FALLTHRU
      case  1UL: STORE_COMPARE_RELOAD(  0, in00 );
    }
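
    /* Each later window of 16 shreds is produced by undoing the FFT at
       the previous offset and redoing it at the next one, i.e. by
       re-evaluating the same recovered polynomial at positions
       [16,32), [32,48), and so on. */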
    ulong shreds_remaining = shred_cnt-fd_ulong_min( shred_cnt, 16UL );
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 0, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 16, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case 16UL: STORE_COMPARE( 31, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 30, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 29, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 28, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 27, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 26, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 25, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 24, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 23, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 22, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 21, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 20, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 19, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 18, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 17, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 16, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 16, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 32, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case 16UL: STORE_COMPARE( 47, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 46, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 45, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 44, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 43, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 42, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 41, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 40, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 39, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 38, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 37, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 36, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 35, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 34, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 33, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 32, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 32, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 48, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case 16UL: STORE_COMPARE( 63, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 62, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 61, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 60, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 59, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 58, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 57, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 56, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 55, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 54, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 53, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 52, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 51, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 50, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 49, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 48, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 48, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 64, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case 16UL: STORE_COMPARE( 79, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 78, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 77, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 76, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 75, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 74, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 73, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 72, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 71, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 70, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 69, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 68, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 67, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 66, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 65, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 64, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 64, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 80, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case 16UL: STORE_COMPARE( 95, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 94, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 93, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 92, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 91, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 90, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 89, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 88, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 87, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 86, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 85, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 84, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 83, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 82, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 81, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 80, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 80, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 96, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case 16UL: STORE_COMPARE( 111, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 110, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 109, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 108, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 107, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 106, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 105, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 104, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 103, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 102, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 101, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 100, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE(  99, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE(  98, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE(  97, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE(  96, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 96, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 112, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case 16UL: STORE_COMPARE( 127, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 126, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 125, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 124, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 123, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 122, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 121, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 120, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 119, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 118, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 117, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 116, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 115, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 114, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 113, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 112, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 16, 112, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 16, 128, ALL_VARS );
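
      /* Editorial note: the cases stop at 7 here because this
         generated code handles at most 135 shreds total
         (16 + 7*16 + 7), so no more than 7 shreds can remain in the
         final window. */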
      switch( fd_ulong_min( shreds_remaining, 16UL ) ) {
        case  7UL: STORE_COMPARE( 134, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 133, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 132, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 131, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 130, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 129, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 128, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 16UL );
    }
    if( FD_UNLIKELY( GF_ANY( diff ) ) ) return FD_REEDSOL_ERR_CORRUPT;
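
    /* Advance to the next GF_WIDTH-byte chunk.  If shred_sz is not a
       multiple of GF_WIDTH, the final iteration is pulled back to
       start at shred_sz-GF_WIDTH so that loads and stores stay in
       bounds; the overlapped bytes are simply recomputed, which is
       harmless because the computation is deterministic. */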
    shred_pos += GF_WIDTH;
    shred_pos = fd_ulong_if( ((shred_sz-GF_WIDTH)<shred_pos) & (shred_pos<shred_sz), shred_sz-GF_WIDTH, shred_pos );
  }
  return FD_REEDSOL_SUCCESS;
}