Line data Source code
1 : /* Note: This file is auto generated. */
2 : #include "fd_reedsol_ppt.h"
3 : #include "fd_reedsol_fderiv.h"
4 :
/* fd_reedsol_private_recover_var_32: Reed-Solomon erasure recovery for
   the case where at most 32 data shreds are in use (the "_32" variant;
   presumably selected by a dispatcher on data_shred_cnt — confirm in
   fd_reedsol_ppt.h).  Regenerates every erased shred in place from the
   surviving data/parity shreds and simultaneously validates the
   non-erased shreds against the recovered polynomial.

   shred_sz          byte size of each shred's payload
   shred             array of shred_cnt pointers to shred payloads;
                     erased entries are overwritten with recovered data
   data_shred_cnt    number of data shreds
   parity_shred_cnt  number of parity shreds
   erased            erased[i]!=0 iff shred i is missing

   Returns FD_REEDSOL_SUCCESS on success, FD_REEDSOL_ERR_PARTIAL if
   fewer than data_shred_cnt un-erased shreds are available, or
   FD_REEDSOL_ERR_CORRUPT if a surviving shred disagrees with the
   polynomial implied by the others. */
FD_FN_UNSANITIZED int
fd_reedsol_private_recover_var_32( ulong           shred_sz,
                                   uchar * const * shred,
                                   ulong           data_shred_cnt,
                                   ulong           parity_shred_cnt,
                                   uchar const *   erased ) {
  uchar _erased[ 32 ] W_ATTR;
  uchar pi[      32 ] W_ATTR;
  ulong shred_cnt = data_shred_cnt + parity_shred_cnt;
  /* Build the internal erasure map.  Exactly data_shred_cnt un-erased
     shreds (the first ones encountered) are treated as inputs; any
     surplus un-erased shreds are marked "erased" here so the recovered
     polynomial has the right order, and are later *compared* against
     the regenerated values rather than consumed (see the
     STORE_COMPARE_RELOAD cases below). */
  ulong loaded_cnt = 0UL;
  for( ulong i=0UL; i<32UL; i++) {
    int load_shred = ((i<shred_cnt)&(loaded_cnt<data_shred_cnt))&&( erased[ i ]==0 );
    _erased[ i ] = !load_shred;
    loaded_cnt += (ulong)load_shred;
  }
  /* Not enough surviving shreds to reconstruct the data. */
  if( FD_UNLIKELY( loaded_cnt<data_shred_cnt ) ) return FD_REEDSOL_ERR_PARTIAL;

  /* Pi coefficients for the erasure pattern (formal-derivative based
     recovery; see fd_reedsol_ppt.h / fd_reedsol_fderiv.h for the math). */
  fd_reedsol_private_gen_pi_32( _erased, pi );

  /* Store the difference for each shred that was regenerated.  This
     must be 0.  Otherwise there's a corrupt shred. */
  gf_t diff = gf_zero();

  /* Process the shreds GF_WIDTH bytes at a time. */
  for( ulong shred_pos=0UL; shred_pos<shred_sz; /* advanced manually at end of loop */ ) {
    /* Load exactly data_shred_cnt un-erased input shreds into
       their respective vector.  Fill the erased vectors with 0. */
    gf_t in00 = _erased[  0 ] ? gf_zero() : gf_ldu( shred[  0 ] + shred_pos );
    gf_t in01 = _erased[  1 ] ? gf_zero() : gf_ldu( shred[  1 ] + shred_pos );
    gf_t in02 = _erased[  2 ] ? gf_zero() : gf_ldu( shred[  2 ] + shred_pos );
    gf_t in03 = _erased[  3 ] ? gf_zero() : gf_ldu( shred[  3 ] + shred_pos );
    gf_t in04 = _erased[  4 ] ? gf_zero() : gf_ldu( shred[  4 ] + shred_pos );
    gf_t in05 = _erased[  5 ] ? gf_zero() : gf_ldu( shred[  5 ] + shred_pos );
    gf_t in06 = _erased[  6 ] ? gf_zero() : gf_ldu( shred[  6 ] + shred_pos );
    gf_t in07 = _erased[  7 ] ? gf_zero() : gf_ldu( shred[  7 ] + shred_pos );
    gf_t in08 = _erased[  8 ] ? gf_zero() : gf_ldu( shred[  8 ] + shred_pos );
    gf_t in09 = _erased[  9 ] ? gf_zero() : gf_ldu( shred[  9 ] + shred_pos );
    gf_t in10 = _erased[ 10 ] ? gf_zero() : gf_ldu( shred[ 10 ] + shred_pos );
    gf_t in11 = _erased[ 11 ] ? gf_zero() : gf_ldu( shred[ 11 ] + shred_pos );
    gf_t in12 = _erased[ 12 ] ? gf_zero() : gf_ldu( shred[ 12 ] + shred_pos );
    gf_t in13 = _erased[ 13 ] ? gf_zero() : gf_ldu( shred[ 13 ] + shred_pos );
    gf_t in14 = _erased[ 14 ] ? gf_zero() : gf_ldu( shred[ 14 ] + shred_pos );
    gf_t in15 = _erased[ 15 ] ? gf_zero() : gf_ldu( shred[ 15 ] + shred_pos );
    gf_t in16 = _erased[ 16 ] ? gf_zero() : gf_ldu( shred[ 16 ] + shred_pos );
    gf_t in17 = _erased[ 17 ] ? gf_zero() : gf_ldu( shred[ 17 ] + shred_pos );
    gf_t in18 = _erased[ 18 ] ? gf_zero() : gf_ldu( shred[ 18 ] + shred_pos );
    gf_t in19 = _erased[ 19 ] ? gf_zero() : gf_ldu( shred[ 19 ] + shred_pos );
    gf_t in20 = _erased[ 20 ] ? gf_zero() : gf_ldu( shred[ 20 ] + shred_pos );
    gf_t in21 = _erased[ 21 ] ? gf_zero() : gf_ldu( shred[ 21 ] + shred_pos );
    gf_t in22 = _erased[ 22 ] ? gf_zero() : gf_ldu( shred[ 22 ] + shred_pos );
    gf_t in23 = _erased[ 23 ] ? gf_zero() : gf_ldu( shred[ 23 ] + shred_pos );
    gf_t in24 = _erased[ 24 ] ? gf_zero() : gf_ldu( shred[ 24 ] + shred_pos );
    gf_t in25 = _erased[ 25 ] ? gf_zero() : gf_ldu( shred[ 25 ] + shred_pos );
    gf_t in26 = _erased[ 26 ] ? gf_zero() : gf_ldu( shred[ 26 ] + shred_pos );
    gf_t in27 = _erased[ 27 ] ? gf_zero() : gf_ldu( shred[ 27 ] + shred_pos );
    gf_t in28 = _erased[ 28 ] ? gf_zero() : gf_ldu( shred[ 28 ] + shred_pos );
    gf_t in29 = _erased[ 29 ] ? gf_zero() : gf_ldu( shred[ 29 ] + shred_pos );
    gf_t in30 = _erased[ 30 ] ? gf_zero() : gf_ldu( shred[ 30 ] + shred_pos );
    gf_t in31 = _erased[ 31 ] ? gf_zero() : gf_ldu( shred[ 31 ] + shred_pos );
    /* Technically, we only need to multiply the non-erased ones, since
       the erased ones are 0, but we know at least half of them are
       non-erased, and the branch is going to be just as costly as the
       multiply. */
    in00 = GF_MUL_VAR( in00, pi[  0 ] );
    in01 = GF_MUL_VAR( in01, pi[  1 ] );
    in02 = GF_MUL_VAR( in02, pi[  2 ] );
    in03 = GF_MUL_VAR( in03, pi[  3 ] );
    in04 = GF_MUL_VAR( in04, pi[  4 ] );
    in05 = GF_MUL_VAR( in05, pi[  5 ] );
    in06 = GF_MUL_VAR( in06, pi[  6 ] );
    in07 = GF_MUL_VAR( in07, pi[  7 ] );
    in08 = GF_MUL_VAR( in08, pi[  8 ] );
    in09 = GF_MUL_VAR( in09, pi[  9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
    in16 = GF_MUL_VAR( in16, pi[ 16 ] );
    in17 = GF_MUL_VAR( in17, pi[ 17 ] );
    in18 = GF_MUL_VAR( in18, pi[ 18 ] );
    in19 = GF_MUL_VAR( in19, pi[ 19 ] );
    in20 = GF_MUL_VAR( in20, pi[ 20 ] );
    in21 = GF_MUL_VAR( in21, pi[ 21 ] );
    in22 = GF_MUL_VAR( in22, pi[ 22 ] );
    in23 = GF_MUL_VAR( in23, pi[ 23 ] );
    in24 = GF_MUL_VAR( in24, pi[ 24 ] );
    in25 = GF_MUL_VAR( in25, pi[ 25 ] );
    in26 = GF_MUL_VAR( in26, pi[ 26 ] );
    in27 = GF_MUL_VAR( in27, pi[ 27 ] );
    in28 = GF_MUL_VAR( in28, pi[ 28 ] );
    in29 = GF_MUL_VAR( in29, pi[ 29 ] );
    in30 = GF_MUL_VAR( in30, pi[ 30 ] );
    in31 = GF_MUL_VAR( in31, pi[ 31 ] );
#define ALL_VARS in00, in01, in02, in03, in04, in05, in06, in07, in08, in09, in10, in11, in12, in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, in24, in25, in26, in27, in28, in29, in30, in31

    /* IFFT -> formal derivative -> FFT reconstructs the erased values
       at evaluation points 0..31 (the formal-derivative recovery
       algorithm; see fd_reedsol_fderiv.h). */
    FD_REEDSOL_GENERATE_IFFT( 32,  0, ALL_VARS );

    FD_REEDSOL_GENERATE_FDERIV( 32, ALL_VARS );

    FD_REEDSOL_GENERATE_FFT( 32,  0, ALL_VARS );

    /* Again, we only need to multiply the erased ones, since we don't
       use the value of the non-erased ones anymore, but I'll take
       multiplies over branches most days. */
    in00 = GF_MUL_VAR( in00, pi[  0 ] );
    in01 = GF_MUL_VAR( in01, pi[  1 ] );
    in02 = GF_MUL_VAR( in02, pi[  2 ] );
    in03 = GF_MUL_VAR( in03, pi[  3 ] );
    in04 = GF_MUL_VAR( in04, pi[  4 ] );
    in05 = GF_MUL_VAR( in05, pi[  5 ] );
    in06 = GF_MUL_VAR( in06, pi[  6 ] );
    in07 = GF_MUL_VAR( in07, pi[  7 ] );
    in08 = GF_MUL_VAR( in08, pi[  8 ] );
    in09 = GF_MUL_VAR( in09, pi[  9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
    in16 = GF_MUL_VAR( in16, pi[ 16 ] );
    in17 = GF_MUL_VAR( in17, pi[ 17 ] );
    in18 = GF_MUL_VAR( in18, pi[ 18 ] );
    in19 = GF_MUL_VAR( in19, pi[ 19 ] );
    in20 = GF_MUL_VAR( in20, pi[ 20 ] );
    in21 = GF_MUL_VAR( in21, pi[ 21 ] );
    in22 = GF_MUL_VAR( in22, pi[ 22 ] );
    in23 = GF_MUL_VAR( in23, pi[ 23 ] );
    in24 = GF_MUL_VAR( in24, pi[ 24 ] );
    in25 = GF_MUL_VAR( in25, pi[ 25 ] );
    in26 = GF_MUL_VAR( in26, pi[ 26 ] );
    in27 = GF_MUL_VAR( in27, pi[ 27 ] );
    in28 = GF_MUL_VAR( in28, pi[ 28 ] );
    in29 = GF_MUL_VAR( in29, pi[ 29 ] );
    in30 = GF_MUL_VAR( in30, pi[ 30 ] );
    in31 = GF_MUL_VAR( in31, pi[ 31 ] );
    /* There are a couple of cases we have to handle:
       - If i<shred_cnt and erased[ i ], it's an actual erasure, so we
         need to store the generated value.
       - If i<shred_cnt and _erased[ i ] but not erased[ i ], it was a
         value that we ignored to ensure the data lies on a
         polynomial of the right order, so we need to compare the
         value we generated to the one that was there.
       - If i<shred_cnt and !_erased[ i ], then this is a value we
         actually used in the computation, but we destroyed it, so we
         need to reload the actual value of the shred in order to use the
         IFFT in the next step.
       - If i>=shred_cnt, do nothing, which will keep the value of the
         shred if it existed in the variable. */
#define STORE_COMPARE_RELOAD( n, var )                                                            \
      do{                                                                                         \
        if(      erased[ n ] )  gf_stu( shred[ n ] + shred_pos, var );                            \
        else if( _erased[ n ] ) diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
        else                    var = gf_ldu( shred[ n ] + shred_pos );                           \
      } while( 0 )
#define STORE_COMPARE( n, var )                                                                   \
      do{                                                                                         \
        if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var );                                  \
        else              diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) );  \
      } while( 0 )
    /* Store/compare/reload shreds 0..min(shred_cnt,32)-1.  The switch
       deliberately falls through so that exactly shred_cnt cases run
       (highest index first). */
    switch( fd_ulong_min( shred_cnt, 32UL ) ) {
      case 32UL: STORE_COMPARE_RELOAD( 31, in31 ); FALLTHRU
      case 31UL: STORE_COMPARE_RELOAD( 30, in30 ); FALLTHRU
      case 30UL: STORE_COMPARE_RELOAD( 29, in29 ); FALLTHRU
      case 29UL: STORE_COMPARE_RELOAD( 28, in28 ); FALLTHRU
      case 28UL: STORE_COMPARE_RELOAD( 27, in27 ); FALLTHRU
      case 27UL: STORE_COMPARE_RELOAD( 26, in26 ); FALLTHRU
      case 26UL: STORE_COMPARE_RELOAD( 25, in25 ); FALLTHRU
      case 25UL: STORE_COMPARE_RELOAD( 24, in24 ); FALLTHRU
      case 24UL: STORE_COMPARE_RELOAD( 23, in23 ); FALLTHRU
      case 23UL: STORE_COMPARE_RELOAD( 22, in22 ); FALLTHRU
      case 22UL: STORE_COMPARE_RELOAD( 21, in21 ); FALLTHRU
      case 21UL: STORE_COMPARE_RELOAD( 20, in20 ); FALLTHRU
      case 20UL: STORE_COMPARE_RELOAD( 19, in19 ); FALLTHRU
      case 19UL: STORE_COMPARE_RELOAD( 18, in18 ); FALLTHRU
      case 18UL: STORE_COMPARE_RELOAD( 17, in17 ); FALLTHRU
      case 17UL: STORE_COMPARE_RELOAD( 16, in16 ); FALLTHRU
      case 16UL: STORE_COMPARE_RELOAD( 15, in15 ); FALLTHRU
      case 15UL: STORE_COMPARE_RELOAD( 14, in14 ); FALLTHRU
      case 14UL: STORE_COMPARE_RELOAD( 13, in13 ); FALLTHRU
      case 13UL: STORE_COMPARE_RELOAD( 12, in12 ); FALLTHRU
      case 12UL: STORE_COMPARE_RELOAD( 11, in11 ); FALLTHRU
      case 11UL: STORE_COMPARE_RELOAD( 10, in10 ); FALLTHRU
      case 10UL: STORE_COMPARE_RELOAD(  9, in09 ); FALLTHRU
      case  9UL: STORE_COMPARE_RELOAD(  8, in08 ); FALLTHRU
      case  8UL: STORE_COMPARE_RELOAD(  7, in07 ); FALLTHRU
      case  7UL: STORE_COMPARE_RELOAD(  6, in06 ); FALLTHRU
      case  6UL: STORE_COMPARE_RELOAD(  5, in05 ); FALLTHRU
      case  5UL: STORE_COMPARE_RELOAD(  4, in04 ); FALLTHRU
      case  4UL: STORE_COMPARE_RELOAD(  3, in03 ); FALLTHRU
      case  3UL: STORE_COMPARE_RELOAD(  2, in02 ); FALLTHRU
      case  2UL: STORE_COMPARE_RELOAD(  1, in01 ); FALLTHRU
      case  1UL: STORE_COMPARE_RELOAD(  0, in00 );
    }

    /* Shreds 32+ are produced 32 at a time by extrapolating the
       recovered polynomial to the next 32 evaluation points
       (IFFT at the previous offset, FFT at the next). */
    ulong shreds_remaining = shred_cnt-fd_ulong_min( shred_cnt, 32UL );
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 32,  0, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT(  32, 32, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case 32UL: STORE_COMPARE( 63, in31 ); FALLTHRU
        case 31UL: STORE_COMPARE( 62, in30 ); FALLTHRU
        case 30UL: STORE_COMPARE( 61, in29 ); FALLTHRU
        case 29UL: STORE_COMPARE( 60, in28 ); FALLTHRU
        case 28UL: STORE_COMPARE( 59, in27 ); FALLTHRU
        case 27UL: STORE_COMPARE( 58, in26 ); FALLTHRU
        case 26UL: STORE_COMPARE( 57, in25 ); FALLTHRU
        case 25UL: STORE_COMPARE( 56, in24 ); FALLTHRU
        case 24UL: STORE_COMPARE( 55, in23 ); FALLTHRU
        case 23UL: STORE_COMPARE( 54, in22 ); FALLTHRU
        case 22UL: STORE_COMPARE( 53, in21 ); FALLTHRU
        case 21UL: STORE_COMPARE( 52, in20 ); FALLTHRU
        case 20UL: STORE_COMPARE( 51, in19 ); FALLTHRU
        case 19UL: STORE_COMPARE( 50, in18 ); FALLTHRU
        case 18UL: STORE_COMPARE( 49, in17 ); FALLTHRU
        case 17UL: STORE_COMPARE( 48, in16 ); FALLTHRU
        case 16UL: STORE_COMPARE( 47, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 46, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 45, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 44, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 43, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 42, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 41, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 40, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 39, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 38, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 37, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 36, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 35, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 34, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 33, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 32, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 32, 32, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT(  32, 64, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case 32UL: STORE_COMPARE( 95, in31 ); FALLTHRU
        case 31UL: STORE_COMPARE( 94, in30 ); FALLTHRU
        case 30UL: STORE_COMPARE( 93, in29 ); FALLTHRU
        case 29UL: STORE_COMPARE( 92, in28 ); FALLTHRU
        case 28UL: STORE_COMPARE( 91, in27 ); FALLTHRU
        case 27UL: STORE_COMPARE( 90, in26 ); FALLTHRU
        case 26UL: STORE_COMPARE( 89, in25 ); FALLTHRU
        case 25UL: STORE_COMPARE( 88, in24 ); FALLTHRU
        case 24UL: STORE_COMPARE( 87, in23 ); FALLTHRU
        case 23UL: STORE_COMPARE( 86, in22 ); FALLTHRU
        case 22UL: STORE_COMPARE( 85, in21 ); FALLTHRU
        case 21UL: STORE_COMPARE( 84, in20 ); FALLTHRU
        case 20UL: STORE_COMPARE( 83, in19 ); FALLTHRU
        case 19UL: STORE_COMPARE( 82, in18 ); FALLTHRU
        case 18UL: STORE_COMPARE( 81, in17 ); FALLTHRU
        case 17UL: STORE_COMPARE( 80, in16 ); FALLTHRU
        case 16UL: STORE_COMPARE( 79, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 78, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 77, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 76, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 75, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 74, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 73, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 72, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 71, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 70, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 69, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 68, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 67, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 66, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 65, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 64, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 32, 64, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT(  32, 96, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case 32UL: STORE_COMPARE( 127, in31 ); FALLTHRU
        case 31UL: STORE_COMPARE( 126, in30 ); FALLTHRU
        case 30UL: STORE_COMPARE( 125, in29 ); FALLTHRU
        case 29UL: STORE_COMPARE( 124, in28 ); FALLTHRU
        case 28UL: STORE_COMPARE( 123, in27 ); FALLTHRU
        case 27UL: STORE_COMPARE( 122, in26 ); FALLTHRU
        case 26UL: STORE_COMPARE( 121, in25 ); FALLTHRU
        case 25UL: STORE_COMPARE( 120, in24 ); FALLTHRU
        case 24UL: STORE_COMPARE( 119, in23 ); FALLTHRU
        case 23UL: STORE_COMPARE( 118, in22 ); FALLTHRU
        case 22UL: STORE_COMPARE( 117, in21 ); FALLTHRU
        case 21UL: STORE_COMPARE( 116, in20 ); FALLTHRU
        case 20UL: STORE_COMPARE( 115, in19 ); FALLTHRU
        case 19UL: STORE_COMPARE( 114, in18 ); FALLTHRU
        case 18UL: STORE_COMPARE( 113, in17 ); FALLTHRU
        case 17UL: STORE_COMPARE( 112, in16 ); FALLTHRU
        case 16UL: STORE_COMPARE( 111, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 110, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 109, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 108, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 107, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 106, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 105, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 104, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 103, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 102, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 101, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 100, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE(  99, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE(  98, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE(  97, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE(  96, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 32,  96, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT(  32, 128, ALL_VARS );

      /* Only 6 cases here: shred_cnt tops out at 134 (presumably the
         protocol's max data+parity shred count — confirm against
         fd_reedsol.h), so at most 6 shreds remain past index 127. */
      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case  6UL: STORE_COMPARE( 133, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 132, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 131, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 130, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 129, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 128, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    /* Any accumulated mismatch between a regenerated value and the
       shred that was actually received means corruption. */
    if( FD_UNLIKELY( GF_ANY( diff ) ) ) return FD_REEDSOL_ERR_CORRUPT;
    shred_pos += GF_WIDTH;
    /* If shred_sz is not a multiple of GF_WIDTH, redo the last chunk
       aligned to the end of the shred (overlapping the previous chunk)
       rather than reading/writing past shred_sz. */
    shred_pos = fd_ulong_if( ((shred_sz-GF_WIDTH)<shred_pos) & (shred_pos<shred_sz), shred_sz-GF_WIDTH, shred_pos );
#undef STORE_COMPARE_RELOAD
#undef STORE_COMPARE
#undef ALL_VARS
  }
  return FD_REEDSOL_SUCCESS;
}
|