/* Note: This file is auto generated. */
#include "fd_reedsol_ppt.h"
#include "fd_reedsol_fderiv.h"

FD_FN_UNSANITIZED int
fd_reedsol_private_recover_var_32( ulong           shred_sz,
                                   uchar * const * shred,
                                   ulong           data_shred_cnt,
                                   ulong           parity_shred_cnt,
                                   uchar const *   erased ) {
  uchar _erased[ 32 ] W_ATTR;
  uchar pi[ 32 ] W_ATTR;
  ulong shred_cnt = data_shred_cnt + parity_shred_cnt;
  ulong loaded_cnt = 0UL;
  for( ulong i=0UL; i<32UL; i++) {
    int load_shred = ((i<shred_cnt)&(loaded_cnt<data_shred_cnt))&&( erased[ i ]==0 );
    _erased[ i ] = !load_shred;
    loaded_cnt += (ulong)load_shred;
  }
  if( FD_UNLIKELY( loaded_cnt<data_shred_cnt ) ) return FD_REEDSOL_ERR_PARTIAL;
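  /* At this point _erased[ i ] flags every position the math below
     treats as an erasure: shreds flagged in erased[ i ], any un-erased
     shreds after the first data_shred_cnt un-erased ones (these get
     regenerated and cross-checked against the received values), and
     positions at or beyond shred_cnt. */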

  fd_reedsol_private_gen_pi_32( _erased, pi );
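  /* pi[ i ] holds the per-position scale factors that gen_pi derives
     from the erasure pattern alone; they are applied once before and
     once after the IFFT/FDERIV/FFT pipeline below. */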

  /* Accumulate the difference between each shred value we regenerate
     and the value that was already present (where one exists).  The
     accumulated difference must be zero; otherwise a shred is
     corrupt. */
  gf_t diff = gf_zero();

  for( ulong shred_pos=0UL; shred_pos<shred_sz; /* advanced manually at end of loop */ ) {
    /* Load exactly data_shred_cnt un-erased input shreds into
       their respective vector.  Fill the erased vectors with 0. */
    gf_t in00 = _erased[  0 ] ? gf_zero() : gf_ldu( shred[  0 ] + shred_pos );
    gf_t in01 = _erased[  1 ] ? gf_zero() : gf_ldu( shred[  1 ] + shred_pos );
    gf_t in02 = _erased[  2 ] ? gf_zero() : gf_ldu( shred[  2 ] + shred_pos );
    gf_t in03 = _erased[  3 ] ? gf_zero() : gf_ldu( shred[  3 ] + shred_pos );
    gf_t in04 = _erased[  4 ] ? gf_zero() : gf_ldu( shred[  4 ] + shred_pos );
    gf_t in05 = _erased[  5 ] ? gf_zero() : gf_ldu( shred[  5 ] + shred_pos );
    gf_t in06 = _erased[  6 ] ? gf_zero() : gf_ldu( shred[  6 ] + shred_pos );
    gf_t in07 = _erased[  7 ] ? gf_zero() : gf_ldu( shred[  7 ] + shred_pos );
    gf_t in08 = _erased[  8 ] ? gf_zero() : gf_ldu( shred[  8 ] + shred_pos );
    gf_t in09 = _erased[  9 ] ? gf_zero() : gf_ldu( shred[  9 ] + shred_pos );
    gf_t in10 = _erased[ 10 ] ? gf_zero() : gf_ldu( shred[ 10 ] + shred_pos );
    gf_t in11 = _erased[ 11 ] ? gf_zero() : gf_ldu( shred[ 11 ] + shred_pos );
    gf_t in12 = _erased[ 12 ] ? gf_zero() : gf_ldu( shred[ 12 ] + shred_pos );
    gf_t in13 = _erased[ 13 ] ? gf_zero() : gf_ldu( shred[ 13 ] + shred_pos );
    gf_t in14 = _erased[ 14 ] ? gf_zero() : gf_ldu( shred[ 14 ] + shred_pos );
    gf_t in15 = _erased[ 15 ] ? gf_zero() : gf_ldu( shred[ 15 ] + shred_pos );
    gf_t in16 = _erased[ 16 ] ? gf_zero() : gf_ldu( shred[ 16 ] + shred_pos );
    gf_t in17 = _erased[ 17 ] ? gf_zero() : gf_ldu( shred[ 17 ] + shred_pos );
    gf_t in18 = _erased[ 18 ] ? gf_zero() : gf_ldu( shred[ 18 ] + shred_pos );
    gf_t in19 = _erased[ 19 ] ? gf_zero() : gf_ldu( shred[ 19 ] + shred_pos );
    gf_t in20 = _erased[ 20 ] ? gf_zero() : gf_ldu( shred[ 20 ] + shred_pos );
    gf_t in21 = _erased[ 21 ] ? gf_zero() : gf_ldu( shred[ 21 ] + shred_pos );
    gf_t in22 = _erased[ 22 ] ? gf_zero() : gf_ldu( shred[ 22 ] + shred_pos );
    gf_t in23 = _erased[ 23 ] ? gf_zero() : gf_ldu( shred[ 23 ] + shred_pos );
    gf_t in24 = _erased[ 24 ] ? gf_zero() : gf_ldu( shred[ 24 ] + shred_pos );
    gf_t in25 = _erased[ 25 ] ? gf_zero() : gf_ldu( shred[ 25 ] + shred_pos );
    gf_t in26 = _erased[ 26 ] ? gf_zero() : gf_ldu( shred[ 26 ] + shred_pos );
    gf_t in27 = _erased[ 27 ] ? gf_zero() : gf_ldu( shred[ 27 ] + shred_pos );
    gf_t in28 = _erased[ 28 ] ? gf_zero() : gf_ldu( shred[ 28 ] + shred_pos );
    gf_t in29 = _erased[ 29 ] ? gf_zero() : gf_ldu( shred[ 29 ] + shred_pos );
    gf_t in30 = _erased[ 30 ] ? gf_zero() : gf_ldu( shred[ 30 ] + shred_pos );
    gf_t in31 = _erased[ 31 ] ? gf_zero() : gf_ldu( shred[ 31 ] + shred_pos );
    /* Technically, we only need to multiply the non-erased ones, since
       the erased ones are 0, but we know at least half of them are
       non-erased, and the branch is going to be just as costly as the
       multiply. */
    in00 = GF_MUL_VAR( in00, pi[  0 ] );
    in01 = GF_MUL_VAR( in01, pi[  1 ] );
    in02 = GF_MUL_VAR( in02, pi[  2 ] );
    in03 = GF_MUL_VAR( in03, pi[  3 ] );
    in04 = GF_MUL_VAR( in04, pi[  4 ] );
    in05 = GF_MUL_VAR( in05, pi[  5 ] );
    in06 = GF_MUL_VAR( in06, pi[  6 ] );
    in07 = GF_MUL_VAR( in07, pi[  7 ] );
    in08 = GF_MUL_VAR( in08, pi[  8 ] );
    in09 = GF_MUL_VAR( in09, pi[  9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
    in16 = GF_MUL_VAR( in16, pi[ 16 ] );
    in17 = GF_MUL_VAR( in17, pi[ 17 ] );
    in18 = GF_MUL_VAR( in18, pi[ 18 ] );
    in19 = GF_MUL_VAR( in19, pi[ 19 ] );
    in20 = GF_MUL_VAR( in20, pi[ 20 ] );
    in21 = GF_MUL_VAR( in21, pi[ 21 ] );
    in22 = GF_MUL_VAR( in22, pi[ 22 ] );
    in23 = GF_MUL_VAR( in23, pi[ 23 ] );
    in24 = GF_MUL_VAR( in24, pi[ 24 ] );
    in25 = GF_MUL_VAR( in25, pi[ 25 ] );
    in26 = GF_MUL_VAR( in26, pi[ 26 ] );
    in27 = GF_MUL_VAR( in27, pi[ 27 ] );
    in28 = GF_MUL_VAR( in28, pi[ 28 ] );
    in29 = GF_MUL_VAR( in29, pi[ 29 ] );
    in30 = GF_MUL_VAR( in30, pi[ 30 ] );
    in31 = GF_MUL_VAR( in31, pi[ 31 ] );
#define ALL_VARS in00, in01, in02, in03, in04, in05, in06, in07, in08, in09, in10, in11, in12, in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, in24, in25, in26, in27, in28, in29, in30, in31
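    /* The recovery itself: the IFFT takes the pi-weighted values to
       coefficient form, FDERIV takes the formal derivative there, and
       the FFT evaluates the result back at positions 0..31.  After the
       second scaling by pi[ i ] below, the erased positions hold the
       regenerated shred values. */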

    FD_REEDSOL_GENERATE_IFFT( 32, 0, ALL_VARS );

    FD_REEDSOL_GENERATE_FDERIV( 32, ALL_VARS );

    FD_REEDSOL_GENERATE_FFT( 32, 0, ALL_VARS );

    /* Again, we only need to multiply the erased ones, since we don't
       use the value of the non-erased ones anymore, but I'll take
       multiplies over branches most days. */
    in00 = GF_MUL_VAR( in00, pi[  0 ] );
    in01 = GF_MUL_VAR( in01, pi[  1 ] );
    in02 = GF_MUL_VAR( in02, pi[  2 ] );
    in03 = GF_MUL_VAR( in03, pi[  3 ] );
    in04 = GF_MUL_VAR( in04, pi[  4 ] );
    in05 = GF_MUL_VAR( in05, pi[  5 ] );
    in06 = GF_MUL_VAR( in06, pi[  6 ] );
    in07 = GF_MUL_VAR( in07, pi[  7 ] );
    in08 = GF_MUL_VAR( in08, pi[  8 ] );
    in09 = GF_MUL_VAR( in09, pi[  9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
    in16 = GF_MUL_VAR( in16, pi[ 16 ] );
    in17 = GF_MUL_VAR( in17, pi[ 17 ] );
    in18 = GF_MUL_VAR( in18, pi[ 18 ] );
    in19 = GF_MUL_VAR( in19, pi[ 19 ] );
    in20 = GF_MUL_VAR( in20, pi[ 20 ] );
    in21 = GF_MUL_VAR( in21, pi[ 21 ] );
    in22 = GF_MUL_VAR( in22, pi[ 22 ] );
    in23 = GF_MUL_VAR( in23, pi[ 23 ] );
    in24 = GF_MUL_VAR( in24, pi[ 24 ] );
    in25 = GF_MUL_VAR( in25, pi[ 25 ] );
    in26 = GF_MUL_VAR( in26, pi[ 26 ] );
    in27 = GF_MUL_VAR( in27, pi[ 27 ] );
    in28 = GF_MUL_VAR( in28, pi[ 28 ] );
    in29 = GF_MUL_VAR( in29, pi[ 29 ] );
    in30 = GF_MUL_VAR( in30, pi[ 30 ] );
    in31 = GF_MUL_VAR( in31, pi[ 31 ] );
    /* There are a few cases we have to handle:
        - If i<shred_cnt and erased[ i ], it's an actual erasure, so we
          need to store the generated value.
        - If i<shred_cnt and _erased[ i ] but not erased[ i ], it was a
          value that we ignored to ensure the data lies on a
          polynomial of the right degree, so we need to compare the
          value we generated to the one that was there.
        - If i<shred_cnt and !_erased[ i ], then this is a value we
          actually used in the computation, but we clobbered it with the
          scaling above, so we need to reload the actual value of the
          shred in order to use the IFFT in the next step.
        - If i>=shred_cnt, do nothing; the variable already holds the
          value that shred i would have had if it existed, which is
          exactly what the next IFFT needs. */
#define STORE_COMPARE_RELOAD( n, var ) do{                                                                \
          if(      erased[ n ] )  gf_stu( shred[ n ] + shred_pos, var );                                  \
          else if( _erased[ n ] ) diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) );  \
          else                    var = gf_ldu( shred[ n ] + shred_pos );                                 \
        } while( 0 )
#define STORE_COMPARE( n, var ) do{                                                                       \
          if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var );                                        \
          else              diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) );        \
        } while( 0 )
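    /* Note: GF_ADD is XOR in GF(2^8), so GF_ADD( var, received ) is
       zero exactly when the regenerated value matches the received one;
       GF_OR just accumulates any mismatch bits into diff. */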
    switch( fd_ulong_min( shred_cnt, 32UL ) ) {
      case 32UL: STORE_COMPARE_RELOAD( 31, in31 ); FALLTHRU
      case 31UL: STORE_COMPARE_RELOAD( 30, in30 ); FALLTHRU
      case 30UL: STORE_COMPARE_RELOAD( 29, in29 ); FALLTHRU
      case 29UL: STORE_COMPARE_RELOAD( 28, in28 ); FALLTHRU
      case 28UL: STORE_COMPARE_RELOAD( 27, in27 ); FALLTHRU
      case 27UL: STORE_COMPARE_RELOAD( 26, in26 ); FALLTHRU
      case 26UL: STORE_COMPARE_RELOAD( 25, in25 ); FALLTHRU
      case 25UL: STORE_COMPARE_RELOAD( 24, in24 ); FALLTHRU
      case 24UL: STORE_COMPARE_RELOAD( 23, in23 ); FALLTHRU
      case 23UL: STORE_COMPARE_RELOAD( 22, in22 ); FALLTHRU
      case 22UL: STORE_COMPARE_RELOAD( 21, in21 ); FALLTHRU
      case 21UL: STORE_COMPARE_RELOAD( 20, in20 ); FALLTHRU
      case 20UL: STORE_COMPARE_RELOAD( 19, in19 ); FALLTHRU
      case 19UL: STORE_COMPARE_RELOAD( 18, in18 ); FALLTHRU
      case 18UL: STORE_COMPARE_RELOAD( 17, in17 ); FALLTHRU
      case 17UL: STORE_COMPARE_RELOAD( 16, in16 ); FALLTHRU
      case 16UL: STORE_COMPARE_RELOAD( 15, in15 ); FALLTHRU
      case 15UL: STORE_COMPARE_RELOAD( 14, in14 ); FALLTHRU
      case 14UL: STORE_COMPARE_RELOAD( 13, in13 ); FALLTHRU
      case 13UL: STORE_COMPARE_RELOAD( 12, in12 ); FALLTHRU
      case 12UL: STORE_COMPARE_RELOAD( 11, in11 ); FALLTHRU
      case 11UL: STORE_COMPARE_RELOAD( 10, in10 ); FALLTHRU
      case 10UL: STORE_COMPARE_RELOAD(  9, in09 ); FALLTHRU
      case  9UL: STORE_COMPARE_RELOAD(  8, in08 ); FALLTHRU
      case  8UL: STORE_COMPARE_RELOAD(  7, in07 ); FALLTHRU
      case  7UL: STORE_COMPARE_RELOAD(  6, in06 ); FALLTHRU
      case  6UL: STORE_COMPARE_RELOAD(  5, in05 ); FALLTHRU
      case  5UL: STORE_COMPARE_RELOAD(  4, in04 ); FALLTHRU
      case  4UL: STORE_COMPARE_RELOAD(  3, in03 ); FALLTHRU
      case  3UL: STORE_COMPARE_RELOAD(  2, in02 ); FALLTHRU
      case  2UL: STORE_COMPARE_RELOAD(  1, in01 ); FALLTHRU
      case  1UL: STORE_COMPARE_RELOAD(  0, in00 );
    }

    ulong shreds_remaining = shred_cnt-fd_ulong_min( shred_cnt, 32UL );
    if( shreds_remaining>0UL ) {
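      /* Undo the FFT at offset 0 to get back the coefficient form, then
         re-evaluate at positions 32..63 to produce the next batch of up
         to 32 shreds. */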
      FD_REEDSOL_GENERATE_IFFT( 32, 0, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 32, 32, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case 32UL: STORE_COMPARE( 63, in31 ); FALLTHRU
        case 31UL: STORE_COMPARE( 62, in30 ); FALLTHRU
        case 30UL: STORE_COMPARE( 61, in29 ); FALLTHRU
        case 29UL: STORE_COMPARE( 60, in28 ); FALLTHRU
        case 28UL: STORE_COMPARE( 59, in27 ); FALLTHRU
        case 27UL: STORE_COMPARE( 58, in26 ); FALLTHRU
        case 26UL: STORE_COMPARE( 57, in25 ); FALLTHRU
        case 25UL: STORE_COMPARE( 56, in24 ); FALLTHRU
        case 24UL: STORE_COMPARE( 55, in23 ); FALLTHRU
        case 23UL: STORE_COMPARE( 54, in22 ); FALLTHRU
        case 22UL: STORE_COMPARE( 53, in21 ); FALLTHRU
        case 21UL: STORE_COMPARE( 52, in20 ); FALLTHRU
        case 20UL: STORE_COMPARE( 51, in19 ); FALLTHRU
        case 19UL: STORE_COMPARE( 50, in18 ); FALLTHRU
        case 18UL: STORE_COMPARE( 49, in17 ); FALLTHRU
        case 17UL: STORE_COMPARE( 48, in16 ); FALLTHRU
        case 16UL: STORE_COMPARE( 47, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 46, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 45, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 44, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 43, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 42, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 41, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 40, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 39, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 38, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 37, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 36, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 35, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 34, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 33, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 32, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 32, 32, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 32, 64, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case 32UL: STORE_COMPARE( 95, in31 ); FALLTHRU
        case 31UL: STORE_COMPARE( 94, in30 ); FALLTHRU
        case 30UL: STORE_COMPARE( 93, in29 ); FALLTHRU
        case 29UL: STORE_COMPARE( 92, in28 ); FALLTHRU
        case 28UL: STORE_COMPARE( 91, in27 ); FALLTHRU
        case 27UL: STORE_COMPARE( 90, in26 ); FALLTHRU
        case 26UL: STORE_COMPARE( 89, in25 ); FALLTHRU
        case 25UL: STORE_COMPARE( 88, in24 ); FALLTHRU
        case 24UL: STORE_COMPARE( 87, in23 ); FALLTHRU
        case 23UL: STORE_COMPARE( 86, in22 ); FALLTHRU
        case 22UL: STORE_COMPARE( 85, in21 ); FALLTHRU
        case 21UL: STORE_COMPARE( 84, in20 ); FALLTHRU
        case 20UL: STORE_COMPARE( 83, in19 ); FALLTHRU
        case 19UL: STORE_COMPARE( 82, in18 ); FALLTHRU
        case 18UL: STORE_COMPARE( 81, in17 ); FALLTHRU
        case 17UL: STORE_COMPARE( 80, in16 ); FALLTHRU
        case 16UL: STORE_COMPARE( 79, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 78, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 77, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 76, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 75, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 74, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 73, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 72, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 71, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 70, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 69, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 68, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 67, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 66, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 65, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 64, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 32, 64, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 32, 96, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case 32UL: STORE_COMPARE( 127, in31 ); FALLTHRU
        case 31UL: STORE_COMPARE( 126, in30 ); FALLTHRU
        case 30UL: STORE_COMPARE( 125, in29 ); FALLTHRU
        case 29UL: STORE_COMPARE( 124, in28 ); FALLTHRU
        case 28UL: STORE_COMPARE( 123, in27 ); FALLTHRU
        case 27UL: STORE_COMPARE( 122, in26 ); FALLTHRU
        case 26UL: STORE_COMPARE( 121, in25 ); FALLTHRU
        case 25UL: STORE_COMPARE( 120, in24 ); FALLTHRU
        case 24UL: STORE_COMPARE( 119, in23 ); FALLTHRU
        case 23UL: STORE_COMPARE( 118, in22 ); FALLTHRU
        case 22UL: STORE_COMPARE( 117, in21 ); FALLTHRU
        case 21UL: STORE_COMPARE( 116, in20 ); FALLTHRU
        case 20UL: STORE_COMPARE( 115, in19 ); FALLTHRU
        case 19UL: STORE_COMPARE( 114, in18 ); FALLTHRU
        case 18UL: STORE_COMPARE( 113, in17 ); FALLTHRU
        case 17UL: STORE_COMPARE( 112, in16 ); FALLTHRU
        case 16UL: STORE_COMPARE( 111, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 110, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 109, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 108, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 107, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 106, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 105, in09 ); FALLTHRU
        case  9UL: STORE_COMPARE( 104, in08 ); FALLTHRU
        case  8UL: STORE_COMPARE( 103, in07 ); FALLTHRU
        case  7UL: STORE_COMPARE( 102, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 101, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 100, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE(  99, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE(  98, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE(  97, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE(  96, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    if( shreds_remaining>0UL ) {
      FD_REEDSOL_GENERATE_IFFT( 32, 96, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 32, 128, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 32UL ) ) {
        case  7UL: STORE_COMPARE( 134, in06 ); FALLTHRU
        case  6UL: STORE_COMPARE( 133, in05 ); FALLTHRU
        case  5UL: STORE_COMPARE( 132, in04 ); FALLTHRU
        case  4UL: STORE_COMPARE( 131, in03 ); FALLTHRU
        case  3UL: STORE_COMPARE( 130, in02 ); FALLTHRU
        case  2UL: STORE_COMPARE( 129, in01 ); FALLTHRU
        case  1UL: STORE_COMPARE( 128, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 32UL );
    }
    if( FD_UNLIKELY( GF_ANY( diff ) ) ) return FD_REEDSOL_ERR_CORRUPT;
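    /* Advance by a full GF_WIDTH bytes.  If that would leave fewer than
       GF_WIDTH bytes for the next pass, pull the position back so the
       final iteration covers the last GF_WIDTH bytes ending exactly at
       shred_sz (reprocessing a few bytes) rather than running past the
       end of the shreds. */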
    shred_pos += GF_WIDTH;
    shred_pos = fd_ulong_if( ((shred_sz-GF_WIDTH)<shred_pos) & (shred_pos<shred_sz), shred_sz-GF_WIDTH, shred_pos );
  }
  return FD_REEDSOL_SUCCESS;
}