/* Note: This file is auto generated. */
#include "fd_reedsol_ppt.h"
#include "fd_reedsol_fderiv.h"

FD_FN_UNSANITIZED int
fd_reedsol_private_recover_var_64( ulong           shred_sz,
                                   uchar * const * shred,
                                   ulong           data_shred_cnt,
                                   ulong           parity_shred_cnt,
                                   uchar const *   erased ) {
  uchar _erased[ 64 ] W_ATTR;
  uchar pi[ 64 ] W_ATTR;
  ulong shred_cnt  = data_shred_cnt + parity_shred_cnt;
  ulong loaded_cnt = 0UL;
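  /* Branchlessly select exactly the first data_shred_cnt un-erased
     shreds as inputs to the recovery math.  Any further un-erased
     shreds are treated as erased here; they are regenerated and then
     checked against the received copies below. */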
  for( ulong i=0UL; i<64UL; i++ ) {
    int load_shred = ((i<shred_cnt)&(loaded_cnt<data_shred_cnt))&&( erased[ i ]==0 );
    _erased[ i ] = !load_shred;
    loaded_cnt += (ulong)load_shred;
  }
  if( FD_UNLIKELY( loaded_cnt<data_shred_cnt ) ) return FD_REEDSOL_ERR_PARTIAL;

  fd_reedsol_private_gen_pi_64( _erased, pi );
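  /* pi[ i ] now holds the per-position Galois-field scaling factor.
     In the formal-derivative style of erasure decoding assumed here,
     that is the erasure-locator product evaluated at position i for
     the un-erased inputs, and the inverse of its formal derivative for
     the erased positions; this is why the same array can be applied
     both before and after the IFFT/FDERIV/FFT pipeline below. */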

  /* Accumulate the difference between each shred value we regenerate
     and any stored copy of it.  The difference must be 0; otherwise
     some shred is corrupt. */
  gf_t diff = gf_zero();

  for( ulong shred_pos=0UL; shred_pos<shred_sz; /* advanced manually at end of loop */ ) {
    /* Load exactly data_shred_cnt un-erased input shreds into
       their respective vector.  Fill the erased vectors with 0. */
    gf_t in00 = _erased[ 0 ] ? gf_zero() : gf_ldu( shred[ 0 ] + shred_pos );
    gf_t in01 = _erased[ 1 ] ? gf_zero() : gf_ldu( shred[ 1 ] + shred_pos );
    gf_t in02 = _erased[ 2 ] ? gf_zero() : gf_ldu( shred[ 2 ] + shred_pos );
    gf_t in03 = _erased[ 3 ] ? gf_zero() : gf_ldu( shred[ 3 ] + shred_pos );
    gf_t in04 = _erased[ 4 ] ? gf_zero() : gf_ldu( shred[ 4 ] + shred_pos );
    gf_t in05 = _erased[ 5 ] ? gf_zero() : gf_ldu( shred[ 5 ] + shred_pos );
    gf_t in06 = _erased[ 6 ] ? gf_zero() : gf_ldu( shred[ 6 ] + shred_pos );
    gf_t in07 = _erased[ 7 ] ? gf_zero() : gf_ldu( shred[ 7 ] + shred_pos );
    gf_t in08 = _erased[ 8 ] ? gf_zero() : gf_ldu( shred[ 8 ] + shred_pos );
    gf_t in09 = _erased[ 9 ] ? gf_zero() : gf_ldu( shred[ 9 ] + shred_pos );
    gf_t in10 = _erased[ 10 ] ? gf_zero() : gf_ldu( shred[ 10 ] + shred_pos );
    gf_t in11 = _erased[ 11 ] ? gf_zero() : gf_ldu( shred[ 11 ] + shred_pos );
    gf_t in12 = _erased[ 12 ] ? gf_zero() : gf_ldu( shred[ 12 ] + shred_pos );
    gf_t in13 = _erased[ 13 ] ? gf_zero() : gf_ldu( shred[ 13 ] + shred_pos );
    gf_t in14 = _erased[ 14 ] ? gf_zero() : gf_ldu( shred[ 14 ] + shred_pos );
    gf_t in15 = _erased[ 15 ] ? gf_zero() : gf_ldu( shred[ 15 ] + shred_pos );
    gf_t in16 = _erased[ 16 ] ? gf_zero() : gf_ldu( shred[ 16 ] + shred_pos );
    gf_t in17 = _erased[ 17 ] ? gf_zero() : gf_ldu( shred[ 17 ] + shred_pos );
    gf_t in18 = _erased[ 18 ] ? gf_zero() : gf_ldu( shred[ 18 ] + shred_pos );
    gf_t in19 = _erased[ 19 ] ? gf_zero() : gf_ldu( shred[ 19 ] + shred_pos );
    gf_t in20 = _erased[ 20 ] ? gf_zero() : gf_ldu( shred[ 20 ] + shred_pos );
    gf_t in21 = _erased[ 21 ] ? gf_zero() : gf_ldu( shred[ 21 ] + shred_pos );
    gf_t in22 = _erased[ 22 ] ? gf_zero() : gf_ldu( shred[ 22 ] + shred_pos );
    gf_t in23 = _erased[ 23 ] ? gf_zero() : gf_ldu( shred[ 23 ] + shred_pos );
    gf_t in24 = _erased[ 24 ] ? gf_zero() : gf_ldu( shred[ 24 ] + shred_pos );
    gf_t in25 = _erased[ 25 ] ? gf_zero() : gf_ldu( shred[ 25 ] + shred_pos );
    gf_t in26 = _erased[ 26 ] ? gf_zero() : gf_ldu( shred[ 26 ] + shred_pos );
    gf_t in27 = _erased[ 27 ] ? gf_zero() : gf_ldu( shred[ 27 ] + shred_pos );
    gf_t in28 = _erased[ 28 ] ? gf_zero() : gf_ldu( shred[ 28 ] + shred_pos );
    gf_t in29 = _erased[ 29 ] ? gf_zero() : gf_ldu( shred[ 29 ] + shred_pos );
    gf_t in30 = _erased[ 30 ] ? gf_zero() : gf_ldu( shred[ 30 ] + shred_pos );
    gf_t in31 = _erased[ 31 ] ? gf_zero() : gf_ldu( shred[ 31 ] + shred_pos );
    gf_t in32 = _erased[ 32 ] ? gf_zero() : gf_ldu( shred[ 32 ] + shred_pos );
    gf_t in33 = _erased[ 33 ] ? gf_zero() : gf_ldu( shred[ 33 ] + shred_pos );
    gf_t in34 = _erased[ 34 ] ? gf_zero() : gf_ldu( shred[ 34 ] + shred_pos );
    gf_t in35 = _erased[ 35 ] ? gf_zero() : gf_ldu( shred[ 35 ] + shred_pos );
    gf_t in36 = _erased[ 36 ] ? gf_zero() : gf_ldu( shred[ 36 ] + shred_pos );
    gf_t in37 = _erased[ 37 ] ? gf_zero() : gf_ldu( shred[ 37 ] + shred_pos );
    gf_t in38 = _erased[ 38 ] ? gf_zero() : gf_ldu( shred[ 38 ] + shred_pos );
    gf_t in39 = _erased[ 39 ] ? gf_zero() : gf_ldu( shred[ 39 ] + shred_pos );
    gf_t in40 = _erased[ 40 ] ? gf_zero() : gf_ldu( shred[ 40 ] + shred_pos );
    gf_t in41 = _erased[ 41 ] ? gf_zero() : gf_ldu( shred[ 41 ] + shred_pos );
    gf_t in42 = _erased[ 42 ] ? gf_zero() : gf_ldu( shred[ 42 ] + shred_pos );
    gf_t in43 = _erased[ 43 ] ? gf_zero() : gf_ldu( shred[ 43 ] + shred_pos );
    gf_t in44 = _erased[ 44 ] ? gf_zero() : gf_ldu( shred[ 44 ] + shred_pos );
    gf_t in45 = _erased[ 45 ] ? gf_zero() : gf_ldu( shred[ 45 ] + shred_pos );
    gf_t in46 = _erased[ 46 ] ? gf_zero() : gf_ldu( shred[ 46 ] + shred_pos );
    gf_t in47 = _erased[ 47 ] ? gf_zero() : gf_ldu( shred[ 47 ] + shred_pos );
    gf_t in48 = _erased[ 48 ] ? gf_zero() : gf_ldu( shred[ 48 ] + shred_pos );
    gf_t in49 = _erased[ 49 ] ? gf_zero() : gf_ldu( shred[ 49 ] + shred_pos );
    gf_t in50 = _erased[ 50 ] ? gf_zero() : gf_ldu( shred[ 50 ] + shred_pos );
    gf_t in51 = _erased[ 51 ] ? gf_zero() : gf_ldu( shred[ 51 ] + shred_pos );
    gf_t in52 = _erased[ 52 ] ? gf_zero() : gf_ldu( shred[ 52 ] + shred_pos );
    gf_t in53 = _erased[ 53 ] ? gf_zero() : gf_ldu( shred[ 53 ] + shred_pos );
    gf_t in54 = _erased[ 54 ] ? gf_zero() : gf_ldu( shred[ 54 ] + shred_pos );
    gf_t in55 = _erased[ 55 ] ? gf_zero() : gf_ldu( shred[ 55 ] + shred_pos );
    gf_t in56 = _erased[ 56 ] ? gf_zero() : gf_ldu( shred[ 56 ] + shred_pos );
    gf_t in57 = _erased[ 57 ] ? gf_zero() : gf_ldu( shred[ 57 ] + shred_pos );
    gf_t in58 = _erased[ 58 ] ? gf_zero() : gf_ldu( shred[ 58 ] + shred_pos );
    gf_t in59 = _erased[ 59 ] ? gf_zero() : gf_ldu( shred[ 59 ] + shred_pos );
    gf_t in60 = _erased[ 60 ] ? gf_zero() : gf_ldu( shred[ 60 ] + shred_pos );
    gf_t in61 = _erased[ 61 ] ? gf_zero() : gf_ldu( shred[ 61 ] + shred_pos );
    gf_t in62 = _erased[ 62 ] ? gf_zero() : gf_ldu( shred[ 62 ] + shred_pos );
    gf_t in63 = _erased[ 63 ] ? gf_zero() : gf_ldu( shred[ 63 ] + shred_pos );
    /* Technically, we only need to multiply the non-erased ones, since
       the erased ones are 0, but we know at least half of them are
       non-erased, and the branch is going to be just as costly as the
       multiply. */
    in00 = GF_MUL_VAR( in00, pi[ 0 ] );
    in01 = GF_MUL_VAR( in01, pi[ 1 ] );
    in02 = GF_MUL_VAR( in02, pi[ 2 ] );
    in03 = GF_MUL_VAR( in03, pi[ 3 ] );
    in04 = GF_MUL_VAR( in04, pi[ 4 ] );
    in05 = GF_MUL_VAR( in05, pi[ 5 ] );
    in06 = GF_MUL_VAR( in06, pi[ 6 ] );
    in07 = GF_MUL_VAR( in07, pi[ 7 ] );
    in08 = GF_MUL_VAR( in08, pi[ 8 ] );
    in09 = GF_MUL_VAR( in09, pi[ 9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
    in16 = GF_MUL_VAR( in16, pi[ 16 ] );
    in17 = GF_MUL_VAR( in17, pi[ 17 ] );
    in18 = GF_MUL_VAR( in18, pi[ 18 ] );
    in19 = GF_MUL_VAR( in19, pi[ 19 ] );
    in20 = GF_MUL_VAR( in20, pi[ 20 ] );
    in21 = GF_MUL_VAR( in21, pi[ 21 ] );
    in22 = GF_MUL_VAR( in22, pi[ 22 ] );
    in23 = GF_MUL_VAR( in23, pi[ 23 ] );
    in24 = GF_MUL_VAR( in24, pi[ 24 ] );
    in25 = GF_MUL_VAR( in25, pi[ 25 ] );
    in26 = GF_MUL_VAR( in26, pi[ 26 ] );
    in27 = GF_MUL_VAR( in27, pi[ 27 ] );
    in28 = GF_MUL_VAR( in28, pi[ 28 ] );
    in29 = GF_MUL_VAR( in29, pi[ 29 ] );
    in30 = GF_MUL_VAR( in30, pi[ 30 ] );
    in31 = GF_MUL_VAR( in31, pi[ 31 ] );
    in32 = GF_MUL_VAR( in32, pi[ 32 ] );
    in33 = GF_MUL_VAR( in33, pi[ 33 ] );
    in34 = GF_MUL_VAR( in34, pi[ 34 ] );
    in35 = GF_MUL_VAR( in35, pi[ 35 ] );
    in36 = GF_MUL_VAR( in36, pi[ 36 ] );
    in37 = GF_MUL_VAR( in37, pi[ 37 ] );
    in38 = GF_MUL_VAR( in38, pi[ 38 ] );
    in39 = GF_MUL_VAR( in39, pi[ 39 ] );
    in40 = GF_MUL_VAR( in40, pi[ 40 ] );
    in41 = GF_MUL_VAR( in41, pi[ 41 ] );
    in42 = GF_MUL_VAR( in42, pi[ 42 ] );
    in43 = GF_MUL_VAR( in43, pi[ 43 ] );
    in44 = GF_MUL_VAR( in44, pi[ 44 ] );
    in45 = GF_MUL_VAR( in45, pi[ 45 ] );
    in46 = GF_MUL_VAR( in46, pi[ 46 ] );
    in47 = GF_MUL_VAR( in47, pi[ 47 ] );
    in48 = GF_MUL_VAR( in48, pi[ 48 ] );
    in49 = GF_MUL_VAR( in49, pi[ 49 ] );
    in50 = GF_MUL_VAR( in50, pi[ 50 ] );
    in51 = GF_MUL_VAR( in51, pi[ 51 ] );
    in52 = GF_MUL_VAR( in52, pi[ 52 ] );
    in53 = GF_MUL_VAR( in53, pi[ 53 ] );
    in54 = GF_MUL_VAR( in54, pi[ 54 ] );
    in55 = GF_MUL_VAR( in55, pi[ 55 ] );
    in56 = GF_MUL_VAR( in56, pi[ 56 ] );
    in57 = GF_MUL_VAR( in57, pi[ 57 ] );
    in58 = GF_MUL_VAR( in58, pi[ 58 ] );
    in59 = GF_MUL_VAR( in59, pi[ 59 ] );
    in60 = GF_MUL_VAR( in60, pi[ 60 ] );
    in61 = GF_MUL_VAR( in61, pi[ 61 ] );
    in62 = GF_MUL_VAR( in62, pi[ 62 ] );
    in63 = GF_MUL_VAR( in63, pi[ 63 ] );
#define ALL_VARS in00, in01, in02, in03, in04, in05, in06, in07, in08, in09, in10, in11, in12, in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, in24, in25, in26, in27, in28, in29, in30, in31, in32, in33, in34, in35, in36, in37, in38, in39, in40, in41, in42, in43, in44, in45, in46, in47, in48, in49, in50, in51, in52, in53, in54, in55, in56, in57, in58, in59, in60, in61, in62, in63

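    /* These three steps are the core of the decoder: convert the
       scaled evaluations to the coefficient domain, take the formal
       derivative there, and convert back.  Under the decoding approach
       assumed above, after the second pi multiply the erased positions
       hold the recovered shred bytes. */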
    FD_REEDSOL_GENERATE_IFFT( 64, 0, ALL_VARS );

    FD_REEDSOL_GENERATE_FDERIV( 64, ALL_VARS );

    FD_REEDSOL_GENERATE_FFT( 64, 0, ALL_VARS );

    /* Again, we only need to multiply the erased ones, since we don't
       use the value of the non-erased ones anymore, but I'll take
       multiplies over branches most days. */
    in00 = GF_MUL_VAR( in00, pi[ 0 ] );
    in01 = GF_MUL_VAR( in01, pi[ 1 ] );
    in02 = GF_MUL_VAR( in02, pi[ 2 ] );
    in03 = GF_MUL_VAR( in03, pi[ 3 ] );
    in04 = GF_MUL_VAR( in04, pi[ 4 ] );
    in05 = GF_MUL_VAR( in05, pi[ 5 ] );
    in06 = GF_MUL_VAR( in06, pi[ 6 ] );
    in07 = GF_MUL_VAR( in07, pi[ 7 ] );
    in08 = GF_MUL_VAR( in08, pi[ 8 ] );
    in09 = GF_MUL_VAR( in09, pi[ 9 ] );
    in10 = GF_MUL_VAR( in10, pi[ 10 ] );
    in11 = GF_MUL_VAR( in11, pi[ 11 ] );
    in12 = GF_MUL_VAR( in12, pi[ 12 ] );
    in13 = GF_MUL_VAR( in13, pi[ 13 ] );
    in14 = GF_MUL_VAR( in14, pi[ 14 ] );
    in15 = GF_MUL_VAR( in15, pi[ 15 ] );
    in16 = GF_MUL_VAR( in16, pi[ 16 ] );
    in17 = GF_MUL_VAR( in17, pi[ 17 ] );
    in18 = GF_MUL_VAR( in18, pi[ 18 ] );
    in19 = GF_MUL_VAR( in19, pi[ 19 ] );
    in20 = GF_MUL_VAR( in20, pi[ 20 ] );
    in21 = GF_MUL_VAR( in21, pi[ 21 ] );
    in22 = GF_MUL_VAR( in22, pi[ 22 ] );
    in23 = GF_MUL_VAR( in23, pi[ 23 ] );
    in24 = GF_MUL_VAR( in24, pi[ 24 ] );
    in25 = GF_MUL_VAR( in25, pi[ 25 ] );
    in26 = GF_MUL_VAR( in26, pi[ 26 ] );
    in27 = GF_MUL_VAR( in27, pi[ 27 ] );
    in28 = GF_MUL_VAR( in28, pi[ 28 ] );
    in29 = GF_MUL_VAR( in29, pi[ 29 ] );
    in30 = GF_MUL_VAR( in30, pi[ 30 ] );
    in31 = GF_MUL_VAR( in31, pi[ 31 ] );
    in32 = GF_MUL_VAR( in32, pi[ 32 ] );
    in33 = GF_MUL_VAR( in33, pi[ 33 ] );
    in34 = GF_MUL_VAR( in34, pi[ 34 ] );
    in35 = GF_MUL_VAR( in35, pi[ 35 ] );
    in36 = GF_MUL_VAR( in36, pi[ 36 ] );
    in37 = GF_MUL_VAR( in37, pi[ 37 ] );
    in38 = GF_MUL_VAR( in38, pi[ 38 ] );
    in39 = GF_MUL_VAR( in39, pi[ 39 ] );
    in40 = GF_MUL_VAR( in40, pi[ 40 ] );
    in41 = GF_MUL_VAR( in41, pi[ 41 ] );
    in42 = GF_MUL_VAR( in42, pi[ 42 ] );
    in43 = GF_MUL_VAR( in43, pi[ 43 ] );
    in44 = GF_MUL_VAR( in44, pi[ 44 ] );
    in45 = GF_MUL_VAR( in45, pi[ 45 ] );
    in46 = GF_MUL_VAR( in46, pi[ 46 ] );
    in47 = GF_MUL_VAR( in47, pi[ 47 ] );
    in48 = GF_MUL_VAR( in48, pi[ 48 ] );
    in49 = GF_MUL_VAR( in49, pi[ 49 ] );
    in50 = GF_MUL_VAR( in50, pi[ 50 ] );
    in51 = GF_MUL_VAR( in51, pi[ 51 ] );
    in52 = GF_MUL_VAR( in52, pi[ 52 ] );
    in53 = GF_MUL_VAR( in53, pi[ 53 ] );
    in54 = GF_MUL_VAR( in54, pi[ 54 ] );
    in55 = GF_MUL_VAR( in55, pi[ 55 ] );
    in56 = GF_MUL_VAR( in56, pi[ 56 ] );
    in57 = GF_MUL_VAR( in57, pi[ 57 ] );
    in58 = GF_MUL_VAR( in58, pi[ 58 ] );
    in59 = GF_MUL_VAR( in59, pi[ 59 ] );
    in60 = GF_MUL_VAR( in60, pi[ 60 ] );
    in61 = GF_MUL_VAR( in61, pi[ 61 ] );
    in62 = GF_MUL_VAR( in62, pi[ 62 ] );
    in63 = GF_MUL_VAR( in63, pi[ 63 ] );
    /* There are a couple of cases we have to handle:
       - If i<shred_cnt and erased[ i ], it's an actual erasure, so we
         need to store the generated value.
       - If i<shred_cnt and _erased[ i ] but not erased[ i ], it was a
         value that we ignored to ensure the data lies on a polynomial
         of the right order, so we need to compare the value we
         generated to the one that was there.
       - If i<shred_cnt and !_erased[ i ], then this is a value we
         actually used in the computation, but we destroyed it, so we
         need to reload the actual value of the shred in order to use
         the IFFT in the next step.
       - If i>=shred_cnt, do nothing, which keeps whatever value the
         variable already holds. */
#define STORE_COMPARE_RELOAD( n, var ) do{ \
      if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
      else if( _erased[ n ] ) diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
      else var = gf_ldu( shred[ n ] + shred_pos ); \
    } while( 0 )
#define STORE_COMPARE( n, var ) do{ \
      if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
      else diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
    } while( 0 )
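    /* The switch below is a Duff's-device-style dispatch: jumping to
       case shred_cnt and falling through runs STORE_COMPARE_RELOAD
       exactly once for each shred index from shred_cnt-1 down to 0. */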
    switch( fd_ulong_min( shred_cnt, 64UL ) ) {
      case 64UL: STORE_COMPARE_RELOAD( 63, in63 ); FALLTHRU
      case 63UL: STORE_COMPARE_RELOAD( 62, in62 ); FALLTHRU
      case 62UL: STORE_COMPARE_RELOAD( 61, in61 ); FALLTHRU
      case 61UL: STORE_COMPARE_RELOAD( 60, in60 ); FALLTHRU
      case 60UL: STORE_COMPARE_RELOAD( 59, in59 ); FALLTHRU
      case 59UL: STORE_COMPARE_RELOAD( 58, in58 ); FALLTHRU
      case 58UL: STORE_COMPARE_RELOAD( 57, in57 ); FALLTHRU
      case 57UL: STORE_COMPARE_RELOAD( 56, in56 ); FALLTHRU
      case 56UL: STORE_COMPARE_RELOAD( 55, in55 ); FALLTHRU
      case 55UL: STORE_COMPARE_RELOAD( 54, in54 ); FALLTHRU
      case 54UL: STORE_COMPARE_RELOAD( 53, in53 ); FALLTHRU
      case 53UL: STORE_COMPARE_RELOAD( 52, in52 ); FALLTHRU
      case 52UL: STORE_COMPARE_RELOAD( 51, in51 ); FALLTHRU
      case 51UL: STORE_COMPARE_RELOAD( 50, in50 ); FALLTHRU
      case 50UL: STORE_COMPARE_RELOAD( 49, in49 ); FALLTHRU
      case 49UL: STORE_COMPARE_RELOAD( 48, in48 ); FALLTHRU
      case 48UL: STORE_COMPARE_RELOAD( 47, in47 ); FALLTHRU
      case 47UL: STORE_COMPARE_RELOAD( 46, in46 ); FALLTHRU
      case 46UL: STORE_COMPARE_RELOAD( 45, in45 ); FALLTHRU
      case 45UL: STORE_COMPARE_RELOAD( 44, in44 ); FALLTHRU
      case 44UL: STORE_COMPARE_RELOAD( 43, in43 ); FALLTHRU
      case 43UL: STORE_COMPARE_RELOAD( 42, in42 ); FALLTHRU
      case 42UL: STORE_COMPARE_RELOAD( 41, in41 ); FALLTHRU
      case 41UL: STORE_COMPARE_RELOAD( 40, in40 ); FALLTHRU
      case 40UL: STORE_COMPARE_RELOAD( 39, in39 ); FALLTHRU
      case 39UL: STORE_COMPARE_RELOAD( 38, in38 ); FALLTHRU
      case 38UL: STORE_COMPARE_RELOAD( 37, in37 ); FALLTHRU
      case 37UL: STORE_COMPARE_RELOAD( 36, in36 ); FALLTHRU
      case 36UL: STORE_COMPARE_RELOAD( 35, in35 ); FALLTHRU
      case 35UL: STORE_COMPARE_RELOAD( 34, in34 ); FALLTHRU
      case 34UL: STORE_COMPARE_RELOAD( 33, in33 ); FALLTHRU
      case 33UL: STORE_COMPARE_RELOAD( 32, in32 ); FALLTHRU
      case 32UL: STORE_COMPARE_RELOAD( 31, in31 ); FALLTHRU
      case 31UL: STORE_COMPARE_RELOAD( 30, in30 ); FALLTHRU
      case 30UL: STORE_COMPARE_RELOAD( 29, in29 ); FALLTHRU
      case 29UL: STORE_COMPARE_RELOAD( 28, in28 ); FALLTHRU
      case 28UL: STORE_COMPARE_RELOAD( 27, in27 ); FALLTHRU
      case 27UL: STORE_COMPARE_RELOAD( 26, in26 ); FALLTHRU
      case 26UL: STORE_COMPARE_RELOAD( 25, in25 ); FALLTHRU
      case 25UL: STORE_COMPARE_RELOAD( 24, in24 ); FALLTHRU
      case 24UL: STORE_COMPARE_RELOAD( 23, in23 ); FALLTHRU
      case 23UL: STORE_COMPARE_RELOAD( 22, in22 ); FALLTHRU
      case 22UL: STORE_COMPARE_RELOAD( 21, in21 ); FALLTHRU
      case 21UL: STORE_COMPARE_RELOAD( 20, in20 ); FALLTHRU
      case 20UL: STORE_COMPARE_RELOAD( 19, in19 ); FALLTHRU
      case 19UL: STORE_COMPARE_RELOAD( 18, in18 ); FALLTHRU
      case 18UL: STORE_COMPARE_RELOAD( 17, in17 ); FALLTHRU
      case 17UL: STORE_COMPARE_RELOAD( 16, in16 ); FALLTHRU
      case 16UL: STORE_COMPARE_RELOAD( 15, in15 ); FALLTHRU
      case 15UL: STORE_COMPARE_RELOAD( 14, in14 ); FALLTHRU
      case 14UL: STORE_COMPARE_RELOAD( 13, in13 ); FALLTHRU
      case 13UL: STORE_COMPARE_RELOAD( 12, in12 ); FALLTHRU
      case 12UL: STORE_COMPARE_RELOAD( 11, in11 ); FALLTHRU
      case 11UL: STORE_COMPARE_RELOAD( 10, in10 ); FALLTHRU
      case 10UL: STORE_COMPARE_RELOAD( 9, in09 ); FALLTHRU
      case 9UL: STORE_COMPARE_RELOAD( 8, in08 ); FALLTHRU
      case 8UL: STORE_COMPARE_RELOAD( 7, in07 ); FALLTHRU
      case 7UL: STORE_COMPARE_RELOAD( 6, in06 ); FALLTHRU
      case 6UL: STORE_COMPARE_RELOAD( 5, in05 ); FALLTHRU
      case 5UL: STORE_COMPARE_RELOAD( 4, in04 ); FALLTHRU
      case 4UL: STORE_COMPARE_RELOAD( 3, in03 ); FALLTHRU
      case 3UL: STORE_COMPARE_RELOAD( 2, in02 ); FALLTHRU
      case 2UL: STORE_COMPARE_RELOAD( 1, in01 ); FALLTHRU
      case 1UL: STORE_COMPARE_RELOAD( 0, in00 );
    }

    ulong shreds_remaining = shred_cnt-fd_ulong_min( shred_cnt, 64UL );
    if( shreds_remaining>0UL ) {
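      /* The variables currently hold evaluations at points 0-63.  Undo
         that FFT and re-evaluate at points 64-127 (reading the second
         macro argument as the evaluation-point offset) so the shreds
         past the first 64 can be generated and checked too. */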
      FD_REEDSOL_GENERATE_IFFT( 64, 0, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 64, 64, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 64UL ) ) {
        case 64UL: STORE_COMPARE( 127, in63 ); FALLTHRU
        case 63UL: STORE_COMPARE( 126, in62 ); FALLTHRU
        case 62UL: STORE_COMPARE( 125, in61 ); FALLTHRU
        case 61UL: STORE_COMPARE( 124, in60 ); FALLTHRU
        case 60UL: STORE_COMPARE( 123, in59 ); FALLTHRU
        case 59UL: STORE_COMPARE( 122, in58 ); FALLTHRU
        case 58UL: STORE_COMPARE( 121, in57 ); FALLTHRU
        case 57UL: STORE_COMPARE( 120, in56 ); FALLTHRU
        case 56UL: STORE_COMPARE( 119, in55 ); FALLTHRU
        case 55UL: STORE_COMPARE( 118, in54 ); FALLTHRU
        case 54UL: STORE_COMPARE( 117, in53 ); FALLTHRU
        case 53UL: STORE_COMPARE( 116, in52 ); FALLTHRU
        case 52UL: STORE_COMPARE( 115, in51 ); FALLTHRU
        case 51UL: STORE_COMPARE( 114, in50 ); FALLTHRU
        case 50UL: STORE_COMPARE( 113, in49 ); FALLTHRU
        case 49UL: STORE_COMPARE( 112, in48 ); FALLTHRU
        case 48UL: STORE_COMPARE( 111, in47 ); FALLTHRU
        case 47UL: STORE_COMPARE( 110, in46 ); FALLTHRU
        case 46UL: STORE_COMPARE( 109, in45 ); FALLTHRU
        case 45UL: STORE_COMPARE( 108, in44 ); FALLTHRU
        case 44UL: STORE_COMPARE( 107, in43 ); FALLTHRU
        case 43UL: STORE_COMPARE( 106, in42 ); FALLTHRU
        case 42UL: STORE_COMPARE( 105, in41 ); FALLTHRU
        case 41UL: STORE_COMPARE( 104, in40 ); FALLTHRU
        case 40UL: STORE_COMPARE( 103, in39 ); FALLTHRU
        case 39UL: STORE_COMPARE( 102, in38 ); FALLTHRU
        case 38UL: STORE_COMPARE( 101, in37 ); FALLTHRU
        case 37UL: STORE_COMPARE( 100, in36 ); FALLTHRU
        case 36UL: STORE_COMPARE( 99, in35 ); FALLTHRU
        case 35UL: STORE_COMPARE( 98, in34 ); FALLTHRU
        case 34UL: STORE_COMPARE( 97, in33 ); FALLTHRU
        case 33UL: STORE_COMPARE( 96, in32 ); FALLTHRU
        case 32UL: STORE_COMPARE( 95, in31 ); FALLTHRU
        case 31UL: STORE_COMPARE( 94, in30 ); FALLTHRU
        case 30UL: STORE_COMPARE( 93, in29 ); FALLTHRU
        case 29UL: STORE_COMPARE( 92, in28 ); FALLTHRU
        case 28UL: STORE_COMPARE( 91, in27 ); FALLTHRU
        case 27UL: STORE_COMPARE( 90, in26 ); FALLTHRU
        case 26UL: STORE_COMPARE( 89, in25 ); FALLTHRU
        case 25UL: STORE_COMPARE( 88, in24 ); FALLTHRU
        case 24UL: STORE_COMPARE( 87, in23 ); FALLTHRU
        case 23UL: STORE_COMPARE( 86, in22 ); FALLTHRU
        case 22UL: STORE_COMPARE( 85, in21 ); FALLTHRU
        case 21UL: STORE_COMPARE( 84, in20 ); FALLTHRU
        case 20UL: STORE_COMPARE( 83, in19 ); FALLTHRU
        case 19UL: STORE_COMPARE( 82, in18 ); FALLTHRU
        case 18UL: STORE_COMPARE( 81, in17 ); FALLTHRU
        case 17UL: STORE_COMPARE( 80, in16 ); FALLTHRU
        case 16UL: STORE_COMPARE( 79, in15 ); FALLTHRU
        case 15UL: STORE_COMPARE( 78, in14 ); FALLTHRU
        case 14UL: STORE_COMPARE( 77, in13 ); FALLTHRU
        case 13UL: STORE_COMPARE( 76, in12 ); FALLTHRU
        case 12UL: STORE_COMPARE( 75, in11 ); FALLTHRU
        case 11UL: STORE_COMPARE( 74, in10 ); FALLTHRU
        case 10UL: STORE_COMPARE( 73, in09 ); FALLTHRU
        case 9UL: STORE_COMPARE( 72, in08 ); FALLTHRU
        case 8UL: STORE_COMPARE( 71, in07 ); FALLTHRU
        case 7UL: STORE_COMPARE( 70, in06 ); FALLTHRU
        case 6UL: STORE_COMPARE( 69, in05 ); FALLTHRU
        case 5UL: STORE_COMPARE( 68, in04 ); FALLTHRU
        case 4UL: STORE_COMPARE( 67, in03 ); FALLTHRU
        case 3UL: STORE_COMPARE( 66, in02 ); FALLTHRU
        case 2UL: STORE_COMPARE( 65, in01 ); FALLTHRU
        case 1UL: STORE_COMPARE( 64, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 64UL );
    }
    if( shreds_remaining>0UL ) {
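      /* Same trick once more for evaluation points 128 and up.  Only
         seven cases are needed below: this variant handles at most 135
         shreds in total (indices 0-134), so at most shreds 128-134 can
         remain at this point. */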
      FD_REEDSOL_GENERATE_IFFT( 64, 64, ALL_VARS );
      FD_REEDSOL_GENERATE_FFT( 64, 128, ALL_VARS );

      switch( fd_ulong_min( shreds_remaining, 64UL ) ) {
        case 7UL: STORE_COMPARE( 134, in06 ); FALLTHRU
        case 6UL: STORE_COMPARE( 133, in05 ); FALLTHRU
        case 5UL: STORE_COMPARE( 132, in04 ); FALLTHRU
        case 4UL: STORE_COMPARE( 131, in03 ); FALLTHRU
        case 3UL: STORE_COMPARE( 130, in02 ); FALLTHRU
        case 2UL: STORE_COMPARE( 129, in01 ); FALLTHRU
        case 1UL: STORE_COMPARE( 128, in00 );
      }
      shreds_remaining -= fd_ulong_min( shreds_remaining, 64UL );
    }
    if( FD_UNLIKELY( GF_ANY( diff ) ) ) return FD_REEDSOL_ERR_CORRUPT;
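    /* Advance by one vector width.  If fewer than GF_WIDTH bytes would
       remain, pull shred_pos back so the final iteration ends exactly
       at shred_sz, reprocessing a small overlap instead of touching
       memory out of bounds. */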
    shred_pos += GF_WIDTH;
    shred_pos = fd_ulong_if( ((shred_sz-GF_WIDTH)<shred_pos) & (shred_pos<shred_sz), shred_sz-GF_WIDTH, shred_pos );
  }
  return FD_REEDSOL_SUCCESS;
}
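
/* Illustrative usage sketch, not part of the generated file.  It calls
   the private function above directly with hypothetical buffers and
   counts; real callers are expected to go through the public
   fd_reedsol API instead.  Guarded by #if 0 since it exists only to
   show the calling convention. */
#if 0
static int
recover_example( void ) {
  ulong   shred_sz         = 1024UL;      /* bytes per shred (hypothetical)   */
  ulong   data_shred_cnt   = 32UL;
  ulong   parity_shred_cnt = 32UL;
  uchar   buf[ 64 ][ 1024 ];              /* storage for all 64 shreds        */
  uchar * shred [ 64 ];                   /* shred[ i ] points at shred i     */
  uchar   erased[ 64 ];                   /* 1 if shred i was lost, else 0    */
  for( ulong i=0UL; i<64UL; i++ ) { shred[ i ] = buf[ i ]; erased[ i ] = 0; }
  /* ... populate the received shreds, then mark the lost ones ... */
  erased[ 3 ] = 1; /* e.g. data shred 3 was never received */
  int err = fd_reedsol_private_recover_var_64( shred_sz, shred, data_shred_cnt, parity_shred_cnt, erased );
  /* On FD_REEDSOL_SUCCESS, shred[ 3 ] now holds the regenerated data.
     FD_REEDSOL_ERR_PARTIAL means too few un-erased shreds were
     supplied; FD_REEDSOL_ERR_CORRUPT means a received shred failed the
     consistency check. */
  return err;
}
#endif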