Line data Source code
1 : /* Note: This file is auto generated. */
2 : #include "fd_reedsol_ppt.h"
3 : #include "fd_reedsol_fderiv.h"
4 :
/* fd_reedsol_private_recover_var_64: Reed-Solomon erasure recovery,
   variant that processes shreds in 64-element FFT blocks.  shred_sz is
   the byte size of each shred; shred points to the shred buffers
   (data shreds first, then parity); erased[ i ]!=0 marks shred i as
   missing, and missing shreds are regenerated in place (gf_stu below
   writes into shred[ i ]).  Returns FD_REEDSOL_SUCCESS on success,
   FD_REEDSOL_ERR_PARTIAL if fewer than data_shred_cnt un-erased shreds
   are available, or FD_REEDSOL_ERR_CORRUPT if a shred that was present
   disagrees with the recovered polynomial.  NOTE: this file is
   generated; do not edit by hand. */
5 : FD_FN_UNSANITIZED int
6 : fd_reedsol_private_recover_var_64( ulong shred_sz,
7 : uchar * const * shred,
8 : ulong data_shred_cnt,
9 : ulong parity_shred_cnt,
10 210 : uchar const * erased ) {
11 210 : uchar _erased[ 64 ] W_ATTR;
12 210 : uchar pi[ 64 ] W_ATTR;
13 210 : ulong shred_cnt = data_shred_cnt + parity_shred_cnt;
14 210 : ulong loaded_cnt = 0UL;
/* Treat as "erased" everything except the first data_shred_cnt
   un-erased shreds among the first min( shred_cnt, 64 ) positions.
   Additional un-erased shreds are not used as decode inputs; they are
   instead compared against the regenerated values further down (see
   STORE_COMPARE_RELOAD). */
15 13650 : for( ulong i=0UL; i<64UL; i++) {
16 13440 : int load_shred = ((i<shred_cnt)&(loaded_cnt<data_shred_cnt))&&( erased[ i ]==0 );
17 13440 : _erased[ i ] = !load_shred;
18 13440 : loaded_cnt += (ulong)load_shred;
19 13440 : }
20 210 : if( FD_UNLIKELY( loaded_cnt<data_shred_cnt ) ) return FD_REEDSOL_ERR_PARTIAL;
21 :
/* pi[ i ] is the per-position scale factor derived from the erasure
   pattern (_erased), applied before the IFFT and again after the FFT
   below.  NOTE(review): exact definition lives in the gen_pi
   implementation — presumably the Lagrange-style product term for
   position i; confirm against fd_reedsol_ppt.h / its generator. */
22 210 : fd_reedsol_private_gen_pi_64( _erased, pi );
23 :
24 : /* Store the difference for each shred that was regenerated. This
25 : must be 0. Otherwise there's a corrupt shred. */
26 210 : gf_t diff = gf_zero();
27 :
28 6741 : for( ulong shred_pos=0UL; shred_pos<shred_sz; /* advanced manually at end of loop */ ) {
29 : /* Load exactly data_shred_cnt un-erased input shreds into
30 : their respective vector. Fill the erased vectors with 0. */
31 6531 : gf_t in00 = _erased[ 0 ] ? gf_zero() : gf_ldu( shred[ 0 ] + shred_pos );
32 6531 : gf_t in01 = _erased[ 1 ] ? gf_zero() : gf_ldu( shred[ 1 ] + shred_pos );
33 6531 : gf_t in02 = _erased[ 2 ] ? gf_zero() : gf_ldu( shred[ 2 ] + shred_pos );
34 6531 : gf_t in03 = _erased[ 3 ] ? gf_zero() : gf_ldu( shred[ 3 ] + shred_pos );
35 6531 : gf_t in04 = _erased[ 4 ] ? gf_zero() : gf_ldu( shred[ 4 ] + shred_pos );
36 6531 : gf_t in05 = _erased[ 5 ] ? gf_zero() : gf_ldu( shred[ 5 ] + shred_pos );
37 6531 : gf_t in06 = _erased[ 6 ] ? gf_zero() : gf_ldu( shred[ 6 ] + shred_pos );
38 6531 : gf_t in07 = _erased[ 7 ] ? gf_zero() : gf_ldu( shred[ 7 ] + shred_pos );
39 6531 : gf_t in08 = _erased[ 8 ] ? gf_zero() : gf_ldu( shred[ 8 ] + shred_pos );
40 6531 : gf_t in09 = _erased[ 9 ] ? gf_zero() : gf_ldu( shred[ 9 ] + shred_pos );
41 6531 : gf_t in10 = _erased[ 10 ] ? gf_zero() : gf_ldu( shred[ 10 ] + shred_pos );
42 6531 : gf_t in11 = _erased[ 11 ] ? gf_zero() : gf_ldu( shred[ 11 ] + shred_pos );
43 6531 : gf_t in12 = _erased[ 12 ] ? gf_zero() : gf_ldu( shred[ 12 ] + shred_pos );
44 6531 : gf_t in13 = _erased[ 13 ] ? gf_zero() : gf_ldu( shred[ 13 ] + shred_pos );
45 6531 : gf_t in14 = _erased[ 14 ] ? gf_zero() : gf_ldu( shred[ 14 ] + shred_pos );
46 6531 : gf_t in15 = _erased[ 15 ] ? gf_zero() : gf_ldu( shred[ 15 ] + shred_pos );
47 6531 : gf_t in16 = _erased[ 16 ] ? gf_zero() : gf_ldu( shred[ 16 ] + shred_pos );
48 6531 : gf_t in17 = _erased[ 17 ] ? gf_zero() : gf_ldu( shred[ 17 ] + shred_pos );
49 6531 : gf_t in18 = _erased[ 18 ] ? gf_zero() : gf_ldu( shred[ 18 ] + shred_pos );
50 6531 : gf_t in19 = _erased[ 19 ] ? gf_zero() : gf_ldu( shred[ 19 ] + shred_pos );
51 6531 : gf_t in20 = _erased[ 20 ] ? gf_zero() : gf_ldu( shred[ 20 ] + shred_pos );
52 6531 : gf_t in21 = _erased[ 21 ] ? gf_zero() : gf_ldu( shred[ 21 ] + shred_pos );
53 6531 : gf_t in22 = _erased[ 22 ] ? gf_zero() : gf_ldu( shred[ 22 ] + shred_pos );
54 6531 : gf_t in23 = _erased[ 23 ] ? gf_zero() : gf_ldu( shred[ 23 ] + shred_pos );
55 6531 : gf_t in24 = _erased[ 24 ] ? gf_zero() : gf_ldu( shred[ 24 ] + shred_pos );
56 6531 : gf_t in25 = _erased[ 25 ] ? gf_zero() : gf_ldu( shred[ 25 ] + shred_pos );
57 6531 : gf_t in26 = _erased[ 26 ] ? gf_zero() : gf_ldu( shred[ 26 ] + shred_pos );
58 6531 : gf_t in27 = _erased[ 27 ] ? gf_zero() : gf_ldu( shred[ 27 ] + shred_pos );
59 6531 : gf_t in28 = _erased[ 28 ] ? gf_zero() : gf_ldu( shred[ 28 ] + shred_pos );
60 6531 : gf_t in29 = _erased[ 29 ] ? gf_zero() : gf_ldu( shred[ 29 ] + shred_pos );
61 6531 : gf_t in30 = _erased[ 30 ] ? gf_zero() : gf_ldu( shred[ 30 ] + shred_pos );
62 6531 : gf_t in31 = _erased[ 31 ] ? gf_zero() : gf_ldu( shred[ 31 ] + shred_pos );
63 6531 : gf_t in32 = _erased[ 32 ] ? gf_zero() : gf_ldu( shred[ 32 ] + shred_pos );
64 6531 : gf_t in33 = _erased[ 33 ] ? gf_zero() : gf_ldu( shred[ 33 ] + shred_pos );
65 6531 : gf_t in34 = _erased[ 34 ] ? gf_zero() : gf_ldu( shred[ 34 ] + shred_pos );
66 6531 : gf_t in35 = _erased[ 35 ] ? gf_zero() : gf_ldu( shred[ 35 ] + shred_pos );
67 6531 : gf_t in36 = _erased[ 36 ] ? gf_zero() : gf_ldu( shred[ 36 ] + shred_pos );
68 6531 : gf_t in37 = _erased[ 37 ] ? gf_zero() : gf_ldu( shred[ 37 ] + shred_pos );
69 6531 : gf_t in38 = _erased[ 38 ] ? gf_zero() : gf_ldu( shred[ 38 ] + shred_pos );
70 6531 : gf_t in39 = _erased[ 39 ] ? gf_zero() : gf_ldu( shred[ 39 ] + shred_pos );
71 6531 : gf_t in40 = _erased[ 40 ] ? gf_zero() : gf_ldu( shred[ 40 ] + shred_pos );
72 6531 : gf_t in41 = _erased[ 41 ] ? gf_zero() : gf_ldu( shred[ 41 ] + shred_pos );
73 6531 : gf_t in42 = _erased[ 42 ] ? gf_zero() : gf_ldu( shred[ 42 ] + shred_pos );
74 6531 : gf_t in43 = _erased[ 43 ] ? gf_zero() : gf_ldu( shred[ 43 ] + shred_pos );
75 6531 : gf_t in44 = _erased[ 44 ] ? gf_zero() : gf_ldu( shred[ 44 ] + shred_pos );
76 6531 : gf_t in45 = _erased[ 45 ] ? gf_zero() : gf_ldu( shred[ 45 ] + shred_pos );
77 6531 : gf_t in46 = _erased[ 46 ] ? gf_zero() : gf_ldu( shred[ 46 ] + shred_pos );
78 6531 : gf_t in47 = _erased[ 47 ] ? gf_zero() : gf_ldu( shred[ 47 ] + shred_pos );
79 6531 : gf_t in48 = _erased[ 48 ] ? gf_zero() : gf_ldu( shred[ 48 ] + shred_pos );
80 6531 : gf_t in49 = _erased[ 49 ] ? gf_zero() : gf_ldu( shred[ 49 ] + shred_pos );
81 6531 : gf_t in50 = _erased[ 50 ] ? gf_zero() : gf_ldu( shred[ 50 ] + shred_pos );
82 6531 : gf_t in51 = _erased[ 51 ] ? gf_zero() : gf_ldu( shred[ 51 ] + shred_pos );
83 6531 : gf_t in52 = _erased[ 52 ] ? gf_zero() : gf_ldu( shred[ 52 ] + shred_pos );
84 6531 : gf_t in53 = _erased[ 53 ] ? gf_zero() : gf_ldu( shred[ 53 ] + shred_pos );
85 6531 : gf_t in54 = _erased[ 54 ] ? gf_zero() : gf_ldu( shred[ 54 ] + shred_pos );
86 6531 : gf_t in55 = _erased[ 55 ] ? gf_zero() : gf_ldu( shred[ 55 ] + shred_pos );
87 6531 : gf_t in56 = _erased[ 56 ] ? gf_zero() : gf_ldu( shred[ 56 ] + shred_pos );
88 6531 : gf_t in57 = _erased[ 57 ] ? gf_zero() : gf_ldu( shred[ 57 ] + shred_pos );
89 6531 : gf_t in58 = _erased[ 58 ] ? gf_zero() : gf_ldu( shred[ 58 ] + shred_pos );
90 6531 : gf_t in59 = _erased[ 59 ] ? gf_zero() : gf_ldu( shred[ 59 ] + shred_pos );
91 6531 : gf_t in60 = _erased[ 60 ] ? gf_zero() : gf_ldu( shred[ 60 ] + shred_pos );
92 6531 : gf_t in61 = _erased[ 61 ] ? gf_zero() : gf_ldu( shred[ 61 ] + shred_pos );
93 6531 : gf_t in62 = _erased[ 62 ] ? gf_zero() : gf_ldu( shred[ 62 ] + shred_pos );
94 6531 : gf_t in63 = _erased[ 63 ] ? gf_zero() : gf_ldu( shred[ 63 ] + shred_pos );
95 : /* Technically, we only need to multiply the non-erased ones, since
96 : the erased ones are 0, but we know at least half of them are
97 : non-erased, and the branch is going to be just as costly as the
98 : multiply. */
99 6531 : in00 = GF_MUL_VAR( in00, pi[ 0 ] );
100 6531 : in01 = GF_MUL_VAR( in01, pi[ 1 ] );
101 6531 : in02 = GF_MUL_VAR( in02, pi[ 2 ] );
102 6531 : in03 = GF_MUL_VAR( in03, pi[ 3 ] );
103 6531 : in04 = GF_MUL_VAR( in04, pi[ 4 ] );
104 6531 : in05 = GF_MUL_VAR( in05, pi[ 5 ] );
105 6531 : in06 = GF_MUL_VAR( in06, pi[ 6 ] );
106 6531 : in07 = GF_MUL_VAR( in07, pi[ 7 ] );
107 6531 : in08 = GF_MUL_VAR( in08, pi[ 8 ] );
108 6531 : in09 = GF_MUL_VAR( in09, pi[ 9 ] );
109 6531 : in10 = GF_MUL_VAR( in10, pi[ 10 ] );
110 6531 : in11 = GF_MUL_VAR( in11, pi[ 11 ] );
111 6531 : in12 = GF_MUL_VAR( in12, pi[ 12 ] );
112 6531 : in13 = GF_MUL_VAR( in13, pi[ 13 ] );
113 6531 : in14 = GF_MUL_VAR( in14, pi[ 14 ] );
114 6531 : in15 = GF_MUL_VAR( in15, pi[ 15 ] );
115 6531 : in16 = GF_MUL_VAR( in16, pi[ 16 ] );
116 6531 : in17 = GF_MUL_VAR( in17, pi[ 17 ] );
117 6531 : in18 = GF_MUL_VAR( in18, pi[ 18 ] );
118 6531 : in19 = GF_MUL_VAR( in19, pi[ 19 ] );
119 6531 : in20 = GF_MUL_VAR( in20, pi[ 20 ] );
120 6531 : in21 = GF_MUL_VAR( in21, pi[ 21 ] );
121 6531 : in22 = GF_MUL_VAR( in22, pi[ 22 ] );
122 6531 : in23 = GF_MUL_VAR( in23, pi[ 23 ] );
123 6531 : in24 = GF_MUL_VAR( in24, pi[ 24 ] );
124 6531 : in25 = GF_MUL_VAR( in25, pi[ 25 ] );
125 6531 : in26 = GF_MUL_VAR( in26, pi[ 26 ] );
126 6531 : in27 = GF_MUL_VAR( in27, pi[ 27 ] );
127 6531 : in28 = GF_MUL_VAR( in28, pi[ 28 ] );
128 6531 : in29 = GF_MUL_VAR( in29, pi[ 29 ] );
129 6531 : in30 = GF_MUL_VAR( in30, pi[ 30 ] );
130 6531 : in31 = GF_MUL_VAR( in31, pi[ 31 ] );
131 6531 : in32 = GF_MUL_VAR( in32, pi[ 32 ] );
132 6531 : in33 = GF_MUL_VAR( in33, pi[ 33 ] );
133 6531 : in34 = GF_MUL_VAR( in34, pi[ 34 ] );
134 6531 : in35 = GF_MUL_VAR( in35, pi[ 35 ] );
135 6531 : in36 = GF_MUL_VAR( in36, pi[ 36 ] );
136 6531 : in37 = GF_MUL_VAR( in37, pi[ 37 ] );
137 6531 : in38 = GF_MUL_VAR( in38, pi[ 38 ] );
138 6531 : in39 = GF_MUL_VAR( in39, pi[ 39 ] );
139 6531 : in40 = GF_MUL_VAR( in40, pi[ 40 ] );
140 6531 : in41 = GF_MUL_VAR( in41, pi[ 41 ] );
141 6531 : in42 = GF_MUL_VAR( in42, pi[ 42 ] );
142 6531 : in43 = GF_MUL_VAR( in43, pi[ 43 ] );
143 6531 : in44 = GF_MUL_VAR( in44, pi[ 44 ] );
144 6531 : in45 = GF_MUL_VAR( in45, pi[ 45 ] );
145 6531 : in46 = GF_MUL_VAR( in46, pi[ 46 ] );
146 6531 : in47 = GF_MUL_VAR( in47, pi[ 47 ] );
147 6531 : in48 = GF_MUL_VAR( in48, pi[ 48 ] );
148 6531 : in49 = GF_MUL_VAR( in49, pi[ 49 ] );
149 6531 : in50 = GF_MUL_VAR( in50, pi[ 50 ] );
150 6531 : in51 = GF_MUL_VAR( in51, pi[ 51 ] );
151 6531 : in52 = GF_MUL_VAR( in52, pi[ 52 ] );
152 6531 : in53 = GF_MUL_VAR( in53, pi[ 53 ] );
153 6531 : in54 = GF_MUL_VAR( in54, pi[ 54 ] );
154 6531 : in55 = GF_MUL_VAR( in55, pi[ 55 ] );
155 6531 : in56 = GF_MUL_VAR( in56, pi[ 56 ] );
156 6531 : in57 = GF_MUL_VAR( in57, pi[ 57 ] );
157 6531 : in58 = GF_MUL_VAR( in58, pi[ 58 ] );
158 6531 : in59 = GF_MUL_VAR( in59, pi[ 59 ] );
159 6531 : in60 = GF_MUL_VAR( in60, pi[ 60 ] );
160 6531 : in61 = GF_MUL_VAR( in61, pi[ 61 ] );
161 6531 : in62 = GF_MUL_VAR( in62, pi[ 62 ] );
162 6531 : in63 = GF_MUL_VAR( in63, pi[ 63 ] );
163 6531 : #define ALL_VARS in00, in01, in02, in03, in04, in05, in06, in07, in08, in09, in10, in11, in12, in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, in24, in25, in26, in27, in28, in29, in30, in31, in32, in33, in34, in35, in36, in37, in38, in39, in40, in41, in42, in43, in44, in45, in46, in47, in48, in49, in50, in51, in52, in53, in54, in55, in56, in57, in58, in59, in60, in61, in62, in63
164 :
/* Erasure recovery via the formal derivative: transform the scaled
   evaluations to the coefficient basis (IFFT), take the formal
   derivative, and transform back to the evaluation basis (FFT).  See
   fd_reedsol_fderiv.h for the FDERIV generator. */
165 6531 : FD_REEDSOL_GENERATE_IFFT( 64, 0, ALL_VARS );
166 :
167 6531 : FD_REEDSOL_GENERATE_FDERIV( 64, ALL_VARS );
168 :
169 6531 : FD_REEDSOL_GENERATE_FFT( 64, 0, ALL_VARS );
170 :
171 : /* Again, we only need to multiply the erased ones, since we don't
172 : use the value of the non-erased ones anymore, but I'll take
173 : multiplies over branches most days. */
174 6531 : in00 = GF_MUL_VAR( in00, pi[ 0 ] );
175 6531 : in01 = GF_MUL_VAR( in01, pi[ 1 ] );
176 6531 : in02 = GF_MUL_VAR( in02, pi[ 2 ] );
177 6531 : in03 = GF_MUL_VAR( in03, pi[ 3 ] );
178 6531 : in04 = GF_MUL_VAR( in04, pi[ 4 ] );
179 6531 : in05 = GF_MUL_VAR( in05, pi[ 5 ] );
180 6531 : in06 = GF_MUL_VAR( in06, pi[ 6 ] );
181 6531 : in07 = GF_MUL_VAR( in07, pi[ 7 ] );
182 6531 : in08 = GF_MUL_VAR( in08, pi[ 8 ] );
183 6531 : in09 = GF_MUL_VAR( in09, pi[ 9 ] );
184 6531 : in10 = GF_MUL_VAR( in10, pi[ 10 ] );
185 6531 : in11 = GF_MUL_VAR( in11, pi[ 11 ] );
186 6531 : in12 = GF_MUL_VAR( in12, pi[ 12 ] );
187 6531 : in13 = GF_MUL_VAR( in13, pi[ 13 ] );
188 6531 : in14 = GF_MUL_VAR( in14, pi[ 14 ] );
189 6531 : in15 = GF_MUL_VAR( in15, pi[ 15 ] );
190 6531 : in16 = GF_MUL_VAR( in16, pi[ 16 ] );
191 6531 : in17 = GF_MUL_VAR( in17, pi[ 17 ] );
192 6531 : in18 = GF_MUL_VAR( in18, pi[ 18 ] );
193 6531 : in19 = GF_MUL_VAR( in19, pi[ 19 ] );
194 6531 : in20 = GF_MUL_VAR( in20, pi[ 20 ] );
195 6531 : in21 = GF_MUL_VAR( in21, pi[ 21 ] );
196 6531 : in22 = GF_MUL_VAR( in22, pi[ 22 ] );
197 6531 : in23 = GF_MUL_VAR( in23, pi[ 23 ] );
198 6531 : in24 = GF_MUL_VAR( in24, pi[ 24 ] );
199 6531 : in25 = GF_MUL_VAR( in25, pi[ 25 ] );
200 6531 : in26 = GF_MUL_VAR( in26, pi[ 26 ] );
201 6531 : in27 = GF_MUL_VAR( in27, pi[ 27 ] );
202 6531 : in28 = GF_MUL_VAR( in28, pi[ 28 ] );
203 6531 : in29 = GF_MUL_VAR( in29, pi[ 29 ] );
204 6531 : in30 = GF_MUL_VAR( in30, pi[ 30 ] );
205 6531 : in31 = GF_MUL_VAR( in31, pi[ 31 ] );
206 6531 : in32 = GF_MUL_VAR( in32, pi[ 32 ] );
207 6531 : in33 = GF_MUL_VAR( in33, pi[ 33 ] );
208 6531 : in34 = GF_MUL_VAR( in34, pi[ 34 ] );
209 6531 : in35 = GF_MUL_VAR( in35, pi[ 35 ] );
210 6531 : in36 = GF_MUL_VAR( in36, pi[ 36 ] );
211 6531 : in37 = GF_MUL_VAR( in37, pi[ 37 ] );
212 6531 : in38 = GF_MUL_VAR( in38, pi[ 38 ] );
213 6531 : in39 = GF_MUL_VAR( in39, pi[ 39 ] );
214 6531 : in40 = GF_MUL_VAR( in40, pi[ 40 ] );
215 6531 : in41 = GF_MUL_VAR( in41, pi[ 41 ] );
216 6531 : in42 = GF_MUL_VAR( in42, pi[ 42 ] );
217 6531 : in43 = GF_MUL_VAR( in43, pi[ 43 ] );
218 6531 : in44 = GF_MUL_VAR( in44, pi[ 44 ] );
219 6531 : in45 = GF_MUL_VAR( in45, pi[ 45 ] );
220 6531 : in46 = GF_MUL_VAR( in46, pi[ 46 ] );
221 6531 : in47 = GF_MUL_VAR( in47, pi[ 47 ] );
222 6531 : in48 = GF_MUL_VAR( in48, pi[ 48 ] );
223 6531 : in49 = GF_MUL_VAR( in49, pi[ 49 ] );
224 6531 : in50 = GF_MUL_VAR( in50, pi[ 50 ] );
225 6531 : in51 = GF_MUL_VAR( in51, pi[ 51 ] );
226 6531 : in52 = GF_MUL_VAR( in52, pi[ 52 ] );
227 6531 : in53 = GF_MUL_VAR( in53, pi[ 53 ] );
228 6531 : in54 = GF_MUL_VAR( in54, pi[ 54 ] );
229 6531 : in55 = GF_MUL_VAR( in55, pi[ 55 ] );
230 6531 : in56 = GF_MUL_VAR( in56, pi[ 56 ] );
231 6531 : in57 = GF_MUL_VAR( in57, pi[ 57 ] );
232 6531 : in58 = GF_MUL_VAR( in58, pi[ 58 ] );
233 6531 : in59 = GF_MUL_VAR( in59, pi[ 59 ] );
234 6531 : in60 = GF_MUL_VAR( in60, pi[ 60 ] );
235 6531 : in61 = GF_MUL_VAR( in61, pi[ 61 ] );
236 6531 : in62 = GF_MUL_VAR( in62, pi[ 62 ] );
237 6531 : in63 = GF_MUL_VAR( in63, pi[ 63 ] );
238 : /* There are a couple of cases we have to handle:
239 : - If i<shred_cnt and erased[ i ], it's an actual erasure, so we
240 : need to store the generated value.
241 : - If i<shred_cnt and _erased[ i ] but not erased[ i ], it was a
242 : value that we ignored to ensure the data lies on a
243 : polynomial of the right order, so we need to compare the
244 : value we generated to the one that was there.
245 : - If i<shred_cnt and !_erased[ i ], then this is a value we
246 : actually used in the computation, but we destroyed it, so we
247 : need to reload the actual value of the shred in order to use the
248 : IFFT in the next step.
249 : - If i>=shred_cnt, do nothing, which will keep the value of the
250 : shred if it existed in the variable. */
251 417984 : #define STORE_COMPARE_RELOAD( n, var ) do{ \
252 417984 : if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
253 417984 : else if( _erased[ n ] ) diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
254 211407 : else var = gf_ldu( shred[ n ] + shred_pos ); \
255 417984 : } while( 0 )
256 6531 : #define STORE_COMPARE( n, var ) do{ \
257 4638 : if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
258 4638 : else diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
259 4638 : } while( 0 )
/* Descending case labels with fallthrough: exactly shreds 0 ..
   min( shred_cnt, 64 )-1 get stored/compared/reloaded. */
260 6531 : switch( fd_ulong_min( shred_cnt, 64UL ) ) {
261 6531 : case 64UL: STORE_COMPARE_RELOAD( 63, in63 ); FALLTHRU
262 6531 : case 63UL: STORE_COMPARE_RELOAD( 62, in62 ); FALLTHRU
263 6531 : case 62UL: STORE_COMPARE_RELOAD( 61, in61 ); FALLTHRU
264 6531 : case 61UL: STORE_COMPARE_RELOAD( 60, in60 ); FALLTHRU
265 6531 : case 60UL: STORE_COMPARE_RELOAD( 59, in59 ); FALLTHRU
266 6531 : case 59UL: STORE_COMPARE_RELOAD( 58, in58 ); FALLTHRU
267 6531 : case 58UL: STORE_COMPARE_RELOAD( 57, in57 ); FALLTHRU
268 6531 : case 57UL: STORE_COMPARE_RELOAD( 56, in56 ); FALLTHRU
269 6531 : case 56UL: STORE_COMPARE_RELOAD( 55, in55 ); FALLTHRU
270 6531 : case 55UL: STORE_COMPARE_RELOAD( 54, in54 ); FALLTHRU
271 6531 : case 54UL: STORE_COMPARE_RELOAD( 53, in53 ); FALLTHRU
272 6531 : case 53UL: STORE_COMPARE_RELOAD( 52, in52 ); FALLTHRU
273 6531 : case 52UL: STORE_COMPARE_RELOAD( 51, in51 ); FALLTHRU
274 6531 : case 51UL: STORE_COMPARE_RELOAD( 50, in50 ); FALLTHRU
275 6531 : case 50UL: STORE_COMPARE_RELOAD( 49, in49 ); FALLTHRU
276 6531 : case 49UL: STORE_COMPARE_RELOAD( 48, in48 ); FALLTHRU
277 6531 : case 48UL: STORE_COMPARE_RELOAD( 47, in47 ); FALLTHRU
278 6531 : case 47UL: STORE_COMPARE_RELOAD( 46, in46 ); FALLTHRU
279 6531 : case 46UL: STORE_COMPARE_RELOAD( 45, in45 ); FALLTHRU
280 6531 : case 45UL: STORE_COMPARE_RELOAD( 44, in44 ); FALLTHRU
281 6531 : case 44UL: STORE_COMPARE_RELOAD( 43, in43 ); FALLTHRU
282 6531 : case 43UL: STORE_COMPARE_RELOAD( 42, in42 ); FALLTHRU
283 6531 : case 42UL: STORE_COMPARE_RELOAD( 41, in41 ); FALLTHRU
284 6531 : case 41UL: STORE_COMPARE_RELOAD( 40, in40 ); FALLTHRU
285 6531 : case 40UL: STORE_COMPARE_RELOAD( 39, in39 ); FALLTHRU
286 6531 : case 39UL: STORE_COMPARE_RELOAD( 38, in38 ); FALLTHRU
287 6531 : case 38UL: STORE_COMPARE_RELOAD( 37, in37 ); FALLTHRU
288 6531 : case 37UL: STORE_COMPARE_RELOAD( 36, in36 ); FALLTHRU
289 6531 : case 36UL: STORE_COMPARE_RELOAD( 35, in35 ); FALLTHRU
290 6531 : case 35UL: STORE_COMPARE_RELOAD( 34, in34 ); FALLTHRU
291 6531 : case 34UL: STORE_COMPARE_RELOAD( 33, in33 ); FALLTHRU
292 6531 : case 33UL: STORE_COMPARE_RELOAD( 32, in32 ); FALLTHRU
293 6531 : case 32UL: STORE_COMPARE_RELOAD( 31, in31 ); FALLTHRU
294 6531 : case 31UL: STORE_COMPARE_RELOAD( 30, in30 ); FALLTHRU
295 6531 : case 30UL: STORE_COMPARE_RELOAD( 29, in29 ); FALLTHRU
296 6531 : case 29UL: STORE_COMPARE_RELOAD( 28, in28 ); FALLTHRU
297 6531 : case 28UL: STORE_COMPARE_RELOAD( 27, in27 ); FALLTHRU
298 6531 : case 27UL: STORE_COMPARE_RELOAD( 26, in26 ); FALLTHRU
299 6531 : case 26UL: STORE_COMPARE_RELOAD( 25, in25 ); FALLTHRU
300 6531 : case 25UL: STORE_COMPARE_RELOAD( 24, in24 ); FALLTHRU
301 6531 : case 24UL: STORE_COMPARE_RELOAD( 23, in23 ); FALLTHRU
302 6531 : case 23UL: STORE_COMPARE_RELOAD( 22, in22 ); FALLTHRU
303 6531 : case 22UL: STORE_COMPARE_RELOAD( 21, in21 ); FALLTHRU
304 6531 : case 21UL: STORE_COMPARE_RELOAD( 20, in20 ); FALLTHRU
305 6531 : case 20UL: STORE_COMPARE_RELOAD( 19, in19 ); FALLTHRU
306 6531 : case 19UL: STORE_COMPARE_RELOAD( 18, in18 ); FALLTHRU
307 6531 : case 18UL: STORE_COMPARE_RELOAD( 17, in17 ); FALLTHRU
308 6531 : case 17UL: STORE_COMPARE_RELOAD( 16, in16 ); FALLTHRU
309 6531 : case 16UL: STORE_COMPARE_RELOAD( 15, in15 ); FALLTHRU
310 6531 : case 15UL: STORE_COMPARE_RELOAD( 14, in14 ); FALLTHRU
311 6531 : case 14UL: STORE_COMPARE_RELOAD( 13, in13 ); FALLTHRU
312 6531 : case 13UL: STORE_COMPARE_RELOAD( 12, in12 ); FALLTHRU
313 6531 : case 12UL: STORE_COMPARE_RELOAD( 11, in11 ); FALLTHRU
314 6531 : case 11UL: STORE_COMPARE_RELOAD( 10, in10 ); FALLTHRU
315 6531 : case 10UL: STORE_COMPARE_RELOAD( 9, in09 ); FALLTHRU
316 6531 : case 9UL: STORE_COMPARE_RELOAD( 8, in08 ); FALLTHRU
317 6531 : case 8UL: STORE_COMPARE_RELOAD( 7, in07 ); FALLTHRU
318 6531 : case 7UL: STORE_COMPARE_RELOAD( 6, in06 ); FALLTHRU
319 6531 : case 6UL: STORE_COMPARE_RELOAD( 5, in05 ); FALLTHRU
320 6531 : case 5UL: STORE_COMPARE_RELOAD( 4, in04 ); FALLTHRU
321 6531 : case 4UL: STORE_COMPARE_RELOAD( 3, in03 ); FALLTHRU
322 6531 : case 3UL: STORE_COMPARE_RELOAD( 2, in02 ); FALLTHRU
323 6531 : case 2UL: STORE_COMPARE_RELOAD( 1, in01 ); FALLTHRU
324 6531 : case 1UL: STORE_COMPARE_RELOAD( 0, in00 );
325 6531 : }
326 :
327 6531 : ulong shreds_remaining = shred_cnt-fd_ulong_min( shred_cnt, 64UL );
/* Shreds 64..127, if any: shift the evaluation domain by undoing the
   FFT at offset 0 and redoing it at offset 64, then check/store those
   positions. */
328 6531 : if( shreds_remaining>0UL ) {
329 357 : FD_REEDSOL_GENERATE_IFFT( 64, 0, ALL_VARS );
330 357 : FD_REEDSOL_GENERATE_FFT( 64, 64, ALL_VARS );
331 :
332 357 : switch( fd_ulong_min( shreds_remaining, 64UL ) ) {
333 0 : case 64UL: STORE_COMPARE( 127, in63 ); FALLTHRU
334 0 : case 63UL: STORE_COMPARE( 126, in62 ); FALLTHRU
335 0 : case 62UL: STORE_COMPARE( 125, in61 ); FALLTHRU
336 0 : case 61UL: STORE_COMPARE( 124, in60 ); FALLTHRU
337 0 : case 60UL: STORE_COMPARE( 123, in59 ); FALLTHRU
338 0 : case 59UL: STORE_COMPARE( 122, in58 ); FALLTHRU
339 0 : case 58UL: STORE_COMPARE( 121, in57 ); FALLTHRU
340 0 : case 57UL: STORE_COMPARE( 120, in56 ); FALLTHRU
341 0 : case 56UL: STORE_COMPARE( 119, in55 ); FALLTHRU
342 0 : case 55UL: STORE_COMPARE( 118, in54 ); FALLTHRU
343 0 : case 54UL: STORE_COMPARE( 117, in53 ); FALLTHRU
344 0 : case 53UL: STORE_COMPARE( 116, in52 ); FALLTHRU
345 0 : case 52UL: STORE_COMPARE( 115, in51 ); FALLTHRU
346 0 : case 51UL: STORE_COMPARE( 114, in50 ); FALLTHRU
347 0 : case 50UL: STORE_COMPARE( 113, in49 ); FALLTHRU
348 0 : case 49UL: STORE_COMPARE( 112, in48 ); FALLTHRU
349 0 : case 48UL: STORE_COMPARE( 111, in47 ); FALLTHRU
350 0 : case 47UL: STORE_COMPARE( 110, in46 ); FALLTHRU
351 0 : case 46UL: STORE_COMPARE( 109, in45 ); FALLTHRU
352 0 : case 45UL: STORE_COMPARE( 108, in44 ); FALLTHRU
353 0 : case 44UL: STORE_COMPARE( 107, in43 ); FALLTHRU
354 0 : case 43UL: STORE_COMPARE( 106, in42 ); FALLTHRU
355 0 : case 42UL: STORE_COMPARE( 105, in41 ); FALLTHRU
356 0 : case 41UL: STORE_COMPARE( 104, in40 ); FALLTHRU
357 0 : case 40UL: STORE_COMPARE( 103, in39 ); FALLTHRU
358 0 : case 39UL: STORE_COMPARE( 102, in38 ); FALLTHRU
359 0 : case 38UL: STORE_COMPARE( 101, in37 ); FALLTHRU
360 0 : case 37UL: STORE_COMPARE( 100, in36 ); FALLTHRU
361 0 : case 36UL: STORE_COMPARE( 99, in35 ); FALLTHRU
362 0 : case 35UL: STORE_COMPARE( 98, in34 ); FALLTHRU
363 0 : case 34UL: STORE_COMPARE( 97, in33 ); FALLTHRU
364 0 : case 33UL: STORE_COMPARE( 96, in32 ); FALLTHRU
365 96 : case 32UL: STORE_COMPARE( 95, in31 ); FALLTHRU
366 96 : case 31UL: STORE_COMPARE( 94, in30 ); FALLTHRU
367 96 : case 30UL: STORE_COMPARE( 93, in29 ); FALLTHRU
368 96 : case 29UL: STORE_COMPARE( 92, in28 ); FALLTHRU
369 96 : case 28UL: STORE_COMPARE( 91, in27 ); FALLTHRU
370 96 : case 27UL: STORE_COMPARE( 90, in26 ); FALLTHRU
371 96 : case 26UL: STORE_COMPARE( 89, in25 ); FALLTHRU
372 96 : case 25UL: STORE_COMPARE( 88, in24 ); FALLTHRU
373 96 : case 24UL: STORE_COMPARE( 87, in23 ); FALLTHRU
374 96 : case 23UL: STORE_COMPARE( 86, in22 ); FALLTHRU
375 96 : case 22UL: STORE_COMPARE( 85, in21 ); FALLTHRU
376 96 : case 21UL: STORE_COMPARE( 84, in20 ); FALLTHRU
377 96 : case 20UL: STORE_COMPARE( 83, in19 ); FALLTHRU
378 96 : case 19UL: STORE_COMPARE( 82, in18 ); FALLTHRU
379 96 : case 18UL: STORE_COMPARE( 81, in17 ); FALLTHRU
380 96 : case 17UL: STORE_COMPARE( 80, in16 ); FALLTHRU
381 96 : case 16UL: STORE_COMPARE( 79, in15 ); FALLTHRU
382 96 : case 15UL: STORE_COMPARE( 78, in14 ); FALLTHRU
383 96 : case 14UL: STORE_COMPARE( 77, in13 ); FALLTHRU
384 96 : case 13UL: STORE_COMPARE( 76, in12 ); FALLTHRU
385 96 : case 12UL: STORE_COMPARE( 75, in11 ); FALLTHRU
386 96 : case 11UL: STORE_COMPARE( 74, in10 ); FALLTHRU
387 96 : case 10UL: STORE_COMPARE( 73, in09 ); FALLTHRU
388 96 : case 9UL: STORE_COMPARE( 72, in08 ); FALLTHRU
389 96 : case 8UL: STORE_COMPARE( 71, in07 ); FALLTHRU
390 96 : case 7UL: STORE_COMPARE( 70, in06 ); FALLTHRU
391 357 : case 6UL: STORE_COMPARE( 69, in05 ); FALLTHRU
392 357 : case 5UL: STORE_COMPARE( 68, in04 ); FALLTHRU
393 357 : case 4UL: STORE_COMPARE( 67, in03 ); FALLTHRU
394 357 : case 3UL: STORE_COMPARE( 66, in02 ); FALLTHRU
395 357 : case 2UL: STORE_COMPARE( 65, in01 ); FALLTHRU
396 357 : case 1UL: STORE_COMPARE( 64, in00 );
397 357 : }
398 357 : shreds_remaining -= fd_ulong_min( shreds_remaining, 64UL );
399 357 : }
/* Shreds 128..134, if any: same domain-shift trick, offset 64 -> 128.
   This generated variant only emits cases 1..7 here, i.e. it supports
   at most 7 shreds in this final block. */
400 6531 : if( shreds_remaining>0UL ) {
401 0 : FD_REEDSOL_GENERATE_IFFT( 64, 64, ALL_VARS );
402 0 : FD_REEDSOL_GENERATE_FFT( 64, 128, ALL_VARS );
403 :
404 0 : switch( fd_ulong_min( shreds_remaining, 64UL ) ) {
405 0 : case 7UL: STORE_COMPARE( 134, in06 ); FALLTHRU
406 0 : case 6UL: STORE_COMPARE( 133, in05 ); FALLTHRU
407 0 : case 5UL: STORE_COMPARE( 132, in04 ); FALLTHRU
408 0 : case 4UL: STORE_COMPARE( 131, in03 ); FALLTHRU
409 0 : case 3UL: STORE_COMPARE( 130, in02 ); FALLTHRU
410 0 : case 2UL: STORE_COMPARE( 129, in01 ); FALLTHRU
411 0 : case 1UL: STORE_COMPARE( 128, in00 );
412 0 : }
413 0 : shreds_remaining -= fd_ulong_min( shreds_remaining, 64UL );
414 0 : }
/* diff accumulated the XOR (GF_ADD) of each recomputed value with the
   value already present, for every shred that was present but not used
   as a decode input; any nonzero byte means an inconsistent shred. */
415 6531 : if( FD_UNLIKELY( GF_ANY( diff ) ) ) return FD_REEDSOL_ERR_CORRUPT;
/* Advance by one vector width.  If that lands strictly inside the
   final partial chunk, clamp back so the last iteration covers exactly
   the trailing GF_WIDTH bytes (re-processing some overlap rather than
   reading past the end of the shreds). */
416 6531 : shred_pos += GF_WIDTH;
417 6531 : shred_pos = fd_ulong_if( ((shred_sz-GF_WIDTH)<shred_pos) & (shred_pos<shred_sz), shred_sz-GF_WIDTH, shred_pos );
418 6531 : }
419 210 : return FD_REEDSOL_SUCCESS;
420 210 : }
|