Line data Source code
1 : /* Note: This file is auto generated. */
2 : #include "fd_reedsol_ppt.h"
3 : #include "fd_reedsol_fderiv.h"
4 :
 : /* NOTE(review): this is a coverage-annotated (lcov-style) listing of an
 :    auto-generated source file.  The left-hand columns are the original
 :    source line numbers and execution hit counts, not part of the C code.
 :    Any functional change should be made in the generator, not here. */
 : /* Recover missing shreds in place for the variant covering
 :    data_shred_cnt in (32,64].  On success, every shred[ i ] with
 :    erased[ i ]!=0 and i<shred_cnt has been regenerated.  Returns
 :    FD_REEDSOL_SUCCESS on success, FD_REEDSOL_ERR_PARTIAL if fewer than
 :    data_shred_cnt un-erased shreds are available, or
 :    FD_REEDSOL_ERR_CORRUPT if any received shred is inconsistent with
 :    the values regenerated from the chosen data-shred set. */
5 : FD_FN_UNSANITIZED int
6 : fd_reedsol_private_recover_var_64( ulong shred_sz,
7 : uchar * const * shred,
8 : ulong data_shred_cnt,
9 : ulong parity_shred_cnt,
10 180 : uchar const * erased ) {
11 180 : uchar _erased[ 64 ] W_ATTR;
12 180 : uchar pi[ 64 ] W_ATTR;
13 180 : ulong shred_cnt = data_shred_cnt + parity_shred_cnt;
14 180 : ulong loaded_cnt = 0UL;
 : /* Select exactly the first data_shred_cnt un-erased shreds as the
 :    input set.  Every other position (true erasures, surplus un-erased
 :    shreds, and indices >= shred_cnt) is marked erased in _erased so
 :    the math below regenerates a value there. */
15 11700 : for( ulong i=0UL; i<64UL; i++) {
16 11520 : int load_shred = ((i<shred_cnt)&(loaded_cnt<data_shred_cnt))&&( erased[ i ]==0 );
17 11520 : _erased[ i ] = !load_shred;
18 11520 : loaded_cnt += (ulong)load_shred;
19 11520 : }
20 180 : if( FD_UNLIKELY( loaded_cnt<data_shred_cnt ) ) return FD_REEDSOL_ERR_PARTIAL;
21 :
 : /* Derive the per-position Pi scaling coefficients for this erasure
 :    pattern (computed once, reused for every GF_WIDTH-byte chunk; see
 :    fd_reedsol_ppt.h / fd_reedsol_fderiv.h for the underlying
 :    formal-derivative based decoding algorithm). */
22 180 : fd_reedsol_private_gen_pi_64( _erased, pi );
23 :
24 : /* Store the difference for each shred that was regenerated. This
25 : must be 0. Otherwise there's a corrupt shred. */
26 180 : gf_t diff = gf_zero();
27 :
 : /* Process the shreds one GF_WIDTH-byte vector at a time. */
28 5724 : for( ulong shred_pos=0UL; shred_pos<shred_sz; /* advanced manually at end of loop */ ) {
29 : /* Load exactly data_shred_cnt un-erased input shreds into
30 : their respective vector. Fill the erased vectors with 0. */
31 5544 : gf_t in00 = _erased[ 0 ] ? gf_zero() : gf_ldu( shred[ 0 ] + shred_pos );
32 5544 : gf_t in01 = _erased[ 1 ] ? gf_zero() : gf_ldu( shred[ 1 ] + shred_pos );
33 5544 : gf_t in02 = _erased[ 2 ] ? gf_zero() : gf_ldu( shred[ 2 ] + shred_pos );
34 5544 : gf_t in03 = _erased[ 3 ] ? gf_zero() : gf_ldu( shred[ 3 ] + shred_pos );
35 5544 : gf_t in04 = _erased[ 4 ] ? gf_zero() : gf_ldu( shred[ 4 ] + shred_pos );
36 5544 : gf_t in05 = _erased[ 5 ] ? gf_zero() : gf_ldu( shred[ 5 ] + shred_pos );
37 5544 : gf_t in06 = _erased[ 6 ] ? gf_zero() : gf_ldu( shred[ 6 ] + shred_pos );
38 5544 : gf_t in07 = _erased[ 7 ] ? gf_zero() : gf_ldu( shred[ 7 ] + shred_pos );
39 5544 : gf_t in08 = _erased[ 8 ] ? gf_zero() : gf_ldu( shred[ 8 ] + shred_pos );
40 5544 : gf_t in09 = _erased[ 9 ] ? gf_zero() : gf_ldu( shred[ 9 ] + shred_pos );
41 5544 : gf_t in10 = _erased[ 10 ] ? gf_zero() : gf_ldu( shred[ 10 ] + shred_pos );
42 5544 : gf_t in11 = _erased[ 11 ] ? gf_zero() : gf_ldu( shred[ 11 ] + shred_pos );
43 5544 : gf_t in12 = _erased[ 12 ] ? gf_zero() : gf_ldu( shred[ 12 ] + shred_pos );
44 5544 : gf_t in13 = _erased[ 13 ] ? gf_zero() : gf_ldu( shred[ 13 ] + shred_pos );
45 5544 : gf_t in14 = _erased[ 14 ] ? gf_zero() : gf_ldu( shred[ 14 ] + shred_pos );
46 5544 : gf_t in15 = _erased[ 15 ] ? gf_zero() : gf_ldu( shred[ 15 ] + shred_pos );
47 5544 : gf_t in16 = _erased[ 16 ] ? gf_zero() : gf_ldu( shred[ 16 ] + shred_pos );
48 5544 : gf_t in17 = _erased[ 17 ] ? gf_zero() : gf_ldu( shred[ 17 ] + shred_pos );
49 5544 : gf_t in18 = _erased[ 18 ] ? gf_zero() : gf_ldu( shred[ 18 ] + shred_pos );
50 5544 : gf_t in19 = _erased[ 19 ] ? gf_zero() : gf_ldu( shred[ 19 ] + shred_pos );
51 5544 : gf_t in20 = _erased[ 20 ] ? gf_zero() : gf_ldu( shred[ 20 ] + shred_pos );
52 5544 : gf_t in21 = _erased[ 21 ] ? gf_zero() : gf_ldu( shred[ 21 ] + shred_pos );
53 5544 : gf_t in22 = _erased[ 22 ] ? gf_zero() : gf_ldu( shred[ 22 ] + shred_pos );
54 5544 : gf_t in23 = _erased[ 23 ] ? gf_zero() : gf_ldu( shred[ 23 ] + shred_pos );
55 5544 : gf_t in24 = _erased[ 24 ] ? gf_zero() : gf_ldu( shred[ 24 ] + shred_pos );
56 5544 : gf_t in25 = _erased[ 25 ] ? gf_zero() : gf_ldu( shred[ 25 ] + shred_pos );
57 5544 : gf_t in26 = _erased[ 26 ] ? gf_zero() : gf_ldu( shred[ 26 ] + shred_pos );
58 5544 : gf_t in27 = _erased[ 27 ] ? gf_zero() : gf_ldu( shred[ 27 ] + shred_pos );
59 5544 : gf_t in28 = _erased[ 28 ] ? gf_zero() : gf_ldu( shred[ 28 ] + shred_pos );
60 5544 : gf_t in29 = _erased[ 29 ] ? gf_zero() : gf_ldu( shred[ 29 ] + shred_pos );
61 5544 : gf_t in30 = _erased[ 30 ] ? gf_zero() : gf_ldu( shred[ 30 ] + shred_pos );
62 5544 : gf_t in31 = _erased[ 31 ] ? gf_zero() : gf_ldu( shred[ 31 ] + shred_pos );
63 5544 : gf_t in32 = _erased[ 32 ] ? gf_zero() : gf_ldu( shred[ 32 ] + shred_pos );
64 5544 : gf_t in33 = _erased[ 33 ] ? gf_zero() : gf_ldu( shred[ 33 ] + shred_pos );
65 5544 : gf_t in34 = _erased[ 34 ] ? gf_zero() : gf_ldu( shred[ 34 ] + shred_pos );
66 5544 : gf_t in35 = _erased[ 35 ] ? gf_zero() : gf_ldu( shred[ 35 ] + shred_pos );
67 5544 : gf_t in36 = _erased[ 36 ] ? gf_zero() : gf_ldu( shred[ 36 ] + shred_pos );
68 5544 : gf_t in37 = _erased[ 37 ] ? gf_zero() : gf_ldu( shred[ 37 ] + shred_pos );
69 5544 : gf_t in38 = _erased[ 38 ] ? gf_zero() : gf_ldu( shred[ 38 ] + shred_pos );
70 5544 : gf_t in39 = _erased[ 39 ] ? gf_zero() : gf_ldu( shred[ 39 ] + shred_pos );
71 5544 : gf_t in40 = _erased[ 40 ] ? gf_zero() : gf_ldu( shred[ 40 ] + shred_pos );
72 5544 : gf_t in41 = _erased[ 41 ] ? gf_zero() : gf_ldu( shred[ 41 ] + shred_pos );
73 5544 : gf_t in42 = _erased[ 42 ] ? gf_zero() : gf_ldu( shred[ 42 ] + shred_pos );
74 5544 : gf_t in43 = _erased[ 43 ] ? gf_zero() : gf_ldu( shred[ 43 ] + shred_pos );
75 5544 : gf_t in44 = _erased[ 44 ] ? gf_zero() : gf_ldu( shred[ 44 ] + shred_pos );
76 5544 : gf_t in45 = _erased[ 45 ] ? gf_zero() : gf_ldu( shred[ 45 ] + shred_pos );
77 5544 : gf_t in46 = _erased[ 46 ] ? gf_zero() : gf_ldu( shred[ 46 ] + shred_pos );
78 5544 : gf_t in47 = _erased[ 47 ] ? gf_zero() : gf_ldu( shred[ 47 ] + shred_pos );
79 5544 : gf_t in48 = _erased[ 48 ] ? gf_zero() : gf_ldu( shred[ 48 ] + shred_pos );
80 5544 : gf_t in49 = _erased[ 49 ] ? gf_zero() : gf_ldu( shred[ 49 ] + shred_pos );
81 5544 : gf_t in50 = _erased[ 50 ] ? gf_zero() : gf_ldu( shred[ 50 ] + shred_pos );
82 5544 : gf_t in51 = _erased[ 51 ] ? gf_zero() : gf_ldu( shred[ 51 ] + shred_pos );
83 5544 : gf_t in52 = _erased[ 52 ] ? gf_zero() : gf_ldu( shred[ 52 ] + shred_pos );
84 5544 : gf_t in53 = _erased[ 53 ] ? gf_zero() : gf_ldu( shred[ 53 ] + shred_pos );
85 5544 : gf_t in54 = _erased[ 54 ] ? gf_zero() : gf_ldu( shred[ 54 ] + shred_pos );
86 5544 : gf_t in55 = _erased[ 55 ] ? gf_zero() : gf_ldu( shred[ 55 ] + shred_pos );
87 5544 : gf_t in56 = _erased[ 56 ] ? gf_zero() : gf_ldu( shred[ 56 ] + shred_pos );
88 5544 : gf_t in57 = _erased[ 57 ] ? gf_zero() : gf_ldu( shred[ 57 ] + shred_pos );
89 5544 : gf_t in58 = _erased[ 58 ] ? gf_zero() : gf_ldu( shred[ 58 ] + shred_pos );
90 5544 : gf_t in59 = _erased[ 59 ] ? gf_zero() : gf_ldu( shred[ 59 ] + shred_pos );
91 5544 : gf_t in60 = _erased[ 60 ] ? gf_zero() : gf_ldu( shred[ 60 ] + shred_pos );
92 5544 : gf_t in61 = _erased[ 61 ] ? gf_zero() : gf_ldu( shred[ 61 ] + shred_pos );
93 5544 : gf_t in62 = _erased[ 62 ] ? gf_zero() : gf_ldu( shred[ 62 ] + shred_pos );
94 5544 : gf_t in63 = _erased[ 63 ] ? gf_zero() : gf_ldu( shred[ 63 ] + shred_pos );
95 : /* Technically, we only need to multiply the non-erased ones, since
96 : the erased ones are 0, but we know at least half of them are
97 : non-erased, and the branch is going to be just as costly as the
98 : multiply. */
99 5544 : in00 = GF_MUL_VAR( in00, pi[ 0 ] );
100 5544 : in01 = GF_MUL_VAR( in01, pi[ 1 ] );
101 5544 : in02 = GF_MUL_VAR( in02, pi[ 2 ] );
102 5544 : in03 = GF_MUL_VAR( in03, pi[ 3 ] );
103 5544 : in04 = GF_MUL_VAR( in04, pi[ 4 ] );
104 5544 : in05 = GF_MUL_VAR( in05, pi[ 5 ] );
105 5544 : in06 = GF_MUL_VAR( in06, pi[ 6 ] );
106 5544 : in07 = GF_MUL_VAR( in07, pi[ 7 ] );
107 5544 : in08 = GF_MUL_VAR( in08, pi[ 8 ] );
108 5544 : in09 = GF_MUL_VAR( in09, pi[ 9 ] );
109 5544 : in10 = GF_MUL_VAR( in10, pi[ 10 ] );
110 5544 : in11 = GF_MUL_VAR( in11, pi[ 11 ] );
111 5544 : in12 = GF_MUL_VAR( in12, pi[ 12 ] );
112 5544 : in13 = GF_MUL_VAR( in13, pi[ 13 ] );
113 5544 : in14 = GF_MUL_VAR( in14, pi[ 14 ] );
114 5544 : in15 = GF_MUL_VAR( in15, pi[ 15 ] );
115 5544 : in16 = GF_MUL_VAR( in16, pi[ 16 ] );
116 5544 : in17 = GF_MUL_VAR( in17, pi[ 17 ] );
117 5544 : in18 = GF_MUL_VAR( in18, pi[ 18 ] );
118 5544 : in19 = GF_MUL_VAR( in19, pi[ 19 ] );
119 5544 : in20 = GF_MUL_VAR( in20, pi[ 20 ] );
120 5544 : in21 = GF_MUL_VAR( in21, pi[ 21 ] );
121 5544 : in22 = GF_MUL_VAR( in22, pi[ 22 ] );
122 5544 : in23 = GF_MUL_VAR( in23, pi[ 23 ] );
123 5544 : in24 = GF_MUL_VAR( in24, pi[ 24 ] );
124 5544 : in25 = GF_MUL_VAR( in25, pi[ 25 ] );
125 5544 : in26 = GF_MUL_VAR( in26, pi[ 26 ] );
126 5544 : in27 = GF_MUL_VAR( in27, pi[ 27 ] );
127 5544 : in28 = GF_MUL_VAR( in28, pi[ 28 ] );
128 5544 : in29 = GF_MUL_VAR( in29, pi[ 29 ] );
129 5544 : in30 = GF_MUL_VAR( in30, pi[ 30 ] );
130 5544 : in31 = GF_MUL_VAR( in31, pi[ 31 ] );
131 5544 : in32 = GF_MUL_VAR( in32, pi[ 32 ] );
132 5544 : in33 = GF_MUL_VAR( in33, pi[ 33 ] );
133 5544 : in34 = GF_MUL_VAR( in34, pi[ 34 ] );
134 5544 : in35 = GF_MUL_VAR( in35, pi[ 35 ] );
135 5544 : in36 = GF_MUL_VAR( in36, pi[ 36 ] );
136 5544 : in37 = GF_MUL_VAR( in37, pi[ 37 ] );
137 5544 : in38 = GF_MUL_VAR( in38, pi[ 38 ] );
138 5544 : in39 = GF_MUL_VAR( in39, pi[ 39 ] );
139 5544 : in40 = GF_MUL_VAR( in40, pi[ 40 ] );
140 5544 : in41 = GF_MUL_VAR( in41, pi[ 41 ] );
141 5544 : in42 = GF_MUL_VAR( in42, pi[ 42 ] );
142 5544 : in43 = GF_MUL_VAR( in43, pi[ 43 ] );
143 5544 : in44 = GF_MUL_VAR( in44, pi[ 44 ] );
144 5544 : in45 = GF_MUL_VAR( in45, pi[ 45 ] );
145 5544 : in46 = GF_MUL_VAR( in46, pi[ 46 ] );
146 5544 : in47 = GF_MUL_VAR( in47, pi[ 47 ] );
147 5544 : in48 = GF_MUL_VAR( in48, pi[ 48 ] );
148 5544 : in49 = GF_MUL_VAR( in49, pi[ 49 ] );
149 5544 : in50 = GF_MUL_VAR( in50, pi[ 50 ] );
150 5544 : in51 = GF_MUL_VAR( in51, pi[ 51 ] );
151 5544 : in52 = GF_MUL_VAR( in52, pi[ 52 ] );
152 5544 : in53 = GF_MUL_VAR( in53, pi[ 53 ] );
153 5544 : in54 = GF_MUL_VAR( in54, pi[ 54 ] );
154 5544 : in55 = GF_MUL_VAR( in55, pi[ 55 ] );
155 5544 : in56 = GF_MUL_VAR( in56, pi[ 56 ] );
156 5544 : in57 = GF_MUL_VAR( in57, pi[ 57 ] );
157 5544 : in58 = GF_MUL_VAR( in58, pi[ 58 ] );
158 5544 : in59 = GF_MUL_VAR( in59, pi[ 59 ] );
159 5544 : in60 = GF_MUL_VAR( in60, pi[ 60 ] );
160 5544 : in61 = GF_MUL_VAR( in61, pi[ 61 ] );
161 5544 : in62 = GF_MUL_VAR( in62, pi[ 62 ] );
162 5544 : in63 = GF_MUL_VAR( in63, pi[ 63 ] );
163 5544 : #define ALL_VARS in00, in01, in02, in03, in04, in05, in06, in07, in08, in09, in10, in11, in12, in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, in24, in25, in26, in27, in28, in29, in30, in31, in32, in33, in34, in35, in36, in37, in38, in39, in40, in41, in42, in43, in44, in45, in46, in47, in48, in49, in50, in51, in52, in53, in54, in55, in56, in57, in58, in59, in60, in61, in62, in63
164 :
 : /* Core transform over the 64-point domain: inverse FFT, then the
 :    formal derivative, then forward FFT (macros expand to the fully
 :    unrolled butterflies over the in00..in63 variables). */
165 5544 : FD_REEDSOL_GENERATE_IFFT( 64, 0, ALL_VARS );
166 :
167 5544 : FD_REEDSOL_GENERATE_FDERIV( 64, ALL_VARS );
168 :
169 5544 : FD_REEDSOL_GENERATE_FFT( 64, 0, ALL_VARS );
170 :
171 : /* Again, we only need to multiply the erased ones, since we don't
172 : use the value of the non-erased ones anymore, but I'll take
173 : multiplies over branches most days. */
174 5544 : in00 = GF_MUL_VAR( in00, pi[ 0 ] );
175 5544 : in01 = GF_MUL_VAR( in01, pi[ 1 ] );
176 5544 : in02 = GF_MUL_VAR( in02, pi[ 2 ] );
177 5544 : in03 = GF_MUL_VAR( in03, pi[ 3 ] );
178 5544 : in04 = GF_MUL_VAR( in04, pi[ 4 ] );
179 5544 : in05 = GF_MUL_VAR( in05, pi[ 5 ] );
180 5544 : in06 = GF_MUL_VAR( in06, pi[ 6 ] );
181 5544 : in07 = GF_MUL_VAR( in07, pi[ 7 ] );
182 5544 : in08 = GF_MUL_VAR( in08, pi[ 8 ] );
183 5544 : in09 = GF_MUL_VAR( in09, pi[ 9 ] );
184 5544 : in10 = GF_MUL_VAR( in10, pi[ 10 ] );
185 5544 : in11 = GF_MUL_VAR( in11, pi[ 11 ] );
186 5544 : in12 = GF_MUL_VAR( in12, pi[ 12 ] );
187 5544 : in13 = GF_MUL_VAR( in13, pi[ 13 ] );
188 5544 : in14 = GF_MUL_VAR( in14, pi[ 14 ] );
189 5544 : in15 = GF_MUL_VAR( in15, pi[ 15 ] );
190 5544 : in16 = GF_MUL_VAR( in16, pi[ 16 ] );
191 5544 : in17 = GF_MUL_VAR( in17, pi[ 17 ] );
192 5544 : in18 = GF_MUL_VAR( in18, pi[ 18 ] );
193 5544 : in19 = GF_MUL_VAR( in19, pi[ 19 ] );
194 5544 : in20 = GF_MUL_VAR( in20, pi[ 20 ] );
195 5544 : in21 = GF_MUL_VAR( in21, pi[ 21 ] );
196 5544 : in22 = GF_MUL_VAR( in22, pi[ 22 ] );
197 5544 : in23 = GF_MUL_VAR( in23, pi[ 23 ] );
198 5544 : in24 = GF_MUL_VAR( in24, pi[ 24 ] );
199 5544 : in25 = GF_MUL_VAR( in25, pi[ 25 ] );
200 5544 : in26 = GF_MUL_VAR( in26, pi[ 26 ] );
201 5544 : in27 = GF_MUL_VAR( in27, pi[ 27 ] );
202 5544 : in28 = GF_MUL_VAR( in28, pi[ 28 ] );
203 5544 : in29 = GF_MUL_VAR( in29, pi[ 29 ] );
204 5544 : in30 = GF_MUL_VAR( in30, pi[ 30 ] );
205 5544 : in31 = GF_MUL_VAR( in31, pi[ 31 ] );
206 5544 : in32 = GF_MUL_VAR( in32, pi[ 32 ] );
207 5544 : in33 = GF_MUL_VAR( in33, pi[ 33 ] );
208 5544 : in34 = GF_MUL_VAR( in34, pi[ 34 ] );
209 5544 : in35 = GF_MUL_VAR( in35, pi[ 35 ] );
210 5544 : in36 = GF_MUL_VAR( in36, pi[ 36 ] );
211 5544 : in37 = GF_MUL_VAR( in37, pi[ 37 ] );
212 5544 : in38 = GF_MUL_VAR( in38, pi[ 38 ] );
213 5544 : in39 = GF_MUL_VAR( in39, pi[ 39 ] );
214 5544 : in40 = GF_MUL_VAR( in40, pi[ 40 ] );
215 5544 : in41 = GF_MUL_VAR( in41, pi[ 41 ] );
216 5544 : in42 = GF_MUL_VAR( in42, pi[ 42 ] );
217 5544 : in43 = GF_MUL_VAR( in43, pi[ 43 ] );
218 5544 : in44 = GF_MUL_VAR( in44, pi[ 44 ] );
219 5544 : in45 = GF_MUL_VAR( in45, pi[ 45 ] );
220 5544 : in46 = GF_MUL_VAR( in46, pi[ 46 ] );
221 5544 : in47 = GF_MUL_VAR( in47, pi[ 47 ] );
222 5544 : in48 = GF_MUL_VAR( in48, pi[ 48 ] );
223 5544 : in49 = GF_MUL_VAR( in49, pi[ 49 ] );
224 5544 : in50 = GF_MUL_VAR( in50, pi[ 50 ] );
225 5544 : in51 = GF_MUL_VAR( in51, pi[ 51 ] );
226 5544 : in52 = GF_MUL_VAR( in52, pi[ 52 ] );
227 5544 : in53 = GF_MUL_VAR( in53, pi[ 53 ] );
228 5544 : in54 = GF_MUL_VAR( in54, pi[ 54 ] );
229 5544 : in55 = GF_MUL_VAR( in55, pi[ 55 ] );
230 5544 : in56 = GF_MUL_VAR( in56, pi[ 56 ] );
231 5544 : in57 = GF_MUL_VAR( in57, pi[ 57 ] );
232 5544 : in58 = GF_MUL_VAR( in58, pi[ 58 ] );
233 5544 : in59 = GF_MUL_VAR( in59, pi[ 59 ] );
234 5544 : in60 = GF_MUL_VAR( in60, pi[ 60 ] );
235 5544 : in61 = GF_MUL_VAR( in61, pi[ 61 ] );
236 5544 : in62 = GF_MUL_VAR( in62, pi[ 62 ] );
237 5544 : in63 = GF_MUL_VAR( in63, pi[ 63 ] );
238 : /* There are a couple of cases we have to handle:
239 : - If i<shred_cnt and erased[ i ], it's an actual erasure, so we
240 : need to store the generated value.
241 : - If i<shred_cnt and _erased[ i ] but not erased[ i ], it was a
242 : value that we ignored to ensure the data lies on a
243 : polynomial of the right order, so we need to compare the
244 : value we generated to the one that was there.
245 : - If i<shred_cnt and !_erased[ i ], then this is a value we
246 : actually used in the computation, but we destroyed it, so we
247 : need to reload the actual value of the shred in order to use the
248 : IFFT in the next step.
249 : - If i>=shred_cnt, do nothing, which will keep the value of the
250 : shred if it existed in the variable. */
251 354816 : #define STORE_COMPARE_RELOAD( n, var ) do{ \
252 354816 : if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
253 354816 : else if( _erased[ n ] ) diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
254 177408 : else var = gf_ldu( shred[ n ] + shred_pos ); \
255 354816 : } while( 0 )
256 5544 : #define STORE_COMPARE( n, var ) do{ \
257 0 : if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
258 0 : else diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
259 0 : } while( 0 )
 : /* Deliberate fallthrough ladder (Duff's-device style): entering at
 :    case shred_cnt handles exactly positions [0,shred_cnt) without a
 :    per-position branch on the count. */
260 5544 : switch( fd_ulong_min( shred_cnt, 64UL ) ) {
261 5544 : case 64UL: STORE_COMPARE_RELOAD( 63, in63 ); FALLTHRU
262 5544 : case 63UL: STORE_COMPARE_RELOAD( 62, in62 ); FALLTHRU
263 5544 : case 62UL: STORE_COMPARE_RELOAD( 61, in61 ); FALLTHRU
264 5544 : case 61UL: STORE_COMPARE_RELOAD( 60, in60 ); FALLTHRU
265 5544 : case 60UL: STORE_COMPARE_RELOAD( 59, in59 ); FALLTHRU
266 5544 : case 59UL: STORE_COMPARE_RELOAD( 58, in58 ); FALLTHRU
267 5544 : case 58UL: STORE_COMPARE_RELOAD( 57, in57 ); FALLTHRU
268 5544 : case 57UL: STORE_COMPARE_RELOAD( 56, in56 ); FALLTHRU
269 5544 : case 56UL: STORE_COMPARE_RELOAD( 55, in55 ); FALLTHRU
270 5544 : case 55UL: STORE_COMPARE_RELOAD( 54, in54 ); FALLTHRU
271 5544 : case 54UL: STORE_COMPARE_RELOAD( 53, in53 ); FALLTHRU
272 5544 : case 53UL: STORE_COMPARE_RELOAD( 52, in52 ); FALLTHRU
273 5544 : case 52UL: STORE_COMPARE_RELOAD( 51, in51 ); FALLTHRU
274 5544 : case 51UL: STORE_COMPARE_RELOAD( 50, in50 ); FALLTHRU
275 5544 : case 50UL: STORE_COMPARE_RELOAD( 49, in49 ); FALLTHRU
276 5544 : case 49UL: STORE_COMPARE_RELOAD( 48, in48 ); FALLTHRU
277 5544 : case 48UL: STORE_COMPARE_RELOAD( 47, in47 ); FALLTHRU
278 5544 : case 47UL: STORE_COMPARE_RELOAD( 46, in46 ); FALLTHRU
279 5544 : case 46UL: STORE_COMPARE_RELOAD( 45, in45 ); FALLTHRU
280 5544 : case 45UL: STORE_COMPARE_RELOAD( 44, in44 ); FALLTHRU
281 5544 : case 44UL: STORE_COMPARE_RELOAD( 43, in43 ); FALLTHRU
282 5544 : case 43UL: STORE_COMPARE_RELOAD( 42, in42 ); FALLTHRU
283 5544 : case 42UL: STORE_COMPARE_RELOAD( 41, in41 ); FALLTHRU
284 5544 : case 41UL: STORE_COMPARE_RELOAD( 40, in40 ); FALLTHRU
285 5544 : case 40UL: STORE_COMPARE_RELOAD( 39, in39 ); FALLTHRU
286 5544 : case 39UL: STORE_COMPARE_RELOAD( 38, in38 ); FALLTHRU
287 5544 : case 38UL: STORE_COMPARE_RELOAD( 37, in37 ); FALLTHRU
288 5544 : case 37UL: STORE_COMPARE_RELOAD( 36, in36 ); FALLTHRU
289 5544 : case 36UL: STORE_COMPARE_RELOAD( 35, in35 ); FALLTHRU
290 5544 : case 35UL: STORE_COMPARE_RELOAD( 34, in34 ); FALLTHRU
291 5544 : case 34UL: STORE_COMPARE_RELOAD( 33, in33 ); FALLTHRU
292 5544 : case 33UL: STORE_COMPARE_RELOAD( 32, in32 ); FALLTHRU
293 5544 : case 32UL: STORE_COMPARE_RELOAD( 31, in31 ); FALLTHRU
294 5544 : case 31UL: STORE_COMPARE_RELOAD( 30, in30 ); FALLTHRU
295 5544 : case 30UL: STORE_COMPARE_RELOAD( 29, in29 ); FALLTHRU
296 5544 : case 29UL: STORE_COMPARE_RELOAD( 28, in28 ); FALLTHRU
297 5544 : case 28UL: STORE_COMPARE_RELOAD( 27, in27 ); FALLTHRU
298 5544 : case 27UL: STORE_COMPARE_RELOAD( 26, in26 ); FALLTHRU
299 5544 : case 26UL: STORE_COMPARE_RELOAD( 25, in25 ); FALLTHRU
300 5544 : case 25UL: STORE_COMPARE_RELOAD( 24, in24 ); FALLTHRU
301 5544 : case 24UL: STORE_COMPARE_RELOAD( 23, in23 ); FALLTHRU
302 5544 : case 23UL: STORE_COMPARE_RELOAD( 22, in22 ); FALLTHRU
303 5544 : case 22UL: STORE_COMPARE_RELOAD( 21, in21 ); FALLTHRU
304 5544 : case 21UL: STORE_COMPARE_RELOAD( 20, in20 ); FALLTHRU
305 5544 : case 20UL: STORE_COMPARE_RELOAD( 19, in19 ); FALLTHRU
306 5544 : case 19UL: STORE_COMPARE_RELOAD( 18, in18 ); FALLTHRU
307 5544 : case 18UL: STORE_COMPARE_RELOAD( 17, in17 ); FALLTHRU
308 5544 : case 17UL: STORE_COMPARE_RELOAD( 16, in16 ); FALLTHRU
309 5544 : case 16UL: STORE_COMPARE_RELOAD( 15, in15 ); FALLTHRU
310 5544 : case 15UL: STORE_COMPARE_RELOAD( 14, in14 ); FALLTHRU
311 5544 : case 14UL: STORE_COMPARE_RELOAD( 13, in13 ); FALLTHRU
312 5544 : case 13UL: STORE_COMPARE_RELOAD( 12, in12 ); FALLTHRU
313 5544 : case 12UL: STORE_COMPARE_RELOAD( 11, in11 ); FALLTHRU
314 5544 : case 11UL: STORE_COMPARE_RELOAD( 10, in10 ); FALLTHRU
315 5544 : case 10UL: STORE_COMPARE_RELOAD( 9, in09 ); FALLTHRU
316 5544 : case 9UL: STORE_COMPARE_RELOAD( 8, in08 ); FALLTHRU
317 5544 : case 8UL: STORE_COMPARE_RELOAD( 7, in07 ); FALLTHRU
318 5544 : case 7UL: STORE_COMPARE_RELOAD( 6, in06 ); FALLTHRU
319 5544 : case 6UL: STORE_COMPARE_RELOAD( 5, in05 ); FALLTHRU
320 5544 : case 5UL: STORE_COMPARE_RELOAD( 4, in04 ); FALLTHRU
321 5544 : case 4UL: STORE_COMPARE_RELOAD( 3, in03 ); FALLTHRU
322 5544 : case 3UL: STORE_COMPARE_RELOAD( 2, in02 ); FALLTHRU
323 5544 : case 2UL: STORE_COMPARE_RELOAD( 1, in01 ); FALLTHRU
324 5544 : case 1UL: STORE_COMPARE_RELOAD( 0, in00 );
325 5544 : }
326 :
 : /* If shred_cnt>64, extend the codeword to evaluation points
 :    [64,128) (IFFT back to coefficients, FFT shifted by 64) and
 :    store/check the shreds that live there.  Hit counts show this
 :    path was never exercised in this coverage run. */
327 5544 : ulong shreds_remaining = shred_cnt-fd_ulong_min( shred_cnt, 64UL );
328 5544 : if( shreds_remaining>0UL ) {
329 0 : FD_REEDSOL_GENERATE_IFFT( 64, 0, ALL_VARS );
330 0 : FD_REEDSOL_GENERATE_FFT( 64, 64, ALL_VARS );
331 :
332 0 : switch( fd_ulong_min( shreds_remaining, 64UL ) ) {
333 0 : case 64UL: STORE_COMPARE( 127, in63 ); FALLTHRU
334 0 : case 63UL: STORE_COMPARE( 126, in62 ); FALLTHRU
335 0 : case 62UL: STORE_COMPARE( 125, in61 ); FALLTHRU
336 0 : case 61UL: STORE_COMPARE( 124, in60 ); FALLTHRU
337 0 : case 60UL: STORE_COMPARE( 123, in59 ); FALLTHRU
338 0 : case 59UL: STORE_COMPARE( 122, in58 ); FALLTHRU
339 0 : case 58UL: STORE_COMPARE( 121, in57 ); FALLTHRU
340 0 : case 57UL: STORE_COMPARE( 120, in56 ); FALLTHRU
341 0 : case 56UL: STORE_COMPARE( 119, in55 ); FALLTHRU
342 0 : case 55UL: STORE_COMPARE( 118, in54 ); FALLTHRU
343 0 : case 54UL: STORE_COMPARE( 117, in53 ); FALLTHRU
344 0 : case 53UL: STORE_COMPARE( 116, in52 ); FALLTHRU
345 0 : case 52UL: STORE_COMPARE( 115, in51 ); FALLTHRU
346 0 : case 51UL: STORE_COMPARE( 114, in50 ); FALLTHRU
347 0 : case 50UL: STORE_COMPARE( 113, in49 ); FALLTHRU
348 0 : case 49UL: STORE_COMPARE( 112, in48 ); FALLTHRU
349 0 : case 48UL: STORE_COMPARE( 111, in47 ); FALLTHRU
350 0 : case 47UL: STORE_COMPARE( 110, in46 ); FALLTHRU
351 0 : case 46UL: STORE_COMPARE( 109, in45 ); FALLTHRU
352 0 : case 45UL: STORE_COMPARE( 108, in44 ); FALLTHRU
353 0 : case 44UL: STORE_COMPARE( 107, in43 ); FALLTHRU
354 0 : case 43UL: STORE_COMPARE( 106, in42 ); FALLTHRU
355 0 : case 42UL: STORE_COMPARE( 105, in41 ); FALLTHRU
356 0 : case 41UL: STORE_COMPARE( 104, in40 ); FALLTHRU
357 0 : case 40UL: STORE_COMPARE( 103, in39 ); FALLTHRU
358 0 : case 39UL: STORE_COMPARE( 102, in38 ); FALLTHRU
359 0 : case 38UL: STORE_COMPARE( 101, in37 ); FALLTHRU
360 0 : case 37UL: STORE_COMPARE( 100, in36 ); FALLTHRU
361 0 : case 36UL: STORE_COMPARE( 99, in35 ); FALLTHRU
362 0 : case 35UL: STORE_COMPARE( 98, in34 ); FALLTHRU
363 0 : case 34UL: STORE_COMPARE( 97, in33 ); FALLTHRU
364 0 : case 33UL: STORE_COMPARE( 96, in32 ); FALLTHRU
365 0 : case 32UL: STORE_COMPARE( 95, in31 ); FALLTHRU
366 0 : case 31UL: STORE_COMPARE( 94, in30 ); FALLTHRU
367 0 : case 30UL: STORE_COMPARE( 93, in29 ); FALLTHRU
368 0 : case 29UL: STORE_COMPARE( 92, in28 ); FALLTHRU
369 0 : case 28UL: STORE_COMPARE( 91, in27 ); FALLTHRU
370 0 : case 27UL: STORE_COMPARE( 90, in26 ); FALLTHRU
371 0 : case 26UL: STORE_COMPARE( 89, in25 ); FALLTHRU
372 0 : case 25UL: STORE_COMPARE( 88, in24 ); FALLTHRU
373 0 : case 24UL: STORE_COMPARE( 87, in23 ); FALLTHRU
374 0 : case 23UL: STORE_COMPARE( 86, in22 ); FALLTHRU
375 0 : case 22UL: STORE_COMPARE( 85, in21 ); FALLTHRU
376 0 : case 21UL: STORE_COMPARE( 84, in20 ); FALLTHRU
377 0 : case 20UL: STORE_COMPARE( 83, in19 ); FALLTHRU
378 0 : case 19UL: STORE_COMPARE( 82, in18 ); FALLTHRU
379 0 : case 18UL: STORE_COMPARE( 81, in17 ); FALLTHRU
380 0 : case 17UL: STORE_COMPARE( 80, in16 ); FALLTHRU
381 0 : case 16UL: STORE_COMPARE( 79, in15 ); FALLTHRU
382 0 : case 15UL: STORE_COMPARE( 78, in14 ); FALLTHRU
383 0 : case 14UL: STORE_COMPARE( 77, in13 ); FALLTHRU
384 0 : case 13UL: STORE_COMPARE( 76, in12 ); FALLTHRU
385 0 : case 12UL: STORE_COMPARE( 75, in11 ); FALLTHRU
386 0 : case 11UL: STORE_COMPARE( 74, in10 ); FALLTHRU
387 0 : case 10UL: STORE_COMPARE( 73, in09 ); FALLTHRU
388 0 : case 9UL: STORE_COMPARE( 72, in08 ); FALLTHRU
389 0 : case 8UL: STORE_COMPARE( 71, in07 ); FALLTHRU
390 0 : case 7UL: STORE_COMPARE( 70, in06 ); FALLTHRU
391 0 : case 6UL: STORE_COMPARE( 69, in05 ); FALLTHRU
392 0 : case 5UL: STORE_COMPARE( 68, in04 ); FALLTHRU
393 0 : case 4UL: STORE_COMPARE( 67, in03 ); FALLTHRU
394 0 : case 3UL: STORE_COMPARE( 66, in02 ); FALLTHRU
395 0 : case 2UL: STORE_COMPARE( 65, in01 ); FALLTHRU
396 0 : case 1UL: STORE_COMPARE( 64, in00 );
397 0 : }
398 0 : shreds_remaining -= fd_ulong_min( shreds_remaining, 64UL );
399 0 : }
 : /* Final tail: evaluation points [128,134).  Only 6 cases are
 :    generated, matching the maximum total shred count this variant
 :    can be asked to produce (128 + 6). */
400 5544 : if( shreds_remaining>0UL ) {
401 0 : FD_REEDSOL_GENERATE_IFFT( 64, 64, ALL_VARS );
402 0 : FD_REEDSOL_GENERATE_FFT( 64, 128, ALL_VARS );
403 :
404 0 : switch( fd_ulong_min( shreds_remaining, 64UL ) ) {
405 0 : case 6UL: STORE_COMPARE( 133, in05 ); FALLTHRU
406 0 : case 5UL: STORE_COMPARE( 132, in04 ); FALLTHRU
407 0 : case 4UL: STORE_COMPARE( 131, in03 ); FALLTHRU
408 0 : case 3UL: STORE_COMPARE( 130, in02 ); FALLTHRU
409 0 : case 2UL: STORE_COMPARE( 129, in01 ); FALLTHRU
410 0 : case 1UL: STORE_COMPARE( 128, in00 );
411 0 : }
412 0 : shreds_remaining -= fd_ulong_min( shreds_remaining, 64UL );
413 0 : }
414 5544 : if( FD_UNLIKELY( GF_ANY( diff ) ) ) return FD_REEDSOL_ERR_CORRUPT;
 : /* Advance by one full vector; if fewer than GF_WIDTH bytes remain,
 :    pull shred_pos back so the last iteration ends exactly at
 :    shred_sz (re-processing some overlapping bytes rather than
 :    loading/storing past the end of the shred buffers). */
415 5544 : shred_pos += GF_WIDTH;
416 5544 : shred_pos = fd_ulong_if( ((shred_sz-GF_WIDTH)<shred_pos) & (shred_pos<shred_sz), shred_sz-GF_WIDTH, shred_pos );
417 5544 : #undef STORE_COMPARE_RELOAD
418 5544 : #undef STORE_COMPARE
419 5544 : #undef ALL_VARS
420 5544 : }
421 180 : return FD_REEDSOL_SUCCESS;
422 180 : }
|