Line data Source code
1 : /* Note: This file is auto generated. */
2 : #include "fd_reedsol_ppt.h"
3 : #include "fd_reedsol_fderiv.h"
4 :
5 : FD_FN_UNSANITIZED int
6 : fd_reedsol_private_recover_var_128( ulong shred_sz,
7 : uchar * const * shred,
8 : ulong data_shred_cnt,
9 : ulong parity_shred_cnt,
10 6 : uchar const * erased ) {
11 6 : uchar _erased[ 128 ] W_ATTR;
12 6 : uchar pi[ 128 ] W_ATTR;
13 6 : ulong shred_cnt = data_shred_cnt + parity_shred_cnt;
14 6 : ulong loaded_cnt = 0UL;
15 774 : for( ulong i=0UL; i<128UL; i++) {
16 768 : int load_shred = ((i<shred_cnt)&(loaded_cnt<data_shred_cnt))&&( erased[ i ]==0 );
17 768 : _erased[ i ] = !load_shred;
18 768 : loaded_cnt += (ulong)load_shred;
19 768 : }
20 6 : if( FD_UNLIKELY( loaded_cnt<data_shred_cnt ) ) return FD_REEDSOL_ERR_PARTIAL;
21 :
22 6 : fd_reedsol_private_gen_pi_128( _erased, pi );
23 :
24 : /* Accumulate the difference between each regenerated shred and its
25 : received value, when one exists. It must be 0; otherwise a shred is corrupt. */
26 6 : gf_t diff = gf_zero();
27 :
28 198 : for( ulong shred_pos=0UL; shred_pos<shred_sz; /* advanced manually at end of loop */ ) {
29 : /* Load exactly data_shred_cnt un-erased input shreds into
30 : their respective vectors. Fill the erased vectors with 0. */
31 192 : gf_t in00 = _erased[ 0 ] ? gf_zero() : gf_ldu( shred[ 0 ] + shred_pos );
32 192 : gf_t in01 = _erased[ 1 ] ? gf_zero() : gf_ldu( shred[ 1 ] + shred_pos );
33 192 : gf_t in02 = _erased[ 2 ] ? gf_zero() : gf_ldu( shred[ 2 ] + shred_pos );
34 192 : gf_t in03 = _erased[ 3 ] ? gf_zero() : gf_ldu( shred[ 3 ] + shred_pos );
35 192 : gf_t in04 = _erased[ 4 ] ? gf_zero() : gf_ldu( shred[ 4 ] + shred_pos );
36 192 : gf_t in05 = _erased[ 5 ] ? gf_zero() : gf_ldu( shred[ 5 ] + shred_pos );
37 192 : gf_t in06 = _erased[ 6 ] ? gf_zero() : gf_ldu( shred[ 6 ] + shred_pos );
38 192 : gf_t in07 = _erased[ 7 ] ? gf_zero() : gf_ldu( shred[ 7 ] + shred_pos );
39 192 : gf_t in08 = _erased[ 8 ] ? gf_zero() : gf_ldu( shred[ 8 ] + shred_pos );
40 192 : gf_t in09 = _erased[ 9 ] ? gf_zero() : gf_ldu( shred[ 9 ] + shred_pos );
41 192 : gf_t in10 = _erased[ 10 ] ? gf_zero() : gf_ldu( shred[ 10 ] + shred_pos );
42 192 : gf_t in11 = _erased[ 11 ] ? gf_zero() : gf_ldu( shred[ 11 ] + shred_pos );
43 192 : gf_t in12 = _erased[ 12 ] ? gf_zero() : gf_ldu( shred[ 12 ] + shred_pos );
44 192 : gf_t in13 = _erased[ 13 ] ? gf_zero() : gf_ldu( shred[ 13 ] + shred_pos );
45 192 : gf_t in14 = _erased[ 14 ] ? gf_zero() : gf_ldu( shred[ 14 ] + shred_pos );
46 192 : gf_t in15 = _erased[ 15 ] ? gf_zero() : gf_ldu( shred[ 15 ] + shred_pos );
47 192 : gf_t in16 = _erased[ 16 ] ? gf_zero() : gf_ldu( shred[ 16 ] + shred_pos );
48 192 : gf_t in17 = _erased[ 17 ] ? gf_zero() : gf_ldu( shred[ 17 ] + shred_pos );
49 192 : gf_t in18 = _erased[ 18 ] ? gf_zero() : gf_ldu( shred[ 18 ] + shred_pos );
50 192 : gf_t in19 = _erased[ 19 ] ? gf_zero() : gf_ldu( shred[ 19 ] + shred_pos );
51 192 : gf_t in20 = _erased[ 20 ] ? gf_zero() : gf_ldu( shred[ 20 ] + shred_pos );
52 192 : gf_t in21 = _erased[ 21 ] ? gf_zero() : gf_ldu( shred[ 21 ] + shred_pos );
53 192 : gf_t in22 = _erased[ 22 ] ? gf_zero() : gf_ldu( shred[ 22 ] + shred_pos );
54 192 : gf_t in23 = _erased[ 23 ] ? gf_zero() : gf_ldu( shred[ 23 ] + shred_pos );
55 192 : gf_t in24 = _erased[ 24 ] ? gf_zero() : gf_ldu( shred[ 24 ] + shred_pos );
56 192 : gf_t in25 = _erased[ 25 ] ? gf_zero() : gf_ldu( shred[ 25 ] + shred_pos );
57 192 : gf_t in26 = _erased[ 26 ] ? gf_zero() : gf_ldu( shred[ 26 ] + shred_pos );
58 192 : gf_t in27 = _erased[ 27 ] ? gf_zero() : gf_ldu( shred[ 27 ] + shred_pos );
59 192 : gf_t in28 = _erased[ 28 ] ? gf_zero() : gf_ldu( shred[ 28 ] + shred_pos );
60 192 : gf_t in29 = _erased[ 29 ] ? gf_zero() : gf_ldu( shred[ 29 ] + shred_pos );
61 192 : gf_t in30 = _erased[ 30 ] ? gf_zero() : gf_ldu( shred[ 30 ] + shred_pos );
62 192 : gf_t in31 = _erased[ 31 ] ? gf_zero() : gf_ldu( shred[ 31 ] + shred_pos );
63 192 : gf_t in32 = _erased[ 32 ] ? gf_zero() : gf_ldu( shred[ 32 ] + shred_pos );
64 192 : gf_t in33 = _erased[ 33 ] ? gf_zero() : gf_ldu( shred[ 33 ] + shred_pos );
65 192 : gf_t in34 = _erased[ 34 ] ? gf_zero() : gf_ldu( shred[ 34 ] + shred_pos );
66 192 : gf_t in35 = _erased[ 35 ] ? gf_zero() : gf_ldu( shred[ 35 ] + shred_pos );
67 192 : gf_t in36 = _erased[ 36 ] ? gf_zero() : gf_ldu( shred[ 36 ] + shred_pos );
68 192 : gf_t in37 = _erased[ 37 ] ? gf_zero() : gf_ldu( shred[ 37 ] + shred_pos );
69 192 : gf_t in38 = _erased[ 38 ] ? gf_zero() : gf_ldu( shred[ 38 ] + shred_pos );
70 192 : gf_t in39 = _erased[ 39 ] ? gf_zero() : gf_ldu( shred[ 39 ] + shred_pos );
71 192 : gf_t in40 = _erased[ 40 ] ? gf_zero() : gf_ldu( shred[ 40 ] + shred_pos );
72 192 : gf_t in41 = _erased[ 41 ] ? gf_zero() : gf_ldu( shred[ 41 ] + shred_pos );
73 192 : gf_t in42 = _erased[ 42 ] ? gf_zero() : gf_ldu( shred[ 42 ] + shred_pos );
74 192 : gf_t in43 = _erased[ 43 ] ? gf_zero() : gf_ldu( shred[ 43 ] + shred_pos );
75 192 : gf_t in44 = _erased[ 44 ] ? gf_zero() : gf_ldu( shred[ 44 ] + shred_pos );
76 192 : gf_t in45 = _erased[ 45 ] ? gf_zero() : gf_ldu( shred[ 45 ] + shred_pos );
77 192 : gf_t in46 = _erased[ 46 ] ? gf_zero() : gf_ldu( shred[ 46 ] + shred_pos );
78 192 : gf_t in47 = _erased[ 47 ] ? gf_zero() : gf_ldu( shred[ 47 ] + shred_pos );
79 192 : gf_t in48 = _erased[ 48 ] ? gf_zero() : gf_ldu( shred[ 48 ] + shred_pos );
80 192 : gf_t in49 = _erased[ 49 ] ? gf_zero() : gf_ldu( shred[ 49 ] + shred_pos );
81 192 : gf_t in50 = _erased[ 50 ] ? gf_zero() : gf_ldu( shred[ 50 ] + shred_pos );
82 192 : gf_t in51 = _erased[ 51 ] ? gf_zero() : gf_ldu( shred[ 51 ] + shred_pos );
83 192 : gf_t in52 = _erased[ 52 ] ? gf_zero() : gf_ldu( shred[ 52 ] + shred_pos );
84 192 : gf_t in53 = _erased[ 53 ] ? gf_zero() : gf_ldu( shred[ 53 ] + shred_pos );
85 192 : gf_t in54 = _erased[ 54 ] ? gf_zero() : gf_ldu( shred[ 54 ] + shred_pos );
86 192 : gf_t in55 = _erased[ 55 ] ? gf_zero() : gf_ldu( shred[ 55 ] + shred_pos );
87 192 : gf_t in56 = _erased[ 56 ] ? gf_zero() : gf_ldu( shred[ 56 ] + shred_pos );
88 192 : gf_t in57 = _erased[ 57 ] ? gf_zero() : gf_ldu( shred[ 57 ] + shred_pos );
89 192 : gf_t in58 = _erased[ 58 ] ? gf_zero() : gf_ldu( shred[ 58 ] + shred_pos );
90 192 : gf_t in59 = _erased[ 59 ] ? gf_zero() : gf_ldu( shred[ 59 ] + shred_pos );
91 192 : gf_t in60 = _erased[ 60 ] ? gf_zero() : gf_ldu( shred[ 60 ] + shred_pos );
92 192 : gf_t in61 = _erased[ 61 ] ? gf_zero() : gf_ldu( shred[ 61 ] + shred_pos );
93 192 : gf_t in62 = _erased[ 62 ] ? gf_zero() : gf_ldu( shred[ 62 ] + shred_pos );
94 192 : gf_t in63 = _erased[ 63 ] ? gf_zero() : gf_ldu( shred[ 63 ] + shred_pos );
95 192 : gf_t in64 = _erased[ 64 ] ? gf_zero() : gf_ldu( shred[ 64 ] + shred_pos );
96 192 : gf_t in65 = _erased[ 65 ] ? gf_zero() : gf_ldu( shred[ 65 ] + shred_pos );
97 192 : gf_t in66 = _erased[ 66 ] ? gf_zero() : gf_ldu( shred[ 66 ] + shred_pos );
98 192 : gf_t in67 = _erased[ 67 ] ? gf_zero() : gf_ldu( shred[ 67 ] + shred_pos );
99 192 : gf_t in68 = _erased[ 68 ] ? gf_zero() : gf_ldu( shred[ 68 ] + shred_pos );
100 192 : gf_t in69 = _erased[ 69 ] ? gf_zero() : gf_ldu( shred[ 69 ] + shred_pos );
101 192 : gf_t in70 = _erased[ 70 ] ? gf_zero() : gf_ldu( shred[ 70 ] + shred_pos );
102 192 : gf_t in71 = _erased[ 71 ] ? gf_zero() : gf_ldu( shred[ 71 ] + shred_pos );
103 192 : gf_t in72 = _erased[ 72 ] ? gf_zero() : gf_ldu( shred[ 72 ] + shred_pos );
104 192 : gf_t in73 = _erased[ 73 ] ? gf_zero() : gf_ldu( shred[ 73 ] + shred_pos );
105 192 : gf_t in74 = _erased[ 74 ] ? gf_zero() : gf_ldu( shred[ 74 ] + shred_pos );
106 192 : gf_t in75 = _erased[ 75 ] ? gf_zero() : gf_ldu( shred[ 75 ] + shred_pos );
107 192 : gf_t in76 = _erased[ 76 ] ? gf_zero() : gf_ldu( shred[ 76 ] + shred_pos );
108 192 : gf_t in77 = _erased[ 77 ] ? gf_zero() : gf_ldu( shred[ 77 ] + shred_pos );
109 192 : gf_t in78 = _erased[ 78 ] ? gf_zero() : gf_ldu( shred[ 78 ] + shred_pos );
110 192 : gf_t in79 = _erased[ 79 ] ? gf_zero() : gf_ldu( shred[ 79 ] + shred_pos );
111 192 : gf_t in80 = _erased[ 80 ] ? gf_zero() : gf_ldu( shred[ 80 ] + shred_pos );
112 192 : gf_t in81 = _erased[ 81 ] ? gf_zero() : gf_ldu( shred[ 81 ] + shred_pos );
113 192 : gf_t in82 = _erased[ 82 ] ? gf_zero() : gf_ldu( shred[ 82 ] + shred_pos );
114 192 : gf_t in83 = _erased[ 83 ] ? gf_zero() : gf_ldu( shred[ 83 ] + shred_pos );
115 192 : gf_t in84 = _erased[ 84 ] ? gf_zero() : gf_ldu( shred[ 84 ] + shred_pos );
116 192 : gf_t in85 = _erased[ 85 ] ? gf_zero() : gf_ldu( shred[ 85 ] + shred_pos );
117 192 : gf_t in86 = _erased[ 86 ] ? gf_zero() : gf_ldu( shred[ 86 ] + shred_pos );
118 192 : gf_t in87 = _erased[ 87 ] ? gf_zero() : gf_ldu( shred[ 87 ] + shred_pos );
119 192 : gf_t in88 = _erased[ 88 ] ? gf_zero() : gf_ldu( shred[ 88 ] + shred_pos );
120 192 : gf_t in89 = _erased[ 89 ] ? gf_zero() : gf_ldu( shred[ 89 ] + shred_pos );
121 192 : gf_t in90 = _erased[ 90 ] ? gf_zero() : gf_ldu( shred[ 90 ] + shred_pos );
122 192 : gf_t in91 = _erased[ 91 ] ? gf_zero() : gf_ldu( shred[ 91 ] + shred_pos );
123 192 : gf_t in92 = _erased[ 92 ] ? gf_zero() : gf_ldu( shred[ 92 ] + shred_pos );
124 192 : gf_t in93 = _erased[ 93 ] ? gf_zero() : gf_ldu( shred[ 93 ] + shred_pos );
125 192 : gf_t in94 = _erased[ 94 ] ? gf_zero() : gf_ldu( shred[ 94 ] + shred_pos );
126 192 : gf_t in95 = _erased[ 95 ] ? gf_zero() : gf_ldu( shred[ 95 ] + shred_pos );
127 192 : gf_t in96 = _erased[ 96 ] ? gf_zero() : gf_ldu( shred[ 96 ] + shred_pos );
128 192 : gf_t in97 = _erased[ 97 ] ? gf_zero() : gf_ldu( shred[ 97 ] + shred_pos );
129 192 : gf_t in98 = _erased[ 98 ] ? gf_zero() : gf_ldu( shred[ 98 ] + shred_pos );
130 192 : gf_t in99 = _erased[ 99 ] ? gf_zero() : gf_ldu( shred[ 99 ] + shred_pos );
131 192 : gf_t in100 = _erased[ 100 ] ? gf_zero() : gf_ldu( shred[ 100 ] + shred_pos );
132 192 : gf_t in101 = _erased[ 101 ] ? gf_zero() : gf_ldu( shred[ 101 ] + shred_pos );
133 192 : gf_t in102 = _erased[ 102 ] ? gf_zero() : gf_ldu( shred[ 102 ] + shred_pos );
134 192 : gf_t in103 = _erased[ 103 ] ? gf_zero() : gf_ldu( shred[ 103 ] + shred_pos );
135 192 : gf_t in104 = _erased[ 104 ] ? gf_zero() : gf_ldu( shred[ 104 ] + shred_pos );
136 192 : gf_t in105 = _erased[ 105 ] ? gf_zero() : gf_ldu( shred[ 105 ] + shred_pos );
137 192 : gf_t in106 = _erased[ 106 ] ? gf_zero() : gf_ldu( shred[ 106 ] + shred_pos );
138 192 : gf_t in107 = _erased[ 107 ] ? gf_zero() : gf_ldu( shred[ 107 ] + shred_pos );
139 192 : gf_t in108 = _erased[ 108 ] ? gf_zero() : gf_ldu( shred[ 108 ] + shred_pos );
140 192 : gf_t in109 = _erased[ 109 ] ? gf_zero() : gf_ldu( shred[ 109 ] + shred_pos );
141 192 : gf_t in110 = _erased[ 110 ] ? gf_zero() : gf_ldu( shred[ 110 ] + shred_pos );
142 192 : gf_t in111 = _erased[ 111 ] ? gf_zero() : gf_ldu( shred[ 111 ] + shred_pos );
143 192 : gf_t in112 = _erased[ 112 ] ? gf_zero() : gf_ldu( shred[ 112 ] + shred_pos );
144 192 : gf_t in113 = _erased[ 113 ] ? gf_zero() : gf_ldu( shred[ 113 ] + shred_pos );
145 192 : gf_t in114 = _erased[ 114 ] ? gf_zero() : gf_ldu( shred[ 114 ] + shred_pos );
146 192 : gf_t in115 = _erased[ 115 ] ? gf_zero() : gf_ldu( shred[ 115 ] + shred_pos );
147 192 : gf_t in116 = _erased[ 116 ] ? gf_zero() : gf_ldu( shred[ 116 ] + shred_pos );
148 192 : gf_t in117 = _erased[ 117 ] ? gf_zero() : gf_ldu( shred[ 117 ] + shred_pos );
149 192 : gf_t in118 = _erased[ 118 ] ? gf_zero() : gf_ldu( shred[ 118 ] + shred_pos );
150 192 : gf_t in119 = _erased[ 119 ] ? gf_zero() : gf_ldu( shred[ 119 ] + shred_pos );
151 192 : gf_t in120 = _erased[ 120 ] ? gf_zero() : gf_ldu( shred[ 120 ] + shred_pos );
152 192 : gf_t in121 = _erased[ 121 ] ? gf_zero() : gf_ldu( shred[ 121 ] + shred_pos );
153 192 : gf_t in122 = _erased[ 122 ] ? gf_zero() : gf_ldu( shred[ 122 ] + shred_pos );
154 192 : gf_t in123 = _erased[ 123 ] ? gf_zero() : gf_ldu( shred[ 123 ] + shred_pos );
155 192 : gf_t in124 = _erased[ 124 ] ? gf_zero() : gf_ldu( shred[ 124 ] + shred_pos );
156 192 : gf_t in125 = _erased[ 125 ] ? gf_zero() : gf_ldu( shred[ 125 ] + shred_pos );
157 192 : gf_t in126 = _erased[ 126 ] ? gf_zero() : gf_ldu( shred[ 126 ] + shred_pos );
158 192 : gf_t in127 = _erased[ 127 ] ? gf_zero() : gf_ldu( shred[ 127 ] + shred_pos );
159 : /* Technically, we only need to multiply the non-erased ones, since
160 : the erased ones are 0, but we know at least half of them are
161 : non-erased, and the branch is going to be just as costly as the
162 : multiply. */
163 192 : in00 = GF_MUL_VAR( in00, pi[ 0 ] );
164 192 : in01 = GF_MUL_VAR( in01, pi[ 1 ] );
165 192 : in02 = GF_MUL_VAR( in02, pi[ 2 ] );
166 192 : in03 = GF_MUL_VAR( in03, pi[ 3 ] );
167 192 : in04 = GF_MUL_VAR( in04, pi[ 4 ] );
168 192 : in05 = GF_MUL_VAR( in05, pi[ 5 ] );
169 192 : in06 = GF_MUL_VAR( in06, pi[ 6 ] );
170 192 : in07 = GF_MUL_VAR( in07, pi[ 7 ] );
171 192 : in08 = GF_MUL_VAR( in08, pi[ 8 ] );
172 192 : in09 = GF_MUL_VAR( in09, pi[ 9 ] );
173 192 : in10 = GF_MUL_VAR( in10, pi[ 10 ] );
174 192 : in11 = GF_MUL_VAR( in11, pi[ 11 ] );
175 192 : in12 = GF_MUL_VAR( in12, pi[ 12 ] );
176 192 : in13 = GF_MUL_VAR( in13, pi[ 13 ] );
177 192 : in14 = GF_MUL_VAR( in14, pi[ 14 ] );
178 192 : in15 = GF_MUL_VAR( in15, pi[ 15 ] );
179 192 : in16 = GF_MUL_VAR( in16, pi[ 16 ] );
180 192 : in17 = GF_MUL_VAR( in17, pi[ 17 ] );
181 192 : in18 = GF_MUL_VAR( in18, pi[ 18 ] );
182 192 : in19 = GF_MUL_VAR( in19, pi[ 19 ] );
183 192 : in20 = GF_MUL_VAR( in20, pi[ 20 ] );
184 192 : in21 = GF_MUL_VAR( in21, pi[ 21 ] );
185 192 : in22 = GF_MUL_VAR( in22, pi[ 22 ] );
186 192 : in23 = GF_MUL_VAR( in23, pi[ 23 ] );
187 192 : in24 = GF_MUL_VAR( in24, pi[ 24 ] );
188 192 : in25 = GF_MUL_VAR( in25, pi[ 25 ] );
189 192 : in26 = GF_MUL_VAR( in26, pi[ 26 ] );
190 192 : in27 = GF_MUL_VAR( in27, pi[ 27 ] );
191 192 : in28 = GF_MUL_VAR( in28, pi[ 28 ] );
192 192 : in29 = GF_MUL_VAR( in29, pi[ 29 ] );
193 192 : in30 = GF_MUL_VAR( in30, pi[ 30 ] );
194 192 : in31 = GF_MUL_VAR( in31, pi[ 31 ] );
195 192 : in32 = GF_MUL_VAR( in32, pi[ 32 ] );
196 192 : in33 = GF_MUL_VAR( in33, pi[ 33 ] );
197 192 : in34 = GF_MUL_VAR( in34, pi[ 34 ] );
198 192 : in35 = GF_MUL_VAR( in35, pi[ 35 ] );
199 192 : in36 = GF_MUL_VAR( in36, pi[ 36 ] );
200 192 : in37 = GF_MUL_VAR( in37, pi[ 37 ] );
201 192 : in38 = GF_MUL_VAR( in38, pi[ 38 ] );
202 192 : in39 = GF_MUL_VAR( in39, pi[ 39 ] );
203 192 : in40 = GF_MUL_VAR( in40, pi[ 40 ] );
204 192 : in41 = GF_MUL_VAR( in41, pi[ 41 ] );
205 192 : in42 = GF_MUL_VAR( in42, pi[ 42 ] );
206 192 : in43 = GF_MUL_VAR( in43, pi[ 43 ] );
207 192 : in44 = GF_MUL_VAR( in44, pi[ 44 ] );
208 192 : in45 = GF_MUL_VAR( in45, pi[ 45 ] );
209 192 : in46 = GF_MUL_VAR( in46, pi[ 46 ] );
210 192 : in47 = GF_MUL_VAR( in47, pi[ 47 ] );
211 192 : in48 = GF_MUL_VAR( in48, pi[ 48 ] );
212 192 : in49 = GF_MUL_VAR( in49, pi[ 49 ] );
213 192 : in50 = GF_MUL_VAR( in50, pi[ 50 ] );
214 192 : in51 = GF_MUL_VAR( in51, pi[ 51 ] );
215 192 : in52 = GF_MUL_VAR( in52, pi[ 52 ] );
216 192 : in53 = GF_MUL_VAR( in53, pi[ 53 ] );
217 192 : in54 = GF_MUL_VAR( in54, pi[ 54 ] );
218 192 : in55 = GF_MUL_VAR( in55, pi[ 55 ] );
219 192 : in56 = GF_MUL_VAR( in56, pi[ 56 ] );
220 192 : in57 = GF_MUL_VAR( in57, pi[ 57 ] );
221 192 : in58 = GF_MUL_VAR( in58, pi[ 58 ] );
222 192 : in59 = GF_MUL_VAR( in59, pi[ 59 ] );
223 192 : in60 = GF_MUL_VAR( in60, pi[ 60 ] );
224 192 : in61 = GF_MUL_VAR( in61, pi[ 61 ] );
225 192 : in62 = GF_MUL_VAR( in62, pi[ 62 ] );
226 192 : in63 = GF_MUL_VAR( in63, pi[ 63 ] );
227 192 : in64 = GF_MUL_VAR( in64, pi[ 64 ] );
228 192 : in65 = GF_MUL_VAR( in65, pi[ 65 ] );
229 192 : in66 = GF_MUL_VAR( in66, pi[ 66 ] );
230 192 : in67 = GF_MUL_VAR( in67, pi[ 67 ] );
231 192 : in68 = GF_MUL_VAR( in68, pi[ 68 ] );
232 192 : in69 = GF_MUL_VAR( in69, pi[ 69 ] );
233 192 : in70 = GF_MUL_VAR( in70, pi[ 70 ] );
234 192 : in71 = GF_MUL_VAR( in71, pi[ 71 ] );
235 192 : in72 = GF_MUL_VAR( in72, pi[ 72 ] );
236 192 : in73 = GF_MUL_VAR( in73, pi[ 73 ] );
237 192 : in74 = GF_MUL_VAR( in74, pi[ 74 ] );
238 192 : in75 = GF_MUL_VAR( in75, pi[ 75 ] );
239 192 : in76 = GF_MUL_VAR( in76, pi[ 76 ] );
240 192 : in77 = GF_MUL_VAR( in77, pi[ 77 ] );
241 192 : in78 = GF_MUL_VAR( in78, pi[ 78 ] );
242 192 : in79 = GF_MUL_VAR( in79, pi[ 79 ] );
243 192 : in80 = GF_MUL_VAR( in80, pi[ 80 ] );
244 192 : in81 = GF_MUL_VAR( in81, pi[ 81 ] );
245 192 : in82 = GF_MUL_VAR( in82, pi[ 82 ] );
246 192 : in83 = GF_MUL_VAR( in83, pi[ 83 ] );
247 192 : in84 = GF_MUL_VAR( in84, pi[ 84 ] );
248 192 : in85 = GF_MUL_VAR( in85, pi[ 85 ] );
249 192 : in86 = GF_MUL_VAR( in86, pi[ 86 ] );
250 192 : in87 = GF_MUL_VAR( in87, pi[ 87 ] );
251 192 : in88 = GF_MUL_VAR( in88, pi[ 88 ] );
252 192 : in89 = GF_MUL_VAR( in89, pi[ 89 ] );
253 192 : in90 = GF_MUL_VAR( in90, pi[ 90 ] );
254 192 : in91 = GF_MUL_VAR( in91, pi[ 91 ] );
255 192 : in92 = GF_MUL_VAR( in92, pi[ 92 ] );
256 192 : in93 = GF_MUL_VAR( in93, pi[ 93 ] );
257 192 : in94 = GF_MUL_VAR( in94, pi[ 94 ] );
258 192 : in95 = GF_MUL_VAR( in95, pi[ 95 ] );
259 192 : in96 = GF_MUL_VAR( in96, pi[ 96 ] );
260 192 : in97 = GF_MUL_VAR( in97, pi[ 97 ] );
261 192 : in98 = GF_MUL_VAR( in98, pi[ 98 ] );
262 192 : in99 = GF_MUL_VAR( in99, pi[ 99 ] );
263 192 : in100 = GF_MUL_VAR( in100, pi[ 100 ] );
264 192 : in101 = GF_MUL_VAR( in101, pi[ 101 ] );
265 192 : in102 = GF_MUL_VAR( in102, pi[ 102 ] );
266 192 : in103 = GF_MUL_VAR( in103, pi[ 103 ] );
267 192 : in104 = GF_MUL_VAR( in104, pi[ 104 ] );
268 192 : in105 = GF_MUL_VAR( in105, pi[ 105 ] );
269 192 : in106 = GF_MUL_VAR( in106, pi[ 106 ] );
270 192 : in107 = GF_MUL_VAR( in107, pi[ 107 ] );
271 192 : in108 = GF_MUL_VAR( in108, pi[ 108 ] );
272 192 : in109 = GF_MUL_VAR( in109, pi[ 109 ] );
273 192 : in110 = GF_MUL_VAR( in110, pi[ 110 ] );
274 192 : in111 = GF_MUL_VAR( in111, pi[ 111 ] );
275 192 : in112 = GF_MUL_VAR( in112, pi[ 112 ] );
276 192 : in113 = GF_MUL_VAR( in113, pi[ 113 ] );
277 192 : in114 = GF_MUL_VAR( in114, pi[ 114 ] );
278 192 : in115 = GF_MUL_VAR( in115, pi[ 115 ] );
279 192 : in116 = GF_MUL_VAR( in116, pi[ 116 ] );
280 192 : in117 = GF_MUL_VAR( in117, pi[ 117 ] );
281 192 : in118 = GF_MUL_VAR( in118, pi[ 118 ] );
282 192 : in119 = GF_MUL_VAR( in119, pi[ 119 ] );
283 192 : in120 = GF_MUL_VAR( in120, pi[ 120 ] );
284 192 : in121 = GF_MUL_VAR( in121, pi[ 121 ] );
285 192 : in122 = GF_MUL_VAR( in122, pi[ 122 ] );
286 192 : in123 = GF_MUL_VAR( in123, pi[ 123 ] );
287 192 : in124 = GF_MUL_VAR( in124, pi[ 124 ] );
288 192 : in125 = GF_MUL_VAR( in125, pi[ 125 ] );
289 192 : in126 = GF_MUL_VAR( in126, pi[ 126 ] );
290 192 : in127 = GF_MUL_VAR( in127, pi[ 127 ] );
291 192 : #define ALL_VARS in00, in01, in02, in03, in04, in05, in06, in07, in08, in09, in10, in11, in12, in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23, in24, in25, in26, in27, in28, in29, in30, in31, in32, in33, in34, in35, in36, in37, in38, in39, in40, in41, in42, in43, in44, in45, in46, in47, in48, in49, in50, in51, in52, in53, in54, in55, in56, in57, in58, in59, in60, in61, in62, in63, in64, in65, in66, in67, in68, in69, in70, in71, in72, in73, in74, in75, in76, in77, in78, in79, in80, in81, in82, in83, in84, in85, in86, in87, in88, in89, in90, in91, in92, in93, in94, in95, in96, in97, in98, in99, in100, in101, in102, in103, in104, in105, in106, in107, in108, in109, in110, in111, in112, in113, in114, in115, in116, in117, in118, in119, in120, in121, in122, in123, in124, in125, in126, in127
292 384 : #define ALL_VARS_REF &in00, &in01, &in02, &in03, &in04, &in05, &in06, &in07, &in08, &in09, &in10, &in11, &in12, &in13, &in14, &in15, &in16, &in17, &in18, &in19, &in20, &in21, &in22, &in23, &in24, &in25, &in26, &in27, &in28, &in29, &in30, &in31, &in32, &in33, &in34, &in35, &in36, &in37, &in38, &in39, &in40, &in41, &in42, &in43, &in44, &in45, &in46, &in47, &in48, &in49, &in50, &in51, &in52, &in53, &in54, &in55, &in56, &in57, &in58, &in59, &in60, &in61, &in62, &in63, &in64, &in65, &in66, &in67, &in68, &in69, &in70, &in71, &in72, &in73, &in74, &in75, &in76, &in77, &in78, &in79, &in80, &in81, &in82, &in83, &in84, &in85, &in86, &in87, &in88, &in89, &in90, &in91, &in92, &in93, &in94, &in95, &in96, &in97, &in98, &in99, &in100, &in101, &in102, &in103, &in104, &in105, &in106, &in107, &in108, &in109, &in110, &in111, &in112, &in113, &in114, &in115, &in116, &in117, &in118, &in119, &in120, &in121, &in122, &in123, &in124, &in125, &in126, &in127
293 :
294 192 : fd_reedsol_ifft_128_0( ALL_VARS_REF );
295 :
296 192 : FD_REEDSOL_GENERATE_FDERIV( 128, ALL_VARS );
297 :
298 192 : fd_reedsol_fft_128_0( ALL_VARS_REF );
299 :
300 : /* Again, we only need to multiply the erased ones, since we don't
301 : use the value of the non-erased ones anymore, but I'll take
302 : multiplies over branches most days. */
303 192 : in00 = GF_MUL_VAR( in00, pi[ 0 ] );
304 192 : in01 = GF_MUL_VAR( in01, pi[ 1 ] );
305 192 : in02 = GF_MUL_VAR( in02, pi[ 2 ] );
306 192 : in03 = GF_MUL_VAR( in03, pi[ 3 ] );
307 192 : in04 = GF_MUL_VAR( in04, pi[ 4 ] );
308 192 : in05 = GF_MUL_VAR( in05, pi[ 5 ] );
309 192 : in06 = GF_MUL_VAR( in06, pi[ 6 ] );
310 192 : in07 = GF_MUL_VAR( in07, pi[ 7 ] );
311 192 : in08 = GF_MUL_VAR( in08, pi[ 8 ] );
312 192 : in09 = GF_MUL_VAR( in09, pi[ 9 ] );
313 192 : in10 = GF_MUL_VAR( in10, pi[ 10 ] );
314 192 : in11 = GF_MUL_VAR( in11, pi[ 11 ] );
315 192 : in12 = GF_MUL_VAR( in12, pi[ 12 ] );
316 192 : in13 = GF_MUL_VAR( in13, pi[ 13 ] );
317 192 : in14 = GF_MUL_VAR( in14, pi[ 14 ] );
318 192 : in15 = GF_MUL_VAR( in15, pi[ 15 ] );
319 192 : in16 = GF_MUL_VAR( in16, pi[ 16 ] );
320 192 : in17 = GF_MUL_VAR( in17, pi[ 17 ] );
321 192 : in18 = GF_MUL_VAR( in18, pi[ 18 ] );
322 192 : in19 = GF_MUL_VAR( in19, pi[ 19 ] );
323 192 : in20 = GF_MUL_VAR( in20, pi[ 20 ] );
324 192 : in21 = GF_MUL_VAR( in21, pi[ 21 ] );
325 192 : in22 = GF_MUL_VAR( in22, pi[ 22 ] );
326 192 : in23 = GF_MUL_VAR( in23, pi[ 23 ] );
327 192 : in24 = GF_MUL_VAR( in24, pi[ 24 ] );
328 192 : in25 = GF_MUL_VAR( in25, pi[ 25 ] );
329 192 : in26 = GF_MUL_VAR( in26, pi[ 26 ] );
330 192 : in27 = GF_MUL_VAR( in27, pi[ 27 ] );
331 192 : in28 = GF_MUL_VAR( in28, pi[ 28 ] );
332 192 : in29 = GF_MUL_VAR( in29, pi[ 29 ] );
333 192 : in30 = GF_MUL_VAR( in30, pi[ 30 ] );
334 192 : in31 = GF_MUL_VAR( in31, pi[ 31 ] );
335 192 : in32 = GF_MUL_VAR( in32, pi[ 32 ] );
336 192 : in33 = GF_MUL_VAR( in33, pi[ 33 ] );
337 192 : in34 = GF_MUL_VAR( in34, pi[ 34 ] );
338 192 : in35 = GF_MUL_VAR( in35, pi[ 35 ] );
339 192 : in36 = GF_MUL_VAR( in36, pi[ 36 ] );
340 192 : in37 = GF_MUL_VAR( in37, pi[ 37 ] );
341 192 : in38 = GF_MUL_VAR( in38, pi[ 38 ] );
342 192 : in39 = GF_MUL_VAR( in39, pi[ 39 ] );
343 192 : in40 = GF_MUL_VAR( in40, pi[ 40 ] );
344 192 : in41 = GF_MUL_VAR( in41, pi[ 41 ] );
345 192 : in42 = GF_MUL_VAR( in42, pi[ 42 ] );
346 192 : in43 = GF_MUL_VAR( in43, pi[ 43 ] );
347 192 : in44 = GF_MUL_VAR( in44, pi[ 44 ] );
348 192 : in45 = GF_MUL_VAR( in45, pi[ 45 ] );
349 192 : in46 = GF_MUL_VAR( in46, pi[ 46 ] );
350 192 : in47 = GF_MUL_VAR( in47, pi[ 47 ] );
351 192 : in48 = GF_MUL_VAR( in48, pi[ 48 ] );
352 192 : in49 = GF_MUL_VAR( in49, pi[ 49 ] );
353 192 : in50 = GF_MUL_VAR( in50, pi[ 50 ] );
354 192 : in51 = GF_MUL_VAR( in51, pi[ 51 ] );
355 192 : in52 = GF_MUL_VAR( in52, pi[ 52 ] );
356 192 : in53 = GF_MUL_VAR( in53, pi[ 53 ] );
357 192 : in54 = GF_MUL_VAR( in54, pi[ 54 ] );
358 192 : in55 = GF_MUL_VAR( in55, pi[ 55 ] );
359 192 : in56 = GF_MUL_VAR( in56, pi[ 56 ] );
360 192 : in57 = GF_MUL_VAR( in57, pi[ 57 ] );
361 192 : in58 = GF_MUL_VAR( in58, pi[ 58 ] );
362 192 : in59 = GF_MUL_VAR( in59, pi[ 59 ] );
363 192 : in60 = GF_MUL_VAR( in60, pi[ 60 ] );
364 192 : in61 = GF_MUL_VAR( in61, pi[ 61 ] );
365 192 : in62 = GF_MUL_VAR( in62, pi[ 62 ] );
366 192 : in63 = GF_MUL_VAR( in63, pi[ 63 ] );
367 192 : in64 = GF_MUL_VAR( in64, pi[ 64 ] );
368 192 : in65 = GF_MUL_VAR( in65, pi[ 65 ] );
369 192 : in66 = GF_MUL_VAR( in66, pi[ 66 ] );
370 192 : in67 = GF_MUL_VAR( in67, pi[ 67 ] );
371 192 : in68 = GF_MUL_VAR( in68, pi[ 68 ] );
372 192 : in69 = GF_MUL_VAR( in69, pi[ 69 ] );
373 192 : in70 = GF_MUL_VAR( in70, pi[ 70 ] );
374 192 : in71 = GF_MUL_VAR( in71, pi[ 71 ] );
375 192 : in72 = GF_MUL_VAR( in72, pi[ 72 ] );
376 192 : in73 = GF_MUL_VAR( in73, pi[ 73 ] );
377 192 : in74 = GF_MUL_VAR( in74, pi[ 74 ] );
378 192 : in75 = GF_MUL_VAR( in75, pi[ 75 ] );
379 192 : in76 = GF_MUL_VAR( in76, pi[ 76 ] );
380 192 : in77 = GF_MUL_VAR( in77, pi[ 77 ] );
381 192 : in78 = GF_MUL_VAR( in78, pi[ 78 ] );
382 192 : in79 = GF_MUL_VAR( in79, pi[ 79 ] );
383 192 : in80 = GF_MUL_VAR( in80, pi[ 80 ] );
384 192 : in81 = GF_MUL_VAR( in81, pi[ 81 ] );
385 192 : in82 = GF_MUL_VAR( in82, pi[ 82 ] );
386 192 : in83 = GF_MUL_VAR( in83, pi[ 83 ] );
387 192 : in84 = GF_MUL_VAR( in84, pi[ 84 ] );
388 192 : in85 = GF_MUL_VAR( in85, pi[ 85 ] );
389 192 : in86 = GF_MUL_VAR( in86, pi[ 86 ] );
390 192 : in87 = GF_MUL_VAR( in87, pi[ 87 ] );
391 192 : in88 = GF_MUL_VAR( in88, pi[ 88 ] );
392 192 : in89 = GF_MUL_VAR( in89, pi[ 89 ] );
393 192 : in90 = GF_MUL_VAR( in90, pi[ 90 ] );
394 192 : in91 = GF_MUL_VAR( in91, pi[ 91 ] );
395 192 : in92 = GF_MUL_VAR( in92, pi[ 92 ] );
396 192 : in93 = GF_MUL_VAR( in93, pi[ 93 ] );
397 192 : in94 = GF_MUL_VAR( in94, pi[ 94 ] );
398 192 : in95 = GF_MUL_VAR( in95, pi[ 95 ] );
399 192 : in96 = GF_MUL_VAR( in96, pi[ 96 ] );
400 192 : in97 = GF_MUL_VAR( in97, pi[ 97 ] );
401 192 : in98 = GF_MUL_VAR( in98, pi[ 98 ] );
402 192 : in99 = GF_MUL_VAR( in99, pi[ 99 ] );
403 192 : in100 = GF_MUL_VAR( in100, pi[ 100 ] );
404 192 : in101 = GF_MUL_VAR( in101, pi[ 101 ] );
405 192 : in102 = GF_MUL_VAR( in102, pi[ 102 ] );
406 192 : in103 = GF_MUL_VAR( in103, pi[ 103 ] );
407 192 : in104 = GF_MUL_VAR( in104, pi[ 104 ] );
408 192 : in105 = GF_MUL_VAR( in105, pi[ 105 ] );
409 192 : in106 = GF_MUL_VAR( in106, pi[ 106 ] );
410 192 : in107 = GF_MUL_VAR( in107, pi[ 107 ] );
411 192 : in108 = GF_MUL_VAR( in108, pi[ 108 ] );
412 192 : in109 = GF_MUL_VAR( in109, pi[ 109 ] );
413 192 : in110 = GF_MUL_VAR( in110, pi[ 110 ] );
414 192 : in111 = GF_MUL_VAR( in111, pi[ 111 ] );
415 192 : in112 = GF_MUL_VAR( in112, pi[ 112 ] );
416 192 : in113 = GF_MUL_VAR( in113, pi[ 113 ] );
417 192 : in114 = GF_MUL_VAR( in114, pi[ 114 ] );
418 192 : in115 = GF_MUL_VAR( in115, pi[ 115 ] );
419 192 : in116 = GF_MUL_VAR( in116, pi[ 116 ] );
420 192 : in117 = GF_MUL_VAR( in117, pi[ 117 ] );
421 192 : in118 = GF_MUL_VAR( in118, pi[ 118 ] );
422 192 : in119 = GF_MUL_VAR( in119, pi[ 119 ] );
423 192 : in120 = GF_MUL_VAR( in120, pi[ 120 ] );
424 192 : in121 = GF_MUL_VAR( in121, pi[ 121 ] );
425 192 : in122 = GF_MUL_VAR( in122, pi[ 122 ] );
426 192 : in123 = GF_MUL_VAR( in123, pi[ 123 ] );
427 192 : in124 = GF_MUL_VAR( in124, pi[ 124 ] );
428 192 : in125 = GF_MUL_VAR( in125, pi[ 125 ] );
429 192 : in126 = GF_MUL_VAR( in126, pi[ 126 ] );
430 192 : in127 = GF_MUL_VAR( in127, pi[ 127 ] );
431 : /* There are four cases we have to handle:
432 : - If i<shred_cnt and erased[ i ], it's an actual erasure, so we
433 : need to store the generated value.
434 : - If i<shred_cnt and _erased[ i ] but not erased[ i ], it was a
435 : value that we ignored to ensure the data lies on a
436 : polynomial of the right order, so we need to compare the
437 : value we generated to the one that was there.
438 : - If i<shred_cnt and !_erased[ i ], then this is a value we
439 : actually used in the computation, but we destroyed it, so we
440 : need to reload the actual value of the shred in order to use the
441 : IFFT in the next step.
442 : - If i>=shred_cnt, do nothing, which will keep the value of the
443 : shred if it existed in the variable. */
444 18432 : #define STORE_COMPARE_RELOAD( n, var ) do{ \
445 18432 : if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
446 18432 : else if( _erased[ n ] ) diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
447 9216 : else var = gf_ldu( shred[ n ] + shred_pos ); \
448 18432 : } while( 0 )
449 192 : #define STORE_COMPARE( n, var ) do{ \
450 0 : if( erased[ n ] ) gf_stu( shred[ n ] + shred_pos, var ); \
451 0 : else diff = GF_OR( diff, GF_ADD( var, gf_ldu( shred[ n ] + shred_pos ) ) ); \
452 0 : } while( 0 )
453 192 : switch( fd_ulong_min( shred_cnt, 128UL ) ) {
454 0 : case 128UL: STORE_COMPARE_RELOAD( 127, in127 ); FALLTHRU
455 0 : case 127UL: STORE_COMPARE_RELOAD( 126, in126 ); FALLTHRU
456 0 : case 126UL: STORE_COMPARE_RELOAD( 125, in125 ); FALLTHRU
457 0 : case 125UL: STORE_COMPARE_RELOAD( 124, in124 ); FALLTHRU
458 0 : case 124UL: STORE_COMPARE_RELOAD( 123, in123 ); FALLTHRU
459 0 : case 123UL: STORE_COMPARE_RELOAD( 122, in122 ); FALLTHRU
460 0 : case 122UL: STORE_COMPARE_RELOAD( 121, in121 ); FALLTHRU
461 0 : case 121UL: STORE_COMPARE_RELOAD( 120, in120 ); FALLTHRU
462 0 : case 120UL: STORE_COMPARE_RELOAD( 119, in119 ); FALLTHRU
463 0 : case 119UL: STORE_COMPARE_RELOAD( 118, in118 ); FALLTHRU
464 0 : case 118UL: STORE_COMPARE_RELOAD( 117, in117 ); FALLTHRU
465 0 : case 117UL: STORE_COMPARE_RELOAD( 116, in116 ); FALLTHRU
466 0 : case 116UL: STORE_COMPARE_RELOAD( 115, in115 ); FALLTHRU
467 0 : case 115UL: STORE_COMPARE_RELOAD( 114, in114 ); FALLTHRU
468 0 : case 114UL: STORE_COMPARE_RELOAD( 113, in113 ); FALLTHRU
469 0 : case 113UL: STORE_COMPARE_RELOAD( 112, in112 ); FALLTHRU
470 0 : case 112UL: STORE_COMPARE_RELOAD( 111, in111 ); FALLTHRU
471 0 : case 111UL: STORE_COMPARE_RELOAD( 110, in110 ); FALLTHRU
472 0 : case 110UL: STORE_COMPARE_RELOAD( 109, in109 ); FALLTHRU
473 0 : case 109UL: STORE_COMPARE_RELOAD( 108, in108 ); FALLTHRU
474 0 : case 108UL: STORE_COMPARE_RELOAD( 107, in107 ); FALLTHRU
475 0 : case 107UL: STORE_COMPARE_RELOAD( 106, in106 ); FALLTHRU
476 0 : case 106UL: STORE_COMPARE_RELOAD( 105, in105 ); FALLTHRU
477 0 : case 105UL: STORE_COMPARE_RELOAD( 104, in104 ); FALLTHRU
478 0 : case 104UL: STORE_COMPARE_RELOAD( 103, in103 ); FALLTHRU
479 0 : case 103UL: STORE_COMPARE_RELOAD( 102, in102 ); FALLTHRU
480 0 : case 102UL: STORE_COMPARE_RELOAD( 101, in101 ); FALLTHRU
481 0 : case 101UL: STORE_COMPARE_RELOAD( 100, in100 ); FALLTHRU
482 0 : case 100UL: STORE_COMPARE_RELOAD( 99, in99 ); FALLTHRU
483 0 : case 99UL: STORE_COMPARE_RELOAD( 98, in98 ); FALLTHRU
484 0 : case 98UL: STORE_COMPARE_RELOAD( 97, in97 ); FALLTHRU
485 0 : case 97UL: STORE_COMPARE_RELOAD( 96, in96 ); FALLTHRU
486 192 : case 96UL: STORE_COMPARE_RELOAD( 95, in95 ); FALLTHRU
487 192 : case 95UL: STORE_COMPARE_RELOAD( 94, in94 ); FALLTHRU
488 192 : case 94UL: STORE_COMPARE_RELOAD( 93, in93 ); FALLTHRU
489 192 : case 93UL: STORE_COMPARE_RELOAD( 92, in92 ); FALLTHRU
490 192 : case 92UL: STORE_COMPARE_RELOAD( 91, in91 ); FALLTHRU
491 192 : case 91UL: STORE_COMPARE_RELOAD( 90, in90 ); FALLTHRU
492 192 : case 90UL: STORE_COMPARE_RELOAD( 89, in89 ); FALLTHRU
493 192 : case 89UL: STORE_COMPARE_RELOAD( 88, in88 ); FALLTHRU
494 192 : case 88UL: STORE_COMPARE_RELOAD( 87, in87 ); FALLTHRU
495 192 : case 87UL: STORE_COMPARE_RELOAD( 86, in86 ); FALLTHRU
496 192 : case 86UL: STORE_COMPARE_RELOAD( 85, in85 ); FALLTHRU
497 192 : case 85UL: STORE_COMPARE_RELOAD( 84, in84 ); FALLTHRU
498 192 : case 84UL: STORE_COMPARE_RELOAD( 83, in83 ); FALLTHRU
499 192 : case 83UL: STORE_COMPARE_RELOAD( 82, in82 ); FALLTHRU
500 192 : case 82UL: STORE_COMPARE_RELOAD( 81, in81 ); FALLTHRU
501 192 : case 81UL: STORE_COMPARE_RELOAD( 80, in80 ); FALLTHRU
502 192 : case 80UL: STORE_COMPARE_RELOAD( 79, in79 ); FALLTHRU
503 192 : case 79UL: STORE_COMPARE_RELOAD( 78, in78 ); FALLTHRU
504 192 : case 78UL: STORE_COMPARE_RELOAD( 77, in77 ); FALLTHRU
505 192 : case 77UL: STORE_COMPARE_RELOAD( 76, in76 ); FALLTHRU
506 192 : case 76UL: STORE_COMPARE_RELOAD( 75, in75 ); FALLTHRU
507 192 : case 75UL: STORE_COMPARE_RELOAD( 74, in74 ); FALLTHRU
508 192 : case 74UL: STORE_COMPARE_RELOAD( 73, in73 ); FALLTHRU
509 192 : case 73UL: STORE_COMPARE_RELOAD( 72, in72 ); FALLTHRU
510 192 : case 72UL: STORE_COMPARE_RELOAD( 71, in71 ); FALLTHRU
511 192 : case 71UL: STORE_COMPARE_RELOAD( 70, in70 ); FALLTHRU
512 192 : case 70UL: STORE_COMPARE_RELOAD( 69, in69 ); FALLTHRU
513 192 : case 69UL: STORE_COMPARE_RELOAD( 68, in68 ); FALLTHRU
514 192 : case 68UL: STORE_COMPARE_RELOAD( 67, in67 ); FALLTHRU
515 192 : case 67UL: STORE_COMPARE_RELOAD( 66, in66 ); FALLTHRU
516 192 : case 66UL: STORE_COMPARE_RELOAD( 65, in65 ); FALLTHRU
517 192 : case 65UL: STORE_COMPARE_RELOAD( 64, in64 ); FALLTHRU
518 192 : case 64UL: STORE_COMPARE_RELOAD( 63, in63 ); FALLTHRU
519 192 : case 63UL: STORE_COMPARE_RELOAD( 62, in62 ); FALLTHRU
520 192 : case 62UL: STORE_COMPARE_RELOAD( 61, in61 ); FALLTHRU
521 192 : case 61UL: STORE_COMPARE_RELOAD( 60, in60 ); FALLTHRU
522 192 : case 60UL: STORE_COMPARE_RELOAD( 59, in59 ); FALLTHRU
523 192 : case 59UL: STORE_COMPARE_RELOAD( 58, in58 ); FALLTHRU
524 192 : case 58UL: STORE_COMPARE_RELOAD( 57, in57 ); FALLTHRU
525 192 : case 57UL: STORE_COMPARE_RELOAD( 56, in56 ); FALLTHRU
526 192 : case 56UL: STORE_COMPARE_RELOAD( 55, in55 ); FALLTHRU
527 192 : case 55UL: STORE_COMPARE_RELOAD( 54, in54 ); FALLTHRU
528 192 : case 54UL: STORE_COMPARE_RELOAD( 53, in53 ); FALLTHRU
529 192 : case 53UL: STORE_COMPARE_RELOAD( 52, in52 ); FALLTHRU
530 192 : case 52UL: STORE_COMPARE_RELOAD( 51, in51 ); FALLTHRU
531 192 : case 51UL: STORE_COMPARE_RELOAD( 50, in50 ); FALLTHRU
532 192 : case 50UL: STORE_COMPARE_RELOAD( 49, in49 ); FALLTHRU
533 192 : case 49UL: STORE_COMPARE_RELOAD( 48, in48 ); FALLTHRU
534 192 : case 48UL: STORE_COMPARE_RELOAD( 47, in47 ); FALLTHRU
535 192 : case 47UL: STORE_COMPARE_RELOAD( 46, in46 ); FALLTHRU
536 192 : case 46UL: STORE_COMPARE_RELOAD( 45, in45 ); FALLTHRU
537 192 : case 45UL: STORE_COMPARE_RELOAD( 44, in44 ); FALLTHRU
538 192 : case 44UL: STORE_COMPARE_RELOAD( 43, in43 ); FALLTHRU
539 192 : case 43UL: STORE_COMPARE_RELOAD( 42, in42 ); FALLTHRU
540 192 : case 42UL: STORE_COMPARE_RELOAD( 41, in41 ); FALLTHRU
541 192 : case 41UL: STORE_COMPARE_RELOAD( 40, in40 ); FALLTHRU
542 192 : case 40UL: STORE_COMPARE_RELOAD( 39, in39 ); FALLTHRU
543 192 : case 39UL: STORE_COMPARE_RELOAD( 38, in38 ); FALLTHRU
544 192 : case 38UL: STORE_COMPARE_RELOAD( 37, in37 ); FALLTHRU
545 192 : case 37UL: STORE_COMPARE_RELOAD( 36, in36 ); FALLTHRU
546 192 : case 36UL: STORE_COMPARE_RELOAD( 35, in35 ); FALLTHRU
547 192 : case 35UL: STORE_COMPARE_RELOAD( 34, in34 ); FALLTHRU
548 192 : case 34UL: STORE_COMPARE_RELOAD( 33, in33 ); FALLTHRU
549 192 : case 33UL: STORE_COMPARE_RELOAD( 32, in32 ); FALLTHRU
550 192 : case 32UL: STORE_COMPARE_RELOAD( 31, in31 ); FALLTHRU
551 192 : case 31UL: STORE_COMPARE_RELOAD( 30, in30 ); FALLTHRU
552 192 : case 30UL: STORE_COMPARE_RELOAD( 29, in29 ); FALLTHRU
553 192 : case 29UL: STORE_COMPARE_RELOAD( 28, in28 ); FALLTHRU
554 192 : case 28UL: STORE_COMPARE_RELOAD( 27, in27 ); FALLTHRU
555 192 : case 27UL: STORE_COMPARE_RELOAD( 26, in26 ); FALLTHRU
556 192 : case 26UL: STORE_COMPARE_RELOAD( 25, in25 ); FALLTHRU
557 192 : case 25UL: STORE_COMPARE_RELOAD( 24, in24 ); FALLTHRU
558 192 : case 24UL: STORE_COMPARE_RELOAD( 23, in23 ); FALLTHRU
559 192 : case 23UL: STORE_COMPARE_RELOAD( 22, in22 ); FALLTHRU
560 192 : case 22UL: STORE_COMPARE_RELOAD( 21, in21 ); FALLTHRU
561 192 : case 21UL: STORE_COMPARE_RELOAD( 20, in20 ); FALLTHRU
562 192 : case 20UL: STORE_COMPARE_RELOAD( 19, in19 ); FALLTHRU
563 192 : case 19UL: STORE_COMPARE_RELOAD( 18, in18 ); FALLTHRU
564 192 : case 18UL: STORE_COMPARE_RELOAD( 17, in17 ); FALLTHRU
565 192 : case 17UL: STORE_COMPARE_RELOAD( 16, in16 ); FALLTHRU
566 192 : case 16UL: STORE_COMPARE_RELOAD( 15, in15 ); FALLTHRU
567 192 : case 15UL: STORE_COMPARE_RELOAD( 14, in14 ); FALLTHRU
568 192 : case 14UL: STORE_COMPARE_RELOAD( 13, in13 ); FALLTHRU
569 192 : case 13UL: STORE_COMPARE_RELOAD( 12, in12 ); FALLTHRU
570 192 : case 12UL: STORE_COMPARE_RELOAD( 11, in11 ); FALLTHRU
571 192 : case 11UL: STORE_COMPARE_RELOAD( 10, in10 ); FALLTHRU
572 192 : case 10UL: STORE_COMPARE_RELOAD( 9, in09 ); FALLTHRU
573 192 : case 9UL: STORE_COMPARE_RELOAD( 8, in08 ); FALLTHRU
574 192 : case 8UL: STORE_COMPARE_RELOAD( 7, in07 ); FALLTHRU
575 192 : case 7UL: STORE_COMPARE_RELOAD( 6, in06 ); FALLTHRU
576 192 : case 6UL: STORE_COMPARE_RELOAD( 5, in05 ); FALLTHRU
577 192 : case 5UL: STORE_COMPARE_RELOAD( 4, in04 ); FALLTHRU
578 192 : case 4UL: STORE_COMPARE_RELOAD( 3, in03 ); FALLTHRU
579 192 : case 3UL: STORE_COMPARE_RELOAD( 2, in02 ); FALLTHRU
580 192 : case 2UL: STORE_COMPARE_RELOAD( 1, in01 ); FALLTHRU
581 192 : case 1UL: STORE_COMPARE_RELOAD( 0, in00 );
582 192 : }
583 :
584 192 : ulong shreds_remaining = shred_cnt-fd_ulong_min( shred_cnt, 128UL );
585 192 : if( shreds_remaining>0UL ) {
586 0 : FD_REEDSOL_GENERATE_IFFT( 128, 0, ALL_VARS );
587 0 : FD_REEDSOL_GENERATE_FFT( 128, 128, ALL_VARS );
588 :
589 0 : switch( fd_ulong_min( shreds_remaining, 128UL ) ) {
590 0 : case 7UL: STORE_COMPARE( 134, in06 ); FALLTHRU
591 0 : case 6UL: STORE_COMPARE( 133, in05 ); FALLTHRU
592 0 : case 5UL: STORE_COMPARE( 132, in04 ); FALLTHRU
593 0 : case 4UL: STORE_COMPARE( 131, in03 ); FALLTHRU
594 0 : case 3UL: STORE_COMPARE( 130, in02 ); FALLTHRU
595 0 : case 2UL: STORE_COMPARE( 129, in01 ); FALLTHRU
596 0 : case 1UL: STORE_COMPARE( 128, in00 );
597 0 : }
598 0 : shreds_remaining -= fd_ulong_min( shreds_remaining, 128UL );
599 0 : }
600 192 : if( FD_UNLIKELY( GF_ANY( diff ) ) ) return FD_REEDSOL_ERR_CORRUPT;
601 192 : shred_pos += GF_WIDTH;
602 192 : shred_pos = fd_ulong_if( ((shred_sz-GF_WIDTH)<shred_pos) & (shred_pos<shred_sz), shred_sz-GF_WIDTH, shred_pos );
603 192 : }
604 6 : return FD_REEDSOL_SUCCESS;
605 6 : }
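
The selection loop near the top of the function is the subtle part of the setup: it keeps only the first data_shred_cnt shreds that actually arrived and marks every other position, including any received parity shred beyond that count, as erased, so exactly data_shred_cnt points determine the polynomial. The standalone sketch below reproduces just that selection logic so it can be tested in isolation; it is a minimal illustration, assuming plain C types in place of ulong/uchar, and build_mask plus the example shred counts are hypothetical names chosen here, not part of the fd_reedsol API.

#include <stdio.h>

/* Build the working erasure mask the same way the recover function does:
   walk positions 0..127 and mark as "loaded" the first data_shred_cnt
   positions below shred_cnt whose erased[] flag is 0.  Every other
   position is treated as erased and will be regenerated (or checked)
   later.  Returns the number of loaded shreds. */
static unsigned long
build_mask( unsigned long         shred_cnt,       /* data+parity shreds, <=128 here */
            unsigned long         data_shred_cnt,
            unsigned char const * erased,          /* 1 if shred was not received    */
            unsigned char       * _erased ) {      /* out: 1 if position regenerated */
  unsigned long loaded_cnt = 0UL;
  for( unsigned long i=0UL; i<128UL; i++ ) {
    int load_shred = ((i<shred_cnt)&(loaded_cnt<data_shred_cnt)) && ( erased[ i ]==0 );
    _erased[ i ] = (unsigned char)!load_shred;
    loaded_cnt  += (unsigned long)load_shred;
  }
  return loaded_cnt; /* < data_shred_cnt means recovery is impossible (the PARTIAL case) */
}

int
main( void ) {
  /* Hypothetical example: 4 data + 4 parity shreds, data shreds 1 and 2 missing. */
  unsigned char erased[ 128 ] = { 0,1,1,0, 0,0,0,0 };
  unsigned char _erased[ 128 ];
  unsigned long loaded = build_mask( 8UL, 4UL, erased, _erased );
  printf( "loaded=%lu mask:", loaded );
  for( unsigned long i=0UL; i<8UL; i++ ) printf( " %d", _erased[ i ] );
  printf( "\n" );
  return 0;
}

Run on this input it prints loaded=4 and the mask 0 1 1 0 0 0 1 1: the two missing data shreds and the two unneeded parity shreds end up flagged, which is exactly the set the recover loop above then regenerates or compares, chunk by chunk.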