Line data Source code
1 : #include "fd_store.h"
2 :
3 : void *
4 0 : fd_store_new( void * mem, ulong lo_wmark_slot ) {
5 0 : if( FD_UNLIKELY( !mem ) ) {
6 0 : FD_LOG_WARNING( ( "NULL mem" ) );
7 0 : return NULL;
8 0 : }
9 :
10 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, fd_store_align() ) ) ) {
11 0 : FD_LOG_WARNING( ( "misaligned mem" ) );
12 0 : return NULL;
13 0 : }
14 :
15 0 : fd_memset( mem, 0, fd_store_footprint() );
16 :
17 0 : fd_store_t * store = (fd_store_t *)mem;
18 0 : store->first_turbine_slot = FD_SLOT_NULL;
19 0 : store->curr_turbine_slot = FD_SLOT_NULL;
20 0 : store->root = FD_SLOT_NULL;
21 0 : fd_repair_backoff_map_new( store->repair_backoff_map );
22 0 : store->pending_slots = fd_pending_slots_new( (uchar *)mem + sizeof( fd_store_t ), lo_wmark_slot );
23 0 : if( FD_UNLIKELY( !store->pending_slots ) ) {
24 0 : return NULL;
25 0 : }
26 :
27 0 : return mem;
28 0 : }
29 :
30 : fd_store_t *
31 0 : fd_store_join( void * store ) {
32 0 : if( FD_UNLIKELY( !store ) ) {
33 0 : FD_LOG_WARNING( ( "NULL store" ) );
34 0 : return NULL;
35 0 : }
36 :
37 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)store, fd_store_align() ) ) ) {
38 0 : FD_LOG_WARNING( ( "misaligned replay" ) );
39 0 : return NULL;
40 0 : }
41 :
42 0 : fd_store_t * store_ = (fd_store_t *)store;
43 0 : fd_repair_backoff_map_join( store_->repair_backoff_map );
44 0 : store_->pending_slots = fd_pending_slots_join( store_->pending_slots );
45 0 : if( FD_UNLIKELY( !store_->pending_slots ) ) {
46 0 : return NULL;
47 0 : }
48 :
49 0 : return store_;
50 0 : }
51 :
52 : void *
53 0 : fd_store_leave( fd_store_t const * store ) {
54 0 : if( FD_UNLIKELY( !store ) ) {
55 0 : FD_LOG_WARNING( ( "NULL store" ) );
56 0 : return NULL;
57 0 : }
58 :
59 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)store, fd_store_align() ) ) ) {
60 0 : FD_LOG_WARNING( ( "misaligned store" ) );
61 0 : return NULL;
62 0 : }
63 :
64 0 : return (void *)store;
65 0 : }
66 :
67 : void *
68 0 : fd_store_delete( void * store ) {
69 0 : if( FD_UNLIKELY( !store ) ) {
70 0 : FD_LOG_WARNING( ( "NULL store" ) );
71 0 : return NULL;
72 0 : }
73 :
74 0 : if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)store, fd_store_align() ) ) ) {
75 0 : FD_LOG_WARNING( ( "misaligned store" ) );
76 0 : return NULL;
77 0 : }
78 :
79 0 : return store;
80 0 : }
81 :
82 : void
83 0 : fd_store_expected_shred_version( fd_store_t * store, ulong expected_shred_version ) {
84 0 : store->expected_shred_version = expected_shred_version;
85 0 : }
86 :
/* fd_store_slot_prepare decides what must happen to slot before it can
   be replayed.  Returns one of the FD_STORE_SLOT_PREPARE_* codes.  On
   a NEED_REPAIR / NEED_ORPHAN result, *repair_slot_out holds the slot
   to request from repair; otherwise it is set to 0.  Any slots that
   still need work are re-queued on the pending schedule (with a delay)
   before returning. */
int
fd_store_slot_prepare( fd_store_t * store,
                       ulong slot,
                       ulong * repair_slot_out ) {

  /* Slots (and matching delays) to re-add to the pending queue at the
     end.  Every path below pushes at most 2 entries. */
  ulong re_adds[2];
  uint re_adds_cnt = 0U;
  long re_add_delays[2];

  *repair_slot_out = 0;
  int rc = FD_STORE_SLOT_PREPARE_CONTINUE;

  /* Slot block map data */

  /* Speculative (lock-free) read of this slot's block map entry: retry
     until fd_block_map_query_test confirms the copied fields were not
     torn by a concurrent writer (FD_MAP_ERR_AGAIN => retry). */
  int block_complete = fd_blockstore_shreds_complete( store->blockstore, slot );
  int block_info = 0;
  ulong parent_slot = FD_SLOT_NULL;
  uchar flags = 0;
  fd_block_map_query_t query[1] = { 0 };
  int err = FD_MAP_ERR_AGAIN;
  while( err == FD_MAP_ERR_AGAIN ){
    err = fd_block_map_query_try( store->blockstore->block_map, &slot, NULL, query, 0 );
    fd_block_info_t * blk = fd_block_map_query_ele( query );
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
    if( err == FD_MAP_ERR_KEY ) {
      /* No entry at all: nothing is known about this slot yet. */
      block_info = 0;
      flags = 0;
      parent_slot = FD_SLOT_NULL;
      break;
    }
    block_info = 1;
    flags = blk->flags;
    parent_slot = blk->parent_slot;
    err = fd_block_map_query_test( query );
  }

  /* We already executed this block */
  /* NOTE(review): this first check tests FD_BLOCK_FLAG_REPLAYING (block
     handed to replay), the second FD_BLOCK_FLAG_PROCESSED (replay
     done); both are reported as ALREADY_EXECUTED. */
  if( FD_UNLIKELY( block_complete && fd_uchar_extract_bit( flags, FD_BLOCK_FLAG_REPLAYING ) ) ) {
    rc = FD_STORE_SLOT_PREPARE_ALREADY_EXECUTED;
    goto end;
  }

  if( FD_UNLIKELY( block_complete && fd_uchar_extract_bit( flags, FD_BLOCK_FLAG_PROCESSED ) ) ) {
    rc = FD_STORE_SLOT_PREPARE_ALREADY_EXECUTED;
    goto end;
  }

  if( FD_UNLIKELY( !block_info ) ) {
    /* I know nothing about this block yet */
    rc = FD_STORE_SLOT_PREPARE_NEED_REPAIR;
    *repair_slot_out = slot;
    re_add_delays[re_adds_cnt] = FD_REPAIR_BACKOFF_TIME;
    re_adds[re_adds_cnt++] = slot;
    goto end;
  }

  /* Parent slot block map data */

  /* Same speculative read pattern for the parent slot's entry. */
  int parent_block_info = 0;
  uchar parent_flags = 0;
  err = FD_MAP_ERR_AGAIN;
  while( err == FD_MAP_ERR_AGAIN ){
    err = fd_block_map_query_try( store->blockstore->block_map, &parent_slot, NULL, query, 0 );
    fd_block_info_t * blk = fd_block_map_query_ele( query );
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
    if( err == FD_MAP_ERR_KEY ) {
      parent_block_info = 0;
      parent_flags = 0;
      break;
    } else {
      parent_block_info = 1;
      parent_flags = blk->flags;
    }
    err = fd_block_map_query_test( query );
  }

  /* If the parent slot meta is missing, this block is an orphan and the ancestry needs to be
   * repaired before we can replay it. */
  if( FD_UNLIKELY( !parent_block_info ) ) {
    rc = FD_STORE_SLOT_PREPARE_NEED_ORPHAN;
    *repair_slot_out = slot;
    re_add_delays[re_adds_cnt] = FD_REPAIR_BACKOFF_TIME;
    re_adds[re_adds_cnt++] = slot;

    re_add_delays[re_adds_cnt] = FD_REPAIR_BACKOFF_TIME;
    re_adds[re_adds_cnt++] = parent_slot;
    goto end;
  }

  int parent_complete = fd_blockstore_shreds_complete( store->blockstore, parent_slot );

  /* We have a parent slot meta, and therefore have at least one shred of the parent block, so we
     have the ancestry and need to repair that block directly (as opposed to calling repair orphan).
   */
  if( FD_UNLIKELY( !parent_complete ) ) {
    rc = FD_STORE_SLOT_PREPARE_NEED_REPAIR;
    *repair_slot_out = parent_slot;
    re_add_delays[re_adds_cnt] = FD_REPAIR_BACKOFF_TIME;
    re_adds[re_adds_cnt++] = parent_slot;
    re_add_delays[re_adds_cnt] = FD_REPAIR_BACKOFF_TIME;
    re_adds[re_adds_cnt++] = slot;

    goto end;
  }

  /* See if the parent is executed yet */
  if( FD_UNLIKELY( !fd_uchar_extract_bit( parent_flags, FD_BLOCK_FLAG_PROCESSED ) ) ) {
    rc = FD_STORE_SLOT_PREPARE_NEED_PARENT_EXEC;
    // FD_LOG_WARNING(("NEED PARENT EXEC %lu %lu", slot, parent_slot));
    if( FD_UNLIKELY( !fd_uchar_extract_bit( parent_flags, FD_BLOCK_FLAG_REPLAYING ) ) ) {
      /* ... but it is not prepared */
      re_add_delays[re_adds_cnt] = (long)5e6; /* retry in ~5ms */
      re_adds[re_adds_cnt++] = slot;
    }
    re_add_delays[re_adds_cnt] = (long)5e6;
    re_adds[re_adds_cnt++] = parent_slot;
    goto end;
  }

  /* The parent is executed, but the block is still incomplete. Ask for more shreds. */
  if( FD_UNLIKELY( !block_complete ) ) {
    rc = FD_STORE_SLOT_PREPARE_NEED_REPAIR;
    *repair_slot_out = slot;
    re_add_delays[re_adds_cnt] = FD_REPAIR_BACKOFF_TIME;
    re_adds[re_adds_cnt++] = slot;
    goto end;
  }

  /* Prepare the replay_slot struct. */
  /* Mark the block as prepared, and thus unsafe to remove.  This is a
     blocking (exclusive) prepare/publish on the block map entry. */
  err = fd_block_map_prepare( store->blockstore->block_map, &slot, NULL, query, FD_MAP_FLAG_BLOCKING );
  fd_block_info_t * meta = fd_block_map_query_ele( query );
  if( FD_UNLIKELY( err || meta->slot != slot ) ) FD_LOG_ERR(( "block map prepare failed" ));
  meta->flags = fd_uchar_set_bit( meta->flags, FD_BLOCK_FLAG_REPLAYING );
  fd_block_map_publish( query );

end:
  /* Flush the deferred re-schedules collected above. */
  for (uint i = 0; i < re_adds_cnt; ++i)
    fd_store_add_pending( store, re_adds[i], re_add_delays[i], 0, 0 );

  return rc;
}
229 :
/* fd_store_shred_insert inserts shred into the blockstore and
   schedules follow-up work.  Shreds are dropped (returning
   FD_BLOCKSTORE_SUCCESS) when they carry an unexpected shred version,
   are not data shreds, are for slots behind the root, or are for slots
   already complete.  Otherwise the shred is inserted; if that completes
   the slot, the slot is queued for replay and
   FD_BLOCKSTORE_SUCCESS_SLOT_COMPLETE is returned, else a repair retry
   is queued with backoff and FD_BLOCKSTORE_SUCCESS is returned. */
int
fd_store_shred_insert( fd_store_t * store,
                       fd_shred_t const * shred ) {
  if( FD_UNLIKELY( shred->version != store->expected_shred_version ) ) {
    FD_LOG_WARNING(( "received shred version %lu instead of %lu", (ulong)shred->version, store->expected_shred_version ));
    return FD_BLOCKSTORE_SUCCESS;
  }

  fd_blockstore_t * blockstore = store->blockstore;

  /* Only data shreds are stored; coding shreds are ignored here. */
  uchar shred_type = fd_shred_type( shred->variant );
  if( !fd_shred_is_data( shred_type ) ) {
    return FD_BLOCKSTORE_SUCCESS;
  }


  /* Check this shred > root. We ignore shreds before the root because
     we either already replayed them (ie. the slot is an ancestor of the
     SMR) or it is a pruned fork. */

  if( store->root!=FD_SLOT_NULL && shred->slot<store->root ) {
    FD_LOG_WARNING(( "shred slot is behind root, dropping shred - root: %lu, shred_slot: %lu", store->root, shred->slot ));
    return FD_BLOCKSTORE_SUCCESS;
  }

  /* Slot already has all its shreds; nothing to do. */
  if( fd_blockstore_shreds_complete( blockstore, shred->slot ) ) {
    return FD_BLOCKSTORE_SUCCESS;
  }
  fd_blockstore_shred_insert( blockstore, shred );

  /* FIXME */
  if( FD_UNLIKELY( fd_blockstore_shreds_complete( blockstore, shred->slot ) ) ) {
    /* This shred completed the slot: queue it for replay shortly. */
    fd_store_add_pending( store, shred->slot, (long)5e6, 0, 1 );
    return FD_BLOCKSTORE_SUCCESS_SLOT_COMPLETE;
  } else {
    /* Slot still incomplete: queue a repair retry and make sure the
       backoff entry does not push the next repair too far out. */
    fd_store_add_pending( store, shred->slot, FD_REPAIR_BACKOFF_TIME, 0, 0 );
    fd_repair_backoff_t * backoff = fd_repair_backoff_map_query( store->repair_backoff_map, shred->slot, NULL );
    if( FD_LIKELY( backoff==NULL ) ) {
      /* new backoff entry */
      backoff = fd_repair_backoff_map_insert( store->repair_backoff_map, shred->slot );
      backoff->last_backoff_duration = FD_REPAIR_BACKOFF_TIME;
      backoff->last_repair_time = store->now;
    } else if( ( backoff->last_repair_time+backoff->last_backoff_duration )
               >( store->now + FD_REPAIR_BACKOFF_TIME ) ) {
      /* Next repair was scheduled beyond now + base backoff: since new
         shreds are arriving, reset to the base backoff. */
      backoff->last_backoff_duration = FD_REPAIR_BACKOFF_TIME;
      backoff->last_repair_time = store->now;
    }
    return FD_BLOCKSTORE_SUCCESS;
  }
}
280 :
281 : void
282 : fd_store_shred_update_with_shred_from_turbine( fd_store_t * store,
283 0 : fd_shred_t const * shred ) {
284 0 : if( FD_UNLIKELY( store->first_turbine_slot == FD_SLOT_NULL ) ) {
285 0 : FD_LOG_NOTICE(("first turbine slot: %lu", shred->slot));
286 : // ulong slot = shred->slot;
287 : // while ( slot > store->snapshot_slot ) {
288 : // fd_store_add_pending( store, slot, 0 );
289 : // slot -= 10;
290 : // }
291 0 : store->first_turbine_slot = shred->slot;
292 0 : store->curr_turbine_slot = shred->slot;
293 0 : }
294 :
295 0 : store->curr_turbine_slot = fd_ulong_max(shred->slot, store->curr_turbine_slot);
296 0 : }
297 :
298 : void
299 : fd_store_add_pending( fd_store_t * store,
300 : ulong slot,
301 : long delay,
302 : int should_backoff,
303 0 : int reset_backoff ) {
304 0 : (void)should_backoff;
305 0 : (void)reset_backoff;
306 : // fd_repair_backoff_t * backoff = fd_repair_backoff_map_query( store->repair_backoff_map, slot, NULL );
307 : // long existing_when = fd_pending_slots_get( store->pending_slots, slot );
308 : // if( existing_when!=0L && existing_when!=LONG_MAX ) {
309 : // if( !should_backoff && delay > ( existing_when-store->now ) ) {
310 : // return;
311 : // }
312 : // }
313 : // // if( existing_when!=0L && existing_when!=LONG_MAX ) {
314 : // // if( !should_backoff && delay < ( existing_when-store->now ) ) {
315 : // // FD_LOG_WARNING(( "hey! %lu %ld %ld ", slot, delay, ( existing_when-store->now )));
316 : // // } else {
317 : // // FD_LOG_WARNING(( "eep %lu %lu %lu %d %lu", slot, delay/1000000, (existing_when - store->now)/1000000, should_backoff ));
318 : // // return;
319 : // // }
320 : // // }
321 : // if( backoff==NULL ) {
322 : // backoff = fd_repair_backoff_map_insert( store->repair_backoff_map, slot );
323 : // backoff->slot = slot;
324 : // backoff->last_backoff = delay;
325 : // } else if( reset_backoff ) {
326 : // backoff->last_backoff = delay;
327 : // } else if( should_backoff ) {
328 : // ulong backoff->last_backoff + (backoff->last_backoff>>3);
329 : // backoff->last_backoff =
330 : // delay = backoff->last_backoff;
331 : // } else {
332 : // delay = backoff->last_backoff;
333 : // }
334 : // if( should_backoff ) FD_LOG_INFO(("PENDING %lu %d %lu %ld", slot, should_backoff, delay/1000000, (existing_when-store->now)/1000000L));
335 0 : if( store->root!=FD_SLOT_NULL && slot<store->root) {
336 0 : FD_LOG_WARNING(( "slot is older than root, skipping adding slot to pending queue - root: %lu, slot: %lu",
337 0 : store->root, slot ));
338 0 : return;
339 0 : }
340 0 : fd_pending_slots_add( store->pending_slots, slot, store->now + (long)delay );
341 0 : }
342 :
/* fd_store_set_root publishes a new root slot: records it, raises the
   pending-slots low watermark, and prunes backoff entries for slots at
   or below the new root. */
void
fd_store_set_root( fd_store_t * store,
                   ulong root ) {
  store->root = root;
  fd_pending_slots_set_lo_wmark( store->pending_slots, root );

  /* remove old roots */
  /* NOTE(review): this removes from the backoff map while iterating its
     slots by raw index, and tests `slot <= root` on every slot
     including empty ones.  This is only safe if remove tolerates
     non-member elements and does not relocate later entries past i --
     confirm against the fd_map template's remove/iteration contract. */
  for( ulong i = 0; i<fd_repair_backoff_map_slot_cnt(); i++ ) {
    if( store->repair_backoff_map[ i ].slot <= root ) {
      fd_repair_backoff_map_remove( store->repair_backoff_map, &store->repair_backoff_map[ i ] );
    }
  }
}
356 :
/* fd_store_slot_repair generates repair requests for slot into
   out_repair_reqs (capacity out_repair_reqs_sz) and returns the number
   written.  Honors per-slot backoff: returns 0 without doing work when
   the slot's backoff window has not elapsed.  On every call that emits
   requests, the backoff duration grows by 25%. */
ulong
fd_store_slot_repair( fd_store_t * store,
                      ulong slot,
                      fd_repair_request_t * out_repair_reqs,
                      ulong out_repair_reqs_sz ) {
  if( out_repair_reqs_sz==0UL ) {
    return 0UL;
  }

  /* Backoff gate: skip this slot entirely if we repaired it recently. */
  fd_repair_backoff_t * backoff = fd_repair_backoff_map_query( store->repair_backoff_map, slot, NULL );
  if( FD_LIKELY( backoff!=NULL ) ) {
    if( store->now<( backoff->last_repair_time+backoff->last_backoff_duration ) ) {
      return 0UL;
    }
  } else {
    /* new backoff entry */
    backoff = fd_repair_backoff_map_insert( store->repair_backoff_map, slot );
    backoff->last_backoff_duration = FD_REPAIR_BACKOFF_TIME;
  }
  backoff->last_repair_time = store->now;

  ulong repair_req_cnt = 0;

  /* Speculative (lock-free) read of the slot's block map entry; retry
     until fd_block_map_query_test confirms an untorn read. */
  int block_info = 0;
  uint complete_idx = UINT_MAX;
  uint received_idx = 0;
  uint buffered_idx = 0;
  int err = FD_MAP_ERR_AGAIN;
  while( err == FD_MAP_ERR_AGAIN ){
    fd_block_map_query_t query[1] = { 0 };
    err = fd_block_map_query_try( store->blockstore->block_map, &slot, NULL, query, 0 );
    fd_block_info_t * meta = fd_block_map_query_ele( query );
    if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
    if( err == FD_MAP_ERR_KEY ) {
      block_info = 0;
      break;
    }
    block_info = 1;
    complete_idx = meta->slot_complete_idx;
    received_idx = meta->received_idx;
    buffered_idx = meta->buffered_idx;

    err = fd_block_map_query_test( query );
  }

  if( FD_LIKELY( !block_info ) ) {
    /* We haven't received any shreds for this slot yet */

    fd_repair_request_t * repair_req = &out_repair_reqs[repair_req_cnt++];
    repair_req->shred_index = 0;
    repair_req->slot = slot;
    repair_req->type = FD_REPAIR_REQ_TYPE_NEED_HIGHEST_WINDOW_INDEX;
  } else {
    /* We've received at least one shred, so fill in what's missing */

    /* We don't know the last index yet */
    /* NOTE(review): assumes received_idx > 0 whenever a block map entry
       exists; received_idx - 1 would wrap to UINT_MAX otherwise --
       confirm the blockstore guarantees this. */
    if( FD_UNLIKELY( complete_idx == UINT_MAX ) ) {
      complete_idx = received_idx - 1;
      fd_repair_request_t * repair_req = &out_repair_reqs[repair_req_cnt++];
      repair_req->shred_index = complete_idx;
      repair_req->slot = slot;
      repair_req->type = FD_REPAIR_REQ_TYPE_NEED_HIGHEST_WINDOW_INDEX;
    }

    /* Output buffer already full: grow the backoff and stop. */
    if( repair_req_cnt==out_repair_reqs_sz ) {
      backoff->last_backoff_duration += backoff->last_backoff_duration>>2;
      FD_LOG_INFO( ( "[repair] MAX need %lu [%u, %u], sent %lu requests (backoff: %ld ms)", slot, buffered_idx + 1, complete_idx, repair_req_cnt, backoff->last_backoff_duration/(long)1e6 ) );
      return repair_req_cnt;
    }

    /* First make sure we are ready to execute this block soon. Look for an ancestor that was executed. */
    ulong anc_slot = slot;
    int good = 0;
    for( uint i = 0; i < 6; ++i ) {
      anc_slot = fd_blockstore_parent_slot_query( store->blockstore, anc_slot );
      int anc_complete = fd_blockstore_shreds_complete( store->blockstore, anc_slot );
      if( !anc_complete ) continue;
      /* get ancestor flags */
      /* NOTE(review): this inner `err` shadows the outer one; same
         speculative read pattern as above. */
      uchar anc_flags = 0;
      int err = FD_MAP_ERR_AGAIN;
      while( err == FD_MAP_ERR_AGAIN ){
        fd_block_map_query_t query[1] = { 0 };
        err = fd_block_map_query_try( store->blockstore->block_map, &anc_slot, NULL, query, 0 );
        fd_block_info_t * meta = fd_block_map_query_ele( query );
        if( FD_UNLIKELY( err == FD_MAP_ERR_AGAIN ) ) continue;
        if( err == FD_MAP_ERR_KEY ) {
          anc_flags = 0;
          break;
        }
        anc_flags = meta->flags;
        err = fd_block_map_query_test( query );
      }

      if( fd_uchar_extract_bit( anc_flags, FD_BLOCK_FLAG_PROCESSED ) ) {
        good = 1;
        out_repair_reqs_sz /= (i>>1)+1U; /* Slow roll blocks that are further out */
        break;
      }
    }

    /* No executed ancestor within 6 hops: not worth repairing yet. */
    if( !good ) {
      return repair_req_cnt;
    }

    /* Fill in what's missing */
    for( uint i = buffered_idx + 1; i <= complete_idx; i++ ) {
      if( FD_UNLIKELY( fd_blockstore_shred_test( store->blockstore, slot, i ) ) ) continue;

      fd_repair_request_t * repair_req = &out_repair_reqs[repair_req_cnt++];
      repair_req->shred_index = i;
      repair_req->slot = slot;
      repair_req->type = FD_REPAIR_REQ_TYPE_NEED_WINDOW_INDEX;

      if( repair_req_cnt == out_repair_reqs_sz ) {
        backoff->last_backoff_duration += backoff->last_backoff_duration>>2;
        FD_LOG_INFO( ( "[repair] MAX need %lu [%u, %u], sent %lu requests (backoff: %ld ms)", slot, buffered_idx + 1, complete_idx, repair_req_cnt, backoff->last_backoff_duration/(long)1e6 ) );
        return repair_req_cnt;
      }
    }
    if( repair_req_cnt ) {
      backoff->last_backoff_duration += backoff->last_backoff_duration>>2;
      FD_LOG_INFO( ( "[repair] need %lu [%u, %u], sent %lu requests (backoff: %ld ms)", slot, buffered_idx + 1, complete_idx, repair_req_cnt, backoff->last_backoff_duration/(long)1e6 ) );
    }
  }

  return repair_req_cnt;
}
|