#ifndef HEADER_fd_src_flamenco_vm_fd_vm_private_h
#define HEADER_fd_src_flamenco_vm_fd_vm_private_h

#include "../runtime/tests/fd_dump_pb.h"
#include "fd_vm.h"

#include "../../ballet/sbpf/fd_sbpf_instr.h"
#include "../../ballet/sbpf/fd_sbpf_opcodes.h"
#include "../../ballet/murmur3/fd_murmur3.h"
#include "../runtime/context/fd_exec_txn_ctx.h"
#include "../features/fd_features.h"
#include "fd_vm_base.h"
/* FD_VM_ALIGN_RUST_{} define the alignments for relevant Rust types.
   Alignments are derived with std::mem::align_of::<T>() and are
   enforced by the VM (with the exception of the v1 loader).

   In our implementation, when calling FD_VM_MEM_HADDR_ST /
   FD_VM_MEM_HADDR_LD, we need to make sure we're passing the correct
   alignment based on the Rust type in the corresponding mapping in
   Agave.

   FD_VM_ALIGN_RUST_{} has been generated with this Rust code:
   ```rust
   pub type Epoch = u64;
   pub struct Pubkey(pub [u8; 32]);
   pub struct AccountMeta {
       pub lamports: u64,
       pub rent_epoch: Epoch,
       pub owner: Pubkey,
       pub executable: bool,
   }

   pub struct PodScalar(pub [u8; 32]);

   fn main() {
       println!("u8: {}", std::mem::align_of::<u8>());
       println!("u32: {}", std::mem::align_of::<u32>());
       println!("u64: {}", std::mem::align_of::<u64>());
       println!("u128: {}", std::mem::align_of::<u128>());
       println!("&[u8]: {}", std::mem::align_of::<&[u8]>());
       println!("AccountMeta: {}", std::mem::align_of::<AccountMeta>());
       println!("PodScalar: {}", std::mem::align_of::<PodScalar>());
       println!("Pubkey: {}", std::mem::align_of::<Pubkey>());
   }
   ``` */

#define FD_VM_ALIGN_RUST_U8                       (1UL)
#define FD_VM_ALIGN_RUST_U32                      (4UL)
#define FD_VM_ALIGN_RUST_I32                      (4UL)
#define FD_VM_ALIGN_RUST_U64                      (8UL)
#define FD_VM_ALIGN_RUST_U128                    (16UL)
#define FD_VM_ALIGN_RUST_SLICE_U8_REF             (8UL)
#define FD_VM_ALIGN_RUST_POD_U8_ARRAY             (1UL)
#define FD_VM_ALIGN_RUST_PUBKEY                   (1UL)
#define FD_VM_ALIGN_RUST_SYSVAR_CLOCK             (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_EPOCH_SCHEDULE    (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_RENT              (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_LAST_RESTART_SLOT (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_EPOCH_REWARDS    (16UL)
#define FD_VM_ALIGN_RUST_STABLE_INSTRUCTION       (8UL)
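
/* Illustrative sketch only: a syscall translating a vaddr that Agave
   maps to a Rust u64 would pass FD_VM_ALIGN_RUST_U64 as the align
   argument of the translation macro.  The FD_VM_MEM_HADDR_LD argument
   order shown here is an assumption, not definitive:

     ulong const * slot = FD_VM_MEM_HADDR_LD( vm, slot_vaddr, FD_VM_ALIGN_RUST_U64, sizeof(ulong) );
*/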

/* fd_vm_vec_t is the in-memory representation of a vector descriptor.
   Equal in layout to the Rust slice header &[_] and various vector
   types in the C version of the syscall API. */
/* FIXME: WHEN IS VADDR NULL AND/OR SZ 0 OKAY? */
/* FIXME: MOVE FD_VM_RUST_VEC_T FROM SYSCALL/FD_VM_CPI.H HERE TOO? */

#define FD_VM_VEC_ALIGN FD_VM_ALIGN_RUST_SLICE_U8_REF
#define FD_VM_VEC_SIZE  (16UL)

struct __attribute__((packed)) fd_vm_vec {
  ulong addr; /* FIXME: NAME -> VADDR */
  ulong len;  /* FIXME: NAME -> SZ */
};

typedef struct fd_vm_vec fd_vm_vec_t;
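
/* Illustrative sketch (not part of the API): a syscall handed a slice
   header at a hypothetical vec_vaddr would first map the 16-byte
   descriptor, then map the buffer it points to, using fd_vm_mem_haddr
   declared below:

     uchar is_multi = 0;
     fd_vm_vec_t const * vec = (fd_vm_vec_t const *)fd_vm_mem_haddr(
         vm, vec_vaddr, FD_VM_VEC_SIZE, vm->region_haddr, vm->region_ld_sz, 0, 0UL, &is_multi );
     if( vec ) {
       uchar const * buf = (uchar const *)fd_vm_mem_haddr(
           vm, vec->addr, vec->len, vm->region_haddr, vm->region_ld_sz, 0, 0UL, &is_multi );
       ...
     }
*/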

/* SBPF version and features
   https://github.com/solana-labs/rbpf/blob/4b2c3dfb02827a0119cd1587eea9e27499712646/src/program.rs#L22

   Note: SIMDs enable or disable features, e.g. BPF instructions.
   Naming macros ENABLE vs DISABLE has the advantage that the condition
   is always pretty clear (sbpf_version <= activation_version), but the
   disadvantage of inconsistent names.  Vice versa, calling everything
   ENABLE risks inverting a <= with a >= and creating a huge mess.
   We define both, so hopefully it's foolproof. */

#define FD_VM_SBPF_REJECT_RODATA_STACK_OVERLAP(v)  ( v != FD_SBPF_V0 )
#define FD_VM_SBPF_ENABLE_ELF_VADDR(v)             ( v != FD_SBPF_V0 )
/* SIMD-0166 */
#define FD_VM_SBPF_DYNAMIC_STACK_FRAMES(v)         ( v >= FD_SBPF_V1 )
/* SIMD-0173 */
#define FD_VM_SBPF_CALLX_USES_SRC_REG(v)           ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_LDDW(v)                 ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_LDDW(v)                  ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_LE(v)                   ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_LE(v)                    ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_MOVE_MEMORY_IX_CLASSES(v)       ( v >= FD_SBPF_V2 )
/* SIMD-0174 */
#define FD_VM_SBPF_ENABLE_PQR(v)                   ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_NEG(v)                  ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_NEG(v)                   ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_SWAP_SUB_REG_IMM_OPERANDS(v)    ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_EXPLICIT_SIGN_EXT(v)            ( v >= FD_SBPF_V2 )
/* SIMD-0178 + SIMD-0179 */
#define FD_VM_SBPF_STATIC_SYSCALLS(v)              ( v >= FD_SBPF_V3 )
/* SIMD-0189 */
#define FD_VM_SBPF_ENABLE_STRICTER_ELF_HEADERS(v)  ( v >= FD_SBPF_V3 )
#define FD_VM_SBPF_ENABLE_LOWER_BYTECODE_VADDR(v)  ( v >= FD_SBPF_V3 )
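
/* Usage sketch (illustrative only): callers gate behavior on the
   program's SBPF version, e.g. the interpreter/verifier would accept
   an lddw instruction only before SBPF v2 with a check along these
   lines:

     if( FD_VM_SBPF_ENABLE_LDDW( vm->sbpf_version ) ) {
       ... decode the two-slot lddw form ...
     } else {
       ... opcode is invalid under this version ...
     }
*/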

#define FD_VM_SBPF_DYNAMIC_STACK_FRAMES_ALIGN (64U)

#define FD_VM_OFFSET_MASK (0xffffffffUL)

FD_PROTOTYPES_BEGIN

/* Error logging handholding assertions */

#ifdef FD_RUNTIME_ERR_HANDHOLDING
/* Asserts that the error and error kind are populated (non-zero) */
#define FD_VM_TEST_ERR_EXISTS( vm )             \
  FD_TEST( vm->instr_ctx->txn_ctx->exec_err );  \
  FD_TEST( vm->instr_ctx->txn_ctx->exec_err_kind )

/* Asserts that the error and error kind are not populated (zero) */
#define FD_VM_TEST_ERR_OVERWRITE( vm )          \
  FD_TEST( !vm->instr_ctx->txn_ctx->exec_err ); \
  FD_TEST( !vm->instr_ctx->txn_ctx->exec_err_kind )
#else
#define FD_VM_TEST_ERR_EXISTS( vm )    ( ( void )0 )
#define FD_VM_TEST_ERR_OVERWRITE( vm ) ( ( void )0 )
#endif

/* Log error within the instr_ctx to match Agave/Rust error. */

#define FD_VM_ERR_FOR_LOG_EBPF( vm, err ) (__extension__({              \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                     \
    vm->instr_ctx->txn_ctx->exec_err      = err;                        \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_EBPF;  \
  }))

#define FD_VM_ERR_FOR_LOG_SYSCALL( vm, err ) (__extension__({             \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                       \
    vm->instr_ctx->txn_ctx->exec_err      = err;                          \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_SYSCALL; \
  }))

#define FD_VM_ERR_FOR_LOG_INSTR( vm, err ) (__extension__({             \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                     \
    vm->instr_ctx->txn_ctx->exec_err      = err;                        \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_INSTR; \
  }))
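
/* Usage sketch (illustrative only): a syscall that hits a failed
   translation would record the error before returning, e.g.:

     if( FD_UNLIKELY( !haddr ) ) {
       FD_VM_ERR_FOR_LOG_EBPF( vm, FD_VM_ERR_EBPF_ACCESS_VIOLATION );
       return FD_VM_ERR_SIGSEGV;
     }

   The exact error codes live in fd_vm_base.h; the two names used here
   are assumptions for the sketch. */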

#define FD_VADDR_TO_REGION( _vaddr ) fd_ulong_min( (_vaddr) >> FD_VM_MEM_MAP_REGION_VIRT_ADDR_BITS, FD_VM_HIGH_REGION )

/* fd_vm_instr APIs ***************************************************/

/* FIXME: MIGRATE FD_SBPF_INSTR_T STUFF TO THIS API */

/* fd_vm_instr returns the SBPF instruction word corresponding to the
   given fields. */

FD_FN_CONST static inline ulong
fd_vm_instr( ulong opcode, /* Assumed valid */
             ulong dst,    /* Assumed in [0,FD_VM_REG_CNT) */
             ulong src,    /* Assumed in [0,FD_VM_REG_CNT) */
             short offset,
             uint  imm ) {
  return opcode | (dst<<8) | (src<<12) | (((ulong)(ushort)offset)<<16) | (((ulong)imm)<<32);
}

/* fd_vm_instr_* return the SBPF instruction field for the given word.
   fd_vm_instr_{normal,mem}_* only apply to {normal,mem} opclass
   instructions. */

FD_FN_CONST static inline ulong fd_vm_instr_opcode( ulong instr ) { return instr & 255UL; }         /* In [0,256) */
FD_FN_CONST static inline ulong fd_vm_instr_dst   ( ulong instr ) { return ((instr>> 8) & 15UL); }  /* In [0,16)  */
FD_FN_CONST static inline ulong fd_vm_instr_src   ( ulong instr ) { return ((instr>>12) & 15UL); }  /* In [0,16)  */
FD_FN_CONST static inline ulong fd_vm_instr_offset( ulong instr ) { return (ulong)(long)(short)(ushort)(instr>>16); }
FD_FN_CONST static inline uint  fd_vm_instr_imm   ( ulong instr ) { return (uint)(instr>>32); }

FD_FN_CONST static inline ulong fd_vm_instr_opclass       ( ulong instr ) { return instr & 7UL; }        /* In [0,8)  */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opsrc  ( ulong instr ) { return (instr>>3) & 1UL; }   /* In [0,2)  */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opmode ( ulong instr ) { return (instr>>4) & 15UL; }  /* In [0,16) */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opsize    ( ulong instr ) { return (instr>>3) & 3UL; }   /* In [0,4)  */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opaddrmode( ulong instr ) { return (instr>>5) & 7UL; }   /* In [0,8)  */
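
/* For example (illustrative, using the raw SBPF/eBPF encoding where
   0x07 is add64-with-immediate): the word built by
   fd_vm_instr( 0x07UL, 1UL, 0UL, (short)0, 42U ) encodes
   "add64 r1, 42", and the accessors recover the fields:

     ulong w = fd_vm_instr( 0x07UL, 1UL, 0UL, (short)0, 42U );
     fd_vm_instr_opcode( w )==0x07UL;
     fd_vm_instr_dst   ( w )==1UL;
     fd_vm_instr_imm   ( w )==42U;
*/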

/* fd_vm_mem API ******************************************************/

/* fd_vm_mem APIs support the fast mapping of virtual address ranges to
   host address ranges.  Since the SBPF virtual address space consists
   of 4 consecutive 4GiB regions and the mappable size of each region
   is less than 4 GiB (as implied by FD_VM_MEM_MAP_REGION_SZ==2^32-1
   and that Solana protocol limits are much smaller still), it is
   impossible for a valid virtual address range to span multiple
   regions. */

/* fd_vm_mem_cfg configures the vm's tlb arrays.  Assumes vm is valid
   and vm already has configured the rodata, stack, heap and input
   regions.  Returns vm. */

static inline fd_vm_t *
fd_vm_mem_cfg( fd_vm_t * vm ) {
  vm->region_haddr[0]                  = 0UL;                vm->region_ld_sz[0]                  = (uint)0UL;             vm->region_st_sz[0]                  = (uint)0UL;
  vm->region_haddr[FD_VM_PROG_REGION]  = (ulong)vm->rodata;  vm->region_ld_sz[FD_VM_PROG_REGION]  = (uint)vm->rodata_sz;   vm->region_st_sz[FD_VM_PROG_REGION]  = (uint)0UL;
  vm->region_haddr[FD_VM_STACK_REGION] = (ulong)vm->stack;   vm->region_ld_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX; vm->region_st_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX;
  vm->region_haddr[FD_VM_HEAP_REGION]  = (ulong)vm->heap;    vm->region_ld_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;    vm->region_st_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;
  vm->region_haddr[5]                  = 0UL;                vm->region_ld_sz[5]                  = (uint)0UL;             vm->region_st_sz[5]                  = (uint)0UL;
  if( vm->direct_mapping || !vm->input_mem_regions_cnt ) {
    /* When direct mapping is enabled, we don't use these fields because
       the loads and stores are fragmented. */
    vm->region_haddr[FD_VM_INPUT_REGION] = 0UL;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = 0U;
    vm->region_st_sz[FD_VM_INPUT_REGION] = 0U;
  } else {
    vm->region_haddr[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].haddr;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
    vm->region_st_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
  }
  return vm;
}

/* fd_vm_mem_haddr translates the vaddr range [vaddr,vaddr+sz) (in
   infinite precision math) into the non-wrapping haddr range
   [haddr,haddr+sz).  On success, returns haddr and every byte in the
   haddr range is a valid address.  On failure, returns sentinel and
   there was at least one byte in the virtual address range that did
   not have a corresponding byte in the host address range.

   IMPORTANT SAFETY TIP!  When sz==0, the return value currently is
   arbitrary.  This is often fine as there should be no actual accesses
   to a sz==0 region.  However, this also means that testing the return
   for sentinel is insufficient to tell if the mapping failed.  That
   is, assuming sentinel is a location that could never happen on
   success:

     sz!=0 and ret!=sentinel -> success
     sz!=0 and ret==sentinel -> failure
     sz==0                   -> ignore ret, application specific handling

   With ~O(2) extra fast branchless instructions, the below could be
   tweaked in the sz==0 case to return NULL or return a non-NULL
   sentinel value.  What is most optimal practically depends on how
   empty ranges and NULL vaddr handling is defined in the application.

   Requires ~O(10) fast branchless assembly instructions with 2 L1
   cache hit loads and pretty good ILP.

   fd_vm_mem_haddr_fast is for use when it is already known that the
   vaddr region has a valid mapping.

   These assumptions don't hold if direct mapping is enabled since
   input region lookups become O(log(n)). */
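
/* Caller sketch (illustrative only), following the table above with
   sentinel==0UL and a read access (write==0):

     uchar is_multi = 0;
     ulong haddr = fd_vm_mem_haddr( vm, vaddr, sz, vm->region_haddr,
                                    vm->region_ld_sz, 0, 0UL, &is_multi );
     if( FD_UNLIKELY( sz && !haddr ) ) return FD_VM_ERR_SIGSEGV;

   Note the sz check: per the safety tip above, a zero-size mapping
   returning the sentinel is not necessarily a failure. */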

/* fd_vm_get_input_mem_region_idx returns the index into the input
   memory region array with the largest region offset that is <= the
   offset that is passed in.  This function makes NO guarantees about
   the input being a valid input region offset; the caller is
   responsible for safely handling it. */
static inline ulong
fd_vm_get_input_mem_region_idx( fd_vm_t const * vm, ulong offset ) {
  uint left  = 0U;
  uint right = vm->input_mem_regions_cnt - 1U;
  uint mid   = 0U;

  while( left<right ) {
    mid = (left+right) / 2U;
    if( offset>=vm->input_mem_regions[ mid ].vaddr_offset+vm->input_mem_regions[ mid ].region_sz ) {
      left = mid + 1U;
    } else {
      right = mid;
    }
  }
  return left;
}
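
/* Worked example (hypothetical layout): given three input regions with
   (vaddr_offset,region_sz) pairs (0,100), (100,50), (150,4096):

     fd_vm_get_input_mem_region_idx( vm, 0   )==0
     fd_vm_get_input_mem_region_idx( vm, 120 )==1
     fd_vm_get_input_mem_region_idx( vm, 150 )==2

   An offset past the last region (e.g. 1<<20) also returns 2; per the
   note above, the caller must detect the overrun itself. */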

/* fd_vm_find_input_mem_region returns the translated haddr for a given
   offset into the input region.  If an offset/sz is invalid or if an
   illegal write is performed, the sentinel value is returned.  If the
   offset provided is too large, it will choose the upper-most region
   as the region_idx.  However, it will get caught for being too large
   of an access in the multi-region checks. */
static inline ulong
fd_vm_find_input_mem_region( fd_vm_t const * vm,
                             ulong           offset,
                             ulong           sz,
                             uchar           write,
                             ulong           sentinel,
                             uchar *         is_multi_region ) {
  if( FD_UNLIKELY( vm->input_mem_regions_cnt==0 ) ) {
    return sentinel; /* No input regions */
  }

  /* Binary search to find the correct memory region.  If direct
     mapping is not enabled, then there is only 1 memory region which
     spans the input region. */
  ulong region_idx = fd_vm_get_input_mem_region_idx( vm, offset );

  ulong bytes_left          = sz;
  ulong bytes_in_cur_region = fd_ulong_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                                fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );

  if( FD_UNLIKELY( write && vm->input_mem_regions[ region_idx ].is_writable==0U ) ) {
    return sentinel; /* Illegal write */
  }

  ulong start_region_idx = region_idx;

  *is_multi_region = 0;
  while( FD_UNLIKELY( bytes_left>bytes_in_cur_region ) ) {
    *is_multi_region = 1;
    FD_LOG_DEBUG(( "Size of access spans multiple memory regions" ));
    bytes_left = fd_ulong_sat_sub( bytes_left, bytes_in_cur_region );

    region_idx += 1U;

    if( FD_UNLIKELY( region_idx==vm->input_mem_regions_cnt ) ) {
      return sentinel; /* Access is too large */
    }
    bytes_in_cur_region = vm->input_mem_regions[ region_idx ].region_sz;

    if( FD_UNLIKELY( write && vm->input_mem_regions[ region_idx ].is_writable==0U ) ) {
      return sentinel; /* Illegal write */
    }
  }

  ulong adjusted_haddr = vm->input_mem_regions[ start_region_idx ].haddr + offset - vm->input_mem_regions[ start_region_idx ].vaddr_offset;
  return adjusted_haddr;
}


static inline ulong
fd_vm_mem_haddr( fd_vm_t const * vm,
                 ulong           vaddr,
                 ulong           sz,
                 ulong const *   vm_region_haddr, /* indexed [0,6) */
                 uint const *    vm_region_sz,    /* indexed [0,6) */
                 uchar           write,           /* 1 if the access is a write, 0 if it is a read */
                 ulong           sentinel,
                 uchar *         is_multi_region ) {
  ulong region = FD_VADDR_TO_REGION( vaddr );
  ulong offset = vaddr & FD_VM_OFFSET_MASK;

  /* Stack memory regions have 4kB unmapped "gaps" in-between each
     frame, which only exist if...
     - direct mapping is disabled (config.enable_stack_frame_gaps ==
       !direct_mapping)
     - dynamic stack frames are not enabled (!(SBPF version >= SBPF_V1))
     https://github.com/anza-xyz/agave/blob/v2.2.12/programs/bpf_loader/src/lib.rs#L344-L351 */
  if( FD_UNLIKELY( region==FD_VM_STACK_REGION && !vm->direct_mapping && vm->sbpf_version<FD_SBPF_V1 ) ) {
    /* If an access starts in a gap region, that is an access violation */
    if( FD_UNLIKELY( !!(vaddr & 0x1000) ) ) {
      return sentinel;
    }

    /* To account for the fact that we have gaps in the virtual address
       space but not in the physical address space, we need to subtract
       from the offset the size of all the virtual gap frames
       underneath it.

       https://github.com/solana-labs/rbpf/blob/b503a1867a9cfa13f93b4d99679a17fe219831de/src/memory_region.rs#L147-L149 */
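    /* Worked example: with 4KiB frames and 4KiB gaps, virtual frame k
       starts at offset k*0x2000 and maps to physical offset k*0x1000.
       E.g. offset 0x2345 -> ((0x2345 & gap_mask)>>1) | 0x345 = 0x1345. */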
    ulong gap_mask = 0xFFFFFFFFFFFFF000UL;
    offset = ( ( offset & gap_mask ) >> 1 ) | ( offset & ~gap_mask );
  }

  ulong region_sz = (ulong)vm_region_sz[ region ];
  ulong sz_max    = region_sz - fd_ulong_min( offset, region_sz );

  if( region==FD_VM_INPUT_REGION ) {
    return fd_vm_find_input_mem_region( vm, offset, sz, write, sentinel, is_multi_region );
  }

# ifdef FD_VM_INTERP_MEM_TRACING_ENABLED
  if( FD_LIKELY( sz<=sz_max ) ) {
    fd_vm_trace_event_mem( vm->trace, write, vaddr, sz, vm_region_haddr[ region ] + offset );
  }
# endif
  return fd_ulong_if( sz<=sz_max, vm_region_haddr[ region ] + offset, sentinel );
}

static inline ulong
fd_vm_mem_haddr_fast( fd_vm_t const * vm,
                      ulong           vaddr,
                      ulong const *   vm_region_haddr ) { /* indexed [0,6) */
  uchar is_multi = 0;
  ulong region   = FD_VADDR_TO_REGION( vaddr );
  ulong offset   = vaddr & FD_VM_OFFSET_MASK;
  if( FD_UNLIKELY( region==FD_VM_INPUT_REGION ) ) {
    return fd_vm_find_input_mem_region( vm, offset, 1UL, 0, 0UL, &is_multi );
  }
  return vm_region_haddr[ region ] + offset;
}

/* fd_vm_mem_ld_N loads N bytes from the host address location haddr,
   zero extends it to a ulong and returns the ulong.  haddr need not be
   aligned.  fd_vm_mem_ld_multi handles the case where the load spans
   multiple input memory regions. */

static inline void fd_vm_mem_ld_multi( fd_vm_t const * vm, uint sz, ulong vaddr, ulong haddr, uchar * dst ) {

  ulong offset              = vaddr & FD_VM_OFFSET_MASK;
  ulong region_idx          = fd_vm_get_input_mem_region_idx( vm, offset );
  uint  bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                               (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );

  while( sz-- ) {
    if( !bytes_in_cur_region ) {
      region_idx++;
      bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                             (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
      haddr = vm->input_mem_regions[ region_idx ].haddr;
    }

    *dst++ = *(uchar *)haddr++;
    bytes_in_cur_region--;
  }
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_1( ulong haddr ) {
  return (ulong)*(uchar const *)haddr;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_2( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  ushort t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(ushort) );
  } else {
    fd_vm_mem_ld_multi( vm, 2U, vaddr, haddr, (uchar *)&t );
  }
  return (ulong)t;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_4( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  uint t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(uint) );
  } else {
    fd_vm_mem_ld_multi( vm, 4U, vaddr, haddr, (uchar *)&t );
  }
  return (ulong)t;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_8( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  ulong t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(ulong) );
  } else {
    fd_vm_mem_ld_multi( vm, 8U, vaddr, haddr, (uchar *)&t );
  }
  return t;
}
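
/* Typical load path sketch (illustrative only): translate the vaddr
   first, then dispatch on size with the multi-region flag produced by
   the translation:

     uchar is_multi = 0;
     ulong haddr = fd_vm_mem_haddr( vm, vaddr, 4UL, vm->region_haddr,
                                    vm->region_ld_sz, 0, 0UL, &is_multi );
     if( FD_UNLIKELY( !haddr ) ) return FD_VM_ERR_SIGSEGV;
     ulong val = fd_vm_mem_ld_4( vm, vaddr, haddr, is_multi );
*/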

/* fd_vm_mem_st_N stores val in little endian order to the host address
   location haddr.  haddr need not be aligned.  fd_vm_mem_st_multi
   handles the case where the store spans multiple input memory
   regions. */

static inline void fd_vm_mem_st_multi( fd_vm_t const * vm, uint sz, ulong vaddr, ulong haddr, uchar * src ) {
  ulong   offset              = vaddr & FD_VM_OFFSET_MASK;
  ulong   region_idx          = fd_vm_get_input_mem_region_idx( vm, offset );
  ulong   bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                                 (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
  uchar * dst                 = (uchar *)haddr;

  while( sz-- ) {
    if( !bytes_in_cur_region ) {
      region_idx++;
      bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                             (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
      dst = (uchar *)vm->input_mem_regions[ region_idx ].haddr;
    }

    *dst++ = *src++;
    bytes_in_cur_region--;
  }
}

static inline void fd_vm_mem_st_1( ulong haddr, uchar val ) {
  *(uchar *)haddr = val;
}

static inline void fd_vm_mem_st_2( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   ushort          val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(ushort) );
  } else {
    fd_vm_mem_st_multi( vm, 2U, vaddr, haddr, (uchar *)&val );
  }
}

static inline void fd_vm_mem_st_4( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   uint            val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(uint) );
  } else {
    fd_vm_mem_st_multi( vm, 4U, vaddr, haddr, (uchar *)&val );
  }
}

static inline void fd_vm_mem_st_8( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   ulong           val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(ulong) );
  } else {
    fd_vm_mem_st_multi( vm, 8U, vaddr, haddr, (uchar *)&val );
  }
}

/* fd_vm_mem_st_try is not strictly required for correctness and in
   fact just slows down the performance of the firedancer vm.  However,
   it emulates the behavior of the agave client, where a store will be
   attempted partially until it fails.  This is useful for debugging
   and fuzzing conformance. */
static inline void fd_vm_mem_st_try( fd_vm_t const * vm,
                                     ulong           vaddr,
                                     ulong           sz,
                                     uchar *         val ) {
  uchar is_multi_region = 0;
  for( ulong i=0UL; i<sz; i++ ) {
    ulong haddr = fd_vm_mem_haddr( vm,
                                   vaddr+i,
                                   sizeof(uchar),
                                   vm->region_haddr,
                                   vm->region_st_sz,
                                   1,
                                   0UL,
                                   &is_multi_region );
    if( !haddr ) {
      return;
    }
    *(uchar *)haddr = *(val+i);
  }
}

FD_PROTOTYPES_END

#endif /* HEADER_fd_src_flamenco_vm_fd_vm_private_h */
|