#ifndef HEADER_fd_src_flamenco_vm_fd_vm_private_h
#define HEADER_fd_src_flamenco_vm_fd_vm_private_h

#include "../runtime/tests/fd_dump_pb.h"
#include "fd_vm.h"

#include "../../ballet/sbpf/fd_sbpf_instr.h"
#include "../../ballet/sbpf/fd_sbpf_opcodes.h"
#include "../../ballet/murmur3/fd_murmur3.h"
#include "../runtime/context/fd_exec_txn_ctx.h"
#include "../features/fd_features.h"
#include "fd_vm_base.h"

/* FD_VM_ALIGN_RUST_{} define the alignments for relevant Rust types.
   Alignments are derived with std::mem::align_of::<T>() and are
   enforced by the VM (with the exception of the v1 loader).

   In our implementation, when calling FD_VM_MEM_HADDR_ST /
   FD_VM_MEM_HADDR_LD, we need to make sure we're passing the correct
   alignment based on the Rust type in the corresponding mapping in
   Agave.

   FD_VM_ALIGN_RUST_{} has been generated with this Rust code:
   ```rust
   pub type Epoch = u64;
   pub struct Pubkey(pub [u8; 32]);
   pub struct AccountMeta {
       pub lamports: u64,
       pub rent_epoch: Epoch,
       pub owner: Pubkey,
       pub executable: bool,
   }

   pub struct PodScalar(pub [u8; 32]);

   fn main() {
       println!("u8: {}", std::mem::align_of::<u8>());
       println!("u32: {}", std::mem::align_of::<u32>());
       println!("u64: {}", std::mem::align_of::<u64>());
       println!("u128: {}", std::mem::align_of::<u128>());
       println!("&[u8]: {}", std::mem::align_of::<&[u8]>());
       println!("AccountMeta: {}", std::mem::align_of::<AccountMeta>());
       println!("PodScalar: {}", std::mem::align_of::<PodScalar>());
       println!("Pubkey: {}", std::mem::align_of::<Pubkey>());
   }
   ``` */

#define FD_VM_ALIGN_RUST_U8                       (1UL)
#define FD_VM_ALIGN_RUST_U32                      (4UL)
#define FD_VM_ALIGN_RUST_I32                      (4UL)
#define FD_VM_ALIGN_RUST_U64                      (8UL)
#define FD_VM_ALIGN_RUST_U128                    (16UL)
#define FD_VM_ALIGN_RUST_SLICE_U8_REF             (8UL)
#define FD_VM_ALIGN_RUST_POD_U8_ARRAY             (1UL)
#define FD_VM_ALIGN_RUST_PUBKEY                   (1UL)
#define FD_VM_ALIGN_RUST_SYSVAR_CLOCK             (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_EPOCH_SCHEDULE    (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_RENT              (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_LAST_RESTART_SLOT (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_EPOCH_REWARDS    (16UL)
#define FD_VM_ALIGN_RUST_STABLE_INSTRUCTION       (8UL)
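
/* Illustrative use (hedged sketch): FD_VM_MEM_HADDR_LD /
   FD_VM_MEM_HADDR_ST (declared elsewhere in the VM) are expected to
   be passed the FD_VM_ALIGN_RUST_{} constant of the Rust type Agave
   would map at the same vaddr.  E.g. loading a u64 sysvar field might
   look like:

     ulong haddr = FD_VM_MEM_HADDR_LD( vm, vaddr, FD_VM_ALIGN_RUST_U64, sizeof(ulong) );

   Passing the wrong constant here can diverge from Agave's alignment
   checks. */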

/* fd_vm_vec_t is the in-memory representation of a vector descriptor.
   Equal in layout to the Rust slice header &[_] and various vector
   types in the C version of the syscall API. */
/* FIXME: WHEN IS VADDR NULL AND/OR SZ 0 OKAY? */
/* FIXME: MOVE FD_VM_RUST_VEC_T FROM SYSCALL/FD_VM_CPI.H HERE TOO? */

#define FD_VM_VEC_ALIGN FD_VM_ALIGN_RUST_SLICE_U8_REF
#define FD_VM_VEC_SIZE  (16UL)

struct __attribute__((packed)) fd_vm_vec {
  ulong addr; /* FIXME: NAME -> VADDR */
  ulong len;  /* FIXME: NAME -> SZ */
};

typedef struct fd_vm_vec fd_vm_vec_t;
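
/* A minimal compile-time sanity sketch (hedged; assumes a C11 compiler
   with _Static_assert): the packed layout must match the 16-byte Rust
   slice header (ptr,len) on a 64-bit target:

     _Static_assert( sizeof(fd_vm_vec_t)==FD_VM_VEC_SIZE, "fd_vm_vec_t layout" );
*/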

/* SBPF version and features
   https://github.com/solana-labs/rbpf/blob/4b2c3dfb02827a0119cd1587eea9e27499712646/src/program.rs#L22

   Note: SIMDs enable or disable features, e.g. BPF instructions.
   If we have macros with names ENABLE vs DISABLE, we have the
   advantage that the condition is always pretty clear:
   sbpf_version <= activation_version, but the disadvantage of
   inconsistent names.  Vice versa, calling everything ENABLE has the
   risk of inverting a <= with a >= and creating a huge mess.  We
   define both, so hopefully it's foolproof. */

#define FD_VM_SBPF_REJECT_RODATA_STACK_OVERLAP(v)  ( v != FD_SBPF_V0 )
#define FD_VM_SBPF_ENABLE_ELF_VADDR(v)             ( v != FD_SBPF_V0 )
/* SIMD-0166 */
#define FD_VM_SBPF_DYNAMIC_STACK_FRAMES(v)         ( v >= FD_SBPF_V1 )
/* SIMD-0173 */
#define FD_VM_SBPF_CALLX_USES_SRC_REG(v)           ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_LDDW(v)                 ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_LDDW(v)                  ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_LE(v)                   ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_LE(v)                    ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_MOVE_MEMORY_IX_CLASSES(v)       ( v >= FD_SBPF_V2 )
/* SIMD-0174 */
#define FD_VM_SBPF_ENABLE_PQR(v)                   ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_NEG(v)                  ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_NEG(v)                   ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_SWAP_SUB_REG_IMM_OPERANDS(v)    ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_EXPLICIT_SIGN_EXT(v)            ( v >= FD_SBPF_V2 )
/* SIMD-0178 + SIMD-0179 */
#define FD_VM_SBPF_STATIC_SYSCALLS(v)              ( v >= FD_SBPF_V3 )
/* SIMD-0189 */
#define FD_VM_SBPF_ENABLE_STRICTER_ELF_HEADERS(v)  ( v >= FD_SBPF_V3 )
#define FD_VM_SBPF_ENABLE_LOWER_BYTECODE_VADDR(v)  ( v >= FD_SBPF_V3 )
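
/* Illustrative use (hedged sketch): a hypothetical interpreter
   dispatch might gate an opcode on these predicates, e.g.:

     if( FD_VM_SBPF_ENABLE_LDDW( vm->sbpf_version ) ) { ... execute lddw ... }
     else                                             { ... reject as an invalid opcode ... }

   Using whichever of the ENABLE / DISABLE spellings reads naturally at
   the call site avoids inverting the version comparison by hand. */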

#define FD_VM_SBPF_DYNAMIC_STACK_FRAMES_ALIGN (64U)

#define FD_VM_OFFSET_MASK (0xffffffffUL)

FD_PROTOTYPES_BEGIN

/* Error logging handholding assertions */

#ifdef FD_RUNTIME_ERR_HANDHOLDING
/* Asserts that the error and error kind are populated (non-zero) */
#define FD_VM_TEST_ERR_EXISTS( vm )                 \
  FD_TEST( vm->instr_ctx->txn_ctx->exec_err );      \
  FD_TEST( vm->instr_ctx->txn_ctx->exec_err_kind )

/* Used prior to a FD_VM_ERR_FOR_LOG_INSTR call to deliberately
   bypass overwrite handholding checks.
   Only use this if you know what you're doing. */
#define FD_VM_PREPARE_ERR_OVERWRITE( vm )           \
  vm->instr_ctx->txn_ctx->exec_err      = 0;        \
  vm->instr_ctx->txn_ctx->exec_err_kind = 0

/* Asserts that the error and error kind are not populated (zero) */
#define FD_VM_TEST_ERR_OVERWRITE( vm )              \
  FD_TEST( !vm->instr_ctx->txn_ctx->exec_err );     \
  FD_TEST( !vm->instr_ctx->txn_ctx->exec_err_kind )
#else
#define FD_VM_TEST_ERR_EXISTS( vm )       ( ( void )0 )
#define FD_VM_PREPARE_ERR_OVERWRITE( vm ) ( ( void )0 )
#define FD_VM_TEST_ERR_OVERWRITE( vm )    ( ( void )0 )
#endif

/* Log error within the instr_ctx to match Agave/Rust error. */

#define FD_VM_ERR_FOR_LOG_EBPF( vm, err ) (__extension__({                 \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                        \
    vm->instr_ctx->txn_ctx->exec_err      = err;                           \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_EBPF;     \
  }))

#define FD_VM_ERR_FOR_LOG_SYSCALL( vm, err ) (__extension__({              \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                        \
    vm->instr_ctx->txn_ctx->exec_err      = err;                           \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_SYSCALL;  \
  }))

#define FD_VM_ERR_FOR_LOG_INSTR( vm, err ) (__extension__({                \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                        \
    vm->instr_ctx->txn_ctx->exec_err      = err;                           \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_INSTR;    \
  }))

#define FD_VADDR_TO_REGION( _vaddr ) fd_ulong_min( (_vaddr) >> FD_VM_MEM_MAP_REGION_VIRT_ADDR_BITS, FD_VM_HIGH_REGION )
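
/* Worked example (illustrative, assuming
   FD_VM_MEM_MAP_REGION_VIRT_ADDR_BITS==32 as in the standard SBPF
   memory map): vaddr 0x2000000f0UL is region 2 (the stack region)
   with offset 0xf0UL (vaddr & FD_VM_OFFSET_MASK); region indices above
   FD_VM_HIGH_REGION are clamped to FD_VM_HIGH_REGION by the min. */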

/* fd_vm_instr APIs ***************************************************/

/* FIXME: MIGRATE FD_SBPF_INSTR_T STUFF TO THIS API */

/* fd_vm_instr returns the SBPF instruction word corresponding to the
   given fields. */

FD_FN_CONST static inline ulong
fd_vm_instr( ulong opcode, /* Assumed valid */
             ulong dst,    /* Assumed in [0,FD_VM_REG_CNT) */
             ulong src,    /* Assumed in [0,FD_VM_REG_CNT) */
             short offset,
             uint  imm ) {
  return opcode | (dst<<8) | (src<<12) | (((ulong)(ushort)offset)<<16) | (((ulong)imm)<<32);
}

/* fd_vm_instr_* return the SBPF instruction field for the given word.
   fd_vm_instr_{normal,mem}_* only apply to {normal,mem} opclass
   instructions. */

FD_FN_CONST static inline ulong fd_vm_instr_opcode( ulong instr ) { return instr & 255UL;                           } /* In [0,256) */
FD_FN_CONST static inline ulong fd_vm_instr_dst   ( ulong instr ) { return ((instr>> 8) & 15UL);                    } /* In [0,16)  */
FD_FN_CONST static inline ulong fd_vm_instr_src   ( ulong instr ) { return ((instr>>12) & 15UL);                    } /* In [0,16)  */
FD_FN_CONST static inline ulong fd_vm_instr_offset( ulong instr ) { return (ulong)(long)(short)(ushort)(instr>>16); }
FD_FN_CONST static inline uint  fd_vm_instr_imm   ( ulong instr ) { return (uint)(instr>>32);                       }

FD_FN_CONST static inline ulong fd_vm_instr_opclass       ( ulong instr ) { return instr & 7UL;       } /* In [0,8)  */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opsrc  ( ulong instr ) { return (instr>>3) & 1UL;  } /* In [0,2)  */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opmode ( ulong instr ) { return (instr>>4) & 15UL; } /* In [0,16) */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opsize    ( ulong instr ) { return (instr>>3) & 3UL;  } /* In [0,4)  */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opaddrmode( ulong instr ) { return (instr>>5) & 7UL;  } /* In [0,8)  */
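
/* Illustrative round trip (hypothetical values, assuming the standard
   eBPF/sBPF encoding in which 0x0f is add64 dst,src):

     ulong w = fd_vm_instr( 0x0fUL, 1UL, 2UL, 0, 0U );   ... add64 r1, r2 ...

   then fd_vm_instr_opcode( w )==0x0fUL, fd_vm_instr_dst( w )==1UL,
   fd_vm_instr_src( w )==2UL and fd_vm_instr_imm( w )==0U. */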

/* fd_vm_mem API ******************************************************/

/* fd_vm_mem APIs support the fast mapping of virtual address ranges to
   host address ranges.  Since the SBPF virtual address space consists
   of 4 consecutive 4GiB regions and the mappable size of each region
   is less than 4 GiB (as implied by FD_VM_MEM_MAP_REGION_SZ==2^32-1
   and that Solana protocol limits are much smaller still), it is
   impossible for a valid virtual address range to span multiple
   regions. */
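
/* Concretely (illustrative, per the standard SBPF memory map): region
   1 starting at vaddr 0x100000000 holds the program/rodata, region 2
   at 0x200000000 the stack, region 3 at 0x300000000 the heap and
   region 4 at 0x400000000 the input; the upper vaddr bits select the
   region and the low 32 bits give the offset within it. */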

/* fd_vm_mem_cfg configures the vm's tlb arrays.  Assumes vm is valid
   and vm already has configured the rodata, stack, heap and input
   regions.  Returns vm. */

static inline fd_vm_t *
fd_vm_mem_cfg( fd_vm_t * vm ) {
  vm->region_haddr[0]                  = 0UL;               vm->region_ld_sz[0]                  = (uint)0UL;             vm->region_st_sz[0]                  = (uint)0UL;
  vm->region_haddr[FD_VM_PROG_REGION]  = (ulong)vm->rodata; vm->region_ld_sz[FD_VM_PROG_REGION]  = (uint)vm->rodata_sz;   vm->region_st_sz[FD_VM_PROG_REGION]  = (uint)0UL;
  vm->region_haddr[FD_VM_STACK_REGION] = (ulong)vm->stack;  vm->region_ld_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX; vm->region_st_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX;
  vm->region_haddr[FD_VM_HEAP_REGION]  = (ulong)vm->heap;   vm->region_ld_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;    vm->region_st_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;
  vm->region_haddr[5]                  = 0UL;               vm->region_ld_sz[5]                  = (uint)0UL;             vm->region_st_sz[5]                  = (uint)0UL;
  if( vm->direct_mapping || !vm->input_mem_regions_cnt ) {
    /* When direct mapping is enabled (or there are no input regions),
       we don't use these fields because loads and stores may be
       fragmented across multiple regions. */
    vm->region_haddr[FD_VM_INPUT_REGION] = 0UL;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = 0U;
    vm->region_st_sz[FD_VM_INPUT_REGION] = 0U;
  } else {
    vm->region_haddr[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].haddr;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
    vm->region_st_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
  }
  return vm;
}

/* Simplified version of Agave's `generate_access_violation()` function
   that simply returns either FD_VM_ERR_EBPF_ACCESS_VIOLATION or
   FD_VM_ERR_EBPF_STACK_ACCESS_VIOLATION.  This has no consensus
   effects and is purely for logging purposes for fuzzing.  Returns
   FD_VM_ERR_EBPF_STACK_ACCESS_VIOLATION if the provided vaddr is in
   the stack (0x200000000) and FD_VM_ERR_EBPF_ACCESS_VIOLATION
   otherwise.

   https://github.com/anza-xyz/sbpf/blob/v0.11.1/src/memory_region.rs#L834-L869 */
static FD_FN_PURE inline int
fd_vm_generate_access_violation( ulong vaddr, ulong sbpf_version ) {
  /* rel_offset can be negative because there is an edge case where the
     first "frame" right before the stack region should also throw a
     stack access violation. */
  long rel_offset  = fd_long_sat_sub( (long)vaddr, (long)FD_VM_MEM_MAP_STACK_REGION_START );
  long stack_frame = rel_offset / (long)FD_VM_STACK_FRAME_SZ;
  if( !FD_VM_SBPF_DYNAMIC_STACK_FRAMES( sbpf_version ) &&
      stack_frame>=-1L && stack_frame<=(long)FD_VM_MAX_CALL_DEPTH ) {
    return FD_VM_ERR_EBPF_STACK_ACCESS_VIOLATION;
  }
  return FD_VM_ERR_EBPF_ACCESS_VIOLATION;
}
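
/* Worked example (illustrative, assuming the 4 kB FD_VM_STACK_FRAME_SZ
   used by the stack gap logic below): with dynamic stack frames
   disabled, vaddr 0x1fffff000 gives rel_offset -0x1000 and stack_frame
   -1, which is reported as a stack access violation; a heap vaddr like
   0x300000000 yields stack_frame 0x100000, outside
   [-1,FD_VM_MAX_CALL_DEPTH], and is reported as a plain access
   violation. */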

/* fd_vm_mem_haddr translates the vaddr range [vaddr,vaddr+sz) (in
   infinite precision math) into the non-wrapping haddr range
   [haddr,haddr+sz).  On success, returns haddr and every byte in the
   haddr range is a valid address.  On failure, returns sentinel and
   there was at least one byte in the virtual address range that did
   not have a corresponding byte in the host address range.

   IMPORTANT SAFETY TIP!  When sz==0, the return value currently is
   arbitrary.  This is often fine as there should be no actual accesses
   to a sz==0 region.  However, this also means that testing return for
   sentinel is insufficient to tell if mapping failed.  That is,
   assuming sentinel is a location that could never happen on success:

     sz!=0 and ret!=sentinel -> success
     sz!=0 and ret==sentinel -> failure
     sz==0                   -> ignore ret, application specific handling

   With ~O(2) extra fast branchless instructions, the below could be
   tweaked in the sz==0 case to return NULL or return a non-NULL
   sentinel value.  What is most optimal practically depends on how
   empty ranges and NULL vaddr handling is defined in the application.

   Requires ~O(10) fast branchless assembly instructions with 2 L1
   cache hit loads and pretty good ILP.

   fd_vm_mem_haddr_fast is for use when it is already known that the
   vaddr region has a valid mapping.

   These assumptions don't hold if direct mapping is enabled since
   input region lookups become O(log(n)). */

/* fd_vm_get_input_mem_region_idx returns the index into the input
   memory region array with the largest region offset that is <= the
   offset that is passed in.  This function makes NO guarantees about
   the input being a valid input region offset; the caller is
   responsible for safely handling it. */
static inline ulong
fd_vm_get_input_mem_region_idx( fd_vm_t const * vm, ulong offset ) {
  uint left  = 0U;
  uint right = vm->input_mem_regions_cnt - 1U;
  uint mid   = 0U;

  while( left<right ) {
    mid = (left+right) / 2U;
    if( offset>=vm->input_mem_regions[ mid ].vaddr_offset+vm->input_mem_regions[ mid ].region_sz ) {
      left = mid + 1U;
    } else {
      right = mid;
    }
  }
  return left;
}
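
/* Worked example (hypothetical regions): with two input regions at
   vaddr_offset 0 (region_sz 100) and vaddr_offset 100 (region_sz 50),
   offset 42 returns index 0 and offset 120 returns index 1.  An
   out-of-range offset like 1000 also returns 1 (the last index); per
   the note above, callers must bounds check. */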

/* fd_vm_find_input_mem_region returns the translated haddr for a given
   offset into the input region.  If an offset/sz is invalid or if an
   illegal write is performed, the sentinel value is returned.  If the
   offset provided is too large, it will choose the upper-most region
   as the region_idx; the access will then be rejected by the
   multi-region size checks below. */
static inline ulong
fd_vm_find_input_mem_region( fd_vm_t const * vm,
                             ulong           offset,
                             ulong           sz,
                             uchar           write,
                             ulong           sentinel,
                             uchar *         is_multi_region ) {
  if( FD_UNLIKELY( vm->input_mem_regions_cnt==0 ) ) {
    return sentinel; /* No input regions */
  }

  /* Binary search to find the correct memory region.  If direct
     mapping is not enabled, then there is only 1 memory region which
     spans the input region. */
  ulong region_idx = fd_vm_get_input_mem_region_idx( vm, offset );

  ulong bytes_left          = sz;
  ulong bytes_in_cur_region = fd_ulong_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                                fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );

  if( FD_UNLIKELY( write && vm->input_mem_regions[ region_idx ].is_writable==0U ) ) {
    return sentinel; /* Illegal write */
  }

  ulong start_region_idx = region_idx;

  *is_multi_region = 0;
  while( FD_UNLIKELY( bytes_left>bytes_in_cur_region ) ) {
    *is_multi_region = 1;
    FD_LOG_DEBUG(( "Size of access spans multiple memory regions" ));
    bytes_left = fd_ulong_sat_sub( bytes_left, bytes_in_cur_region );

    region_idx += 1U;

    if( FD_UNLIKELY( region_idx==vm->input_mem_regions_cnt ) ) {
      return sentinel; /* Access is too large */
    }
    bytes_in_cur_region = vm->input_mem_regions[ region_idx ].region_sz;

    if( FD_UNLIKELY( write && vm->input_mem_regions[ region_idx ].is_writable==0U ) ) {
      return sentinel; /* Illegal write */
    }
  }

  ulong adjusted_haddr = vm->input_mem_regions[ start_region_idx ].haddr + offset - vm->input_mem_regions[ start_region_idx ].vaddr_offset;
  return adjusted_haddr;
}

static inline ulong
fd_vm_mem_haddr( fd_vm_t const * vm,
                 ulong           vaddr,
                 ulong           sz,
                 ulong const *   vm_region_haddr, /* indexed [0,6) */
                 uint const *    vm_region_sz,    /* indexed [0,6) */
                 uchar           write,           /* 1 if the access is a write, 0 if it is a read */
                 ulong           sentinel,
                 uchar *         is_multi_region ) {
  ulong region = FD_VADDR_TO_REGION( vaddr );
  ulong offset = vaddr & FD_VM_OFFSET_MASK;

  /* Stack memory regions have 4kB unmapped "gaps" in-between each
     frame, which only exist if...
     - direct mapping is disabled (config.enable_stack_frame_gaps ==
       !direct_mapping)
     - dynamic stack frames are not enabled (!(SBPF version >= SBPF_V1))
     https://github.com/anza-xyz/agave/blob/v2.2.12/programs/bpf_loader/src/lib.rs#L344-L351
  */
  if( FD_UNLIKELY( region==FD_VM_STACK_REGION &&
                   !vm->direct_mapping &&
                   !FD_VM_SBPF_DYNAMIC_STACK_FRAMES( vm->sbpf_version ) ) ) {
    /* If an access starts in a gap region, that is an access violation */
    if( FD_UNLIKELY( !!(vaddr & 0x1000) ) ) {
      return sentinel;
    }

    /* To account for the fact that we have gaps in the virtual address
       space but not in the physical address space, we need to subtract
       from the offset the size of all the virtual gap frames
       underneath it.

       https://github.com/solana-labs/rbpf/blob/b503a1867a9cfa13f93b4d99679a17fe219831de/src/memory_region.rs#L147-L149 */
    ulong gap_mask = 0xFFFFFFFFFFFFF000UL;
    offset = ( ( offset & gap_mask ) >> 1 ) | ( offset & ~gap_mask );
  }

  ulong region_sz = (ulong)vm_region_sz[ region ];
  ulong sz_max    = region_sz - fd_ulong_min( offset, region_sz );

  if( region==FD_VM_INPUT_REGION ) {
    return fd_vm_find_input_mem_region( vm, offset, sz, write, sentinel, is_multi_region );
  }

# ifdef FD_VM_INTERP_MEM_TRACING_ENABLED
  if( FD_LIKELY( sz<=sz_max ) ) {
    fd_vm_trace_event_mem( vm->trace, write, vaddr, sz, vm_region_haddr[ region ] + offset );
  }
# endif
  return fd_ulong_if( sz<=sz_max, vm_region_haddr[ region ] + offset, sentinel );
}
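
/* Illustrative call pattern (hedged sketch, using 0UL as the sentinel,
   mirroring fd_vm_mem_st_try below): translating a read of sz bytes at
   vaddr might look like:

     uchar is_multi = 0;
     ulong haddr    = fd_vm_mem_haddr( vm, vaddr, sz, vm->region_haddr,
                                       vm->region_ld_sz, 0, 0UL, &is_multi );
     if( FD_UNLIKELY( !haddr ) ) { ... handle access violation ... }
*/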

static inline ulong
fd_vm_mem_haddr_fast( fd_vm_t const * vm,
                      ulong           vaddr,
                      ulong const *   vm_region_haddr ) { /* indexed [0,6) */
  uchar is_multi = 0;
  ulong region   = FD_VADDR_TO_REGION( vaddr );
  ulong offset   = vaddr & FD_VM_OFFSET_MASK;
  if( FD_UNLIKELY( region==FD_VM_INPUT_REGION ) ) {
    return fd_vm_find_input_mem_region( vm, offset, 1UL, 0, 0UL, &is_multi );
  }
  return vm_region_haddr[ region ] + offset;
}

/* fd_vm_mem_ld_N loads N bytes from the host address location haddr,
   zero extends it to a ulong and returns the ulong.  haddr need not be
   aligned.  fd_vm_mem_ld_multi handles the case where the load spans
   multiple input memory regions. */

static inline void fd_vm_mem_ld_multi( fd_vm_t const * vm, uint sz, ulong vaddr, ulong haddr, uchar * dst ) {

  ulong offset              = vaddr & FD_VM_OFFSET_MASK;
  ulong region_idx          = fd_vm_get_input_mem_region_idx( vm, offset );
  uint  bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                               (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );

  while( sz-- ) {
    if( !bytes_in_cur_region ) {
      region_idx++;
      bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                             (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
      haddr = vm->input_mem_regions[ region_idx ].haddr;
    }

    *dst++ = *(uchar *)haddr++;
    bytes_in_cur_region--;
  }
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_1( ulong haddr ) {
  return (ulong)*(uchar const *)haddr;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_2( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  ushort t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(ushort) );
  } else {
    fd_vm_mem_ld_multi( vm, 2U, vaddr, haddr, (uchar *)&t );
  }
  return (ulong)t;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_4( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  uint t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(uint) );
  } else {
    fd_vm_mem_ld_multi( vm, 4U, vaddr, haddr, (uchar *)&t );
  }
  return (ulong)t;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_8( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  ulong t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(ulong) );
  } else {
    fd_vm_mem_ld_multi( vm, 8U, vaddr, haddr, (uchar *)&t );
  }
  return t;
}
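
/* Illustrative load path (hedged sketch): a 4-byte read at vaddr first
   translates the range, then dispatches on is_multi so that a load
   spanning multiple input regions is stitched together byte by byte:

     uchar is_multi = 0;
     ulong haddr    = fd_vm_mem_haddr( vm, vaddr, 4UL, vm->region_haddr,
                                       vm->region_ld_sz, 0, 0UL, &is_multi );
     ulong val      = haddr ? fd_vm_mem_ld_4( vm, vaddr, haddr, is_multi )
                            : ... handle access violation ...;
*/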

/* fd_vm_mem_st_N stores val in little endian order to the host address
   location haddr.  haddr need not be aligned.  fd_vm_mem_st_multi
   handles the case where the store spans multiple input memory
   regions. */

static inline void fd_vm_mem_st_multi( fd_vm_t const * vm, uint sz, ulong vaddr, ulong haddr, uchar * src ) {
  ulong   offset              = vaddr & FD_VM_OFFSET_MASK;
  ulong   region_idx          = fd_vm_get_input_mem_region_idx( vm, offset );
  ulong   bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                                 (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
  uchar * dst                 = (uchar *)haddr;

  while( sz-- ) {
    if( !bytes_in_cur_region ) {
      region_idx++;
      bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                             (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
      dst = (uchar *)vm->input_mem_regions[ region_idx ].haddr;
    }

    *dst++ = *src++;
    bytes_in_cur_region--;
  }
}

static inline void fd_vm_mem_st_1( ulong haddr, uchar val ) {
  *(uchar *)haddr = val;
}

static inline void fd_vm_mem_st_2( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   ushort          val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(ushort) );
  } else {
    fd_vm_mem_st_multi( vm, 2U, vaddr, haddr, (uchar *)&val );
  }
}

static inline void fd_vm_mem_st_4( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   uint            val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(uint) );
  } else {
    fd_vm_mem_st_multi( vm, 4U, vaddr, haddr, (uchar *)&val );
  }
}

static inline void fd_vm_mem_st_8( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   ulong           val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(ulong) );
  } else {
    fd_vm_mem_st_multi( vm, 8U, vaddr, haddr, (uchar *)&val );
  }
}

/* fd_vm_mem_st_try is not strictly required for correctness and in
   fact just slows down the performance of the firedancer vm.  However,
   this emulates the behavior of the agave client, where a store will
   be attempted partially until it fails.  This is useful for debugging
   and fuzzing conformance. */
static inline void fd_vm_mem_st_try( fd_vm_t const * vm,
                                     ulong           vaddr,
                                     ulong           sz,
                                     uchar *         val ) {
  uchar is_multi_region = 0;
  for( ulong i=0UL; i<sz; i++ ) {
    ulong haddr = fd_vm_mem_haddr( vm,
                                   vaddr+i,
                                   sizeof(uchar),
                                   vm->region_haddr,
                                   vm->region_st_sz,
                                   1,
                                   0UL,
                                   &is_multi_region );
    if( !haddr ) {
      return;
    }
    *(uchar *)haddr = *(val+i);
  }
}

FD_PROTOTYPES_END

#endif /* HEADER_fd_src_flamenco_vm_fd_vm_private_h */