#ifndef HEADER_fd_src_flamenco_vm_fd_vm_private_h
#define HEADER_fd_src_flamenco_vm_fd_vm_private_h

#include "fd_vm.h"

#include "../../ballet/sbpf/fd_sbpf_instr.h"
#include "../../ballet/sbpf/fd_sbpf_opcodes.h"
#include "../../ballet/murmur3/fd_murmur3.h"
#include "../runtime/context/fd_exec_txn_ctx.h"
#include "../runtime/fd_runtime.h"
#include "../features/fd_features.h"
#include "fd_vm_base.h"

/* FD_VM_ALIGN_RUST_{} define the alignments for relevant Rust types.
   Alignments are derived with std::mem::align_of::<T>() and are
   enforced by the VM (with the exception of the v1 loader).

   In our implementation, when calling FD_VM_MEM_HADDR_ST /
   FD_VM_MEM_HADDR_LD, we need to make sure we're passing the correct
   alignment based on the Rust type in the corresponding mapping in
   Agave.

   FD_VM_ALIGN_RUST_{} has been generated with this Rust code:
   ```rust
   pub type Epoch = u64;
   pub struct Pubkey(pub [u8; 32]);
   pub struct AccountMeta {
       pub lamports: u64,
       pub rent_epoch: Epoch,
       pub owner: Pubkey,
       pub executable: bool,
   }

   pub struct PodScalar(pub [u8; 32]);

   fn main() {
       println!("u8: {}",          std::mem::align_of::<u8>());
       println!("u32: {}",         std::mem::align_of::<u32>());
       println!("u64: {}",         std::mem::align_of::<u64>());
       println!("u128: {}",        std::mem::align_of::<u128>());
       println!("&[u8]: {}",       std::mem::align_of::<&[u8]>());
       println!("AccountMeta: {}", std::mem::align_of::<AccountMeta>());
       println!("PodScalar: {}",   std::mem::align_of::<PodScalar>());
       println!("Pubkey: {}",      std::mem::align_of::<Pubkey>());
   }
   ``` */

#define FD_VM_ALIGN_RUST_U8                       (1UL)
#define FD_VM_ALIGN_RUST_U32                      (4UL)
#define FD_VM_ALIGN_RUST_I32                      (4UL)
#define FD_VM_ALIGN_RUST_U64                      (8UL)
#define FD_VM_ALIGN_RUST_U128                     (16UL)
#define FD_VM_ALIGN_RUST_SLICE_U8_REF             (8UL)
#define FD_VM_ALIGN_RUST_POD_U8_ARRAY             (1UL)
#define FD_VM_ALIGN_RUST_PUBKEY                   (1UL)
#define FD_VM_ALIGN_RUST_SYSVAR_CLOCK             (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_EPOCH_SCHEDULE    (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_FEES              (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_RENT              (8UL)
#define FD_VM_ALIGN_RUST_SYSVAR_LAST_RESTART_SLOT (8UL)
#define FD_VM_ALIGN_RUST_STABLE_INSTRUCTION       (8UL)
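
/* Illustrative sketch (not part of the API): a syscall translating a
   sysvar pointer pairs one of the FD_VM_ALIGN_RUST_{} values with the
   haddr translation macros.  Assuming the FD_VM_MEM_HADDR_LD( vm,
   vaddr, align, sz ) macro shape used by the syscall implementations:
   ```c
   fd_sol_sysvar_clock_t const * clock =
     FD_VM_MEM_HADDR_LD( vm, clock_vaddr, FD_VM_ALIGN_RUST_SYSVAR_CLOCK,
                         sizeof(fd_sol_sysvar_clock_t) );
   ``` */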

/* fd_vm_vec_t is the in-memory representation of a vector descriptor.
   Equal in layout to the Rust slice header &[_] and various vector
   types in the C version of the syscall API. */
/* FIXME: WHEN IS VADDR NULL AND/OR SZ 0 OKAY? */
/* FIXME: MOVE FD_VM_RUST_VEC_T FROM SYSCALL/FD_VM_CPI.H HERE TOO? */

#define FD_VM_VEC_ALIGN FD_VM_ALIGN_RUST_SLICE_U8_REF
#define FD_VM_VEC_SIZE  (16UL)

struct __attribute__((packed)) fd_vm_vec {
  ulong addr; /* FIXME: NAME -> VADDR */
  ulong len;  /* FIXME: NAME -> SZ */
};

typedef struct fd_vm_vec fd_vm_vec_t;
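
/* Hedged usage sketch: a Rust &[u8] argument arrives in VM memory as a
   16-byte fd_vm_vec_t.  A syscall typically maps the descriptor first,
   then maps the payload it references (fd_vm_mem_haddr is defined
   below; the surrounding names here are illustrative):
   ```c
   uchar is_multi = 0;
   fd_vm_vec_t const * slice = mapped_descriptor;  // descriptor already translated
   ulong payload_haddr = fd_vm_mem_haddr( vm, slice->addr, slice->len,
                                          vm->region_haddr, vm->region_ld_sz,
                                          0, 0UL, &is_multi );
   ``` */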

/* SBPF version and features
   https://github.com/solana-labs/rbpf/blob/4b2c3dfb02827a0119cd1587eea9e27499712646/src/program.rs#L22

   Note: SIMDs enable or disable features, e.g. BPF instructions.
   If we have macros named ENABLE vs DISABLE, we have the advantage
   that the condition is always pretty clear (a simple comparison
   against the activation version), but the disadvantage of
   inconsistent names.  Vice versa, calling everything ENABLE risks
   inverting a <= with a >= and creating a huge mess.  We define both,
   so hopefully it's foolproof. */

#define FD_VM_SBPF_REJECT_RODATA_STACK_OVERLAP(v) ( v != FD_SBPF_V0 )
#define FD_VM_SBPF_ENABLE_ELF_VADDR(v)            ( v != FD_SBPF_V0 )
/* SIMD-0166 */
#define FD_VM_SBPF_DYNAMIC_STACK_FRAMES(v)        ( v >= FD_SBPF_V1 )
/* SIMD-0173 */
#define FD_VM_SBPF_CALLX_USES_SRC_REG(v)          ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_LDDW(v)                ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_LDDW(v)                 ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_LE(v)                  ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_LE(v)                   ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_MOVE_MEMORY_IX_CLASSES(v)      ( v >= FD_SBPF_V2 )
/* SIMD-0174 */
#define FD_VM_SBPF_ENABLE_PQR(v)                  ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_DISABLE_NEG(v)                 ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_ENABLE_NEG(v)                  ( v <  FD_SBPF_V2 )
#define FD_VM_SBPF_SWAP_SUB_REG_IMM_OPERANDS(v)   ( v >= FD_SBPF_V2 )
#define FD_VM_SBPF_EXPLICIT_SIGN_EXT(v)           ( v >= FD_SBPF_V2 )
/* SIMD-0178 + SIMD-0179 */
#define FD_VM_SBPF_STATIC_SYSCALLS(v)             ( v >= FD_SBPF_V3 )
/* SIMD-0189 */
#define FD_VM_SBPF_ENABLE_STRICTER_ELF_HEADERS(v) ( v >= FD_SBPF_V3 )
#define FD_VM_SBPF_ENABLE_LOWER_BYTECODE_VADDR(v) ( v >= FD_SBPF_V3 )
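
/* Hedged usage sketch: the verifier/interpreter gate opcodes on these
   predicates, e.g. rejecting lddw once it is disabled (the error code
   below is hypothetical):
   ```c
   if( FD_UNLIKELY( !FD_VM_SBPF_ENABLE_LDDW( vm->sbpf_version ) ) )
     return FD_VM_ERR_INVALID_OPCODE;  // hypothetical error code
   ``` */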

#define FD_VM_SBPF_DYNAMIC_STACK_FRAMES_ALIGN (64U)

#define FD_VM_OFFSET_MASK (0xffffffffUL)
115 :
116 : static const uint FD_VM_SBPF_STATIC_SYSCALLS_LIST[] = {
117 : 0,
118 : // 1 = abort
119 : 0xb6fc1a11,
120 : // 2 = sol_panic_
121 : 0x686093bb,
122 : // 3 = sol_memcpy_
123 : 0x717cc4a3,
124 : // 4 = sol_memmove_
125 : 0x434371f8,
126 : // 5 = sol_memset_
127 : 0x3770fb22,
128 : // 6 = sol_memcmp_
129 : 0x5fdcde31,
130 : // 7 = sol_log_
131 : 0x207559bd,
132 : // 8 = sol_log_64_
133 : 0x5c2a3178,
134 : // 9 = sol_log_pubkey
135 : 0x7ef088ca,
136 : // 10 = sol_log_compute_units_
137 : 0x52ba5096,
138 : // 11 = sol_alloc_free_
139 : 0x83f00e8f,
140 : // 12 = sol_invoke_signed_c
141 : 0xa22b9c85,
142 : // 13 = sol_invoke_signed_rust
143 : 0xd7449092,
144 : // 14 = sol_set_return_data
145 : 0xa226d3eb,
146 : // 15 = sol_get_return_data
147 : 0x5d2245e4,
148 : // 16 = sol_log_data
149 : 0x7317b434,
150 : // 17 = sol_sha256
151 : 0x11f49d86,
152 : // 18 = sol_keccak256
153 : 0xd7793abb,
154 : // 19 = sol_secp256k1_recover
155 : 0x17e40350,
156 : // 20 = sol_blake3
157 : 0x174c5122,
158 : // 21 = sol_poseidon
159 : 0xc4947c21,
160 : // 22 = sol_get_processed_sibling_instruction
161 : 0xadb8efc8,
162 : // 23 = sol_get_stack_height
163 : 0x85532d94,
164 : // 24 = sol_curve_validate_point
165 : 0xaa2607ca,
166 : // 25 = sol_curve_group_op
167 : 0xdd1c41a6,
168 : // 26 = sol_curve_multiscalar_mul
169 : 0x60a40880,
170 : // 27 = sol_curve_pairing_map
171 : 0xf111a47e,
172 : // 28 = sol_alt_bn128_group_op
173 : 0xae0c318b,
174 : // 29 = sol_alt_bn128_compression
175 : 0x334fd5ed,
176 : // 30 = sol_big_mod_exp
177 : 0x780e4c15,
178 : // 31 = sol_remaining_compute_units
179 : 0xedef5aee,
180 : // 32 = sol_create_program_address
181 : 0x9377323c,
182 : // 33 = sol_try_find_program_address
183 : 0x48504a38,
184 : // 34 = sol_get_sysvar
185 : 0x13c1b505,
186 : // 35 = sol_get_epoch_stake
187 : 0x5be92f4a,
188 : // 36 = sol_get_clock_sysvar
189 : 0xd56b5fe9,
190 : // 37 = sol_get_epoch_schedule_sysvar
191 : 0x23a29a61,
192 : // 38 = sol_get_last_restart_slot
193 : 0x188a0031,
194 : // 39 = sol_get_epoch_rewards_sysvar
195 : 0xfdba2b3b,
196 : // 40 = sol_get_fees_sysvar
197 : 0x3b97b73c,
198 : // 41 = sol_get_rent_sysvar
199 : 0xbf7188f6,
200 : };
201 126 : #define FD_VM_SBPF_STATIC_SYSCALLS_LIST_SZ (sizeof(FD_VM_SBPF_STATIC_SYSCALLS_LIST) / sizeof(uint))
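
/* The values above are the 32-bit murmur3 hashes of the syscall names
   (seed 0), matching sbpf's symbol hashing for static syscalls.  A
   hedged sanity check, assuming fd_murmur3_32( data, sz, seed ) from
   fd_murmur3.h:
   ```c
   FD_TEST( fd_murmur3_32( "abort", 5UL, 0U )==0xb6fc1a11U );
   ``` */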

FD_PROTOTYPES_BEGIN

/* Error logging handholding assertions */

#ifdef FD_RUNTIME_ERR_HANDHOLDING
/* Asserts that the error and error kind are populated (non-zero) */
#define FD_VM_TEST_ERR_EXISTS( vm )                \
  FD_TEST( vm->instr_ctx->txn_ctx->exec_err );     \
  FD_TEST( vm->instr_ctx->txn_ctx->exec_err_kind )

/* Asserts that the error and error kind are not populated (zero) */
#define FD_VM_TEST_ERR_OVERWRITE( vm )              \
  FD_TEST( !vm->instr_ctx->txn_ctx->exec_err );     \
  FD_TEST( !vm->instr_ctx->txn_ctx->exec_err_kind )
#else
#define FD_VM_TEST_ERR_EXISTS( vm )    ( ( void )0 )
#define FD_VM_TEST_ERR_OVERWRITE( vm ) ( ( void )0 )
#endif

/* Log error within the instr_ctx to match Agave/Rust error. */

#define FD_VM_ERR_FOR_LOG_EBPF( vm, err ) (__extension__({              \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                     \
    vm->instr_ctx->txn_ctx->exec_err      = err;                        \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_EBPF;  \
  }))

#define FD_VM_ERR_FOR_LOG_SYSCALL( vm, err ) (__extension__({             \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                       \
    vm->instr_ctx->txn_ctx->exec_err      = err;                          \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_SYSCALL; \
  }))

#define FD_VM_ERR_FOR_LOG_INSTR( vm, err ) (__extension__({              \
    FD_VM_TEST_ERR_OVERWRITE( vm );                                      \
    vm->instr_ctx->txn_ctx->exec_err      = err;                         \
    vm->instr_ctx->txn_ctx->exec_err_kind = FD_EXECUTOR_ERR_KIND_INSTR;  \
  }))
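
/* Hedged usage sketch: a failing syscall records the Agave-compatible
   error before bailing out (the error codes below are placeholders):
   ```c
   if( FD_UNLIKELY( err ) ) {
     FD_VM_ERR_FOR_LOG_SYSCALL( vm, err );
     return FD_VM_ERR_ABORT;  // hypothetical VM error code
   }
   ``` */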

#define FD_VADDR_TO_REGION( _vaddr ) fd_ulong_min( (_vaddr) >> FD_VM_MEM_MAP_REGION_VIRT_ADDR_BITS, FD_VM_HIGH_REGION )
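
/* Example: with FD_VM_MEM_MAP_REGION_VIRT_ADDR_BITS==32, the vaddr
   0x300000010UL falls in region 3 (the heap region) at offset 0x10,
   and any region index above FD_VM_HIGH_REGION clamps to
   FD_VM_HIGH_REGION. */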

/* fd_vm_instr APIs ***************************************************/

/* FIXME: MIGRATE FD_SBPF_INSTR_T STUFF TO THIS API */

/* fd_vm_instr returns the SBPF instruction word corresponding to the
   given fields. */

FD_FN_CONST static inline ulong
fd_vm_instr( ulong opcode, /* Assumed valid */
             ulong dst,    /* Assumed in [0,FD_VM_REG_CNT) */
             ulong src,    /* Assumed in [0,FD_VM_REG_CNT) */
             short offset,
             uint  imm ) {
  return opcode | (dst<<8) | (src<<12) | (((ulong)(ushort)offset)<<16) | (((ulong)imm)<<32);
}
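
/* Hedged example: assuming 0x07 is the SBPF add64-with-immediate
   opcode, the word for "r1 += 42" packs as:
   ```c
   ulong w = fd_vm_instr( 0x07UL, 1UL, 0UL, (short)0, 42U );
   // w == 0x0000002a00000107UL
   ``` */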

/* fd_vm_instr_* return the SBPF instruction field for the given word.
   fd_vm_instr_{normal,mem}_* only apply to {normal,mem} opclass
   instructions. */

FD_FN_CONST static inline ulong fd_vm_instr_opcode( ulong instr ) { return instr & 255UL;                           } /* In [0,256) */
FD_FN_CONST static inline ulong fd_vm_instr_dst   ( ulong instr ) { return ((instr>> 8) & 15UL);                    } /* In [0,16)  */
FD_FN_CONST static inline ulong fd_vm_instr_src   ( ulong instr ) { return ((instr>>12) & 15UL);                    } /* In [0,16)  */
FD_FN_CONST static inline ulong fd_vm_instr_offset( ulong instr ) { return (ulong)(long)(short)(ushort)(instr>>16); }
FD_FN_CONST static inline uint  fd_vm_instr_imm   ( ulong instr ) { return (uint)(instr>>32);                       }

FD_FN_CONST static inline ulong fd_vm_instr_opclass       ( ulong instr ) { return instr & 7UL;      } /* In [0,8)  */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opsrc  ( ulong instr ) { return (instr>>3) & 1UL; } /* In [0,2)  */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opmode ( ulong instr ) { return (instr>>4) & 15UL; } /* In [0,16) */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opsize    ( ulong instr ) { return (instr>>3) & 3UL; } /* In [0,4)  */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opaddrmode( ulong instr ) { return (instr>>5) & 7UL; } /* In [0,8)  */

/* fd_vm_mem API ******************************************************/

/* fd_vm_mem APIs support the fast mapping of virtual address ranges to
   host address ranges.  Since the SBPF virtual address space consists
   of 4 consecutive 4GiB regions and the mappable size of each region
   is less than 4GiB (as implied by FD_VM_MEM_MAP_REGION_SZ==2^32-1 and
   the fact that Solana protocol limits are much smaller still), it is
   impossible for a valid virtual address range to span multiple
   regions. */

/* fd_vm_mem_cfg configures the vm's tlb arrays.  Assumes vm is valid
   and vm already has configured the rodata, stack, heap and input
   regions.  Returns vm. */

static inline fd_vm_t *
fd_vm_mem_cfg( fd_vm_t * vm ) {
  vm->region_haddr[0]                  = 0UL;               vm->region_ld_sz[0]                  = (uint)0UL;             vm->region_st_sz[0]                  = (uint)0UL;
  vm->region_haddr[FD_VM_PROG_REGION]  = (ulong)vm->rodata; vm->region_ld_sz[FD_VM_PROG_REGION]  = (uint)vm->rodata_sz;   vm->region_st_sz[FD_VM_PROG_REGION]  = (uint)0UL;
  vm->region_haddr[FD_VM_STACK_REGION] = (ulong)vm->stack;  vm->region_ld_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX; vm->region_st_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX;
  vm->region_haddr[FD_VM_HEAP_REGION]  = (ulong)vm->heap;   vm->region_ld_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;    vm->region_st_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;
  vm->region_haddr[5]                  = 0UL;               vm->region_ld_sz[5]                  = (uint)0UL;             vm->region_st_sz[5]                  = (uint)0UL;
  if( FD_FEATURE_ACTIVE( vm->instr_ctx->txn_ctx->slot, vm->instr_ctx->txn_ctx->features, bpf_account_data_direct_mapping ) || !vm->input_mem_regions_cnt ) {
    /* When direct mapping is enabled, we don't use these fields because
       the loads and stores are fragmented. */
    vm->region_haddr[FD_VM_INPUT_REGION] = 0UL;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = 0U;
    vm->region_st_sz[FD_VM_INPUT_REGION] = 0U;
  } else {
    vm->region_haddr[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].haddr;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
    vm->region_st_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
  }
  return vm;
}

/* fd_vm_mem_haddr translates the vaddr range [vaddr,vaddr+sz) (in
   infinite precision math) into the non-wrapping haddr range
   [haddr,haddr+sz).  On success, returns haddr and every byte in the
   haddr range is a valid address.  On failure, returns sentinel and
   there was at least one byte in the virtual address range that did
   not have a corresponding byte in the host address range.

   IMPORTANT SAFETY TIP!  When sz==0, the return value currently is
   arbitrary.  This is often fine as there should be no actual accesses
   to a sz==0 region.  However, this also means that testing the return
   value for sentinel is insufficient to tell if the mapping failed.
   That is, assuming sentinel is a location that could never happen on
   success:

     sz!=0 and ret!=sentinel -> success
     sz!=0 and ret==sentinel -> failure
     sz==0                   -> ignore ret, application specific handling

   With ~O(2) extra fast branchless instructions, the below could be
   tweaked in the sz==0 case to return NULL or return a non-NULL
   sentinel value.  What is most optimal practically depends on how
   empty ranges and NULL vaddr handling is defined in the application.

   Requires ~O(10) fast branchless assembly instructions with 2 L1
   cache hit loads and pretty good ILP.

   fd_vm_mem_haddr_fast is for use when it is already known that the
   vaddr region has a valid mapping.

   These assumptions don't hold if direct mapping is enabled since
   input region lookups become O(log(n)). */

/* fd_vm_get_input_mem_region_idx returns the index into the input
   memory region array with the largest region offset that is <= the
   offset that is passed in.  This function makes NO guarantees about
   the input being a valid input region offset; the caller is
   responsible for safely handling it. */
static inline ulong
fd_vm_get_input_mem_region_idx( fd_vm_t const * vm, ulong offset ) {
  uint left  = 0U;
  uint right = vm->input_mem_regions_cnt - 1U;
  uint mid   = 0U;

  while( left<right ) {
    mid = (left+right) / 2U;
    if( offset>=vm->input_mem_regions[ mid ].vaddr_offset+vm->input_mem_regions[ mid ].region_sz ) {
      left = mid + 1U;
    } else {
      right = mid;
    }
  }
  return left;
}

/* fd_vm_find_input_mem_region returns the translated haddr for a given
   offset into the input region.  If an offset/sz is invalid or if an
   illegal write is performed, the sentinel value is returned.  If the
   offset provided is too large, it will choose the upper-most region
   as the region_idx; the access will then be caught for being too
   large by the multi-region checks below. */
static inline ulong
fd_vm_find_input_mem_region( fd_vm_t const * vm,
                             ulong           offset,
                             ulong           sz,
                             uchar           write,
                             ulong           sentinel,
                             uchar *         is_multi_region ) {
  if( FD_UNLIKELY( vm->input_mem_regions_cnt==0 ) ) {
    return sentinel; /* No input memory regions */
  }

  /* Binary search to find the correct memory region.  If direct
     mapping is not enabled, then there is only 1 memory region which
     spans the input region. */
  ulong region_idx = fd_vm_get_input_mem_region_idx( vm, offset );

  ulong bytes_left          = sz;
  ulong bytes_in_cur_region = fd_ulong_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                                fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );

  if( FD_UNLIKELY( write && vm->input_mem_regions[ region_idx ].is_writable==0U ) ) {
    return sentinel; /* Illegal write */
  }

  ulong start_region_idx = region_idx;

  *is_multi_region = 0;
  while( FD_UNLIKELY( bytes_left>bytes_in_cur_region ) ) {
    *is_multi_region = 1;
    FD_LOG_DEBUG(( "Size of access spans multiple memory regions" ));
    bytes_left = fd_ulong_sat_sub( bytes_left, bytes_in_cur_region );

    region_idx += 1U;

    if( FD_UNLIKELY( region_idx==vm->input_mem_regions_cnt ) ) {
      return sentinel; /* Access is too large */
    }
    bytes_in_cur_region = vm->input_mem_regions[ region_idx ].region_sz;

    if( FD_UNLIKELY( write && vm->input_mem_regions[ region_idx ].is_writable==0U ) ) {
      return sentinel; /* Illegal write */
    }
  }

  ulong adjusted_haddr = vm->input_mem_regions[ start_region_idx ].haddr + offset - vm->input_mem_regions[ start_region_idx ].vaddr_offset;
  return adjusted_haddr;
}

static inline ulong
fd_vm_mem_haddr( fd_vm_t const * vm,
                 ulong           vaddr,
                 ulong           sz,
                 ulong const *   vm_region_haddr, /* indexed [0,6) */
                 uint const *    vm_region_sz,    /* indexed [0,6) */
                 uchar           write,           /* 1 if the access is a write, 0 if it is a read */
                 ulong           sentinel,
                 uchar *         is_multi_region ) {
  ulong region = FD_VADDR_TO_REGION( vaddr );
  ulong offset = vaddr & FD_VM_OFFSET_MASK;

  /* Stack memory regions have 4kB unmapped "gaps" in-between each
     frame (only if direct mapping is disabled).
     https://github.com/solana-labs/rbpf/blob/b503a1867a9cfa13f93b4d99679a17fe219831de/src/memory_region.rs#L141 */
  if( FD_UNLIKELY( region==FD_VM_STACK_REGION && !vm->direct_mapping ) ) {
    /* If an access starts in a gap region, that is an access violation */
    if( !!(vaddr & 0x1000) ) {
      return sentinel;
    }

    /* To account for the fact that we have gaps in the virtual address
       space but not in the physical address space, we need to subtract
       from the offset the size of all the virtual gap frames
       underneath it.

       https://github.com/solana-labs/rbpf/blob/b503a1867a9cfa13f93b4d99679a17fe219831de/src/memory_region.rs#L147-L149 */
    ulong gap_mask = 0xFFFFFFFFFFFFF000UL;
    offset = ( ( offset & gap_mask ) >> 1 ) | ( offset & ~gap_mask );
  }

  ulong region_sz = (ulong)vm_region_sz[ region ];
  ulong sz_max    = region_sz - fd_ulong_min( offset, region_sz );

  if( region==FD_VM_INPUT_REGION ) {
    return fd_vm_find_input_mem_region( vm, offset, sz, write, sentinel, is_multi_region );
  }

# ifdef FD_VM_INTERP_MEM_TRACING_ENABLED
  if( FD_LIKELY( sz<=sz_max ) ) {
    fd_vm_trace_event_mem( vm->trace, write, vaddr, sz, vm_region_haddr[ region ] + offset );
  }
# endif
  return fd_ulong_if( sz<=sz_max, vm_region_haddr[ region ] + offset, sentinel );
}
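
/* Hedged usage sketch: a guarded read translation with 0 as the
   (assumed never valid) sentinel host address:
   ```c
   uchar is_multi = 0;
   ulong haddr = fd_vm_mem_haddr( vm, vaddr, sz, vm->region_haddr,
                                  vm->region_ld_sz, 0, 0UL, &is_multi );
   if( FD_UNLIKELY( !haddr ) ) return FD_VM_ERR_SIGSEGV;  // hypothetical error code
   ``` */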

static inline ulong
fd_vm_mem_haddr_fast( fd_vm_t const * vm,
                      ulong           vaddr,
                      ulong const *   vm_region_haddr ) { /* indexed [0,6) */
  uchar is_multi = 0;
  ulong region   = FD_VADDR_TO_REGION( vaddr );
  ulong offset   = vaddr & FD_VM_OFFSET_MASK;
  if( FD_UNLIKELY( region==FD_VM_INPUT_REGION ) ) {
    return fd_vm_find_input_mem_region( vm, offset, 1UL, 0, 0UL, &is_multi );
  }
  return vm_region_haddr[ region ] + offset;
}

/* fd_vm_mem_ld_N loads N bytes from the host address location haddr,
   zero extends it to a ulong and returns the ulong.  haddr need not be
   aligned.  fd_vm_mem_ld_multi handles the case where the load spans
   multiple input memory regions. */

static inline void fd_vm_mem_ld_multi( fd_vm_t const * vm, uint sz, ulong vaddr, ulong haddr, uchar * dst ) {

  ulong offset              = vaddr & FD_VM_OFFSET_MASK;
  ulong region_idx          = fd_vm_get_input_mem_region_idx( vm, offset );
  uint  bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                               (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );

  while( sz-- ) {
    if( !bytes_in_cur_region ) {
      region_idx++;
      bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                             (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
      haddr = vm->input_mem_regions[ region_idx ].haddr;
    }

    *dst++ = *(uchar *)haddr++;
    bytes_in_cur_region--;
  }
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_1( ulong haddr ) {
  return (ulong)*(uchar const *)haddr;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_2( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  ushort t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(ushort) );
  } else {
    fd_vm_mem_ld_multi( vm, 2U, vaddr, haddr, (uchar *)&t );
  }
  return (ulong)t;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_4( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  uint t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(uint) );
  } else {
    fd_vm_mem_ld_multi( vm, 4U, vaddr, haddr, (uchar *)&t );
  }
  return (ulong)t;
}

FD_FN_PURE static inline ulong fd_vm_mem_ld_8( fd_vm_t const * vm, ulong vaddr, ulong haddr, uint is_multi_region ) {
  ulong t;
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( &t, (void const *)haddr, sizeof(ulong) );
  } else {
    fd_vm_mem_ld_multi( vm, 8U, vaddr, haddr, (uchar *)&t );
  }
  return t;
}
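
/* Hedged sketch of a complete guarded 4-byte load (translate, then
   load, passing the multi-region flag through):
   ```c
   uchar is_multi = 0;
   ulong haddr = fd_vm_mem_haddr( vm, vaddr, 4UL, vm->region_haddr,
                                  vm->region_ld_sz, 0, 0UL, &is_multi );
   ulong val   = haddr ? fd_vm_mem_ld_4( vm, vaddr, haddr, is_multi ) : 0UL;
   ``` */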

/* fd_vm_mem_st_N stores val in little endian order to the host address
   location haddr.  haddr need not be aligned.  fd_vm_mem_st_multi
   handles the case where the store spans multiple input memory
   regions. */

static inline void fd_vm_mem_st_multi( fd_vm_t const * vm, uint sz, ulong vaddr, ulong haddr, uchar * src ) {
  ulong   offset              = vaddr & FD_VM_OFFSET_MASK;
  ulong   region_idx          = fd_vm_get_input_mem_region_idx( vm, offset );
  ulong   bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                                 (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
  uchar * dst                 = (uchar *)haddr;

  while( sz-- ) {
    if( !bytes_in_cur_region ) {
      region_idx++;
      bytes_in_cur_region = fd_uint_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
                                             (uint)fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
      dst = (uchar *)vm->input_mem_regions[ region_idx ].haddr;
    }

    *dst++ = *src++;
    bytes_in_cur_region--;
  }
}

static inline void fd_vm_mem_st_1( ulong haddr, uchar val ) {
  *(uchar *)haddr = val;
}

static inline void fd_vm_mem_st_2( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   ushort          val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(ushort) );
  } else {
    fd_vm_mem_st_multi( vm, 2U, vaddr, haddr, (uchar *)&val );
  }
}

static inline void fd_vm_mem_st_4( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   uint            val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(uint) );
  } else {
    fd_vm_mem_st_multi( vm, 4U, vaddr, haddr, (uchar *)&val );
  }
}

static inline void fd_vm_mem_st_8( fd_vm_t const * vm,
                                   ulong           vaddr,
                                   ulong           haddr,
                                   ulong           val,
                                   uint            is_multi_region ) {
  if( FD_LIKELY( !is_multi_region ) ) {
    memcpy( (void *)haddr, &val, sizeof(ulong) );
  } else {
    fd_vm_mem_st_multi( vm, 8U, vaddr, haddr, (uchar *)&val );
  }
}

/* fd_vm_mem_st_try is strictly not required for correctness and in
   fact just slows down the performance of the firedancer vm.  However,
   it emulates the behavior of the Agave client, where a store will be
   attempted partially until it fails.  This is useful for debugging
   and fuzzing conformance. */
static inline void fd_vm_mem_st_try( fd_vm_t const * vm,
                                     ulong           vaddr,
                                     ulong           sz,
                                     uchar *         val ) {
  uchar is_multi_region = 0;
  for( ulong i=0UL; i<sz; i++ ) {
    ulong haddr = fd_vm_mem_haddr( vm,
                                   vaddr+i,
                                   sizeof(uchar),
                                   vm->region_haddr,
                                   vm->region_st_sz,
                                   1,
                                   0UL,
                                   &is_multi_region );
    if( !haddr ) {
      return;
    }
    *(uchar *)haddr = *(val+i);
  }
}

FD_PROTOTYPES_END

#endif /* HEADER_fd_src_flamenco_vm_fd_vm_private_h */
|