Line data Source code
1 : #ifndef HEADER_fd_src_flamenco_vm_fd_vm_private_h
2 : #define HEADER_fd_src_flamenco_vm_fd_vm_private_h
3 :
4 : #include "fd_vm.h"
5 :
6 : #include "../runtime/fd_runtime_const.h"
7 : #include "../runtime/fd_runtime.h"
8 : #include "fd_vm_base.h"
9 :
10 : /* FD_VM_ALIGN_RUST_{} define the alignments for relevant rust types.
11 : Alignments are derived with std::mem::align_of::<T>() and are enforced
12 : by the VM (with the exception of v1 loader).
13 :
14 : In our implementation, when calling FD_VM_MEM_HADDR_ST / FD_VM_MEM_HADDR_LD,
15 : we need to make sure we're passing the correct alignment based on the Rust
16 : type in the corresponding mapping in Agave.
17 :
18 : FD_VM_ALIGN_RUST_{} has been generated with this Rust code:
19 : ```rust
20 : pub type Epoch = u64;
21 : pub struct Pubkey(pub [u8; 32]);
22 : pub struct AccountMeta {
23 : pub lamports: u64,
24 : pub rent_epoch: Epoch,
25 : pub owner: Pubkey,
26 : pub executable: bool,
27 : }
28 :
29 : pub struct PodScalar(pub [u8; 32]);
30 :
31 : fn main() {
32 : println!("u8: {}", std::mem::align_of::<u8>());
33 : println!("u32: {}", std::mem::align_of::<u32>());
34 : println!("u64: {}", std::mem::align_of::<u64>());
35 : println!("u128: {}", std::mem::align_of::<u128>());
36 : println!("&[u8]: {}", std::mem::align_of::<&[u8]>());
37 : println!("AccountMeta: {}", std::mem::align_of::<AccountMeta>());
38 : println!("PodScalar: {}", std::mem::align_of::<PodScalar>());
39 : println!("Pubkey: {}", std::mem::align_of::<Pubkey>());
40 : }
41 : ``` */
42 :
43 93 : #define FD_VM_ALIGN_RUST_U8 (1UL)
44 : #define FD_VM_ALIGN_RUST_U32 (4UL)
45 15 : #define FD_VM_ALIGN_RUST_I32 (4UL)
46 : #define FD_VM_ALIGN_RUST_U64 (8UL)
47 : #define FD_VM_ALIGN_RUST_U128 (16UL)
48 : #define FD_VM_ALIGN_RUST_SLICE_U8_REF (8UL)
49 21 : #define FD_VM_ALIGN_RUST_POD_U8_ARRAY (1UL)
50 0 : #define FD_VM_ALIGN_RUST_PUBKEY (1UL)
51 0 : #define FD_VM_ALIGN_RUST_SYSVAR_CLOCK (8UL)
52 0 : #define FD_VM_ALIGN_RUST_SYSVAR_EPOCH_SCHEDULE (8UL)
53 0 : #define FD_VM_ALIGN_RUST_SYSVAR_RENT (8UL)
54 0 : #define FD_VM_ALIGN_RUST_SYSVAR_LAST_RESTART_SLOT (8UL)
55 : #define FD_VM_ALIGN_RUST_SYSVAR_EPOCH_REWARDS (16UL)
56 : #define FD_VM_ALIGN_RUST_STABLE_INSTRUCTION (8UL)
57 :
58 : /* fd_vm_vec_t is the in-memory representation of a vector descriptor.
59 : Equal in layout to the Rust slice header &[_] and various vector
60 : types in the C version of the syscall API. */
61 : /* FIXME: WHEN IS VADDR NULL AND/OR SZ 0 OKAY? */
62 : /* FIXME: MOVE FD_VM_RUST_VEC_T FROM SYSCALL/FD_VM_CPI.H HERE TOO? */
63 :
64 : #define FD_VM_VEC_ALIGN FD_VM_ALIGN_RUST_SLICE_U8_REF
65 : #define FD_VM_VEC_SIZE (16UL)
66 :
67 : struct __attribute__((packed)) fd_vm_vec {
68 : ulong addr; /* FIXME: NAME -> VADDR */
69 : ulong len; /* FIXME: NAME -> SZ */
70 : };
71 :
72 : typedef struct fd_vm_vec fd_vm_vec_t;
73 :
74 : FD_STATIC_ASSERT( sizeof(fd_vm_vec_t)==FD_VM_VEC_SIZE, fd_vm_vec size mismatch );
75 :
76 : /* SBPF version and features
77 : https://github.com/anza-xyz/sbpf/blob/v0.12.2/src/program.rs#L28
78 : Note: SIMDs enable or disable features, e.g. BPF instructions.
79 : If we have macros with names ENABLE vs DISABLE, we have the advantage that
80 : the condition is always pretty clear: sbpf_version <= activation_version,
81 : but the disadvantage of inconsistent names.
   Vice versa, naming everything ENABLE risks inverting a <= with a >=
   and introducing subtle bugs.
84 : We define both, so hopefully it's foolproof. */
85 :
86 : #define FD_VM_SBPF_REJECT_RODATA_STACK_OVERLAP(v) ( v != FD_SBPF_V0 )
87 : #define FD_VM_SBPF_ENABLE_ELF_VADDR(v) ( v != FD_SBPF_V0 )
88 : /* SIMD-0166 */
89 805500573 : #define FD_VM_SBPF_DYNAMIC_STACK_FRAMES(v) ( v >= FD_SBPF_V1 )
90 : /* SIMD-0173 */
91 8328 : #define FD_VM_SBPF_CALLX_USES_SRC_REG(v) ( v >= FD_SBPF_V2 )
92 : #define FD_VM_SBPF_DISABLE_LDDW(v) ( v >= FD_SBPF_V2 )
93 82440 : #define FD_VM_SBPF_ENABLE_LDDW(v) ( v < FD_SBPF_V2 )
94 : #define FD_VM_SBPF_DISABLE_LE(v) ( v >= FD_SBPF_V2 )
95 41220 : #define FD_VM_SBPF_ENABLE_LE(v) ( v < FD_SBPF_V2 )
96 989280 : #define FD_VM_SBPF_MOVE_MEMORY_IX_CLASSES(v) ( v >= FD_SBPF_V2 )
97 : /* SIMD-0174 */
98 1112940 : #define FD_VM_SBPF_ENABLE_PQR(v) ( v >= FD_SBPF_V2 )
99 : #define FD_VM_SBPF_DISABLE_NEG(v) ( v >= FD_SBPF_V2 )
100 41220 : #define FD_VM_SBPF_ENABLE_NEG(v) ( v < FD_SBPF_V2 )
101 65784 : #define FD_VM_SBPF_SWAP_SUB_REG_IMM_OPERANDS(v) ( v >= FD_SBPF_V2 )
102 131568 : #define FD_VM_SBPF_EXPLICIT_SIGN_EXT(v) ( v >= FD_SBPF_V2 )
103 :
104 3642 : #define FD_VM_OFFSET_MASK (0xffffffffUL)
105 :
106 : /* https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L32 */
107 0 : #define FD_MAX_ACCOUNT_DATA_GROWTH_PER_TRANSACTION ((long)(FD_RUNTIME_ACC_SZ_MAX * 2UL))
108 :
109 : FD_PROTOTYPES_BEGIN
110 :
111 : /* Error logging handholding assertions */
112 :
113 : #ifdef FD_RUNTIME_ERR_HANDHOLDING
114 : /* Asserts that the error and error kind are populated (non-zero) */
115 : #define FD_VM_TEST_ERR_EXISTS( vm ) \
116 : FD_TEST( vm->instr_ctx->txn_out->err.exec_err ); \
117 : FD_TEST( vm->instr_ctx->txn_out->err.exec_err_kind )
118 :
119 : /* Used prior to a FD_VM_ERR_FOR_LOG_INSTR call to deliberately
120 : bypass overwrite handholding checks.
121 : Only use this if you know what you're doing. */
122 : #define FD_VM_PREPARE_ERR_OVERWRITE( vm ) \
123 : vm->instr_ctx->txn_out->err.exec_err = 0; \
124 : vm->instr_ctx->txn_out->err.exec_err_kind = 0
125 :
126 : /* Asserts that the error and error kind are not populated (zero) */
127 : #define FD_VM_TEST_ERR_OVERWRITE( vm ) \
128 : FD_TEST( !vm->instr_ctx->txn_out->err.exec_err ); \
129 : FD_TEST( !vm->instr_ctx->txn_out->err.exec_err_kind )
130 : #else
131 0 : #define FD_VM_TEST_ERR_EXISTS( vm ) ( ( void )0 )
132 0 : #define FD_VM_PREPARE_ERR_OVERWRITE( vm ) ( ( void )0 )
133 96 : #define FD_VM_TEST_ERR_OVERWRITE( vm ) ( ( void )0 )
134 : #endif
135 :
136 : /* Log error within the instr_ctx to match Agave/Rust error. */
137 :
138 57 : #define FD_VM_ERR_FOR_LOG_EBPF( vm, err_ ) (__extension__({ \
139 57 : FD_VM_TEST_ERR_OVERWRITE( vm ); \
140 57 : vm->instr_ctx->txn_out->err.exec_err = err_; \
141 57 : vm->instr_ctx->txn_out->err.exec_err_kind = FD_EXECUTOR_ERR_KIND_EBPF; \
142 57 : }))
143 :
144 36 : #define FD_VM_ERR_FOR_LOG_SYSCALL( vm, err_ ) (__extension__({ \
145 36 : FD_VM_TEST_ERR_OVERWRITE( vm ); \
146 36 : vm->instr_ctx->txn_out->err.exec_err = err_; \
147 36 : vm->instr_ctx->txn_out->err.exec_err_kind = FD_EXECUTOR_ERR_KIND_SYSCALL; \
148 36 : }))
149 :
150 3 : #define FD_VM_ERR_FOR_LOG_INSTR( vm, err_ ) (__extension__({ \
151 3 : FD_VM_TEST_ERR_OVERWRITE( vm ); \
152 3 : vm->instr_ctx->txn_out->err.exec_err = err_; \
153 3 : vm->instr_ctx->txn_out->err.exec_err_kind = FD_EXECUTOR_ERR_KIND_INSTR; \
154 3 : }))
155 :
156 3600 : #define FD_VADDR_TO_REGION( _vaddr ) fd_ulong_min( (_vaddr) >> FD_VM_MEM_MAP_REGION_VIRT_ADDR_BITS, FD_VM_HIGH_REGION )
157 :
158 : /* fd_vm_instr APIs ***************************************************/
159 :
160 : /* FIXME: MIGRATE FD_SBPF_INSTR_T STUFF TO THIS API */
161 :
162 : /* fd_vm_instr returns the SBPF instruction word corresponding to the
163 : given fields. */
164 :
165 : FD_FN_CONST static inline ulong
166 : fd_vm_instr( ulong opcode, /* Assumed valid */
167 : ulong dst, /* Assumed in [0,FD_VM_REG_CNT) */
168 : ulong src, /* Assumed in [0,FD_VM_REG_CNT) */
169 : short offset,
170 17463 : uint imm ) {
171 17463 : return opcode | (dst<<8) | (src<<12) | (((ulong)(ushort)offset)<<16) | (((ulong)imm)<<32);
172 17463 : }
173 :
174 : /* fd_vm_instr_* return the SBPF instruction field for the given word.
175 : fd_vm_instr_{normal,mem}_* only apply to {normal,mem} opclass
176 : instructions. */
177 :
FD_FN_CONST static inline ulong fd_vm_instr_opcode( ulong instr ) { return instr & 255UL; } /* In [0,256) */
FD_FN_CONST static inline ulong fd_vm_instr_dst   ( ulong instr ) { return ((instr>> 8) & 15UL); } /* In [0,16) */
FD_FN_CONST static inline ulong fd_vm_instr_src   ( ulong instr ) { return ((instr>>12) & 15UL); } /* In [0,16) */
/* Sign-extends the 16-bit offset field into a 64-bit value */
FD_FN_CONST static inline ulong fd_vm_instr_offset( ulong instr ) { return (ulong)(long)(short)(ushort)(instr>>16); }
FD_FN_CONST static inline uint  fd_vm_instr_imm   ( ulong instr ) { return (uint)(instr>>32); }

FD_FN_CONST static inline ulong fd_vm_instr_opclass       ( ulong instr ) { return instr & 7UL; } /* In [0,8) */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opsrc  ( ulong instr ) { return (instr>>3) & 1UL; } /* In [0,2) */
FD_FN_CONST static inline ulong fd_vm_instr_normal_opmode ( ulong instr ) { return (instr>>4) & 15UL; } /* In [0,16) */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opsize    ( ulong instr ) { return (instr>>3) & 3UL; } /* In [0,4) */
FD_FN_CONST static inline ulong fd_vm_instr_mem_opaddrmode( ulong instr ) { return (instr>>5) & 7UL; } /* In [0,8) (mask is 7UL; previous comment said [0,16)) */
189 :
190 : /* fd_vm_mem API ******************************************************/
191 :
192 : /* fd_vm_mem APIs support the fast mapping of virtual address ranges to
193 : host address ranges. Since the SBPF virtual address space consists
194 : of 4 consecutive 4GiB regions and the mapable size of each region is
195 : less than 4 GiB (as implied by FD_VM_MEM_MAP_REGION_SZ==2^32-1 and
196 : that Solana protocol limits are much smaller still), it is impossible
197 : for a valid virtual address range to span multiple regions. */
198 :
199 : /* fd_vm_mem_cfg configures the vm's tlb arrays. Assumes vm is valid
200 : and vm already has configured the rodata, stack, heap and input
201 : regions. Returns vm. */
202 :
static inline fd_vm_t *
fd_vm_mem_cfg( fd_vm_t * vm ) {
  /* Region 0 is unmapped: haddr 0 and zero ld/st sizes make every
     access there fail the bounds check. */
  vm->region_haddr[0] = 0UL; vm->region_ld_sz[0] = (uint)0UL; vm->region_st_sz[0] = (uint)0UL;
  /* Program (rodata) region: loadable up to rodata_sz, never storable
     (st_sz 0). */
  vm->region_haddr[FD_VM_PROG_REGION]  = (ulong)vm->rodata; vm->region_ld_sz[FD_VM_PROG_REGION]  = (uint)vm->rodata_sz; vm->region_st_sz[FD_VM_PROG_REGION]  = (uint)0UL;
  /* Stack and heap regions: fully readable and writable. */
  vm->region_haddr[FD_VM_STACK_REGION] = (ulong)vm->stack;  vm->region_ld_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX; vm->region_st_sz[FD_VM_STACK_REGION] = (uint)FD_VM_STACK_MAX;
  vm->region_haddr[FD_VM_HEAP_REGION]  = (ulong)vm->heap;   vm->region_ld_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;   vm->region_st_sz[FD_VM_HEAP_REGION]  = (uint)vm->heap_max;
  /* Region 5 is unmapped, like region 0. */
  vm->region_haddr[5] = 0UL; vm->region_ld_sz[5] = (uint)0UL; vm->region_st_sz[5] = (uint)0UL;
  if( vm->direct_mapping || !vm->input_mem_regions_cnt ) {
    /* When direct mapping is enabled, we don't use these fields because
       the load and stores are fragmented.  (Also zeroed when there are
       no input regions at all.) */
    vm->region_haddr[FD_VM_INPUT_REGION] = 0UL;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = 0U;
    vm->region_st_sz[FD_VM_INPUT_REGION] = 0U;
  } else {
    /* Without direct mapping there is exactly one input region spanning
       the whole input address space. */
    vm->region_haddr[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].haddr;
    vm->region_ld_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
    vm->region_st_sz[FD_VM_INPUT_REGION] = vm->input_mem_regions[0].region_sz;
  }
  return vm;
}
223 :
224 : /* Simplified version of Agave's `generate_access_violation()` function
225 : that simply returns either FD_VM_ERR_EBPF_ACCESS_VIOLATION or
226 : FD_VM_ERR_EBPF_STACK_ACCESS_VIOLATION. This has no consensus
227 : effects and is purely for logging purposes for fuzzing. Returns
228 : FD_VM_ERR_EBPF_STACK_ACCESS_VIOLATION if the provided vaddr is in the
229 : stack (0x200000000) and FD_VM_ERR_EBPF_ACCESS_VIOLATION otherwise.
230 :
231 : https://github.com/anza-xyz/sbpf/blob/v0.11.1/src/memory_region.rs#L834-L869 */
232 : static FD_FN_PURE inline int
233 255 : fd_vm_generate_access_violation( ulong vaddr, ulong sbpf_version ) {
234 : /* rel_offset can be negative because there is an edge case where the
235 : first "frame" right before the stack region should also throw a
236 : stack access violation. */
237 255 : long rel_offset = fd_long_sat_sub( (long)vaddr, (long)FD_VM_MEM_MAP_STACK_REGION_START );
238 255 : long stack_frame = rel_offset / (long)FD_VM_STACK_FRAME_SZ;
239 255 : if( !fd_sbpf_dynamic_stack_frames_enabled( sbpf_version ) &&
240 255 : stack_frame>=-1L && stack_frame<=(long)FD_VM_MAX_CALL_DEPTH ) {
241 0 : return FD_VM_ERR_EBPF_STACK_ACCESS_VIOLATION;
242 0 : }
243 255 : return FD_VM_ERR_EBPF_ACCESS_VIOLATION;
244 255 : }
245 :
246 : /* fd_vm_mem_haddr translates the vaddr range [vaddr,vaddr+sz) (in
247 : infinite precision math) into the non-wrapping haddr range
248 : [haddr,haddr+sz). On success, returns haddr and every byte in the
249 : haddr range is a valid address. On failure, returns sentinel and
250 : there was at least one byte in the virtual address range that did not
251 : have a corresponding byte in the host address range.
252 :
253 : IMPORTANT SAFETY TIP! When sz==0, the return value currently is
254 : arbitrary. This is often fine as there should be no
255 : actual accesses to a sz==0 region. However, this also means that
256 : testing return for sentinel is insufficient to tell if mapping
257 : failed. That is, assuming sentinel is a location that could never
258 : happen on success:
259 :
260 : sz!=0 and ret!=sentinel -> success
261 : sz!=0 and ret==sentinel -> failure
262 : sz==0 -> ignore ret, application specific handling
263 :
264 : With ~O(2) extra fast branchless instructions, the below could be
265 : tweaked in the sz==0 case to return NULL or return a non-NULL
266 : sentinel value. What is most optimal practically depends on how
267 : empty ranges and NULL vaddr handling is defined in the application.
268 :
269 : Requires ~O(10) fast branchless assembly instructions with 2 L1 cache
270 : hit loads and pretty good ILP.
271 :
272 : fd_vm_mem_haddr_fast is when the vaddr is for use when it is already
273 : known that the vaddr region has a valid mapping.
274 :
275 : These assumptions don't hold if direct mapping is enabled since input
276 : region lookups become O(log(n)). */
277 :
278 :
279 : /* fd_vm_get_input_mem_region_idx returns the index into the input memory
280 : region array with the largest region offset that is <= the offset that
281 : is passed in. This function makes NO guarantees about the input being
282 : a valid input region offset; the caller is responsible for safely handling
283 : it. */
static inline ulong
fd_vm_get_input_mem_region_idx( fd_vm_t const * vm, ulong offset ) {
  /* Lower-bound binary search over the input region array, keyed by
     vaddr_offset + address_space_reserved.  NOTE: assumes
     vm->input_mem_regions_cnt > 0; otherwise `right` underflows to
     UINT_MAX — callers must check the count first. */
  uint left  = 0U;
  uint right = vm->input_mem_regions_cnt - 1U;
  uint mid   = 0U;

  while( left<right ) {
    mid = (left+right) / 2U;
    /* offset lies at or beyond the end of region mid's reserved span:
       search the upper half. */
    if( offset>=vm->input_mem_regions[ mid ].vaddr_offset+vm->input_mem_regions[ mid ].address_space_reserved ) {
      left = mid + 1U;
    } else {
      right = mid;
    }
  }
  return left;
}
300 :
301 : /* If the region is an account, handle the resizing logic. This logic
302 : corresponds to
303 : solana_transaction_context::TransactionContext::access_violation_handler
304 :
305 : https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L510-L581 */
static inline void
fd_vm_handle_input_mem_region_oob( fd_vm_t const * vm,
                                   ulong           offset,     /* offset into the input address space */
                                   ulong           sz,         /* size of the attempted access */
                                   ulong           region_idx, /* index of the region the access starts in */
                                   uchar           write ) {   /* 1 for a store, 0 for a load */
  /* If stricter_abi_and_runtime_constraints is not enabled, we don't need to
     do anything */
  if( FD_UNLIKELY( !vm->stricter_abi_and_runtime_constraints ) ) {
    return;
  }

  /* If the access is not a write, we don't need to do anything
     https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L523-L525 */
  if( FD_UNLIKELY( !write ) ) {
    return;
  }

  fd_vm_input_region_t * region = &vm->input_mem_regions[ region_idx ];
  /* If the region is not writable, we don't need to do anything
     https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L526-L529 */
  if( FD_UNLIKELY( !region->is_writable ) ) {
    return;
  }

  /* Calculate the requested length
     https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L532-L535 */
  ulong requested_len = fd_ulong_sat_sub( fd_ulong_sat_add( offset, sz ), region->vaddr_offset );
  /* Accesses beyond the region's reserved address space can never be
     satisfied by a resize. */
  if( FD_UNLIKELY( requested_len > region->address_space_reserved ) ) {
    return;
  }

  /* Calculate the remaining allowed growth
     https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L549-L551 */
  long remaining_growth_signed = fd_long_sat_sub(
      FD_MAX_ACCOUNT_DATA_GROWTH_PER_TRANSACTION,
      vm->instr_ctx->txn_out->details.accounts_resize_delta );
  /* Clamp at zero: the transaction may already have consumed its growth
     budget (delta can exceed the cap). */
  ulong remaining_allowed_growth = (remaining_growth_signed > 0L)
      ? (ulong)remaining_growth_signed
      : 0UL;

  /* If the requested length is greater than the size of the region,
     resize the region
     https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L553-L571 */
  if( FD_UNLIKELY( requested_len > region->region_sz ) ) {
    /* Calculate the new region size
       https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L558-L560 */
    ulong new_region_sz = fd_ulong_min(
        fd_ulong_min( region->address_space_reserved, FD_RUNTIME_ACC_SZ_MAX ),
        fd_ulong_sat_add( region->region_sz, remaining_allowed_growth ) );

    /* Resize the account and the region
       https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L569-L570 */
    if( FD_UNLIKELY( new_region_sz > region->region_sz ) ) {
      /* Safe because new_region_sz > region->region_sz */
      long growth = (long)(new_region_sz - region->region_sz);
      vm->instr_ctx->txn_out->details.accounts_resize_delta = fd_long_sat_add(
          vm->instr_ctx->txn_out->details.accounts_resize_delta, growth );

      fd_account_meta_resize( vm->acc_region_metas[ region->acc_region_meta_idx ].meta, new_region_sz );
      region->region_sz = (uint)new_region_sz;
    }
  }
}
370 :
371 : /* fd_vm_find_input_mem_region returns the translated haddr for a given
372 : offset into the input region. If an offset/sz is invalid or if an
373 : illegal write is performed, the sentinel value is returned. If the offset
374 : provided is too large, it will choose the upper-most region as the
375 : region_idx. However, it will get caught for being too large of an access
376 : in the multi-region checks. */
377 : static inline ulong
378 : fd_vm_find_input_mem_region( fd_vm_t const * vm,
379 : ulong offset,
380 : ulong sz,
381 : uchar write,
382 390 : ulong sentinel ) {
383 390 : if( FD_UNLIKELY( vm->input_mem_regions_cnt==0 ) ) {
384 0 : return sentinel; /* Access is too large */
385 0 : }
386 :
387 : /* Binary search to find the correct memory region. If direct mapping is not
388 : enabled, then there is only 1 memory region which spans the input region. */
389 390 : ulong region_idx = fd_vm_get_input_mem_region_idx( vm, offset );
390 390 : if( FD_UNLIKELY( region_idx>=vm->input_mem_regions_cnt ) ) {
391 0 : return sentinel; /* Region not found */
392 0 : }
393 :
394 390 : ulong bytes_in_region = fd_ulong_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
395 390 : fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
396 :
397 : /* If the access is out of bounds, invoke the callback to handle the out of bounds access.
398 : This potentially resizes the region if necessary. */
399 390 : if( FD_UNLIKELY( sz>bytes_in_region ) ) {
400 120 : fd_vm_handle_input_mem_region_oob( vm, offset, sz, region_idx, write );
401 120 : }
402 :
403 : /* After potentially resizing, re-check the bounds */
404 390 : bytes_in_region = fd_ulong_sat_sub( vm->input_mem_regions[ region_idx ].region_sz,
405 390 : fd_ulong_sat_sub( offset, vm->input_mem_regions[ region_idx ].vaddr_offset ) );
406 : /* If the access is still out of bounds, return the sentinel */
407 390 : if( FD_UNLIKELY( sz>bytes_in_region ) ) {
408 120 : return sentinel;
409 120 : }
410 :
411 270 : if( FD_UNLIKELY( write && vm->input_mem_regions[ region_idx ].is_writable==0U ) ) {
412 0 : return sentinel; /* Illegal write */
413 0 : }
414 :
415 270 : ulong start_region_idx = region_idx;
416 :
417 270 : ulong adjusted_haddr = vm->input_mem_regions[ start_region_idx ].haddr + offset - vm->input_mem_regions[ start_region_idx ].vaddr_offset;
418 270 : return adjusted_haddr;
419 270 : }
420 :
421 :
/* fd_vm_mem_haddr maps [vaddr,vaddr+sz) to a host address range.
   Returns the haddr on success, sentinel on failure (see the IMPORTANT
   SAFETY TIP above regarding sz==0). */
static inline ulong
fd_vm_mem_haddr( fd_vm_t const *    vm,
                 ulong              vaddr,
                 ulong              sz,
                 ulong const *      vm_region_haddr, /* indexed [0,6) */
                 uint  const *      vm_region_sz,    /* indexed [0,6) */
                 uchar              write,           /* 1 if the access is a write, 0 if it is a read */
                 ulong              sentinel ) {
  ulong region = FD_VADDR_TO_REGION( vaddr );
  ulong offset = vaddr & FD_VM_OFFSET_MASK;

  /* Stack memory regions have 4kB unmapped "gaps" in-between each frame, which only exist if...
     - dynamic stack frames are not enabled (!(SBPF version >= SBPF_V1))
     https://github.com/anza-xyz/agave/blob/v2.2.12/programs/bpf_loader/src/lib.rs#L344-L351
   */
  if( FD_UNLIKELY( region==FD_VM_STACK_REGION &&
                   !fd_sbpf_dynamic_stack_frames_enabled( vm->sbpf_version ) ) ) {
    /* If an access starts in a gap region, that is an access violation
       (bit 12 toggles between frame and gap). */
    if( FD_UNLIKELY( !!(vaddr & 0x1000) ) ) {
      return sentinel;
    }

    /* To account for the fact that we have gaps in the virtual address space but not in the
       physical address space, we need to subtract from the offset the size of all the virtual
       gap frames underneath it.

       https://github.com/solana-labs/rbpf/blob/b503a1867a9cfa13f93b4d99679a17fe219831de/src/memory_region.rs#L147-L149 */
    ulong gap_mask = 0xFFFFFFFFFFFFF000;
    offset = ( ( offset & gap_mask ) >> 1 ) | ( offset & ~gap_mask );
  }

  ulong region_sz = (ulong)vm_region_sz[ region ];
  /* sz_max = bytes remaining in the region at offset; the min() clamp
     avoids unsigned underflow when offset exceeds region_sz. */
  ulong sz_max    = region_sz - fd_ulong_min( offset, region_sz );

  /* If the region is an account, handle the resizing logic. This logic corresponds to
     solana_transaction_context::TransactionContext::access_violation_handler

     https://github.com/anza-xyz/agave/blob/v3.0.1/transaction-context/src/lib.rs#L510-L581 */
  if( region==FD_VM_INPUT_REGION ) {
    return fd_vm_find_input_mem_region( vm, offset, sz, write, sentinel );
  }

# ifdef FD_VM_INTERP_MEM_TRACING_ENABLED
  if ( FD_LIKELY( sz<=sz_max ) ) {
    fd_vm_trace_event_mem( vm->trace, write, vaddr, sz, vm_region_haddr[ region ] + offset );
  }
# endif
  /* Branchless select: valid access -> translated haddr, else sentinel */
  return fd_ulong_if( sz<=sz_max, vm_region_haddr[ region ] + offset, sentinel );
}
471 :
472 : static inline ulong
473 : fd_vm_mem_haddr_fast( fd_vm_t const * vm,
474 : ulong vaddr,
475 0 : ulong const * vm_region_haddr ) { /* indexed [0,6) */
476 0 : ulong region = FD_VADDR_TO_REGION( vaddr );
477 0 : ulong offset = vaddr & FD_VM_OFFSET_MASK;
478 0 : if( FD_UNLIKELY( region==FD_VM_INPUT_REGION ) ) {
479 0 : return fd_vm_find_input_mem_region( vm, offset, 1UL, 0, 0UL );
480 0 : }
481 0 : return vm_region_haddr[ region ] + offset;
482 0 : }
483 :
484 54 : FD_FN_PURE static inline ulong fd_vm_mem_ld_1( ulong haddr ) {
485 54 : return (ulong)*(uchar const *)haddr;
486 54 : }
487 :
488 60 : FD_FN_PURE static inline ulong fd_vm_mem_ld_2( ulong haddr ) {
489 60 : ushort t;
490 60 : memcpy( &t, (void const *)haddr, sizeof(ushort) );
491 60 : return (ulong)t;
492 60 : }
493 :
494 60 : FD_FN_PURE static inline ulong fd_vm_mem_ld_4( ulong haddr ) {
495 60 : uint t;
496 60 : memcpy( &t, (void const *)haddr, sizeof(uint) );
497 60 : return (ulong)t;
498 60 : }
499 :
500 42 : FD_FN_PURE static inline ulong fd_vm_mem_ld_8( ulong haddr ) {
501 42 : ulong t;
502 42 : memcpy( &t, (void const *)haddr, sizeof(ulong) );
503 42 : return t;
504 42 : }
505 :
506 6 : static inline void fd_vm_mem_st_1( ulong haddr, uchar val ) {
507 6 : *(uchar *)haddr = val;
508 6 : }
509 :
510 : static inline void fd_vm_mem_st_2( ulong haddr,
511 6 : ushort val ) {
512 6 : memcpy( (void *)haddr, &val, sizeof(ushort) );
513 6 : }
514 :
515 : static inline void fd_vm_mem_st_4( ulong haddr,
516 6 : uint val ) {
517 6 : memcpy( (void *)haddr, &val, sizeof(uint) );
518 6 : }
519 :
520 : static inline void fd_vm_mem_st_8( ulong haddr,
521 18 : ulong val ) {
522 18 : memcpy( (void *)haddr, &val, sizeof(ulong) );
523 18 : }
524 :
525 : FD_PROTOTYPES_END
526 :
527 : #endif /* HEADER_fd_src_flamenco_vm_fd_vm_private_h */
|