/* THIS FILE WAS GENERATED BY generate_filters.py. DO NOT EDIT BY HAND! */
#ifndef HEADER_fd_src_discof_restore_generated_fd_snapwr_tile_seccomp_h
#define HEADER_fd_src_discof_restore_generated_fd_snapwr_tile_seccomp_h

#if defined(__linux__)

#include "../../../../src/util/fd_util_base.h"
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/bpf.h>
#include <linux/unistd.h>
#include <sys/syscall.h>
#include <signal.h>
#include <stddef.h>

#if defined(__i386__)
# define ARCH_NR AUDIT_ARCH_I386
#elif defined(__x86_64__)
# define ARCH_NR AUDIT_ARCH_X86_64
#elif defined(__aarch64__)
# define ARCH_NR AUDIT_ARCH_AARCH64
#else
# error "Target architecture is unsupported by seccomp."
#endif
static const unsigned int sock_filter_policy_fd_snapwr_tile_instr_cnt = 25;

static void populate_sock_filter_policy_fd_snapwr_tile( ulong out_cnt, struct sock_filter * out, uint logfile_fd, uint vinyl_fd ) {
  FD_TEST( out_cnt >= 25 );
  struct sock_filter filter[25] = {
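    /* Conditional jump offsets in classic BPF are relative to the next
       instruction: a jt/jf value of N skips the N following instructions,
       landing on the target named in the adjacent comment. */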
    /* Check: Jump to RET_KILL_PROCESS if the script's arch != the runtime arch */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, arch ) ) ),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, ARCH_NR, 0, /* RET_KILL_PROCESS */ 21 ),
    /* loading syscall number in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, nr ) ) ),
    /* allow pwrite64 based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_pwrite64, /* check_pwrite64 */ 7, 0 ),
    /* allow write based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_write, /* check_write */ 8, 0 ),
    /* allow fsync based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_fsync, /* check_fsync */ 11, 0 ),
    /* allow exit based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_exit, /* check_exit */ 12, 0 ),
    /* allow sched_yield based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_sched_yield, /* check_sched_yield */ 13, 0 ),
    /* allow clock_nanosleep based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_clock_nanosleep, /* check_clock_nanosleep */ 12, 0 ),
    /* allow nanosleep based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_nanosleep, /* check_nanosleep */ 13, 0 ),
    /* none of the syscalls matched */
    { BPF_JMP | BPF_JA, 0, 0, /* RET_KILL_PROCESS */ 12 },
// check_pwrite64:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, vinyl_fd, /* RET_ALLOW */ 11, /* RET_KILL_PROCESS */ 10 ),
// check_write:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_ALLOW */ 9, /* lbl_1 */ 0 ),
// lbl_1:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 7, /* RET_KILL_PROCESS */ 6 ),
// check_fsync:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 5, /* RET_KILL_PROCESS */ 4 ),
// check_exit:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 3, /* RET_KILL_PROCESS */ 2 ),
// check_sched_yield:
// check_clock_nanosleep:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 1, /* RET_KILL_PROCESS */ 0 ),
// check_nanosleep:
// RET_KILL_PROCESS:
    /* KILL_PROCESS is placed before ALLOW since it's the fallthrough case. */
    BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS ),
// RET_ALLOW:
    /* ALLOW has to be reached by jumping */
    BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_ALLOW ),
  };
  fd_memcpy( out, filter, sizeof( filter ) );
}
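
/* Usage sketch (illustration only, not part of the generated interface):
   one way a caller could install this policy via prctl(2).  The helper
   name fd_snapwr_install_seccomp and the way the two fds are obtained are
   hypothetical; the real tile wiring supplies its own logfile and output
   fds and may install the program through a shared sandbox utility
   instead.

     #include <stdlib.h>
     #include <sys/prctl.h>

     static void
     fd_snapwr_install_seccomp( uint logfile_fd, uint vinyl_fd ) {
       struct sock_filter filter[ sock_filter_policy_fd_snapwr_tile_instr_cnt ];
       populate_sock_filter_policy_fd_snapwr_tile(
           sock_filter_policy_fd_snapwr_tile_instr_cnt, filter, logfile_fd, vinyl_fd );

       struct sock_fprog prog = {
         .len    = (unsigned short)sock_filter_policy_fd_snapwr_tile_instr_cnt,
         .filter = filter,
       };

       // Installing a filter without CAP_SYS_ADMIN requires no_new_privs.
       // Any failure leaves the process unsandboxed, so give up loudly.
       if( prctl( PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0 ) )            exit( 1 );
       if( prctl( PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog ) ) exit( 1 );
     }
*/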

#endif /* defined(__linux__) */

#endif /* HEADER_fd_src_discof_restore_generated_fd_snapwr_tile_seccomp_h */