/* THIS FILE WAS GENERATED BY generate_filters.py. DO NOT EDIT BY HAND! */
#ifndef HEADER_fd_src_app_shared_commands_monitor_generated_monitor_seccomp_h
#define HEADER_fd_src_app_shared_commands_monitor_generated_monitor_seccomp_h

#include "../../../../../../src/util/fd_util_base.h"
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <signal.h>
#include <stddef.h>

#if defined(__i386__)
# define ARCH_NR AUDIT_ARCH_I386
#elif defined(__x86_64__)
# define ARCH_NR AUDIT_ARCH_X86_64
#elif defined(__aarch64__)
# define ARCH_NR AUDIT_ARCH_AARCH64
#else
# error "Target architecture is unsupported by seccomp."
#endif
static const unsigned int sock_filter_policy_monitor_instr_cnt = 36;

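/* Policy summary (derived from the filter below): the monitor process may only
     - write to stdout (1), stderr (2), or logfile_fd,
     - fsync logfile_fd,
     - read from drain_output_fd or stdin (0),
     - ioctl stdin with TCGETS or TCSETS,
     - call pselect6 with nfds==1 and writefds==NULL,
     - call nanosleep, sched_yield, and exit_group.
   Every other syscall (or argument combination) returns
   SECCOMP_RET_KILL_PROCESS. */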
static void populate_sock_filter_policy_monitor( ulong out_cnt, struct sock_filter * out, unsigned int logfile_fd, unsigned int drain_output_fd) {
  FD_TEST( out_cnt >= 36 );
  struct sock_filter filter[36] = {
    /* Check: Jump to RET_KILL_PROCESS if the script's arch != the runtime arch */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, arch ) ) ),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, ARCH_NR, 0, /* RET_KILL_PROCESS */ 32 ),
    /* loading syscall number in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, nr ) ) ),
    /* allow write based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_write, /* check_write */ 8, 0 ),
    /* allow fsync based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_fsync, /* check_fsync */ 13, 0 ),
    /* simply allow nanosleep */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_nanosleep, /* RET_ALLOW */ 29, 0 ),
    /* simply allow sched_yield */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_sched_yield, /* RET_ALLOW */ 28, 0 ),
    /* simply allow exit_group */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_exit_group, /* RET_ALLOW */ 27, 0 ),
    /* allow read based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_read, /* check_read */ 11, 0 ),
    /* allow ioctl based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_ioctl, /* check_ioctl */ 14, 0 ),
    /* allow pselect6 based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_pselect6, /* check_pselect6 */ 19, 0 ),
    /* none of the syscalls matched */
    { BPF_JMP | BPF_JA, 0, 0, /* RET_KILL_PROCESS */ 22 },
    // check_write:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 1, /* RET_ALLOW */ 21, /* lbl_1 */ 0 ),
    // lbl_1:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_ALLOW */ 19, /* lbl_2 */ 0 ),
    // lbl_2:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 17, /* RET_KILL_PROCESS */ 16 ),
    // check_fsync:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 15, /* RET_KILL_PROCESS */ 14 ),
    // check_read:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, drain_output_fd, /* RET_ALLOW */ 13, /* lbl_3 */ 0 ),
    // lbl_3:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 11, /* RET_KILL_PROCESS */ 10 ),
    // check_ioctl:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* lbl_4 */ 0, /* RET_KILL_PROCESS */ 8 ),
    // lbl_4:
    /* load syscall argument 1 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[1])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, TCGETS, /* RET_ALLOW */ 7, /* lbl_5 */ 0 ),
    // lbl_5:
    /* load syscall argument 1 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[1])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, TCSETS, /* RET_ALLOW */ 5, /* RET_KILL_PROCESS */ 4 ),
    // check_pselect6:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 1, /* lbl_6 */ 0, /* RET_KILL_PROCESS */ 2 ),
    // lbl_6:
    /* load syscall argument 2 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[2])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 1, /* RET_KILL_PROCESS */ 0 ),
    // RET_KILL_PROCESS:
    /* KILL_PROCESS is placed before ALLOW since it's the fallthrough case. */
    BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS ),
    // RET_ALLOW:
    /* ALLOW has to be reached by jumping */
    BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_ALLOW ),
  };
  fd_memcpy( out, filter, sizeof( filter ) );
}
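
/* Usage sketch (an assumption, not emitted by generate_filters.py): one way a
   caller could install this policy.  `log_fd` and `drain_fd` are hypothetical
   caller-owned descriptors, and <sys/prctl.h> is additionally required for
   prctl()/PR_SET_NO_NEW_PRIVS.

     struct sock_filter filter[ sock_filter_policy_monitor_instr_cnt ];
     populate_sock_filter_policy_monitor( sock_filter_policy_monitor_instr_cnt,
                                          filter, log_fd, drain_fd );

     struct sock_fprog prog = {
       .len    = (unsigned short)sock_filter_policy_monitor_instr_cnt,
       .filter = filter,
     };

     // Required so an unprivileged process is permitted to install a filter.
     FD_TEST( 0==prctl( PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0 ) );
     // Install for the calling thread; children created afterwards inherit it.
     FD_TEST( 0==syscall( SYS_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog ) );
*/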

#endif