/* THIS FILE WAS GENERATED BY generate_filters.py. DO NOT EDIT BY HAND! */
#ifndef HEADER_fd_src_discof_ipecho_generated_fd_ipecho_tile_seccomp_h
#define HEADER_fd_src_discof_ipecho_generated_fd_ipecho_tile_seccomp_h

#include "../../../../src/util/fd_util_base.h"
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <sys/socket.h> /* SOCK_NONBLOCK, SOCK_CLOEXEC, MSG_NOSIGNAL used below */
#include <signal.h>
#include <stddef.h>

#if defined(__i386__)
# define ARCH_NR AUDIT_ARCH_I386
#elif defined(__x86_64__)
# define ARCH_NR AUDIT_ARCH_X86_64
#elif defined(__aarch64__)
# define ARCH_NR AUDIT_ARCH_AARCH64
#else
# error "Target architecture is unsupported by seccomp."
#endif
static const unsigned int sock_filter_policy_fd_ipecho_tile_instr_cnt = 54;

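/* Policy summary: after verifying the audit architecture, the filter
   below dispatches on the syscall number and only ever allows write,
   fsync, close, read, accept4, ppoll, sendto, and recvfrom.  write is
   restricted to stderr (fd 2) or logfile_fd, fsync to logfile_fd only,
   and ppoll is allowed unconditionally.  close, read, accept4, sendto,
   and recvfrom are allowed only on descriptors other than stderr and
   logfile_fd, with accept4 further required to pass
   SOCK_NONBLOCK|SOCK_CLOEXEC, sendto to pass MSG_NOSIGNAL with no
   destination address, and recvfrom to pass no flags and no source
   address.  Every other syscall or argument combination falls through
   to SECCOMP_RET_KILL_PROCESS. */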
static void populate_sock_filter_policy_fd_ipecho_tile( ulong out_cnt, struct sock_filter * out, unsigned int logfile_fd ) {
  FD_TEST( out_cnt >= 54 );
  struct sock_filter filter[54] = {
    /* Check: Jump to RET_KILL_PROCESS if the script's arch != the runtime arch */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, arch ) ) ),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, ARCH_NR, 0, /* RET_KILL_PROCESS */ 50 ),
    /* loading syscall number in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, nr ) ) ),
    /* allow write based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_write, /* check_write */ 8, 0 ),
    /* allow close based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_close, /* check_close */ 11, 0 ),
    /* allow read based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_read, /* check_read */ 14, 0 ),
    /* allow accept4 based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_accept4, /* check_accept4 */ 17, 0 ),
    /* simply allow ppoll */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_ppoll, /* RET_ALLOW */ 45, 0 ),
    /* allow sendto based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_sendto, /* check_sendto */ 21, 0 ),
    /* allow recvfrom based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_recvfrom, /* check_recvfrom */ 30, 0 ),
    /* allow fsync based on expression */
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_fsync, /* check_fsync */ 39, 0 ),
    /* none of the syscalls matched */
    { BPF_JMP | BPF_JA, 0, 0, /* RET_KILL_PROCESS */ 40 },
    // check_write:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_ALLOW */ 39, /* lbl_1 */ 0 ),
    // lbl_1:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 37, /* RET_KILL_PROCESS */ 36 ),
    // check_close:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_KILL_PROCESS */ 34, /* lbl_2 */ 0 ),
    // lbl_2:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_KILL_PROCESS */ 32, /* RET_ALLOW */ 33 ),
    // check_read:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_KILL_PROCESS */ 30, /* lbl_3 */ 0 ),
    // lbl_3:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_KILL_PROCESS */ 28, /* RET_ALLOW */ 29 ),
    // check_accept4:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_KILL_PROCESS */ 26, /* lbl_5 */ 0 ),
    // lbl_5:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_KILL_PROCESS */ 24, /* lbl_4 */ 0 ),
    // lbl_4:
    /* load syscall argument 3 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[3])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SOCK_NONBLOCK|SOCK_CLOEXEC, /* RET_ALLOW */ 23, /* RET_KILL_PROCESS */ 22 ),
    // check_sendto:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_KILL_PROCESS */ 20, /* lbl_7 */ 0 ),
    // lbl_7:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_KILL_PROCESS */ 18, /* lbl_6 */ 0 ),
    // lbl_6:
    /* load syscall argument 3 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[3])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, MSG_NOSIGNAL, /* lbl_8 */ 0, /* RET_KILL_PROCESS */ 16 ),
    // lbl_8:
    /* load syscall argument 4 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[4])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* lbl_9 */ 0, /* RET_KILL_PROCESS */ 14 ),
    // lbl_9:
    /* load syscall argument 5 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[5])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 13, /* RET_KILL_PROCESS */ 12 ),
    // check_recvfrom:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_KILL_PROCESS */ 10, /* lbl_11 */ 0 ),
    // lbl_11:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_KILL_PROCESS */ 8, /* lbl_10 */ 0 ),
    // lbl_10:
    /* load syscall argument 3 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[3])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* lbl_12 */ 0, /* RET_KILL_PROCESS */ 6 ),
    // lbl_12:
    /* load syscall argument 4 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[4])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* lbl_13 */ 0, /* RET_KILL_PROCESS */ 4 ),
    // lbl_13:
    /* load syscall argument 5 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[5])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 3, /* RET_KILL_PROCESS */ 2 ),
    // check_fsync:
    /* load syscall argument 0 in accumulator */
    BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
    BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 1, /* RET_KILL_PROCESS */ 0 ),
    // RET_KILL_PROCESS:
    /* KILL_PROCESS is placed before ALLOW since it's the fallthrough case. */
    BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS ),
    // RET_ALLOW:
    /* ALLOW has to be reached by jumping */
    BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_ALLOW ),
  };
  fd_memcpy( out, filter, sizeof( filter ) );
}
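
/* Illustrative sketch (not part of the generated policy): one plausible
   way a caller could install the filter populated above, using the
   standard prctl(2) interface.  The helper name
   install_fd_ipecho_tile_seccomp and the direct use of prctl here are
   assumptions for illustration only; the actual integration point in
   the code base may differ. */
#if 0
#include <sys/prctl.h>

static void
install_fd_ipecho_tile_seccomp( unsigned int logfile_fd ) {
  /* Populate the BPF program generated above. */
  struct sock_filter instrs[ sock_filter_policy_fd_ipecho_tile_instr_cnt ];
  populate_sock_filter_policy_fd_ipecho_tile( sock_filter_policy_fd_ipecho_tile_instr_cnt, instrs, logfile_fd );

  struct sock_fprog prog = {
    .len    = (unsigned short)sock_filter_policy_fd_ipecho_tile_instr_cnt,
    .filter = instrs,
  };

  /* A process without CAP_SYS_ADMIN must set no_new_privs before it is
     permitted to load a seccomp filter. */
  FD_TEST( 0==prctl( PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0 ) );
  FD_TEST( 0==prctl( PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog ) );
}
#endif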

#endif