LCOV - code coverage report
Current view: top level - app/fdctl/monitor/generated - monitor_seccomp.h (source / functions)
Test: cov.lcov
Date: 2024-11-13 11:58:15
Coverage:   Lines: 0 hit of 28 total (0.0 %)   Functions: 0 hit of 1 total (0.0 %)

          Line data    Source code
       1             : /* THIS FILE WAS GENERATED BY generate_filters.py. DO NOT EDIT BY HAND! */
       2             : #ifndef HEADER_fd_src_app_fdctl_monitor_generated_monitor_seccomp_h
       3             : #define HEADER_fd_src_app_fdctl_monitor_generated_monitor_seccomp_h
       4             : 
       5             : #include "../../../../../src/util/fd_util_base.h"
       6             : #include <linux/audit.h>
       7             : #include <linux/capability.h>
       8             : #include <linux/filter.h>
       9             : #include <linux/seccomp.h>
      10             : #include <linux/bpf.h>
      11             : #include <sys/syscall.h>
      12             : #include <signal.h>
      13             : #include <stddef.h>
      14             : 
      15             : #if defined(__i386__)
      16             : # define ARCH_NR  AUDIT_ARCH_I386
      17             : #elif defined(__x86_64__)
      18             : # define ARCH_NR  AUDIT_ARCH_X86_64
      19             : #elif defined(__aarch64__)
      20             : # define ARCH_NR AUDIT_ARCH_AARCH64
      21             : #else
      22             : # error "Target architecture is unsupported by seccomp."
      23             : #endif
      24             : static const unsigned int sock_filter_policy_monitor_instr_cnt = 22;
      25             : 
      26           0 : static void populate_sock_filter_policy_monitor( ulong out_cnt, struct sock_filter * out, unsigned int logfile_fd, unsigned int drain_output_fd) {
      27           0 :   FD_TEST( out_cnt >= 22 );
      28           0 :   struct sock_filter filter[22] = {
      29             :     /* Check: jump to RET_KILL_PROCESS if the filter's target arch != the runtime arch */
      30           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, arch ) ) ),
      31           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, ARCH_NR, 0, /* RET_KILL_PROCESS */ 18 ),
      32             :     /* load the syscall number into the accumulator */
      33           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, nr ) ) ),
      34             :     /* allow write only if its arguments pass the checks at check_write */
      35           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_write, /* check_write */ 6, 0 ),
      36             :     /* allow fsync only if its arguments pass the checks at check_fsync */
      37           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_fsync, /* check_fsync */ 11, 0 ),
      38             :     /* unconditionally allow nanosleep */
      39           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_nanosleep, /* RET_ALLOW */ 15, 0 ),
      40             :     /* unconditionally allow sched_yield */
      41           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_sched_yield, /* RET_ALLOW */ 14, 0 ),
      42             :     /* unconditionally allow exit_group */
      43           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_exit_group, /* RET_ALLOW */ 13, 0 ),
      44             :     /* allow read only if its arguments pass the checks at check_read */
      45           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_read, /* check_read */ 9, 0 ),
      46             :     /* none of the syscalls matched */
      47           0 :     { BPF_JMP | BPF_JA, 0, 0, /* RET_KILL_PROCESS */ 10 },
      48             : //  check_write:
      49             :     /* load syscall argument 0 into the accumulator */
      50           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      51           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 1, /* RET_ALLOW */ 9, /* lbl_1 */ 0 ),
      52             : //  lbl_1:
      53             :     /* load syscall argument 0 into the accumulator */
      54           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      55           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_ALLOW */ 7, /* lbl_2 */ 0 ),
      56             : //  lbl_2:
      57             :     /* load syscall argument 0 into the accumulator */
      58           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      59           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 5, /* RET_KILL_PROCESS */ 4 ),
      60             : //  check_fsync:
      61             :     /* load syscall argument 0 into the accumulator */
      62           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      63           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 3, /* RET_KILL_PROCESS */ 2 ),
      64             : //  check_read:
      65             :     /* load syscall argument 0 into the accumulator */
      66           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      67           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, drain_output_fd, /* RET_ALLOW */ 1, /* RET_KILL_PROCESS */ 0 ),
      68             : //  RET_KILL_PROCESS:
      69             :     /* KILL_PROCESS is placed before ALLOW since it's the fallthrough case. */
      70           0 :     BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS ),
      71             : //  RET_ALLOW:
      72             :     /* ALLOW must be reached by an explicit jump */
      73           0 :     BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_ALLOW ),
      74           0 :   };
      75           0 :   fd_memcpy( out, filter, sizeof( filter ) );
      76           0 : }
      77             : 
      78             : #endif

Generated by: LCOV version 1.14
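
The listing above only constructs the filter program; nothing in this header installs it. A note on reading the BPF_JUMP entries: the last two fields are relative offsets, and a taken branch lands at pc + 1 + jt, so for example the SYS_nanosleep check at instruction 5 carries jt = 15 and resolves to instruction 21, the RET_ALLOW return. How fdctl's boot path actually loads this policy is outside this report; the snippet below is only a minimal sketch of the standard prctl(2) installation sequence, with the descriptor values 3 and 4 as hypothetical stand-ins for the real logfile_fd and drain_output_fd.

/* Sketch only: build and install the generated monitor policy on the
   calling thread.  Descriptor values are hypothetical placeholders. */
#include <sys/prctl.h>
#include <stdio.h>
#include <stdlib.h>
#include "monitor_seccomp.h"   /* also pulls in linux/filter.h and linux/seccomp.h */

int
main( void ) {
  struct sock_filter filter[ 22 ];
  populate_sock_filter_policy_monitor( 22UL, filter, /* logfile_fd */ 3U, /* drain_output_fd */ 4U );

  struct sock_fprog prog = {
    .len    = (unsigned short)sock_filter_policy_monitor_instr_cnt,
    .filter = filter,
  };

  /* Required to load a seccomp filter without CAP_SYS_ADMIN. */
  if( prctl( PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0 ) ) { perror( "PR_SET_NO_NEW_PRIVS" ); exit( 1 ); }

  /* Attach the filter.  From here on, any syscall outside the
     allowlist terminates the process via SECCOMP_RET_KILL_PROCESS. */
  if( prctl( PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog ) ) { perror( "PR_SET_SECCOMP" ); exit( 1 ); }

  return 0;
}

Note that once the filter is live, even the exit path must stay inside the allowlist; this policy admits exit_group, which is what a normal return from main ultimately invokes.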