LCOV - code coverage report
Current view: top level - discof/batch/generated - fd_batch_tile_seccomp.h (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 57 0.0 %
Date: 2025-03-20 12:08:36 Functions: 0 1 0.0 %

          Line data    Source code
       1             : /* THIS FILE WAS GENERATED BY generate_filters.py. DO NOT EDIT BY HAND! */
       2             : #ifndef HEADER_fd_src_discof_batch_generated_fd_batch_tile_seccomp_h
       3             : #define HEADER_fd_src_discof_batch_generated_fd_batch_tile_seccomp_h
       4             : 
       5             : #include "../../../../src/util/fd_util_base.h"
       6             : #include <linux/audit.h>
       7             : #include <linux/capability.h>
       8             : #include <linux/filter.h>
       9             : #include <linux/seccomp.h>
      10             : #include <linux/bpf.h>
      11             : #include <sys/syscall.h>
      12             : #include <signal.h>
      13             : #include <stddef.h>
      14             : 
      15             : #if defined(__i386__)
      16             : # define ARCH_NR  AUDIT_ARCH_I386
      17             : #elif defined(__x86_64__)
      18             : # define ARCH_NR  AUDIT_ARCH_X86_64
      19             : #elif defined(__aarch64__)
      20             : # define ARCH_NR AUDIT_ARCH_AARCH64
      21             : #else
      22             : # error "Target architecture is unsupported by seccomp."
      23             : #endif
/* Instruction count of the generated seccomp filter; must stay equal to the
   length of the filter[] array built in populate_sock_filter_policy_fd_batch_tile
   (callers size their sock_filter buffer from this). */
       24             : static const unsigned int sock_filter_policy_fd_batch_tile_instr_cnt = 51;
      25             : 
/* populate_sock_filter_policy_fd_batch_tile: writes a fixed 51-instruction
   classic-BPF seccomp program into `out` (out_cnt must be >= 51, enforced by
   FD_TEST).  The program:
     - kills the process if the runtime architecture != ARCH_NR;
     - dispatches on the syscall number; any syscall not listed is killed;
     - per-syscall argument policy (all fd comparisons are on args[0]):
         write     -> allowed to fd 2 (presumably stderr -- confirm against
                      generator policy), logfile_fd, tmp_fd, tmp_inc_fd,
                      full_snapshot_fd, incremental_snapshot_fd
         fsync     -> allowed only on logfile_fd
         fchmod    -> allowed only when args[1] == S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH (0644)
         ftruncate -> fd must be one of the four snapshot/tmp fds AND args[1] == 0
         lseek     -> fd must be one of the four snapshot/tmp fds
         read      -> allowed only on tmp_fd or tmp_inc_fd
   NOTE(review): the check_readlink label falls directly through to
   RET_KILL_PROCESS with no argument checks, i.e. readlink is matched by the
   dispatch but always denied -- looks intentional (generator artifact), but
   verify against the .seccomppolicy source.
   CAUTION: the numeric jump operands (e.g. "47", "37") are RELATIVE
   skip counts over following instructions, hand-emitted by generate_filters.py;
   inserting, removing, or reordering any instruction invalidates them.
   Regenerate instead of editing. */
       26           0 : static void populate_sock_filter_policy_fd_batch_tile( ulong out_cnt, struct sock_filter * out, unsigned int logfile_fd, unsigned int tmp_fd, unsigned int tmp_inc_fd, unsigned int full_snapshot_fd, unsigned int incremental_snapshot_fd) {
       27           0 :   FD_TEST( out_cnt >= 51 );
       28           0 :   struct sock_filter filter[51] = {
       29             :     /* Check: Jump to RET_KILL_PROCESS if the script's arch != the runtime arch */
       30           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, arch ) ) ),
       31           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, ARCH_NR, 0, /* RET_KILL_PROCESS */ 47 ),
       32             :     /* loading syscall number in accumulator */
       33           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, nr ) ) ),
       34             :     /* allow write based on expression */
       35           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_write, /* check_write */ 7, 0 ),
       36             :     /* allow fsync based on expression */
       37           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_fsync, /* check_fsync */ 18, 0 ),
       38             :     /* allow fchmod based on expression */
       39           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_fchmod, /* check_fchmod */ 19, 0 ),
       40             :     /* allow ftruncate based on expression */
       41           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_ftruncate, /* check_ftruncate */ 20, 0 ),
       42             :     /* allow lseek based on expression */
       43           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_lseek, /* check_lseek */ 29, 0 ),
       44             :     /* allow read based on expression */
       45           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_read, /* check_read */ 36, 0 ),
       46             :     /* allow readlink based on expression */
       47           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_readlink, /* check_readlink */ 39, 0 ),
       48             :     /* none of the syscalls matched */
       49           0 :     { BPF_JMP | BPF_JA, 0, 0, /* RET_KILL_PROCESS */ 38 },
       50             : //  check_write:
       51             :     /* load syscall argument 0 in accumulator */
       52           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       53           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_ALLOW */ 37, /* lbl_1 */ 0 ),
       54             : //  lbl_1:
       55             :     /* load syscall argument 0 in accumulator */
       56           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       57           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 35, /* lbl_2 */ 0 ),
       58             : //  lbl_2:
       59             :     /* load syscall argument 0 in accumulator */
       60           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       61           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_fd, /* RET_ALLOW */ 33, /* lbl_3 */ 0 ),
       62             : //  lbl_3:
       63             :     /* load syscall argument 0 in accumulator */
       64           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       65           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_inc_fd, /* RET_ALLOW */ 31, /* lbl_4 */ 0 ),
       66             : //  lbl_4:
       67             :     /* load syscall argument 0 in accumulator */
       68           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       69           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, full_snapshot_fd, /* RET_ALLOW */ 29, /* lbl_5 */ 0 ),
       70             : //  lbl_5:
       71             :     /* load syscall argument 0 in accumulator */
       72           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       73           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, incremental_snapshot_fd, /* RET_ALLOW */ 27, /* RET_KILL_PROCESS */ 26 ),
       74             : //  check_fsync:
       75             :     /* load syscall argument 0 in accumulator */
       76           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       77           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 25, /* RET_KILL_PROCESS */ 24 ),
       78             : //  check_fchmod:
       79             :     /* load syscall argument 1 in accumulator */
       80           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[1])),
       81           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH, /* RET_ALLOW */ 23, /* RET_KILL_PROCESS */ 22 ),
       82             : //  check_ftruncate:
       83             :     /* load syscall argument 0 in accumulator */
       84           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       85           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_fd, /* lbl_6 */ 6, /* lbl_7 */ 0 ),
       86             : //  lbl_7:
       87             :     /* load syscall argument 0 in accumulator */
       88           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       89           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_inc_fd, /* lbl_6 */ 4, /* lbl_8 */ 0 ),
       90             : //  lbl_8:
       91             :     /* load syscall argument 0 in accumulator */
       92           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       93           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, full_snapshot_fd, /* lbl_6 */ 2, /* lbl_9 */ 0 ),
       94             : //  lbl_9:
       95             :     /* load syscall argument 0 in accumulator */
       96           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
       97           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, incremental_snapshot_fd, /* lbl_6 */ 0, /* RET_KILL_PROCESS */ 14 ),
       98             : //  lbl_6:
       99             :     /* load syscall argument 1 in accumulator */
      100           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[1])),
      101           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 13, /* RET_KILL_PROCESS */ 12 ),
      102             : //  check_lseek:
      103             :     /* load syscall argument 0 in accumulator */
      104           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      105           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_fd, /* RET_ALLOW */ 11, /* lbl_10 */ 0 ),
      106             : //  lbl_10:
      107             :     /* load syscall argument 0 in accumulator */
      108           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      109           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_inc_fd, /* RET_ALLOW */ 9, /* lbl_11 */ 0 ),
      110             : //  lbl_11:
      111             :     /* load syscall argument 0 in accumulator */
      112           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      113           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, full_snapshot_fd, /* RET_ALLOW */ 7, /* lbl_12 */ 0 ),
      114             : //  lbl_12:
      115             :     /* load syscall argument 0 in accumulator */
      116           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      117           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, incremental_snapshot_fd, /* RET_ALLOW */ 5, /* RET_KILL_PROCESS */ 4 ),
      118             : //  check_read:
      119             :     /* load syscall argument 0 in accumulator */
      120           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      121           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_fd, /* RET_ALLOW */ 3, /* lbl_13 */ 0 ),
      122             : //  lbl_13:
      123             :     /* load syscall argument 0 in accumulator */
      124           0 :     BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
      125           0 :     BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, tmp_inc_fd, /* RET_ALLOW */ 1, /* RET_KILL_PROCESS */ 0 ),
      126             : //  check_readlink:
      127             : //  RET_KILL_PROCESS:
      128             :     /* KILL_PROCESS is placed before ALLOW since it's the fallthrough case. */
      129           0 :     BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS ),
      130             : //  RET_ALLOW:
      131             :     /* ALLOW has to be reached by jumping */
      132           0 :     BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_ALLOW ),
      133           0 :   };
      134           0 :   fd_memcpy( out, filter, sizeof( filter ) );
      135           0 : }
     136             : 
     137             : #endif

Generated by: LCOV version 1.14