summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPaul Moore <paul@paul-moore.com>2017-02-15 15:33:39 -0500
committerPaul Moore <paul@paul-moore.com>2017-02-23 12:17:56 -0500
commitba73ee4f56e8ada7309345cbe08ebf358d60f7e7 (patch)
tree447646652ea462dc533f183e945cdf5267d7bdac
parent11e21098e0c3b5481fb0f6e6bdbb266bdd0fc24c (diff)
downloadlibseccomp-ba73ee4f56e8ada7309345cbe08ebf358d60f7e7.tar.gz
bpf: don't catch the -1 syscall in the x32/x86_64 check
The -1 syscall can be used by a tracing process to skip a syscall. Up until Linux v4.8 this was of no concern for libseccomp, since the seccomp filter was only executed at the start of the syscall and not after the tracing process was notified. However, recent kernels also execute the seccomp filter after the tracing process finishes its syscall handling; this caused problems on x86_64 systems that didn't explicitly add an x32 architecture to their filters. This patch fixes the x32 check to treat the -1 syscall like any other syscall. Signed-off-by: Paul Moore <paul@paul-moore.com>
-rw-r--r--src/gen_bpf.c21
1 file changed, 20 insertions, 1 deletion
diff --git a/src/gen_bpf.c b/src/gen_bpf.c
index 65e96c4..54df2ef 100644
--- a/src/gen_bpf.c
+++ b/src/gen_bpf.c
@@ -1351,13 +1351,32 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
/* filter out x32 */
_BPF_INSTR(instr,
_BPF_OP(state->arch, BPF_JMP + BPF_JGE),
- _BPF_JMP_HSH(state->bad_arch_hsh),
+ _BPF_JMP_NO,
_BPF_JMP_NO,
_BPF_K(state->arch, X32_SYSCALL_BIT));
if (b_head != NULL)
instr.jf = _BPF_JMP_HSH(b_head->hash);
else
instr.jf = _BPF_JMP_HSH(state->def_hsh);
+ b_new = _blk_append(state, b_new, &instr);
+ if (b_new == NULL)
+ goto arch_failure;
+ /* NOTE: starting with Linux v4.8 the seccomp filters
+ * are processed both when the syscall is
+ * initially executed as well as after any
+ * tracing processes finish so we need to make
+ * sure we don't trap the -1 syscall which
+ * tracers can use to skip the syscall, see
+ * seccomp(2) for more information */
+ _BPF_INSTR(instr,
+ _BPF_OP(state->arch, BPF_JMP + BPF_JEQ),
+ _BPF_JMP_NO,
+ _BPF_JMP_HSH(state->bad_arch_hsh),
+ _BPF_K(state->arch, -1));
+ if (b_head != NULL)
+ instr.jt = _BPF_JMP_HSH(b_head->hash);
+ else
+ instr.jt = _BPF_JMP_HSH(state->def_hsh);
blk_cnt++;
} else if (state->arch->token == SCMP_ARCH_X32) {
/* filter out x86_64 */