author     Paul Moore <paul@paul-moore.com>  2017-02-23 12:27:05 -0500
committer  Paul Moore <paul@paul-moore.com>  2017-02-23 12:27:05 -0500
commit     0006b53a6d4163df86739efd712d62deb1e9c0ef (patch)
tree       403875373b0ffcb6ce04eec30ab834c9c3f3c6df
parent     882e772fe8a8eaab1c562b7557e8828e5f5f0b4e (diff)
download   libseccomp-0006b53a6d4163df86739efd712d62deb1e9c0ef.tar.gz
bpf: don't catch the -1 syscall in the x32/x86_64 check
The -1 syscall can be used by a tracing process to skip a syscall. Up until Linux v4.8 this was of no concern for libseccomp, since the seccomp filter was only executed at the start of the syscall and not after the tracing process was notified; however, recent kernels also execute the seccomp filter after the tracing process finishes its syscall handling. This caused problems on x86_64 systems that didn't explicitly add an x32 architecture to their filters. This patch fixes the x32 check to treat the -1 syscall like any other syscall.

Signed-off-by: Paul Moore <paul@paul-moore.com>
(imported from commit ba73ee4f56e8ada7309345cbe08ebf358d60f7e7)
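As a minimal sketch (not part of this commit) of the tracer behavior the message refers to: on x86_64 a tracer can skip a traced child's syscall by rewriting the syscall number to -1 at a syscall-entry stop. The function name, the child pid, and the surrounding stop handling are assumptions for illustration only.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Sketch: invoked at a syscall-entry stop for a traced child (pid).
 * Rewriting orig_rax to -1 asks the kernel to skip the syscall; as the
 * commit message notes, on Linux v4.8+ the seccomp filter also runs
 * after this point and therefore sees the -1 syscall number. */
static long skip_current_syscall(pid_t pid)
{
	struct user_regs_struct regs;

	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == -1)
		return -1;
	regs.orig_rax = (unsigned long long)-1;
	return ptrace(PTRACE_SETREGS, pid, NULL, &regs);
}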
-rw-r--r--  src/gen_bpf.c | 21
1 file changed, 20 insertions(+), 1 deletion(-)
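For orientation before the diff, here is a rough sketch in raw classic BPF of the x86_64 check the patched generator builds: a -1 syscall number now falls through to the normal filter body instead of hitting the bad-architecture action. This is not libseccomp's literal output; the seccomp_data->arch test that precedes it in a real filter is omitted, the SECCOMP_RET_KILL / SECCOMP_RET_ALLOW actions are placeholders, and the jump offsets are specific to this toy layout.

#include <stddef.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#define X32_SYSCALL_BIT 0x40000000

struct sock_filter x32_check[] = {
	/* load the syscall number */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
	/* nr >= X32_SYSCALL_BIT: fall through to the -1 check;
	 * otherwise it is a native x86_64 syscall, skip to the filter body */
	BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, X32_SYSCALL_BIT, 0, 2),
	/* nr == -1 (tracer skipped the syscall): treat it like any other
	 * syscall and continue; anything else up here really is x32 */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (__u32)-1, 1, 0),
	/* bad architecture (x32 syscall on an x86_64-only filter) */
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
	/* ... per-syscall filter body would continue here ... */
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};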
diff --git a/src/gen_bpf.c b/src/gen_bpf.c
index 2418a1a..01dcdac 100644
--- a/src/gen_bpf.c
+++ b/src/gen_bpf.c
@@ -1353,13 +1353,32 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
/* filter out x32 */
_BPF_INSTR(instr,
_BPF_OP(state->arch, BPF_JMP + BPF_JGE),
- _BPF_JMP_HSH(state->bad_arch_hsh),
+ _BPF_JMP_NO,
_BPF_JMP_NO,
_BPF_K(state->arch, X32_SYSCALL_BIT));
if (b_head != NULL)
instr.jf = _BPF_JMP_HSH(b_head->hash);
else
instr.jf = _BPF_JMP_HSH(state->def_hsh);
+ b_new = _blk_append(state, b_new, &instr);
+ if (b_new == NULL)
+ goto arch_failure;
+ /* NOTE: starting with Linux v4.8 the seccomp filters
+ * are processed both when the syscall is
+ * initially executed as well as after any
+ * tracing processes finish so we need to make
+ * sure we don't trap the -1 syscall which
+ * tracers can use to skip the syscall, see
+ * seccomp(2) for more information */
+ _BPF_INSTR(instr,
+ _BPF_OP(state->arch, BPF_JMP + BPF_JEQ),
+ _BPF_JMP_NO,
+ _BPF_JMP_HSH(state->bad_arch_hsh),
+ _BPF_K(state->arch, -1));
+ if (b_head != NULL)
+ instr.jt = _BPF_JMP_HSH(b_head->hash);
+ else
+ instr.jt = _BPF_JMP_HSH(state->def_hsh);
blk_cnt++;
} else if (state->arch->token == SCMP_ARCH_X32) {
/* filter out x86_64 */