From 61fd28b0d8f3c7f3e6eea4091f9551125b4ae8e1 Mon Sep 17 00:00:00 2001
From: Paul Moore
Date: Wed, 27 Mar 2013 11:42:30 -0400
Subject: arch: ensure that we handle x32 correctly when checking the architecture

Since x86_64 and x32 share the same seccomp filter architecture token in the
kernel we need to do an extra step and verify the syscall number when checking
the architecture.

Signed-off-by: Paul Moore
---
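Note (not part of the commit itself): the kernel reports AUDIT_ARCH_X86_64 in
seccomp_data->arch for both the x86_64 and the x32 ABI, so an architecture-token
check alone cannot separate the two; x32 system calls are instead distinguished
by bit 0x40000000 being set in seccomp_data->nr. The fragment below is a minimal
hand-written sketch of that two-step check using the kernel's classic BPF macros
from <linux/filter.h>. The array name is hypothetical and the hard-coded
SECCOMP_RET_KILL results are illustration only; the generator changed by this
patch jumps to the per-syscall blocks or to the filter's default action instead.

#include <stddef.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#define X32_SYSCALL_BIT 0x40000000      /* same value as the new define in arch-x32.h */

/* illustrative prologue for an x86_64-only filter (hypothetical name) */
struct sock_filter x86_64_prologue[] = {
        /* A <- seccomp_data.arch */
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, arch)),
        /* wrong architecture token -> kill */
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
        /* A <- seccomp_data.nr */
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
        /* x32 syscall number (0x40000000 bit set) on an x86_64 filter -> kill */
        BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, X32_SYSCALL_BIT, 0, 1),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
        /* per-syscall checks would follow here */
};

An x32 filter flips the jump targets on the BPF_JGE test so that syscall numbers
without the 0x40000000 bit are the ones rejected, which is what the SCMP_ARCH_X32
branch of the new code below does.
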
 src/arch-x32-syscalls.c |   6 +--
 src/arch-x32.h          |   2 +
 src/gen_bpf.c           | 134 ++++++++++++++++++++++++++++++++++----------
 3 files changed, 100 insertions(+), 42 deletions(-)

diff --git a/src/arch-x32-syscalls.c b/src/arch-x32-syscalls.c
index 79cece8..00c5bc2 100644
--- a/src/arch-x32-syscalls.c
+++ b/src/arch-x32-syscalls.c
@@ -27,8 +27,6 @@
 #include "arch-x86_64.h"
 #include "arch-x32.h"
 
-#define __SCMP_X32_SYSCALL_BIT 0x40000000
-
 /**
  * Resolve a syscall name to a number
  * @param name the syscall name
@@ -44,7 +42,7 @@ int x32_syscall_resolve_name(const char *name)
 
         syscall = x86_64_syscall_resolve_name(name);
         if (syscall >= 0)
-                syscall |= __SCMP_X32_SYSCALL_BIT;
+                syscall |= X32_SYSCALL_BIT;
 
         return syscall;
 }
@@ -63,7 +61,7 @@ const char *x32_syscall_resolve_num(int num)
         int syscall = num;
 
         if (syscall >= 0)
-                syscall &= (~__SCMP_X32_SYSCALL_BIT);
+                syscall &= (~X32_SYSCALL_BIT);
 
         return x86_64_syscall_resolve_num(syscall);
 }
diff --git a/src/arch-x32.h b/src/arch-x32.h
index 32c8c0d..891a6f1 100644
--- a/src/arch-x32.h
+++ b/src/arch-x32.h
@@ -27,6 +27,8 @@
 #include "arch.h"
 #include "system.h"
 
+#define X32_SYSCALL_BIT 0x40000000
+
 #define x32_arg_count_max 6
 
 extern const struct arch_def arch_def_x32;
diff --git a/src/gen_bpf.c b/src/gen_bpf.c
index 8939a06..715560d 100644
--- a/src/gen_bpf.c
+++ b/src/gen_bpf.c
@@ -28,6 +28,7 @@
 #include 
 
 #include "arch.h"
+#include "arch-x32.h"
 #include "gen_bpf.h"
 #include "db.h"
 #include "hash.h"
@@ -163,7 +164,8 @@ struct bpf_state {
 static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state,
                                       const struct db_sys_list *sys,
                                       const struct db_arg_chain_tree *chain,
-                                      const struct bpf_jump *nxt_jump);
+                                      const struct bpf_jump *nxt_jump,
+                                      struct acc_state *a_state);
 static struct bpf_blk *_hsh_remove(struct bpf_state *state, uint64_t h_val);
 static struct bpf_blk *_hsh_find(const struct bpf_state *state,
                                  uint64_t h_val);
@@ -768,6 +770,7 @@ node_failure:
  * @param sys the syscall filter
  * @param blk the BPF instruction block
  * @param nxt_jump the jump to fallthrough to at the end of the level
+ * @param a_state the accumulator state
  *
  * Resolve the jump targets in a BPF instruction block generated by the
  * _gen_bpf_chain_lvl() function and adds the resulting block to the hash
@@ -778,7 +781,8 @@ node_failure:
 static struct bpf_blk *_gen_bpf_chain_lvl_res(struct bpf_state *state,
                                               const struct db_sys_list *sys,
                                               struct bpf_blk *blk,
-                                              const struct bpf_jump *nxt_jump)
+                                              const struct bpf_jump *nxt_jump,
+                                              struct acc_state *a_state)
 {
         int rc;
         unsigned int iter;
@@ -801,14 +805,15 @@ static struct bpf_blk *_gen_bpf_chain_lvl_res(struct bpf_state *state,
                 case TGT_PTR_BLK:
                         b_new = _gen_bpf_chain_lvl_res(state, sys,
                                                        i_iter->jt.tgt.blk,
-                                                       nxt_jump);
+                                                       nxt_jump, a_state);
                         if (b_new == NULL)
                                 return NULL;
                         i_iter->jt = _BPF_JMP_HSH(b_new->hash);
                         break;
                 case TGT_PTR_DB:
                         node = (struct db_arg_chain_tree *)i_iter->jt.tgt.db;
-                        b_new = _gen_bpf_chain(state, sys, node, nxt_jump);
+                        b_new = _gen_bpf_chain(state, sys, node,
+                                               nxt_jump, a_state);
                         if (b_new == NULL)
                                 return NULL;
                         i_iter->jt = _BPF_JMP_HSH(b_new->hash);
@@ -826,14 +831,15 @@ static struct bpf_blk *_gen_bpf_chain_lvl_res(struct bpf_state *state,
                 case TGT_PTR_BLK:
                         b_new = _gen_bpf_chain_lvl_res(state, sys,
                                                        i_iter->jf.tgt.blk,
-                                                       nxt_jump);
+                                                       nxt_jump, a_state);
                         if (b_new == NULL)
                                 return NULL;
                         i_iter->jf = _BPF_JMP_HSH(b_new->hash);
                         break;
                 case TGT_PTR_DB:
                         node = (struct db_arg_chain_tree *)i_iter->jf.tgt.db;
-                        b_new = _gen_bpf_chain(state, sys, node, nxt_jump);
+                        b_new = _gen_bpf_chain(state, sys, node,
+                                               nxt_jump, a_state);
                         if (b_new == NULL)
                                 return NULL;
                         i_iter->jf = _BPF_JMP_HSH(b_new->hash);
@@ -868,6 +874,7 @@ static struct bpf_blk *_gen_bpf_chain_lvl_res(struct bpf_state *state,
  * @param sys the syscall filter
  * @param chain the filter chain
  * @param nxt_jump the jump to fallthrough to at the end of the level
+ * @param a_state the accumulator state
  *
  * Generate the BPF instruction blocks for the given filter chain and return
  * a pointer to the first block on success; returns NULL on failure.
@@ -876,13 +883,13 @@ static struct bpf_blk *_gen_bpf_chain_lvl_res(struct bpf_state *state,
 static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state,
                                       const struct db_sys_list *sys,
                                       const struct db_arg_chain_tree *chain,
-                                      const struct bpf_jump *nxt_jump)
+                                      const struct bpf_jump *nxt_jump,
+                                      struct acc_state *a_state)
 {
         struct bpf_blk *b_head = NULL, *b_tail = NULL;
         struct bpf_blk *b_prev, *b_next, *b_iter;
         struct bpf_instr *i_iter;
         const struct db_arg_chain_tree *c_iter;
-        struct acc_state a_state = { -1, ARG_MASK_MAX };
         unsigned int iter;
 
         if (chain == NULL) {
@@ -898,7 +905,7 @@ static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state,
 
         /* build all of the blocks for this level */
         do {
-                b_iter = _gen_bpf_node(state, c_iter, &a_state);
+                b_iter = _gen_bpf_node(state, c_iter, a_state);
                 if (b_iter == NULL)
                         goto chain_failure;
                 if (b_head != NULL) {
@@ -947,7 +954,8 @@ static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state,
                 b_iter = _gen_bpf_chain_lvl_res(state, sys, b_iter,
                                                 (b_next == NULL ?
                                                  nxt_jump :
-                                                 &_BPF_JMP_BLK(b_next)));
+                                                 &_BPF_JMP_BLK(b_next)),
+                                                a_state);
                 if (b_iter == NULL)
                         goto chain_failure;
 
@@ -980,7 +988,7 @@ chain_failure:
  * @param state the BPF state
  * @param sys the syscall filter DB entry
  * @param nxt_hash the hash value of the next syscall filter DB entry
- * @param first this is the first syscall for the architecture
+ * @param acc_reset accumulator reset flag
  *
  * Generate the BPF instruction blocks for the given syscall filter and return
  * a pointer to the first block on success; returns NULL on failure.  It is
@@ -991,27 +999,35 @@ chain_failure:
  */
 static struct bpf_blk *_gen_bpf_syscall(struct bpf_state *state,
                                         const struct db_sys_list *sys,
-                                        uint64_t nxt_hash, int first)
+                                        uint64_t nxt_hash,
+                                        int acc_reset)
 {
         int rc;
         struct bpf_instr instr;
         struct bpf_blk *blk_c, *blk_s = NULL;
         struct bpf_jump def_jump = _BPF_JMP_HSH(state->def_hsh);
+        struct acc_state a_state;
 
-        /* generate the argument chains */
-        blk_c = _gen_bpf_chain(state, sys, sys->chains, &def_jump);
-        if (blk_c == NULL)
-                return NULL;
-
-        /* load the syscall into the accumulator (if needed) */
-        if (first) {
+        /* setup the accumulator state */
+        if (acc_reset) {
                 _BPF_INSTR(instr, BPF_LD+BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
                            _BPF_SYSCALL);
                 blk_s = _blk_append(state, NULL, &instr);
                 if (blk_s == NULL)
                         return NULL;
+                a_state.offset = _BPF_OFFSET_SYSCALL;
+                a_state.mask = ARG_MASK_MAX;
+        } else {
+                /* set the accumulator state to an unknown value */
+                a_state.offset = -1;
+                a_state.mask = ARG_MASK_MAX;
         }
 
+        /* generate the argument chains */
+        blk_c = _gen_bpf_chain(state, sys, sys->chains, &def_jump, &a_state);
+        if (blk_c == NULL)
+                return NULL;
+
         /* syscall check */
         _BPF_INSTR(instr, BPF_JMP+BPF_JEQ,
                    _BPF_JMP_HSH(blk_c->hash), _BPF_JMP_HSH(nxt_hash),
@@ -1049,6 +1065,7 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
 {
         int rc;
         unsigned int blk_cnt = 0;
+        unsigned int acc_reset;
         struct bpf_instr instr;
         struct db_sys_list *s_head = NULL, *s_tail = NULL, *s_iter, *s_iter_b;
         struct bpf_blk *b_head = NULL, *b_tail = NULL, *b_iter, *b_new;
@@ -1119,6 +1136,12 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
                 }
         }
 
+        if ((db->arch->token == SCMP_ARCH_X86_64 ||
+             db->arch->token == SCMP_ARCH_X32) && (db_secondary == NULL))
+                acc_reset = 0;
+        else
+                acc_reset = 1;
+
         /* create the syscall filters and add them to block list group */
         for (s_iter = s_tail; s_iter != NULL; s_iter = s_iter->pri_prv) {
                 if (s_iter->valid == 0)
@@ -1128,7 +1151,7 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
                 b_new = _gen_bpf_syscall(state, s_iter,
                                          (b_head == NULL ?
                                           state->def_hsh : b_head->hash),
-                                         (s_iter == s_head ? 1 : 0));
+                                         (s_iter == s_head ? acc_reset : 0));
                 if (b_new == NULL)
                         goto arch_failure;
 
@@ -1148,27 +1171,62 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
                 blk_cnt++;
         }
 
-        /* do the architecture check and load the syscall number */
-        if (b_head != NULL) {
-                /* arch check */
-                _BPF_INSTR(instr, BPF_JMP+BPF_JEQ,
-                           _BPF_JMP_HSH(b_head->hash),
-                           _BPF_JMP_NXT(blk_cnt),
-                           _BPF_K(db->arch->token_bpf));
-                b_head->prev = _blk_append(state, NULL, &instr);
-                if (b_head->prev == NULL)
+        /* additional ABI filtering */
+        if ((db->arch->token == SCMP_ARCH_X86_64 ||
+             db->arch->token == SCMP_ARCH_X32) && (db_secondary == NULL)) {
+                _BPF_INSTR(instr, BPF_LD+BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
+                           _BPF_SYSCALL);
+                b_new = _blk_append(state, NULL, &instr);
+                if (b_new == NULL)
                         goto arch_failure;
-                b_head->prev->next = b_head;
-                b_head = b_head->prev;
-        } else {
-                /* arch check */
-                _BPF_INSTR(instr, BPF_JMP+BPF_JEQ,
-                           _BPF_JMP_HSH(state->def_hsh), _BPF_JMP_NXT(0),
-                           _BPF_K(db->arch->token_bpf));
-                b_head = _blk_append(state, NULL, &instr);
-                if (b_head == NULL)
+                if (db->arch->token == SCMP_ARCH_X86_64) {
+                        /* filter out x32 */
+                        _BPF_INSTR(instr, BPF_JMP+BPF_JGE,
+                                   _BPF_JMP_NXT(blk_cnt++), _BPF_JMP_NO,
+                                   _BPF_K(X32_SYSCALL_BIT));
+                        if (b_head != NULL)
+                                instr.jf = _BPF_JMP_HSH(b_head->hash);
+                        else
+                                instr.jf = _BPF_JMP_HSH(state->def_hsh);
+                } else if (db->arch->token == SCMP_ARCH_X32) {
+                        /* filter out x86_64 */
+                        _BPF_INSTR(instr, BPF_JMP+BPF_JGE,
+                                   _BPF_JMP_NO, _BPF_JMP_NXT(blk_cnt++),
+                                   _BPF_K(X32_SYSCALL_BIT));
+                        if (b_head != NULL)
+                                instr.jt = _BPF_JMP_HSH(b_head->hash);
+                        else
+                                instr.jt = _BPF_JMP_HSH(state->def_hsh);
+                } else
+                        /* we should never get here */
+                        goto arch_failure;
+                b_new = _blk_append(state, b_new, &instr);
+                if (b_new == NULL)
+                        goto arch_failure;
+                b_new->next = b_head;
+                if (b_head != NULL)
+                        b_head->prev = b_new;
+                b_head = b_new;
+                rc = _hsh_add(state, &b_head, 1);
+                if (rc < 0)
                         goto arch_failure;
         }
+
+        /* do the ABI/architecture check */
+        _BPF_INSTR(instr, BPF_JMP+BPF_JEQ,
+                   _BPF_JMP_NO, _BPF_JMP_NXT(blk_cnt++),
+                   _BPF_K(db->arch->token_bpf));
+        if (b_head != NULL)
+                instr.jt = _BPF_JMP_HSH(b_head->hash);
+        else
+                instr.jt = _BPF_JMP_HSH(state->def_hsh);
+        b_new = _blk_append(state, NULL, &instr);
+        if (b_new == NULL)
+                goto arch_failure;
+        b_new->next = b_head;
+        if (b_head != NULL)
+                b_head->prev = b_new;
+        b_head = b_new;
         rc = _hsh_add(state, &b_head, 1);
         if (rc < 0)
                 goto arch_failure;
-- 