summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPaul Moore <pmoore@redhat.com>2013-10-30 15:25:42 -0400
committerPaul Moore <pmoore@redhat.com>2013-10-30 15:25:42 -0400
commitb1c399e8112ac74af1d13ba67d836021ae6b4f3b (patch)
tree86064a7db4c50b19fce58f721db0f243a674f96d
parenta363a8dfd73821a0086fe985daf8c8e7c81cd8e0 (diff)
parentbbac038b75ec9df641e774b470d56d9e0a587850 (diff)
downloadlibseccomp-b1c399e8112ac74af1d13ba67d836021ae6b4f3b.tar.gz
Merge branch 'master' into release-2.1
-rw-r--r--Makefile18
-rw-r--r--include/seccomp.h.in2
-rw-r--r--macros.mk7
-rw-r--r--src/Makefile4
-rw-r--r--src/api.c71
-rw-r--r--src/arch-arm-syscalls.c3
-rw-r--r--src/arch-x86-syscalls.c3
-rw-r--r--src/arch-x86_64-syscalls.c3
-rw-r--r--src/db.c369
-rw-r--r--src/db.h15
-rw-r--r--src/gen_bpf.c76
-rw-r--r--src/gen_pfc.c28
-rw-r--r--src/hash.c1360
-rw-r--r--src/python/seccomp.pyx36
-rw-r--r--tests/.gitignore1
-rw-r--r--tests/01-sim-allow.tests5
-rw-r--r--tests/02-sim-basic.tests5
-rw-r--r--tests/03-sim-basic_chains.tests5
-rw-r--r--tests/04-sim-multilevel_chains.tests5
-rw-r--r--tests/05-sim-long_jumps.tests5
-rw-r--r--tests/06-sim-actions.tests5
-rwxr-xr-xtests/07-sim-db_bug_looping.py4
-rw-r--r--tests/07-sim-db_bug_looping.tests5
-rw-r--r--tests/08-sim-subtree_checks.tests6
-rw-r--r--tests/09-sim-syscall_priority_pre.tests5
-rw-r--r--tests/10-sim-syscall_priority_post.tests5
-rw-r--r--tests/11-basic-basic_errors.c10
-rw-r--r--tests/12-sim-basic_masked_ops.tests5
-rw-r--r--tests/13-basic-attrs.c2
-rw-r--r--tests/14-sim-reset.tests5
-rwxr-xr-xtests/16-sim-arch_basic.py6
-rw-r--r--tests/16-sim-arch_basic.tests5
-rwxr-xr-xtests/17-sim-arch_merge.py6
-rw-r--r--tests/17-sim-arch_merge.tests5
-rw-r--r--tests/18-sim-basic_whitelist.tests5
-rw-r--r--tests/19-sim-missing_syscalls.tests5
-rw-r--r--tests/22-sim-basic_chains_array.tests5
-rw-r--r--tests/23-sim-arch_all_basic.tests5
-rw-r--r--tests/24-live-arg_allow.c2
-rw-r--r--tests/25-sim-multilevel_chains_adv.tests25
-rw-r--r--tests/Makefile7
-rwxr-xr-xtests/regression141
-rwxr-xr-xtests/testdiff126
-rwxr-xr-xtests/testgen206
-rw-r--r--tests/util.c2
-rwxr-xr-xtools/check-syntax116
-rw-r--r--tools/scmp_arch_detect.c2
-rw-r--r--tools/scmp_bpf_disasm.c370
-rw-r--r--tools/scmp_bpf_sim.c7
-rw-r--r--tools/scmp_sys_resolver.c2
50 files changed, 1877 insertions, 1244 deletions
diff --git a/Makefile b/Makefile
index 4fd50c6..1f61f20 100644
--- a/Makefile
+++ b/Makefile
@@ -41,7 +41,8 @@ CONFIGS = configure.mk configure.h version_info.mk libseccomp.pc
SUBDIRS_BUILD = include src tests tools
SUBDIRS_INSTALL = include src tools doc
-.PHONY: tarball install ctags cstags clean dist-clean $(SUBDIRS_BUILD)
+.PHONY: tarball install check check-syntax ctags cstags clean dist-clean \
+ $(SUBDIRS_BUILD)
all: $(SUBDIRS_BUILD)
@@ -49,7 +50,7 @@ $(CONFIGS): version_info
@$(ECHO_INFO) "automatically generating configuration ..."
@./configure
-tarball: clean
+tarball: dist-clean
@ver=$(VERSION_RELEASE); \
tarball=libseccomp-$$ver.tar.gz; \
$(ECHO_INFO) "creating the tarball ../$$tarball"; \
@@ -78,15 +79,15 @@ include: $(VERSION_HDR) $(CONFIGS)
@$(ECHO_INFO) "building in directory $@/ ..."
@$(MAKE) -C $@
-src: $(VERSION_HDR) $(CONFIGS)
+src: $(VERSION_HDR) $(CONFIGS) include
@$(ECHO_INFO) "building in directory $@/ ..."
@$(MAKE) -C $@
-tests: src
+tests: src include
@$(ECHO_INFO) "building in directory $@/ ..."
@$(MAKE) -C $@
-tools: src
+tools: src include
@$(ECHO_INFO) "building in directory $@/ ..."
@$(MAKE) -C $@
@@ -98,6 +99,13 @@ install: $(SUBDIRS_BUILD)
$(MAKE) -C $$dir install; \
done
+check: tools tests
+ @$(ECHO_INFO) "checking in directory tests/ ..."
+ @$(MAKE) -C tests check
+
+check-syntax:
+ @./tools/check-syntax
+
ctags:
@$(ECHO_INFO) "generating ctags for the project ..."
@ctags -R *
diff --git a/include/seccomp.h.in b/include/seccomp.h.in
index 7897cbf..e150fbd 100644
--- a/include/seccomp.h.in
+++ b/include/seccomp.h.in
@@ -46,7 +46,7 @@ extern "C" {
/**
* Filter context/handle
*/
-typedef void * scmp_filter_ctx;
+typedef void *scmp_filter_ctx;
/**
* Filter attributes
diff --git a/macros.mk b/macros.mk
index 9c62fa7..6df5b3a 100644
--- a/macros.mk
+++ b/macros.mk
@@ -45,8 +45,9 @@ V ?= 0
CPPFLAGS += -I$(TOPDIR) -I$(TOPDIR)/include
LIBFLAGS =
-CFLAGS ?= -Wl,-z,relro -Wall -O0 -g
+CFLAGS ?= -Wl,-z,relro -Wall -O0 -g -fvisibility=hidden
CFLAGS += -fPIC
+PYCFLAGS ?= -fvisibility=default
LDFLAGS ?= -z relro -g
#
@@ -107,7 +108,7 @@ VERSION_HDR = version.h
PY_DISTUTILS = \
VERSION_RELEASE="$(VERSION_RELEASE)" \
- CFLAGS="$(CFLAGS) $(CPPFLAGS)" LDFLAGS="$(LDFLAGS)" \
+ CFLAGS="$(CFLAGS) $(CPPFLAGS) $(PYCFLAGS)" LDFLAGS="$(LDFLAGS)" \
$(PYTHON) ./setup.py
ifeq ($(V),0)
@@ -181,7 +182,7 @@ INSTALL_BIN_MACRO += \
$(INSTALL) -o $(INSTALL_OWNER) -g $(INSTALL_GROUP) \
-d "$(INSTALL_BIN_DIR)"; \
$(INSTALL) -o $(INSTALL_OWNER) -g $(INSTALL_GROUP) -m 0755 \
- "$^" "$(INSTALL_BIN_DIR)";
+ $^ "$(INSTALL_BIN_DIR)";
ifeq ($(V),0)
INSTALL_PC_MACRO = \
diff --git a/src/Makefile b/src/Makefile
index ae50f86..7b980ab 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -85,8 +85,8 @@ install: $(LIB_SHARED)
done
clean:
- $(RM) $(DEPS) $(OBJS) $(LIB_STATIC) $(LIB_SHARED)
- @for dir in $(BINDINGS); do \
+ $(RM) $(DEPS) $(OBJS) $(LIB_STATIC) libseccomp.so.*
+ @for dir in python; do \
$(MAKE) -C $$dir clean; \
done
diff --git a/src/api.c b/src/api.c
index 63d31c8..86e5f9d 100644
--- a/src/api.c
+++ b/src/api.c
@@ -37,6 +37,8 @@
#include "gen_bpf.h"
#include "system.h"
+#define API __attribute__((visibility("default")))
+
/**
* Validate a filter context
* @param ctx the filter context
@@ -66,7 +68,7 @@ static int _syscall_valid(int syscall)
}
/* NOTE - function header comment in include/seccomp.h */
-scmp_filter_ctx seccomp_init(uint32_t def_action)
+API scmp_filter_ctx seccomp_init(uint32_t def_action)
{
struct db_filter_col *col;
struct db_filter *db;
@@ -94,7 +96,7 @@ init_failure_col:
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_reset(scmp_filter_ctx ctx, uint32_t def_action)
+API int seccomp_reset(scmp_filter_ctx ctx, uint32_t def_action)
{
int rc;
struct db_filter_col *col = (struct db_filter_col *)ctx;
@@ -116,7 +118,7 @@ int seccomp_reset(scmp_filter_ctx ctx, uint32_t def_action)
}
/* NOTE - function header comment in include/seccomp.h */
-void seccomp_release(scmp_filter_ctx ctx)
+API void seccomp_release(scmp_filter_ctx ctx)
{
if (_ctx_valid(ctx))
return;
@@ -125,7 +127,8 @@ void seccomp_release(scmp_filter_ctx ctx)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_merge(scmp_filter_ctx ctx_dst, scmp_filter_ctx ctx_src)
+API int seccomp_merge(scmp_filter_ctx ctx_dst,
+ scmp_filter_ctx ctx_src)
{
struct db_filter_col *col_dst = (struct db_filter_col *)ctx_dst;
struct db_filter_col *col_src = (struct db_filter_col *)ctx_src;
@@ -142,13 +145,14 @@ int seccomp_merge(scmp_filter_ctx ctx_dst, scmp_filter_ctx ctx_src)
}
/* NOTE - function header comment in include/seccomp.h */
-uint32_t seccomp_arch_native(void)
+API uint32_t seccomp_arch_native(void)
{
return arch_def_native->token;
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_arch_exist(const scmp_filter_ctx ctx, uint32_t arch_token)
+API int seccomp_arch_exist(const scmp_filter_ctx ctx,
+ uint32_t arch_token)
{
struct db_filter_col *col = (struct db_filter_col *)ctx;
@@ -162,7 +166,7 @@ int seccomp_arch_exist(const scmp_filter_ctx ctx, uint32_t arch_token)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_arch_add(scmp_filter_ctx ctx, uint32_t arch_token)
+API int seccomp_arch_add(scmp_filter_ctx ctx, uint32_t arch_token)
{
int rc;
const struct arch_def *arch;
@@ -191,7 +195,7 @@ int seccomp_arch_add(scmp_filter_ctx ctx, uint32_t arch_token)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_arch_remove(scmp_filter_ctx ctx, uint32_t arch_token)
+API int seccomp_arch_remove(scmp_filter_ctx ctx, uint32_t arch_token)
{
struct db_filter_col *col = (struct db_filter_col *)ctx;
@@ -207,7 +211,7 @@ int seccomp_arch_remove(scmp_filter_ctx ctx, uint32_t arch_token)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_load(const scmp_filter_ctx ctx)
+API int seccomp_load(const scmp_filter_ctx ctx)
{
int rc;
struct db_filter_col *col;
@@ -236,8 +240,8 @@ int seccomp_load(const scmp_filter_ctx ctx)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_attr_get(const scmp_filter_ctx ctx,
- enum scmp_filter_attr attr, uint32_t *value)
+API int seccomp_attr_get(const scmp_filter_ctx ctx,
+ enum scmp_filter_attr attr, uint32_t *value)
{
if (_ctx_valid(ctx))
return -EINVAL;
@@ -246,8 +250,8 @@ int seccomp_attr_get(const scmp_filter_ctx ctx,
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_attr_set(scmp_filter_ctx ctx,
- enum scmp_filter_attr attr, uint32_t value)
+API int seccomp_attr_set(scmp_filter_ctx ctx,
+ enum scmp_filter_attr attr, uint32_t value)
{
if (_ctx_valid(ctx))
return -EINVAL;
@@ -256,7 +260,7 @@ int seccomp_attr_set(scmp_filter_ctx ctx,
}
/* NOTE - function header comment in include/seccomp.h */
-char *seccomp_syscall_resolve_num_arch(uint32_t arch_token, int num)
+API char *seccomp_syscall_resolve_num_arch(uint32_t arch_token, int num)
{
const struct arch_def *arch;
const char *name;
@@ -277,7 +281,7 @@ char *seccomp_syscall_resolve_num_arch(uint32_t arch_token, int num)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_syscall_resolve_name_arch(uint32_t arch_token, const char *name)
+API int seccomp_syscall_resolve_name_arch(uint32_t arch_token, const char *name)
{
const struct arch_def *arch;
@@ -296,13 +300,14 @@ int seccomp_syscall_resolve_name_arch(uint32_t arch_token, const char *name)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_syscall_resolve_name(const char *name)
+API int seccomp_syscall_resolve_name(const char *name)
{
return seccomp_syscall_resolve_name_arch(SCMP_ARCH_NATIVE, name);
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_syscall_priority(scmp_filter_ctx ctx, int syscall, uint8_t priority)
+API int seccomp_syscall_priority(scmp_filter_ctx ctx,
+ int syscall, uint8_t priority)
{
int rc = 0, rc_tmp;
unsigned int iter;
@@ -484,9 +489,10 @@ rule_add_return:
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_rule_add_array(scmp_filter_ctx ctx,
- uint32_t action, int syscall, unsigned int arg_cnt,
- const struct scmp_arg_cmp *arg_array)
+API int seccomp_rule_add_array(scmp_filter_ctx ctx,
+ uint32_t action, int syscall,
+ unsigned int arg_cnt,
+ const struct scmp_arg_cmp *arg_array)
{
if (arg_cnt < 0 || arg_cnt > ARG_COUNT_MAX)
return -EINVAL;
@@ -495,10 +501,10 @@ int seccomp_rule_add_array(scmp_filter_ctx ctx,
0, action, syscall, arg_cnt, arg_array);
}
-
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_rule_add(scmp_filter_ctx ctx,
- uint32_t action, int syscall, unsigned int arg_cnt, ...)
+API int seccomp_rule_add(scmp_filter_ctx ctx,
+ uint32_t action, int syscall,
+ unsigned int arg_cnt, ...)
{
int rc;
int iter;
@@ -517,12 +523,11 @@ int seccomp_rule_add(scmp_filter_ctx ctx,
return rc;
}
-
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_rule_add_exact_array(scmp_filter_ctx ctx,
- uint32_t action, int syscall,
- unsigned int arg_cnt,
- const struct scmp_arg_cmp *arg_array)
+API int seccomp_rule_add_exact_array(scmp_filter_ctx ctx,
+ uint32_t action, int syscall,
+ unsigned int arg_cnt,
+ const struct scmp_arg_cmp *arg_array)
{
if (arg_cnt < 0 || arg_cnt > ARG_COUNT_MAX)
return -EINVAL;
@@ -531,10 +536,10 @@ int seccomp_rule_add_exact_array(scmp_filter_ctx ctx,
1, action, syscall, arg_cnt, arg_array);
}
-
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_rule_add_exact(scmp_filter_ctx ctx, uint32_t action,
- int syscall, unsigned int arg_cnt, ...)
+API int seccomp_rule_add_exact(scmp_filter_ctx ctx,
+ uint32_t action, int syscall,
+ unsigned int arg_cnt, ...)
{
int rc;
int iter;
@@ -555,7 +560,7 @@ int seccomp_rule_add_exact(scmp_filter_ctx ctx, uint32_t action,
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_export_pfc(const scmp_filter_ctx ctx, int fd)
+API int seccomp_export_pfc(const scmp_filter_ctx ctx, int fd)
{
if (_ctx_valid(ctx))
return -EINVAL;
@@ -564,7 +569,7 @@ int seccomp_export_pfc(const scmp_filter_ctx ctx, int fd)
}
/* NOTE - function header comment in include/seccomp.h */
-int seccomp_export_bpf(const scmp_filter_ctx ctx, int fd)
+API int seccomp_export_bpf(const scmp_filter_ctx ctx, int fd)
{
int rc;
struct bpf_program *program;
diff --git a/src/arch-arm-syscalls.c b/src/arch-arm-syscalls.c
index 8083486..66db33b 100644
--- a/src/arch-arm-syscalls.c
+++ b/src/arch-arm-syscalls.c
@@ -37,8 +37,7 @@
#endif
/* NOTE: based on Linux 3.8.0-rc5 */
-const struct arch_syscall_def arm_syscall_table[] = \
-{
+const struct arch_syscall_def arm_syscall_table[] = { \
/* NOTE: arm_sync_file_range() and sync_file_range2() share values */
{ "accept", (__NR_SYSCALL_BASE + 285) },
{ "accept4", (__NR_SYSCALL_BASE + 366) },
diff --git a/src/arch-x86-syscalls.c b/src/arch-x86-syscalls.c
index c44eb06..79f1b91 100644
--- a/src/arch-x86-syscalls.c
+++ b/src/arch-x86-syscalls.c
@@ -27,8 +27,7 @@
#include "arch-x86.h"
/* NOTE: based on Linux 3.4.7 */
-static const struct arch_syscall_def x86_syscall_table[] = \
-{
+static const struct arch_syscall_def x86_syscall_table[] = { \
{ "accept", __PNR_accept },
{ "accept4", __PNR_accept4 },
{ "access", 33 },
diff --git a/src/arch-x86_64-syscalls.c b/src/arch-x86_64-syscalls.c
index 2f5253a..af095b8 100644
--- a/src/arch-x86_64-syscalls.c
+++ b/src/arch-x86_64-syscalls.c
@@ -27,8 +27,7 @@
#include "arch-x86_64.h"
/* NOTE: based on Linux 3.4.7 */
-const struct arch_syscall_def x86_64_syscall_table[] = \
-{
+const struct arch_syscall_def x86_64_syscall_table[] = { \
{ "accept", 43 },
{ "accept4", 288 },
{ "access", 21 },
diff --git a/src/db.c b/src/db.c
index 29d94c8..345a654 100644
--- a/src/db.c
+++ b/src/db.c
@@ -42,6 +42,13 @@
#define _DB_PRI_MASK_USER 0x00FF0000
#define _DB_PRI_USER(x) (((x) << 16) & _DB_PRI_MASK_USER)
+/* private structure for tracking the state of the sub-tree "pruning" */
+struct db_prune_state {
+ bool prefix_exist;
+ bool prefix_new;
+ bool matched;
+};
+
static unsigned int _db_tree_free(struct db_arg_chain_tree *tree);
/**
@@ -91,8 +98,7 @@ static unsigned int _db_tree_free(struct db_arg_chain_tree *tree)
* @param node the node to remove
*
* This function searches the tree looking for the node and removes it once
- * found. The function also removes any other nodes that are no longer needed
- * as a result of removing the given node. Returns the number of nodes freed.
+ * found. Returns the number of nodes freed.
*
*/
static unsigned int _db_tree_remove(struct db_arg_chain_tree **tree,
@@ -109,7 +115,7 @@ static unsigned int _db_tree_remove(struct db_arg_chain_tree **tree,
c_iter = c_iter->lvl_prv;
do {
- if (c_iter == node) {
+ if (c_iter == node || db_chain_zombie(c_iter)) {
/* remove from the tree */
if (c_iter == *tree) {
if (c_iter->lvl_prv != NULL)
@@ -181,71 +187,162 @@ static int _db_tree_act_check(struct db_arg_chain_tree *tree, uint32_t action)
}
/**
- * Checks for a sub-tree match in an existing tree and prunes the leaves
- * @param tree_head the head of the existing tree
- * @param tree_start the starting point into the existing tree
- * @param new_p pointer to the new tree
- * @param remove_flg removal flag, only valid on return if return >= 0
+ * Checks for a sub-tree match in an existing tree and prunes the tree
+ * @param prev the head of the existing tree or sub-tree
+ * @param existing the starting point into the existing tree
+ * @param new pointer to the new tree
+ * @param state pointer to the pruning state
*
- * This function searches the existing tree for an occurance of the new tree
- * and removes as much of it as possible. Returns the number of nodes removed
- * from the tree on success, and negative values on failure.
+ * This function searches the existing and new trees trying to prune each to
+ * eliminate redundancy. Returns the number of nodes removed from the tree on
+ * success, zero if no changes were made, and negative values if the new tree
+ * should be discarded.
*
*/
-static int _db_tree_sub_prune(struct db_arg_chain_tree **tree_head,
- struct db_arg_chain_tree *tree_start,
+static int _db_tree_sub_prune(struct db_arg_chain_tree **prev,
+ struct db_arg_chain_tree *existing,
struct db_arg_chain_tree *new,
- bool *remove_flg)
+ struct db_prune_state *state)
{
int rc = 0;
- struct db_arg_chain_tree *c_iter = tree_start;
-
- *remove_flg = false;
+ int rc_tmp;
+ struct db_arg_chain_tree *ec_iter;
+ struct db_arg_chain_tree *ec_iter_tmp;
+ struct db_arg_chain_tree *c_iter;
+ struct db_prune_state state_new;
- if (new == NULL || c_iter == NULL)
+ if (!state || !existing || !new)
return 0;
- while (c_iter->lvl_prv != NULL)
- c_iter = c_iter->lvl_prv;
+ ec_iter = existing;
+ c_iter = new;
do {
- if (db_chain_eq(c_iter, new)) {
- if (new->act_t_flg) {
- rc += _db_tree_remove(tree_head, c_iter->nxt_t);
- c_iter->act_t = new->act_t;
- c_iter->act_t_flg = true;
- } else if (new->nxt_t != NULL)
- rc += _db_tree_sub_prune(tree_head,
- c_iter->nxt_t,
- new->nxt_t,
- remove_flg);
- if (new->act_f_flg) {
- rc += _db_tree_remove(tree_head, c_iter->nxt_f);
- c_iter->act_f = new->act_f;
- c_iter->act_f_flg = true;
- } else if (new->nxt_f != NULL)
- rc += _db_tree_sub_prune(tree_head,
- c_iter->nxt_f,
- new->nxt_f,
- remove_flg);
- } else if (db_chain_lt(c_iter, new)) {
- if (c_iter->nxt_t != NULL)
- rc += _db_tree_sub_prune(tree_head,
- c_iter->nxt_t, new,
- remove_flg);
- if (c_iter->nxt_f != NULL)
- rc += _db_tree_sub_prune(tree_head,
- c_iter->nxt_f, new,
- remove_flg);
-
- } else if (db_chain_gt(c_iter, new))
- goto sub_prune_return;
+ if (db_chain_eq(ec_iter, c_iter)) {
+ /* equal */
+
+ if (db_chain_leaf(c_iter)) {
+ /* leaf */
+ if (db_chain_eq_result(ec_iter, c_iter)) {
+ /* identical results */
+ if (prev != NULL)
+ return _db_tree_remove(prev,
+ ec_iter);
+ else
+ return -1;
+ }
+ if (c_iter->act_t_flg && ec_iter->nxt_t) {
+ /* new is shorter (true) */
+ if (prev == NULL)
+ return -1;
+ rc += _db_tree_remove(&(ec_iter->nxt_t),
+ ec_iter->nxt_t);
+ ec_iter->act_t = c_iter->act_t;
+ ec_iter->act_t_flg = true;
+ }
+ if (c_iter->act_f_flg && ec_iter->nxt_f) {
+ /* new is shorter (false) */
+ if (prev == NULL)
+ return -1;
+ rc += _db_tree_remove(&(ec_iter->nxt_f),
+ ec_iter->nxt_f);
+ ec_iter->act_f = c_iter->act_f;
+ ec_iter->act_f_flg = true;
+ }
- c_iter = c_iter->lvl_nxt;
- } while (c_iter != NULL);
+ return rc;
+ }
+
+ if (c_iter->nxt_t && ec_iter->act_t_flg)
+ /* existing is shorter (true) */
+ return -1;
+ if (c_iter->nxt_f && ec_iter->act_f_flg)
+ /* existing is shorter (false) */
+ return -1;
+
+ if (c_iter->nxt_t) {
+ state_new = *state;
+ state_new.matched = true;
+ rc_tmp = _db_tree_sub_prune((prev ?
+ &ec_iter : NULL),
+ ec_iter->nxt_t,
+ c_iter->nxt_t,
+ &state_new);
+ rc += (rc_tmp > 0 ? rc_tmp : 0);
+ if (state->prefix_new && rc_tmp < 0)
+ return (rc > 0 ? rc : rc_tmp);
+ }
+ if (c_iter->nxt_f) {
+ state_new = *state;
+ state_new.matched = true;
+ rc_tmp = _db_tree_sub_prune((prev ?
+ &ec_iter : NULL),
+ ec_iter->nxt_f,
+ c_iter->nxt_f,
+ &state_new);
+ rc += (rc_tmp > 0 ? rc_tmp : 0);
+ if (state->prefix_new && rc_tmp < 0)
+ return (rc > 0 ? rc : rc_tmp);
+ }
+ } else if (db_chain_lt(ec_iter, c_iter)) {
+ /* less than */
+ if (state->matched || state->prefix_new)
+ goto next;
+ state_new = *state;
+ state_new.prefix_exist = true;
+
+ if (ec_iter->nxt_t) {
+ rc_tmp = _db_tree_sub_prune((prev ?
+ &ec_iter : NULL),
+ ec_iter->nxt_t,
+ c_iter,
+ &state_new);
+ rc += (rc_tmp > 0 ? rc_tmp : 0);
+ }
+ if (ec_iter->nxt_f) {
+ rc_tmp = _db_tree_sub_prune((prev ?
+ &ec_iter : NULL),
+ ec_iter->nxt_f,
+ c_iter,
+ &state_new);
+ rc += (rc_tmp > 0 ? rc_tmp : 0);
+ }
+ } else if (db_chain_gt(ec_iter, c_iter)) {
+ /* greater than */
+ if (state->matched || state->prefix_exist)
+ goto next;
+ state_new = *state;
+ state_new.prefix_new = true;
+
+ if (c_iter->nxt_t) {
+ rc_tmp = _db_tree_sub_prune(NULL,
+ ec_iter,
+ c_iter->nxt_t,
+ &state_new);
+ rc += (rc_tmp > 0 ? rc_tmp : 0);
+ if (rc_tmp < 0)
+ return (rc > 0 ? rc : rc_tmp);
+ }
+ if (c_iter->nxt_f) {
+ rc_tmp = _db_tree_sub_prune(NULL,
+ ec_iter,
+ c_iter->nxt_f,
+ &state_new);
+ rc += (rc_tmp > 0 ? rc_tmp : 0);
+ if (rc_tmp < 0)
+ return (rc > 0 ? rc : rc_tmp);
+ }
+ }
+
+next:
+ /* re-check current node and advance to the next node */
+ if (db_chain_zombie(ec_iter)) {
+ ec_iter_tmp = ec_iter->lvl_nxt;
+ rc += _db_tree_remove(prev, ec_iter);
+ ec_iter = ec_iter_tmp;
+ } else
+ ec_iter = ec_iter->lvl_nxt;
+ } while (ec_iter);
-sub_prune_return:
- if (rc > 0)
- *remove_flg = true;
return rc;
}
@@ -395,6 +492,7 @@ int db_col_merge(struct db_filter_col *col_dst, struct db_filter_col *col_src)
(col_dst->filter_cnt + col_src->filter_cnt));
if (dbs == NULL)
return -ENOMEM;
+ col_dst->filters = dbs;
/* transfer the architecture filters */
for (iter_a = col_dst->filter_cnt, iter_b = 0;
@@ -444,6 +542,8 @@ int db_col_arch_exist(struct db_filter_col *col, uint32_t arch_token)
int db_col_attr_get(const struct db_filter_col *col,
enum scmp_filter_attr attr, uint32_t *value)
{
+ int rc = 0;
+
switch (attr) {
case SCMP_FLTATR_ACT_DEFAULT:
*value = col->attr.act_default;
@@ -455,11 +555,11 @@ int db_col_attr_get(const struct db_filter_col *col,
*value = col->attr.nnp_enable;
break;
default:
- return -EEXIST;
+ rc = -EEXIST;
break;
}
- return 0;
+ return rc;
}
/**
@@ -475,6 +575,8 @@ int db_col_attr_get(const struct db_filter_col *col,
int db_col_attr_set(struct db_filter_col *col,
enum scmp_filter_attr attr, uint32_t value)
{
+ int rc = 0;
+
switch (attr) {
case SCMP_FLTATR_ACT_DEFAULT:
/* read only */
@@ -490,11 +592,11 @@ int db_col_attr_set(struct db_filter_col *col,
col->attr.nnp_enable = (value ? 1 : 0);
break;
default:
- return -EEXIST;
+ rc = -EEXIST;
break;
}
- return 0;
+ return rc;
}
/**
@@ -778,30 +880,30 @@ static struct db_sys_list *_db_rule_gen_64(const struct arch_def *arch,
c_iter_lo->arg_offset = arch_arg_offset_lo(arch,
c_iter_lo->arg);
switch (chain[iter].op) {
- case SCMP_CMP_GT:
- c_iter_hi->op = SCMP_CMP_GE;
- c_iter_lo->op = SCMP_CMP_GT;
- tf_flag = true;
- break;
- case SCMP_CMP_NE:
- c_iter_hi->op = SCMP_CMP_EQ;
- c_iter_lo->op = SCMP_CMP_EQ;
- tf_flag = false;
- break;
- case SCMP_CMP_LT:
- c_iter_hi->op = SCMP_CMP_GE;
- c_iter_lo->op = SCMP_CMP_GE;
- tf_flag = false;
- break;
- case SCMP_CMP_LE:
- c_iter_hi->op = SCMP_CMP_GE;
- c_iter_lo->op = SCMP_CMP_GT;
- tf_flag = false;
- break;
- default:
- c_iter_hi->op = chain[iter].op;
- c_iter_lo->op = chain[iter].op;
- tf_flag = true;
+ case SCMP_CMP_GT:
+ c_iter_hi->op = SCMP_CMP_GE;
+ c_iter_lo->op = SCMP_CMP_GT;
+ tf_flag = true;
+ break;
+ case SCMP_CMP_NE:
+ c_iter_hi->op = SCMP_CMP_EQ;
+ c_iter_lo->op = SCMP_CMP_EQ;
+ tf_flag = false;
+ break;
+ case SCMP_CMP_LT:
+ c_iter_hi->op = SCMP_CMP_GE;
+ c_iter_lo->op = SCMP_CMP_GE;
+ tf_flag = false;
+ break;
+ case SCMP_CMP_LE:
+ c_iter_hi->op = SCMP_CMP_GE;
+ c_iter_lo->op = SCMP_CMP_GT;
+ tf_flag = false;
+ break;
+ default:
+ c_iter_hi->op = chain[iter].op;
+ c_iter_lo->op = chain[iter].op;
+ tf_flag = true;
}
c_iter_hi->mask = D64_HI(chain[iter].mask);
c_iter_lo->mask = D64_LO(chain[iter].mask);
@@ -898,20 +1000,20 @@ static struct db_sys_list *_db_rule_gen_32(const struct arch_def *arch,
/* rewrite the op to reduce the op/datum combos */
switch (c_iter->op) {
- case SCMP_CMP_NE:
- c_iter->op = SCMP_CMP_EQ;
- tf_flag = false;
- break;
- case SCMP_CMP_LT:
- c_iter->op = SCMP_CMP_GE;
- tf_flag = false;
- break;
- case SCMP_CMP_LE:
- c_iter->op = SCMP_CMP_GT;
- tf_flag = false;
- break;
- default:
- tf_flag = true;
+ case SCMP_CMP_NE:
+ c_iter->op = SCMP_CMP_EQ;
+ tf_flag = false;
+ break;
+ case SCMP_CMP_LT:
+ c_iter->op = SCMP_CMP_GE;
+ tf_flag = false;
+ break;
+ case SCMP_CMP_LE:
+ c_iter->op = SCMP_CMP_GT;
+ tf_flag = false;
+ break;
+ default:
+ tf_flag = true;
}
/* fixup the mask/datum */
@@ -960,7 +1062,8 @@ int db_rule_add(struct db_filter *db, uint32_t action, unsigned int syscall,
int rc = -ENOMEM;
struct db_sys_list *s_new, *s_iter, *s_prev = NULL;
struct db_arg_chain_tree *c_iter = NULL, *c_prev = NULL;
- struct db_arg_chain_tree *ec_iter, *ec_iter_b;
+ struct db_arg_chain_tree *ec_iter;
+ struct db_prune_state state;
bool rm_flag = false;
unsigned int new_chain_cnt = 0;
unsigned int n_cnt;
@@ -1037,41 +1140,26 @@ add_reset:
s_iter->action = action;
goto add_free_ok;
}
+
+ /* check for sub-tree matches */
+ memset(&state, 0, sizeof(state));
+ rc = _db_tree_sub_prune(&(s_iter->chains), ec_iter, c_iter, &state);
+ if (rc > 0) {
+ rm_flag = true;
+ s_iter->node_cnt -= rc;
+ goto add_reset;
+ } else if (rc < 0)
+ goto add_free_ok;
+
/* syscall exists and has at least one existing chain - start at the
* top and walk the two chains */
do {
- /* check for sub-tree matches in the existing tree */
- rc = _db_tree_sub_prune(&(s_iter->chains), ec_iter, c_iter,
- &rm_flag);
- if (rc > 0) {
- s_iter->node_cnt -= rc;
- goto add_reset;
- } else if (rc < 0)
- goto add_free;
-
- /* check for sub-tree matches in the new tree */
- ec_iter_b = ec_iter;
- while (ec_iter_b->lvl_prv != NULL)
- ec_iter_b = ec_iter_b->lvl_prv;
- do {
- rc = _db_tree_sub_prune(&(s_new->chains),
- c_iter, ec_iter_b, &rm_flag);
- ec_iter_b = ec_iter_b->lvl_nxt;
- } while (rc == 0 && ec_iter_b != NULL);
- if (rc > 0) {
- s_new->node_cnt -= rc;
- if (s_new->node_cnt > 0)
- goto add_reset;
- rc = 0;
- goto add_free;
- } else if (rc < 0)
- goto add_free;
-
/* insert the new rule into the existing tree */
if (db_chain_eq(c_iter, ec_iter)) {
/* found a matching node on this chain level */
- if (db_chain_leaf(c_iter) && db_chain_leaf(ec_iter)) {
- /* both are leaf nodes */
+ if (db_chain_action(c_iter) &&
+ db_chain_action(ec_iter)) {
+ /* both are "action" nodes */
if (c_iter->act_t_flg && ec_iter->act_t_flg) {
if (ec_iter->act_t != action)
goto add_free_exist;
@@ -1089,12 +1177,12 @@ add_reset:
if (ec_iter->act_t_flg == ec_iter->act_f_flg &&
ec_iter->act_t == ec_iter->act_f) {
n_cnt = _db_tree_remove(
- &(s_iter->chains),
- ec_iter);
+ &(s_iter->chains),
+ ec_iter);
s_iter->node_cnt -= n_cnt;
+ goto add_free_ok;
}
- goto add_free_ok;
- } else if (db_chain_leaf(c_iter)) {
+ } else if (db_chain_action(c_iter)) {
/* new is shorter */
if (c_iter->act_t_flg) {
rc = _db_tree_act_check(ec_iter->nxt_t,
@@ -1116,8 +1204,8 @@ add_reset:
ec_iter->act_f = action;
}
s_iter->node_cnt -= n_cnt;
- goto add_free_ok;
- } else if (c_iter->nxt_t != NULL) {
+ }
+ if (c_iter->nxt_t != NULL) {
if (ec_iter->nxt_t != NULL) {
/* jump to the next level */
c_prev = c_iter;
@@ -1133,7 +1221,8 @@ add_reset:
/* add a new branch */
c_prev = c_iter;
ec_iter->nxt_t = c_iter->nxt_t;
- s_iter->node_cnt += (s_new->node_cnt-1);
+ s_iter->node_cnt +=
+ (s_new->node_cnt - 1);
goto add_free_match;
}
} else if (c_iter->nxt_f != NULL) {
@@ -1152,14 +1241,12 @@ add_reset:
/* add a new branch */
c_prev = c_iter;
ec_iter->nxt_f = c_iter->nxt_f;
- s_iter->node_cnt += (s_new->node_cnt-1);
+ s_iter->node_cnt +=
+ (s_new->node_cnt - 1);
goto add_free_match;
}
- } else {
- /* we should never be here! */
- rc = -EFAULT;
- goto add_free;
- }
+ } else
+ goto add_free_ok;
} else {
/* need to check other nodes on this level */
if (db_chain_lt(c_iter, ec_iter)) {
diff --git a/src/db.h b/src/db.h
index d686e03..c0472a5 100644
--- a/src/db.h
+++ b/src/db.h
@@ -70,18 +70,23 @@ struct db_arg_chain_tree {
#define ARG_MASK_MAX ((uint32_t)-1)
#define db_chain_lt(x,y) \
(((x)->arg < (y)->arg) || \
- (((x)->arg == (y)->arg) && (((x)->op < (y)->op) || \
- (((x)->mask & (y)->mask) == (y)->mask))))
+ (((x)->arg == (y)->arg) && \
+ (((x)->op < (y)->op) || (((x)->mask & (y)->mask) == (y)->mask))))
#define db_chain_eq(x,y) \
(((x)->arg == (y)->arg) && \
((x)->op == (y)->op) && ((x)->datum == (y)->datum) && \
((x)->mask == (y)->mask))
#define db_chain_gt(x,y) \
(((x)->arg > (y)->arg) || \
- (((x)->arg == (y)->arg) && (((x)->op > (y)->op) || \
- (((x)->mask & (y)->mask) != (y)->mask))))
-#define db_chain_leaf(x) \
+ (((x)->arg == (y)->arg) && \
+ (((x)->op > (y)->op) || (((x)->mask & (y)->mask) != (y)->mask))))
+#define db_chain_action(x) \
(((x)->act_t_flg) || ((x)->act_f_flg))
+#define db_chain_zombie(x) \
+ ((x)->nxt_t == NULL && !((x)->act_t_flg) && \
+ (x)->nxt_f == NULL && !((x)->act_f_flg))
+#define db_chain_leaf(x) \
+ ((x)->nxt_t == NULL && (x)->nxt_f == NULL)
#define db_chain_eq_result(x,y) \
((((x)->nxt_t != NULL && (y)->nxt_t != NULL) || \
((x)->nxt_t == NULL && (y)->nxt_t == NULL)) && \
diff --git a/src/gen_bpf.c b/src/gen_bpf.c
index f491217..8c96f87 100644
--- a/src/gen_bpf.c
+++ b/src/gen_bpf.c
@@ -701,8 +701,8 @@ static struct bpf_blk *_gen_bpf_node(struct bpf_state *state,
/* reload the accumulator */
a_state->offset = acc_offset;
a_state->mask = ARG_MASK_MAX;
- _BPF_INSTR(instr, BPF_LD+BPF_ABS,
- _BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(acc_offset));
+ _BPF_INSTR(instr, BPF_LD + BPF_ABS,
+ _BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(acc_offset));
blk = _blk_append(state, blk, &instr);
if (blk == NULL)
goto node_failure;
@@ -710,8 +710,8 @@ static struct bpf_blk *_gen_bpf_node(struct bpf_state *state,
if (acc_mask != a_state->mask) {
/* apply the bitmask */
a_state->mask = acc_mask;
- _BPF_INSTR(instr, BPF_ALU+BPF_AND,
- _BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(acc_mask));
+ _BPF_INSTR(instr, BPF_ALU + BPF_AND,
+ _BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(acc_mask));
blk = _blk_append(state, blk, &instr);
if (blk == NULL)
goto node_failure;
@@ -721,15 +721,15 @@ static struct bpf_blk *_gen_bpf_node(struct bpf_state *state,
switch (node->op) {
case SCMP_CMP_MASKED_EQ:
case SCMP_CMP_EQ:
- _BPF_INSTR(instr, BPF_JMP+BPF_JEQ,
+ _BPF_INSTR(instr, BPF_JMP + BPF_JEQ,
_BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(node->datum));
- break;
+ break;
case SCMP_CMP_GT:
- _BPF_INSTR(instr, BPF_JMP+BPF_JGT,
+ _BPF_INSTR(instr, BPF_JMP + BPF_JGT,
_BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(node->datum));
break;
case SCMP_CMP_GE:
- _BPF_INSTR(instr, BPF_JMP+BPF_JGE,
+ _BPF_INSTR(instr, BPF_JMP + BPF_JGE,
_BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(node->datum));
break;
case SCMP_CMP_NE:
@@ -892,6 +892,7 @@ static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state,
struct bpf_instr *i_iter;
const struct db_arg_chain_tree *c_iter;
unsigned int iter;
+ struct bpf_jump nxt_jump_tmp;
if (chain == NULL) {
b_head = _gen_bpf_action(state, NULL, sys->action);
@@ -929,16 +930,20 @@ static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state,
if (i_iter->jt.type == TGT_NXT) {
if (i_iter->jt.tgt.nxt != 0)
goto chain_failure;
- i_iter->jt = (b_next == NULL ?
- *nxt_jump :
- _BPF_JMP_BLK(b_next));
+ if (b_next == NULL)
+ i_iter->jt = *nxt_jump;
+ else
+ i_iter->jt =
+ _BPF_JMP_BLK(b_next);
}
if (i_iter->jf.type == TGT_NXT) {
if (i_iter->jf.tgt.nxt != 0)
goto chain_failure;
- i_iter->jf = (b_next == NULL ?
- *nxt_jump :
- _BPF_JMP_BLK(b_next));
+ if (b_next == NULL)
+ i_iter->jf = *nxt_jump;
+ else
+ i_iter->jf =
+ _BPF_JMP_BLK(b_next);
}
}
b_iter = b_next;
@@ -946,16 +951,18 @@ static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state,
}
/* resolve all of the blocks */
+ memset(&nxt_jump_tmp, 0, sizeof(nxt_jump_tmp));
b_iter = b_tail;
do {
/* b_iter may change after resolving, so save the linkage */
b_prev = b_iter->lvl_prv;
b_next = b_iter->lvl_nxt;
+ nxt_jump_tmp = _BPF_JMP_BLK(b_next);
b_iter = _gen_bpf_chain_lvl_res(state, sys, b_iter,
(b_next == NULL ?
nxt_jump :
- &_BPF_JMP_BLK(b_next)));
+ &nxt_jump_tmp));
if (b_iter == NULL)
goto chain_failure;
@@ -1014,8 +1021,8 @@ static struct bpf_blk *_gen_bpf_syscall(struct bpf_state *state,
/* setup the accumulator state */
if (acc_reset) {
- _BPF_INSTR(instr, BPF_LD+BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
- _BPF_SYSCALL);
+ _BPF_INSTR(instr, BPF_LD + BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
+ _BPF_SYSCALL);
blk_s = _blk_append(state, NULL, &instr);
if (blk_s == NULL)
return NULL;
@@ -1033,7 +1040,7 @@ static struct bpf_blk *_gen_bpf_syscall(struct bpf_state *state,
return NULL;
/* syscall check */
- _BPF_INSTR(instr, BPF_JMP+BPF_JEQ,
+ _BPF_INSTR(instr, BPF_JMP + BPF_JEQ,
_BPF_JMP_HSH(blk_c->hash), _BPF_JMP_HSH(nxt_hash),
_BPF_K(sys->num));
blk_s = _blk_append(state, blk_s, &instr);
@@ -1112,7 +1119,7 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
if (s_head != NULL) {
s_iter_b = s_head;
while ((s_iter_b->pri_nxt != NULL) &&
- (s_iter->priority <= s_iter_b->priority))
+ (s_iter->priority <= s_iter_b->priority))
s_iter_b = s_iter_b->pri_nxt;
if (s_iter->priority > s_iter_b->priority) {
@@ -1122,8 +1129,10 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
s_head->pri_prv = s_iter;
s_head = s_iter;
} else {
- s_iter->pri_prv->pri_nxt=s_iter;
- s_iter->pri_nxt->pri_prv=s_iter;
+ s_iter->pri_prv->pri_nxt =
+ s_iter;
+ s_iter->pri_nxt->pri_prv =
+ s_iter;
}
} else {
s_iter->pri_prv = s_tail;
@@ -1155,7 +1164,8 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
b_new = _gen_bpf_syscall(state, s_iter,
(b_head == NULL ?
state->def_hsh : b_head->hash),
- (s_iter == s_head ? acc_reset:false));
+ (s_iter == s_head ?
+ acc_reset : false));
if (b_new == NULL)
goto arch_failure;
@@ -1178,25 +1188,25 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
/* additional ABI filtering */
if ((db->arch->token == SCMP_ARCH_X86_64 ||
db->arch->token == SCMP_ARCH_X32) && (db_secondary == NULL)) {
- _BPF_INSTR(instr, BPF_LD+BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
+ _BPF_INSTR(instr, BPF_LD + BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
_BPF_SYSCALL);
b_new = _blk_append(state, NULL, &instr);
if (b_new == NULL)
goto arch_failure;
if (db->arch->token == SCMP_ARCH_X86_64) {
/* filter out x32 */
- _BPF_INSTR(instr, BPF_JMP+BPF_JGE,
- _BPF_JMP_NXT(blk_cnt++), _BPF_JMP_NO,
- _BPF_K(X32_SYSCALL_BIT));
+ _BPF_INSTR(instr, BPF_JMP + BPF_JGE,
+ _BPF_JMP_NXT(blk_cnt++), _BPF_JMP_NO,
+ _BPF_K(X32_SYSCALL_BIT));
if (b_head != NULL)
instr.jf = _BPF_JMP_HSH(b_head->hash);
else
instr.jf = _BPF_JMP_HSH(state->def_hsh);
} else if (db->arch->token == SCMP_ARCH_X32) {
/* filter out x86_64 */
- _BPF_INSTR(instr, BPF_JMP+BPF_JGE,
- _BPF_JMP_NO, _BPF_JMP_NXT(blk_cnt++),
- _BPF_K(X32_SYSCALL_BIT));
+ _BPF_INSTR(instr, BPF_JMP + BPF_JGE,
+ _BPF_JMP_NO, _BPF_JMP_NXT(blk_cnt++),
+ _BPF_K(X32_SYSCALL_BIT));
if (b_head != NULL)
instr.jt = _BPF_JMP_HSH(b_head->hash);
else
@@ -1217,7 +1227,7 @@ static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state,
}
/* do the ABI/architecture check */
- _BPF_INSTR(instr, BPF_JMP+BPF_JEQ,
+ _BPF_INSTR(instr, BPF_JMP + BPF_JEQ,
_BPF_JMP_NO, _BPF_JMP_NXT(blk_cnt++),
_BPF_K(db->arch->token_bpf));
if (b_head != NULL)
@@ -1406,7 +1416,7 @@ static int _gen_bpf_build_jmp(struct bpf_state *state,
return -EFAULT;
/* we need to insert a long jump - create one */
- _BPF_INSTR(instr, BPF_JMP+BPF_JA,
+ _BPF_INSTR(instr, BPF_JMP + BPF_JA,
_BPF_JMP_NO, _BPF_JMP_NO, _BPF_JMP_HSH(tgt_hash));
b_new = _blk_append(state, NULL, &instr);
if (b_new == NULL)
@@ -1473,7 +1483,7 @@ static int _gen_bpf_build_bpf(struct bpf_state *state,
state->def_hsh = b_default->hash;
/* load the architecture token/number */
- _BPF_INSTR(instr, BPF_LD+BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
+ _BPF_INSTR(instr, BPF_LD + BPF_ABS, _BPF_JMP_NO, _BPF_JMP_NO,
_BPF_K(offsetof(struct seccomp_data, arch)));
b_head = _blk_append(state, NULL, &instr);
if (b_head == NULL)
@@ -1570,7 +1580,7 @@ static int _gen_bpf_build_bpf(struct bpf_state *state,
b_jmp->next = b_iter->next;
b_iter->next = b_jmp;
if (b_jmp->next)
- b_jmp->next->prev=b_jmp;
+ b_jmp->next->prev = b_jmp;
}
}
if (b_jmp != NULL) {
diff --git a/src/gen_pfc.c b/src/gen_pfc.c
index 75c96d6..954feab 100644
--- a/src/gen_pfc.c
+++ b/src/gen_pfc.c
@@ -151,20 +151,20 @@ static void _gen_pfc_chain(const struct arch_def *arch,
fprintf(fds, "if (");
_pfc_arg(fds, arch, c_iter);
switch (c_iter->op) {
- case SCMP_CMP_EQ:
- fprintf(fds, " == ");
- break;
- case SCMP_CMP_GE:
- fprintf(fds, " >= ");
- break;
- case SCMP_CMP_GT:
- fprintf(fds, " > ");
- break;
- case SCMP_CMP_MASKED_EQ:
- fprintf(fds, " & 0x%.8x == ", c_iter->mask);
- break;
- default:
- fprintf(fds, " ??? ");
+ case SCMP_CMP_EQ:
+ fprintf(fds, " == ");
+ break;
+ case SCMP_CMP_GE:
+ fprintf(fds, " >= ");
+ break;
+ case SCMP_CMP_GT:
+ fprintf(fds, " > ");
+ break;
+ case SCMP_CMP_MASKED_EQ:
+ fprintf(fds, " & 0x%.8x == ", c_iter->mask);
+ break;
+ default:
+ fprintf(fds, " ??? ");
}
fprintf(fds, "%u)\n", c_iter->datum);
diff --git a/src/hash.c b/src/hash.c
index 6c54e18..cb52b3b 100644
--- a/src/hash.c
+++ b/src/hash.c
@@ -1,756 +1,674 @@
-/*
--------------------------------------------------------------------------------
-lookup3.c, by Bob Jenkins, May 2006, Public Domain.
-
-These are functions for producing 32-bit hashes for hash table lookup.
-hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
-are externally useful functions. Routines to test the hash are included
-if SELF_TEST is defined. You can use this free for any purpose. It's in
-the public domain. It has no warranty.
-
-You probably want to use hashlittle(). hashlittle() and hashbig()
-hash byte arrays. hashlittle() is is faster than hashbig() on
-little-endian machines. Intel and AMD are little-endian machines.
-On second thought, you probably want hashlittle2(), which is identical to
-hashlittle() except it returns two 32-bit hashes for the price of one.
-You could implement hashbig2() if you wanted but I haven't bothered here.
-
-If you want to find a hash of, say, exactly 7 integers, do
- a = i1; b = i2; c = i3;
- mix(a,b,c);
- a += i4; b += i5; c += i6;
- mix(a,b,c);
- a += i7;
- final(a,b,c);
-then use c as the hash value. If you have a variable length array of
-4-byte integers to hash, use hashword(). If you have a byte array (like
-a character string), use hashlittle(). If you have several byte arrays, or
-a mix of things, see the comments above hashlittle().
-
-Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
-then mix those integers. This is fast (you can do a lot more thorough
-mixing with 12*3 instructions on 3 integers than you can with 3 instructions
-on 1 byte), but shoehorning those bytes into integers efficiently is messy.
--------------------------------------------------------------------------------
-*/
-
-#include <stdint.h> /* defines uint32_t etc */
-#include <sys/param.h> /* attempt to define endianness */
-#ifdef linux
-# include <endian.h> /* attempt to define endianness */
-#endif
-
-#include "hash.h"
-#define hashlittle jhash
-
-/*
- * My best guess at if you are big-endian or little-endian. This may
- * need adjustment.
+/**
+ * Seccomp Library hash code
+ *
+ * Release under the Public Domain
+ * Author: Bob Jenkins <bob_jenkins@burtleburtle.net>
*/
-#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
- __BYTE_ORDER == __LITTLE_ENDIAN) || \
- (defined(i386) || defined(__i386__) || defined(__i486__) || \
- defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
-# define HASH_LITTLE_ENDIAN 1
-# define HASH_BIG_ENDIAN 0
-#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
- __BYTE_ORDER == __BIG_ENDIAN) || \
- (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
-# define HASH_LITTLE_ENDIAN 0
-# define HASH_BIG_ENDIAN 1
-#else
-# define HASH_LITTLE_ENDIAN 0
-# define HASH_BIG_ENDIAN 0
-#endif
-
-#define hashsize(n) ((uint32_t)1<<(n))
-#define hashmask(n) (hashsize(n)-1)
-#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
/*
--------------------------------------------------------------------------------
-mix -- mix 3 32-bit values reversibly.
-
-This is reversible, so any information in (a,b,c) before mix() is
-still in (a,b,c) after mix().
-
-If four pairs of (a,b,c) inputs are run through mix(), or through
-mix() in reverse, there are at least 32 bits of the output that
-are sometimes the same for one pair and different for another pair.
-This was tested for:
-* pairs that differed by one bit, by two bits, in any combination
- of top bits of (a,b,c), or in any combination of bottom bits of
- (a,b,c).
-* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
- the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
- is commonly produced by subtraction) look like a single 1-bit
- difference.
-* the base values were pseudorandom, all zero but one bit set, or
- all zero plus a counter that starts at zero.
-
-Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
-satisfy this are
- 4 6 8 16 19 4
- 9 15 3 18 27 15
- 14 9 3 7 17 3
-Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
-for "differ" defined as + with a one-bit base and a two-bit delta. I
-used http://burtleburtle.net/bob/hash/avalanche.html to choose
-the operations, constants, and arrangements of the variables.
-
-This does not achieve avalanche. There are input bits of (a,b,c)
-that fail to affect some output bits of (a,b,c), especially of a. The
-most thoroughly mixed value is c, but it doesn't really even achieve
-avalanche in c.
-
-This allows some parallelism. Read-after-writes are good at doubling
-the number of bits affected, so the goal of mixing pulls in the opposite
-direction as the goal of parallelism. I did what I could. Rotates
-seem to cost as much as shifts on every machine I could lay my hands
-on, and rotates are much kinder to the top and bottom bits, so I used
-rotates.
--------------------------------------------------------------------------------
-*/
-#define mix(a,b,c) \
-{ \
- a -= c; a ^= rot(c, 4); c += b; \
- b -= a; b ^= rot(a, 6); a += c; \
- c -= b; c ^= rot(b, 8); b += a; \
- a -= c; a ^= rot(c,16); c += b; \
- b -= a; b ^= rot(a,19); a += c; \
- c -= b; c ^= rot(b, 4); b += a; \
-}
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * jhash_word(), jhash_le(), jhash_be(), mix(), and final() are externally useful
+ * functions. Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It's in the public domain. It has
+ * no warranty.
+ *
+ * You probably want to use jhash_le(). jhash_le() and jhash_be() hash byte
+ * arrays. jhash_le() is is faster than jhash_be() on little-endian machines.
+ * Intel and AMD are little-endian machines.
+ *
+ * If you want to find a hash of, say, exactly 7 integers, do
+ * a = i1; b = i2; c = i3;
+ * mix(a,b,c);
+ * a += i4; b += i5; c += i6;
+ * mix(a,b,c);
+ * a += i7;
+ * final(a,b,c);
+ *
+ * then use c as the hash value. If you have a variable length array of
+ * 4-byte integers to hash, use jhash_word(). If you have a byte array (like
+ * a character string), use jhash_le(). If you have several byte arrays, or
+ * a mix of things, see the comments above jhash_le().
+ *
+ * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
+ * mix those integers. This is fast (you can do a lot more thorough mixing
+ * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
+ * byte), but shoehorning those bytes into integers efficiently is messy.
+ */
-/*
--------------------------------------------------------------------------------
-final -- final mixing of 3 32-bit values (a,b,c) into c
-
-Pairs of (a,b,c) values differing in only a few bits will usually
-produce values of c that look totally different. This was tested for
-* pairs that differed by one bit, by two bits, in any combination
- of top bits of (a,b,c), or in any combination of bottom bits of
- (a,b,c).
-* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
- the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
- is commonly produced by subtraction) look like a single 1-bit
- difference.
-* the base values were pseudorandom, all zero but one bit set, or
- all zero plus a counter that starts at zero.
-
-These constants passed:
- 14 11 25 16 4 14 24
- 12 14 25 16 4 14 24
-and these came close:
- 4 8 15 26 3 22 24
- 10 8 15 26 3 22 24
- 11 8 15 26 3 22 24
--------------------------------------------------------------------------------
-*/
-#define final(a,b,c) \
-{ \
- c ^= b; c -= rot(b,14); \
- a ^= c; a -= rot(c,11); \
- b ^= a; b -= rot(a,25); \
- c ^= b; c -= rot(b,16); \
- a ^= c; a -= rot(c,4); \
- b ^= a; b -= rot(a,14); \
- c ^= b; c -= rot(b,24); \
-}
+#include <stdint.h>
-/*
---------------------------------------------------------------------
- This works on all machines. To be useful, it requires
- -- that the key be an array of uint32_t's, and
- -- that the length be the number of uint32_t's in the key
-
- The function hashword() is identical to hashlittle() on little-endian
- machines, and identical to hashbig() on big-endian machines,
- except that the length has to be measured in uint32_ts rather than in
- bytes. hashlittle() is more complicated than hashword() only because
- hashlittle() has to dance around fitting the key bytes into registers.
---------------------------------------------------------------------
-*/
-uint32_t hashword(
-const uint32_t *k, /* the key, an array of uint32_t values */
-size_t length, /* the length of the key, in uint32_ts */
-uint32_t initval) /* the previous hash, or an arbitrary value */
-{
- uint32_t a,b,c;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;
-
- /*------------------------------------------------- handle most of the key */
- while (length > 3)
- {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a,b,c);
- length -= 3;
- k += 3;
- }
-
- /*------------------------------------------- handle the last 3 uint32_t's */
- switch(length) /* all the case statements fall through */
- {
- case 3 : c+=k[2];
- case 2 : b+=k[1];
- case 1 : a+=k[0];
- final(a,b,c);
- case 0: /* case 0: nothing left to add */
- break;
- }
- /*------------------------------------------------------ report the result */
- return c;
-}
+#include "arch.h"
+#include "hash.h"
-/*
---------------------------------------------------------------------
-hashword2() -- same as hashword(), but take two seeds and return two
-32-bit values. pc and pb must both be nonnull, and *pc and *pb must
-both be initialized with seeds. If you pass in (*pb)==0, the output
-(*pc) will be the same as the return value from hashword().
---------------------------------------------------------------------
-*/
-void hashword2 (
-const uint32_t *k, /* the key, an array of uint32_t values */
-size_t length, /* the length of the key, in uint32_ts */
-uint32_t *pc, /* IN: seed OUT: primary hash value */
-uint32_t *pb) /* IN: more seed OUT: secondary hash value */
-{
- uint32_t a,b,c;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t)(length<<2)) + *pc;
- c += *pb;
-
- /*------------------------------------------------- handle most of the key */
- while (length > 3)
- {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a,b,c);
- length -= 3;
- k += 3;
- }
-
- /*------------------------------------------- handle the last 3 uint32_t's */
- switch(length) /* all the case statements fall through */
- {
- case 3 : c+=k[2];
- case 2 : b+=k[1];
- case 1 : a+=k[0];
- final(a,b,c);
- case 0: /* case 0: nothing left to add */
- break;
- }
- /*------------------------------------------------------ report the result */
- *pc=c; *pb=b;
-}
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
-/*
--------------------------------------------------------------------------------
-hashlittle() -- hash a variable-length key into a 32-bit value
- k : the key (the unaligned variable-length array of bytes)
- length : the length of the key, counting by bytes
- initval : can be any 4-byte value
-Returns a 32-bit value. Every bit of the key affects every bit of
-the return value. Two keys differing by one or two bits will have
-totally different hash values.
-
-The best hash table sizes are powers of 2. There is no need to do
-mod a prime (mod is sooo slow!). If you need less than 32 bits,
-use a bitmask. For example, if you need only 10 bits, do
- h = (h & hashmask(10));
-In which case, the hash table should have hashsize(10) elements.
-
-If you are hashing n strings (uint8_t **)k, do it like this:
- for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
-
-By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
-code any way you wish, private, educational, or commercial. It's free.
-
-Use for hash table lookup, or anything where one collision in 2^^32 is
-acceptable. Do NOT use for cryptographic purposes.
--------------------------------------------------------------------------------
-*/
-uint32_t hashlittle( const void *key, size_t length, uint32_t initval)
+/**
+ * Mix 3 32-bit values reversibly
+ * @param a 32-bit value
+ * @param b 32-bit value
+ * @param c 32-bit value
+ *
+ * This is reversible, so any information in (a,b,c) before mix() is still
+ * in (a,b,c) after mix().
+ *
+ * If four pairs of (a,b,c) inputs are run through mix(), or through mix() in
+ * reverse, there are at least 32 bits of the output that are sometimes the
+ * same for one pair and different for another pair.
+ *
+ * This was tested for:
+ * - pairs that differed by one bit, by two bits, in any combination of top
+ * bits of (a,b,c), or in any combination of bottom bits of (a,b,c).
+ * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the
+ * output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly
+ * produced by subtraction) look like a single 1-bit difference.
+ * - the base values were pseudorandom, all zero but one bit set, or all zero
+ * plus a counter that starts at zero.
+ *
+ * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+ * satisfy this are
+ * 4 6 8 16 19 4
+ * 9 15 3 18 27 15
+ * 14 9 3 7 17 3
+ *
+ * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for "differ"
+ * defined as + with a one-bit base and a two-bit delta. I used
+ * http://burtleburtle.net/bob/hash/avalanche.html to choose the operations,
+ * constants, and arrangements of the variables.
+ *
+ * This does not achieve avalanche. There are input bits of (a,b,c) that fail
+ * to affect some output bits of (a,b,c), especially of a. The most thoroughly
+ * mixed value is c, but it doesn't really even achieve avalanche in c.
+ *
+ * This allows some parallelism. Read-after-writes are good at doubling the
+ * number of bits affected, so the goal of mixing pulls in the opposite
+ * direction as the goal of parallelism. I did what I could. Rotates seem to
+ * cost as much as shifts on every machine I could lay my hands on, and rotates
+ * are much kinder to the top and bottom bits, so I used rotates.
+ *
+ */
+#define mix(a,b,c) \
+ { \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+ }
+
+/**
+ * Final mixing of 3 32-bit values (a,b,c) into c
+ * @param a 32-bit value
+ * @param b 32-bit value
+ * @param c 32-bit value
+ *
+ * Pairs of (a,b,c) values differing in only a few bits will usually produce
+ * values of c that look totally different. This was tested for:
+ * - pairs that differed by one bit, by two bits, in any combination of top
+ * bits of (a,b,c), or in any combination of bottom bits of (a,b,c).
+ * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the
+ * output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly
+ * produced by subtraction) look like a single 1-bit difference.
+ * - the base values were pseudorandom, all zero but one bit set, or all zero
+ * plus a counter that starts at zero.
+ *
+ * These constants passed:
+ * 14 11 25 16 4 14 24
+ * 12 14 25 16 4 14 24
+ * and these came close:
+ * 4 8 15 26 3 22 24
+ * 10 8 15 26 3 22 24
+ * 11 8 15 26 3 22 24
+ *
+ */
+#define final(a,b,c) \
+ { \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+ }
+
+/**
+ * Hash an array of 32-bit values
+ * @param k the key, an array of uint32_t values
+ * @param length the number of array elements
+ * @param initval the previous hash, or an arbitrary value
+ *
+ * This works on all machines. To be useful, it requires:
+ * - that the key be an array of uint32_t's, and
+ * - that the length be the number of uint32_t's in the key
+ *
+ * The function jhash_word() is identical to jhash_le() on little-endian
+ * machines, and identical to jhash_be() on big-endian machines, except that
+ * the length has to be measured in uint32_ts rather than in bytes. jhash_le()
+ * is more complicated than jhash_word() only because jhash_le() has to dance
+ * around fitting the key bytes into registers.
+ *
+ */
+static uint32_t jhash_word(const uint32_t *k, size_t length, uint32_t initval)
{
- uint32_t a,b,c; /* internal state */
- union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
-
- u.ptr = key;
- if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
- const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
-
- /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
- while (length > 12)
- {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a,b,c);
- length -= 12;
- k += 3;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- /*
- * "k[2]&0xffffff" actually reads beyond the end of the string, but
- * then masks off the part it's not allowed to read. Because the
- * string is aligned, the masked-off tail is in the same word as the
- * rest of the string. Every machine with memory protection I've seen
- * does it on word boundaries, so is OK with this. But VALGRIND will
- * still catch it and complain. The masking trick does make the hash
- * noticably faster for short strings (like English words).
- */
-#ifndef VALGRIND
-
- switch(length)
- {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
- case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
- case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
- case 6 : b+=k[1]&0xffff; a+=k[0]; break;
- case 5 : b+=k[1]&0xff; a+=k[0]; break;
- case 4 : a+=k[0]; break;
- case 3 : a+=k[0]&0xffffff; break;
- case 2 : a+=k[0]&0xffff; break;
- case 1 : a+=k[0]&0xff; break;
- case 0 : return c; /* zero length strings require no mixing */
- }
-
-#else /* make valgrind happy */
-
- k8 = (const uint8_t *)k;
- switch(length)
- {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
- case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
- case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]; break;
- case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
- case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
- case 1 : a+=k8[0]; break;
- case 0 : return c;
- }
-
-#endif /* !valgrind */
-
- } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
- const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
- const uint8_t *k8;
-
- /*--------------- all but last block: aligned reads and different mixing */
- while (length > 12)
- {
- a += k[0] + (((uint32_t)k[1])<<16);
- b += k[2] + (((uint32_t)k[3])<<16);
- c += k[4] + (((uint32_t)k[5])<<16);
- mix(a,b,c);
- length -= 12;
- k += 6;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- k8 = (const uint8_t *)k;
- switch(length)
- {
- case 12: c+=k[4]+(((uint32_t)k[5])<<16);
- b+=k[2]+(((uint32_t)k[3])<<16);
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
- case 10: c+=k[4];
- b+=k[2]+(((uint32_t)k[3])<<16);
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
- case 6 : b+=k[2];
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
- case 2 : a+=k[0];
- break;
- case 1 : a+=k8[0];
- break;
- case 0 : return c; /* zero length requires no mixing */
- }
-
- } else { /* need to read the key one byte at a time */
- const uint8_t *k = (const uint8_t *)key;
-
- /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
- while (length > 12)
- {
- a += k[0];
- a += ((uint32_t)k[1])<<8;
- a += ((uint32_t)k[2])<<16;
- a += ((uint32_t)k[3])<<24;
- b += k[4];
- b += ((uint32_t)k[5])<<8;
- b += ((uint32_t)k[6])<<16;
- b += ((uint32_t)k[7])<<24;
- c += k[8];
- c += ((uint32_t)k[9])<<8;
- c += ((uint32_t)k[10])<<16;
- c += ((uint32_t)k[11])<<24;
- mix(a,b,c);
- length -= 12;
- k += 12;
- }
-
- /*-------------------------------- last block: affect all 32 bits of (c) */
- switch(length) /* all the case statements fall through */
- {
- case 12: c+=((uint32_t)k[11])<<24;
- case 11: c+=((uint32_t)k[10])<<16;
- case 10: c+=((uint32_t)k[9])<<8;
- case 9 : c+=k[8];
- case 8 : b+=((uint32_t)k[7])<<24;
- case 7 : b+=((uint32_t)k[6])<<16;
- case 6 : b+=((uint32_t)k[5])<<8;
- case 5 : b+=k[4];
- case 4 : a+=((uint32_t)k[3])<<24;
- case 3 : a+=((uint32_t)k[2])<<16;
- case 2 : a+=((uint32_t)k[1])<<8;
- case 1 : a+=k[0];
- break;
- case 0 : return c;
- }
- }
-
- final(a,b,c);
- return c;
+ uint32_t a, b, c;
+
+ /* set up the internal state */
+ a = b = c = 0xdeadbeef + (((uint32_t)length) << 2) + initval;
+
+ /* handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /* handle the last 3 uint32_t's */
+ switch(length) {
+ case 3 :
+ c += k[2];
+ case 2 :
+ b += k[1];
+ case 1 :
+ a += k[0];
+ final(a, b, c);
+ case 0:
+ /* nothing left to add */
+ break;
+ }
+
+ return c;
}
-/*
- * hashlittle2: return 2 32-bit hash values
+/**
+ * Hash a variable-length key into a 32-bit value
+ * @param k the key (the unaligned variable-length array of bytes)
+ * @param length the length of the key, counting by bytes
+ * @param initval can be any 4-byte value
+ *
+ * Returns a 32-bit value. Every bit of the key affects every bit of the
+ * return value. Two keys differing by one or two bits will have totally
+ * different hash values.
+ *
+ * The best hash table sizes are powers of 2. There is no need to do mod a
+ * prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask.
+ * For example, if you need only 10 bits, do:
+ * h = (h & hashmask(10));
+ * In which case, the hash table should have hashsize(10) elements.
+ *
+ * If you are hashing n strings (uint8_t **)k, do it like this:
+ * for (i=0, h=0; i<n; ++i) h = jhash_le( k[i], len[i], h);
*
- * This is identical to hashlittle(), except it returns two 32-bit hash
- * values instead of just one. This is good enough for hash table
- * lookup with 2^^64 buckets, or if you want a second hash if you're not
- * happy with the first, or if you want a probably-unique 64-bit ID for
- * the key. *pc is better mixed than *pb, so use *pc first. If you want
- * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)".
*/
-void hashlittle2(
- const void *key, /* the key to hash */
- size_t length, /* length of the key */
- uint32_t *pc, /* IN: primary initval, OUT: primary hash */
- uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */
+static uint32_t jhash_le(const void *key, size_t length, uint32_t initval)
{
- uint32_t a,b,c; /* internal state */
- union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc;
- c += *pb;
-
- u.ptr = key;
- if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
- const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
-
- /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
- while (length > 12)
- {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a,b,c);
- length -= 12;
- k += 3;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- /*
- * "k[2]&0xffffff" actually reads beyond the end of the string, but
- * then masks off the part it's not allowed to read. Because the
- * string is aligned, the masked-off tail is in the same word as the
- * rest of the string. Every machine with memory protection I've seen
- * does it on word boundaries, so is OK with this. But VALGRIND will
- * still catch it and complain. The masking trick does make the hash
- * noticably faster for short strings (like English words).
- */
+ uint32_t a, b, c;
+ union {
+ const void *ptr;
+ size_t i;
+ } u; /* needed for Mac Powerbook G4 */
+
+ /* set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if ((arch_def_native->endian == ARCH_ENDIAN_LITTLE) &&
+ ((u.i & 0x3) == 0)) {
+ /* read 32-bit chunks */
+ const uint32_t *k = (const uint32_t *)key;
+
+ while (length > 12) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 12;
+ k += 3;
+ }
+
+ /* "k[2]&0xffffff" actually reads beyond the end of the string,
+ * but then masks off the part it's not allowed to read.
+ * Because the string is aligned, the masked-off tail is in the
+ * same word as the rest of the string. Every machine with
+ * memory protection I've seen does it on word boundaries, so
+ * is OK with this. But VALGRIND will still catch it and
+ * complain. The masking trick does make the hash noticably
+ * faster for short strings (like English words). */
#ifndef VALGRIND
- switch(length)
- {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
- case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
- case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
- case 6 : b+=k[1]&0xffff; a+=k[0]; break;
- case 5 : b+=k[1]&0xff; a+=k[0]; break;
- case 4 : a+=k[0]; break;
- case 3 : a+=k[0]&0xffffff; break;
- case 2 : a+=k[0]&0xffff; break;
- case 1 : a+=k[0]&0xff; break;
- case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
- }
+ switch(length) {
+ case 12:
+ c += k[2];
+ b += k[1];
+ a += k[0];
+ break;
+ case 11:
+ c += k[2] & 0xffffff;
+ b += k[1];
+ a += k[0];
+ break;
+ case 10:
+ c += k[2] & 0xffff;
+ b += k[1];
+ a += k[0];
+ break;
+ case 9 :
+ c += k[2] & 0xff;
+ b += k[1];
+ a += k[0];
+ break;
+ case 8 :
+ b += k[1];
+ a += k[0];
+ break;
+ case 7 :
+ b += k[1] & 0xffffff;
+ a += k[0];
+ break;
+ case 6 :
+ b += k[1] & 0xffff;
+ a += k[0];
+ break;
+ case 5 :
+ b += k[1] & 0xff;
+ a += k[0];
+ break;
+ case 4 :
+ a += k[0];
+ break;
+ case 3 :
+ a += k[0] & 0xffffff;
+ break;
+ case 2 :
+ a += k[0] & 0xffff;
+ break;
+ case 1 :
+ a += k[0] & 0xff;
+ break;
+ case 0 :
+ /* zero length strings require no mixing */
+ return c;
+ }
#else /* make valgrind happy */
- k8 = (const uint8_t *)k;
- switch(length)
- {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
- case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
- case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]; break;
- case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
- case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
- case 1 : a+=k8[0]; break;
- case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
- }
+ k8 = (const uint8_t *)k;
+ switch(length) {
+ case 12:
+ c += k[2];
+ b += k[1];
+ a += k[0];
+ break;
+ case 11:
+ c += ((uint32_t)k8[10]) << 16;
+ case 10:
+ c += ((uint32_t)k8[9]) << 8;
+ case 9 :
+ c += k8[8];
+ case 8 :
+ b += k[1];
+ a += k[0];
+ break;
+ case 7 :
+ b += ((uint32_t)k8[6]) << 16;
+ case 6 :
+ b += ((uint32_t)k8[5]) << 8;
+ case 5 :
+ b += k8[4];
+ case 4 :
+ a += k[0];
+ break;
+ case 3 :
+ a += ((uint32_t)k8[2]) << 16;
+ case 2 :
+ a += ((uint32_t)k8[1]) << 8;
+ case 1 :
+ a += k8[0];
+ break;
+ case 0 :
+ return c;
+ }
#endif /* !valgrind */
- } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
- const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
- const uint8_t *k8;
-
- /*--------------- all but last block: aligned reads and different mixing */
- while (length > 12)
- {
- a += k[0] + (((uint32_t)k[1])<<16);
- b += k[2] + (((uint32_t)k[3])<<16);
- c += k[4] + (((uint32_t)k[5])<<16);
- mix(a,b,c);
- length -= 12;
- k += 6;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- k8 = (const uint8_t *)k;
- switch(length)
- {
- case 12: c+=k[4]+(((uint32_t)k[5])<<16);
- b+=k[2]+(((uint32_t)k[3])<<16);
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
- case 10: c+=k[4];
- b+=k[2]+(((uint32_t)k[3])<<16);
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
- case 6 : b+=k[2];
- a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
- break;
- case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
- case 2 : a+=k[0];
- break;
- case 1 : a+=k8[0];
- break;
- case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
- }
-
- } else { /* need to read the key one byte at a time */
- const uint8_t *k = (const uint8_t *)key;
-
- /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
- while (length > 12)
- {
- a += k[0];
- a += ((uint32_t)k[1])<<8;
- a += ((uint32_t)k[2])<<16;
- a += ((uint32_t)k[3])<<24;
- b += k[4];
- b += ((uint32_t)k[5])<<8;
- b += ((uint32_t)k[6])<<16;
- b += ((uint32_t)k[7])<<24;
- c += k[8];
- c += ((uint32_t)k[9])<<8;
- c += ((uint32_t)k[10])<<16;
- c += ((uint32_t)k[11])<<24;
- mix(a,b,c);
- length -= 12;
- k += 12;
- }
-
- /*-------------------------------- last block: affect all 32 bits of (c) */
- switch(length) /* all the case statements fall through */
- {
- case 12: c+=((uint32_t)k[11])<<24;
- case 11: c+=((uint32_t)k[10])<<16;
- case 10: c+=((uint32_t)k[9])<<8;
- case 9 : c+=k[8];
- case 8 : b+=((uint32_t)k[7])<<24;
- case 7 : b+=((uint32_t)k[6])<<16;
- case 6 : b+=((uint32_t)k[5])<<8;
- case 5 : b+=k[4];
- case 4 : a+=((uint32_t)k[3])<<24;
- case 3 : a+=((uint32_t)k[2])<<16;
- case 2 : a+=((uint32_t)k[1])<<8;
- case 1 : a+=k[0];
- break;
- case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
- }
- }
-
- final(a,b,c);
- *pc=c; *pb=b;
+ } else if ((arch_def_native->endian == ARCH_ENDIAN_LITTLE) &&
+ ((u.i & 0x1) == 0)) {
+ /* read 16-bit chunks */
+ const uint16_t *k = (const uint16_t *)key;
+ const uint8_t *k8;
+
+ while (length > 12) {
+ a += k[0] + (((uint32_t)k[1]) << 16);
+ b += k[2] + (((uint32_t)k[3]) << 16);
+ c += k[4] + (((uint32_t)k[5]) << 16);
+ mix(a, b, c);
+ length -= 12;
+ k += 6;
+ }
+
+ k8 = (const uint8_t *)k;
+ switch(length) {
+ case 12:
+ c += k[4] + (((uint32_t)k[5]) << 16);
+ b += k[2] + (((uint32_t)k[3]) << 16);
+ a += k[0] + (((uint32_t)k[1]) << 16);
+ break;
+ case 11:
+ c += ((uint32_t)k8[10]) << 16;
+ case 10:
+ c += k[4];
+ b += k[2] + (((uint32_t)k[3]) << 16);
+ a += k[0] + (((uint32_t)k[1]) << 16);
+ break;
+ case 9 :
+ c += k8[8];
+ case 8 :
+ b += k[2] + (((uint32_t)k[3]) << 16);
+ a += k[0] + (((uint32_t)k[1]) << 16);
+ break;
+ case 7 :
+ b += ((uint32_t)k8[6]) << 16;
+ case 6 :
+ b += k[2];
+ a += k[0] + (((uint32_t)k[1]) << 16);
+ break;
+ case 5 :
+ b += k8[4];
+ case 4 :
+ a += k[0] + (((uint32_t)k[1]) << 16);
+ break;
+ case 3 :
+ a += ((uint32_t)k8[2]) << 16;
+ case 2 :
+ a += k[0];
+ break;
+ case 1 :
+ a += k8[0];
+ break;
+ case 0 :
+ /* zero length requires no mixing */
+ return c;
+ }
+
+ } else {
+ /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ while (length > 12) {
+ a += k[0];
+ a += ((uint32_t)k[1]) << 8;
+ a += ((uint32_t)k[2]) << 16;
+ a += ((uint32_t)k[3]) << 24;
+ b += k[4];
+ b += ((uint32_t)k[5]) << 8;
+ b += ((uint32_t)k[6]) << 16;
+ b += ((uint32_t)k[7]) << 24;
+ c += k[8];
+ c += ((uint32_t)k[9]) << 8;
+ c += ((uint32_t)k[10]) << 16;
+ c += ((uint32_t)k[11]) << 24;
+ mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+
+ switch(length) {
+ case 12:
+ c += ((uint32_t)k[11]) << 24;
+ case 11:
+ c += ((uint32_t)k[10]) << 16;
+ case 10:
+ c += ((uint32_t)k[9]) << 8;
+ case 9 :
+ c += k[8];
+ case 8 :
+ b += ((uint32_t)k[7]) << 24;
+ case 7 :
+ b += ((uint32_t)k[6]) << 16;
+ case 6 :
+ b += ((uint32_t)k[5]) << 8;
+ case 5 :
+ b += k[4];
+ case 4 :
+ a += ((uint32_t)k[3]) << 24;
+ case 3 :
+ a += ((uint32_t)k[2]) << 16;
+ case 2 :
+ a += ((uint32_t)k[1]) << 8;
+ case 1 :
+ a += k[0];
+ break;
+ case 0 :
+ return c;
+ }
+ }
+
+ final(a, b, c);
+ return c;
}
-/*
- * hashbig():
- * This is the same as hashword() on big-endian machines. It is different
- * from hashlittle() on all machines. hashbig() takes advantage of
- * big-endian byte ordering.
+/**
+ * Hash a variable-length key into a 32-bit value
+ * @param key the key (the unaligned variable-length array of bytes)
+ * @param length the length of the key, counting by bytes
+ * @param initval can be any 4-byte value
+ *
+ * This is the same as jhash_word() on big-endian machines. It is different
+ * from jhash_le() on all machines. jhash_be() takes advantage of big-endian
+ * byte ordering.
+ *
*/
-uint32_t hashbig( const void *key, size_t length, uint32_t initval)
+static uint32_t jhash_be( const void *key, size_t length, uint32_t initval)
{
- uint32_t a,b,c;
- union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
-
- u.ptr = key;
- if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
- const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
-
- /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
- while (length > 12)
- {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a,b,c);
- length -= 12;
- k += 3;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- /*
- * "k[2]<<8" actually reads beyond the end of the string, but
- * then shifts out the part it's not allowed to read. Because the
- * string is aligned, the illegal read is in the same word as the
- * rest of the string. Every machine with memory protection I've seen
- * does it on word boundaries, so is OK with this. But VALGRIND will
- * still catch it and complain. The masking trick does make the hash
- * noticably faster for short strings (like English words).
- */
+ uint32_t a, b, c;
+ union {
+ const void *ptr;
+ size_t i;
+ } u; /* to cast key to (size_t) happily */
+
+ /* set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if ((arch_def_native->endian == ARCH_ENDIAN_BIG) &&
+ ((u.i & 0x3) == 0)) {
+ /* read 32-bit chunks */
+ const uint32_t *k = (const uint32_t *)key;
+
+ while (length > 12) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 12;
+ k += 3;
+ }
+
+ /* "k[2]<<8" actually reads beyond the end of the string, but
+ * then shifts out the part it's not allowed to read. Because
+ * the string is aligned, the illegal read is in the same word
+ * as the rest of the string. Every machine with memory
+ * protection I've seen does it on word boundaries, so is OK
+ * with this. But VALGRIND will still catch it and complain.
+	 * The masking trick does make the hash noticeably faster for
+ * short strings (like English words). */
#ifndef VALGRIND
- switch(length)
- {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
- case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
- case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
- case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
- case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
- case 4 : a+=k[0]; break;
- case 3 : a+=k[0]&0xffffff00; break;
- case 2 : a+=k[0]&0xffff0000; break;
- case 1 : a+=k[0]&0xff000000; break;
- case 0 : return c; /* zero length strings require no mixing */
- }
+ switch(length) {
+ case 12:
+ c += k[2];
+ b += k[1];
+ a += k[0];
+ break;
+ case 11:
+ c += k[2] & 0xffffff00;
+ b += k[1];
+ a += k[0];
+ break;
+ case 10:
+ c += k[2] & 0xffff0000;
+ b += k[1];
+ a += k[0];
+ break;
+ case 9 :
+ c += k[2] & 0xff000000;
+ b += k[1];
+ a += k[0];
+ break;
+ case 8 :
+ b += k[1];
+ a += k[0];
+ break;
+ case 7 :
+ b += k[1] & 0xffffff00;
+ a += k[0];
+ break;
+ case 6 :
+ b += k[1] & 0xffff0000;
+ a += k[0];
+ break;
+ case 5 :
+ b += k[1] & 0xff000000;
+ a += k[0];
+ break;
+ case 4 :
+ a += k[0];
+ break;
+ case 3 :
+ a += k[0] & 0xffffff00;
+ break;
+ case 2 :
+ a += k[0] & 0xffff0000;
+ break;
+ case 1 :
+ a += k[0] & 0xff000000;
+ break;
+ case 0 :
+ /* zero length strings require no mixing */
+ return c;
+ }
#else /* make valgrind happy */
- k8 = (const uint8_t *)k;
- switch(length) /* all the case statements fall through */
- {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=((uint32_t)k8[10])<<8; /* fall through */
- case 10: c+=((uint32_t)k8[9])<<16; /* fall through */
- case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */
- case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */
- case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */
- case 4 : a+=k[0]; break;
- case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */
- case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */
- case 1 : a+=((uint32_t)k8[0])<<24; break;
- case 0 : return c;
- }
+ k8 = (const uint8_t *)k;
+ switch(length) {
+ case 12:
+ c += k[2];
+ b += k[1];
+ a += k[0];
+ break;
+ case 11:
+ c += ((uint32_t)k8[10]) << 8;
+ case 10:
+ c += ((uint32_t)k8[9]) << 16;
+ case 9 :
+ c += ((uint32_t)k8[8]) << 24;
+ case 8 :
+ b += k[1];
+ a += k[0];
+ break;
+ case 7 :
+ b += ((uint32_t)k8[6]) << 8;
+ case 6 :
+ b += ((uint32_t)k8[5]) << 16;
+ case 5 :
+ b += ((uint32_t)k8[4]) << 24;
+ case 4 :
+ a += k[0];
+ break;
+ case 3 :
+ a += ((uint32_t)k8[2]) << 8;
+ case 2 :
+ a += ((uint32_t)k8[1]) << 16;
+ case 1 :
+ a += ((uint32_t)k8[0]) << 24;
+ break;
+ case 0 :
+ return c;
+ }
#endif /* !VALGRIND */
- } else { /* need to read the key one byte at a time */
- const uint8_t *k = (const uint8_t *)key;
-
- /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
- while (length > 12)
- {
- a += ((uint32_t)k[0])<<24;
- a += ((uint32_t)k[1])<<16;
- a += ((uint32_t)k[2])<<8;
- a += ((uint32_t)k[3]);
- b += ((uint32_t)k[4])<<24;
- b += ((uint32_t)k[5])<<16;
- b += ((uint32_t)k[6])<<8;
- b += ((uint32_t)k[7]);
- c += ((uint32_t)k[8])<<24;
- c += ((uint32_t)k[9])<<16;
- c += ((uint32_t)k[10])<<8;
- c += ((uint32_t)k[11]);
- mix(a,b,c);
- length -= 12;
- k += 12;
- }
-
- /*-------------------------------- last block: affect all 32 bits of (c) */
- switch(length) /* all the case statements fall through */
- {
- case 12: c+=k[11];
- case 11: c+=((uint32_t)k[10])<<8;
- case 10: c+=((uint32_t)k[9])<<16;
- case 9 : c+=((uint32_t)k[8])<<24;
- case 8 : b+=k[7];
- case 7 : b+=((uint32_t)k[6])<<8;
- case 6 : b+=((uint32_t)k[5])<<16;
- case 5 : b+=((uint32_t)k[4])<<24;
- case 4 : a+=k[3];
- case 3 : a+=((uint32_t)k[2])<<8;
- case 2 : a+=((uint32_t)k[1])<<16;
- case 1 : a+=((uint32_t)k[0])<<24;
- break;
- case 0 : return c;
- }
- }
-
- final(a,b,c);
- return c;
+ } else {
+ /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ while (length > 12) {
+ a += ((uint32_t)k[0]) << 24;
+ a += ((uint32_t)k[1]) << 16;
+ a += ((uint32_t)k[2]) << 8;
+ a += ((uint32_t)k[3]);
+ b += ((uint32_t)k[4]) << 24;
+ b += ((uint32_t)k[5]) << 16;
+ b += ((uint32_t)k[6]) << 8;
+ b += ((uint32_t)k[7]);
+ c += ((uint32_t)k[8]) << 24;
+ c += ((uint32_t)k[9]) << 16;
+ c += ((uint32_t)k[10]) << 8;
+ c += ((uint32_t)k[11]);
+ mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+
+ switch(length) {
+ case 12:
+ c += k[11];
+ case 11:
+ c += ((uint32_t)k[10]) << 8;
+ case 10:
+ c += ((uint32_t)k[9]) << 16;
+ case 9 :
+ c += ((uint32_t)k[8]) << 24;
+ case 8 :
+ b += k[7];
+ case 7 :
+ b += ((uint32_t)k[6]) << 8;
+ case 6 :
+ b += ((uint32_t)k[5]) << 16;
+ case 5 :
+ b += ((uint32_t)k[4]) << 24;
+ case 4 :
+ a += k[3];
+ case 3 :
+ a += ((uint32_t)k[2]) << 8;
+ case 2 :
+ a += ((uint32_t)k[1]) << 16;
+ case 1 :
+ a += ((uint32_t)k[0]) << 24;
+ break;
+ case 0 :
+ return c;
+ }
+ }
+
+ final(a, b, c);
+ return c;
}
+/**
+ * Hash a variable-length key into a 32-bit value
+ * @param key the key (the unaligned variable-length array of bytes)
+ * @param length the length of the key, counting by bytes
+ * @param initval can be any 4-byte value
+ *
+ * A small wrapper function that selects the proper hash function based on the
+ * key length and the native machine's byte-ordering.
+ *
+ */
+uint32_t jhash(const void *key, size_t length, uint32_t initval)
+{
+ if (length % sizeof(uint32_t) == 0)
+ return jhash_word(key, (length / sizeof(uint32_t)), initval);
+ else if (arch_def_native->endian == ARCH_ENDIAN_BIG)
+ return jhash_be(key, length, initval);
+ else
+ return jhash_le(key, length, initval);
+}
diff --git a/src/python/seccomp.pyx b/src/python/seccomp.pyx
index a87e1a4..47d2ae1 100644
--- a/src/python/seccomp.pyx
+++ b/src/python/seccomp.pyx
@@ -37,15 +37,15 @@ Filter action values:
tracing process via PTRACE_EVENT_SECCOMP and the
PTRACE_GETEVENTMSG option
-Argument comparison values:
+Argument comparison values (see the Arg class):
- NE - argument not equal the value
- LT - argument less than the value
- LE - argument less than, or equal to, the value
- EQ - argument equal the value
- GT - argument greater than the value
- GE - argument greater than, or equal to, the value
- MASKED_EQ - masked argument is equal to the value
+ NE - arg != datum_a
+ LT - arg < datum_a
+ LE - arg <= datum_a
+ EQ - arg == datum_a
+ GT - arg > datum_a
+ GE - arg >= datum_a
+ MASKED_EQ - (arg & datum_b) == datum_a
Example:
@@ -165,14 +165,8 @@ cdef class Arg:
"""
self._arg.arg = arg
self._arg.op = op
- if isinstance(datum_a, file):
- self._arg.datum_a = datum_a.fileno()
- else:
- self._arg.datum_a = datum_a
- if isinstance(datum_b, file):
- self._arg.datum_b = datum_b.fileno()
- else:
- self._arg.datum_b = datum_b
+ self._arg.datum_a = datum_a
+ self._arg.datum_b = datum_b
def to_c(self):
""" Convert the object into a C structure.
@@ -191,6 +185,12 @@ cdef class SyscallFilter:
cdef libseccomp.scmp_filter_ctx _ctx
def __cinit__(self, int defaction):
+ self._ctx = libseccomp.seccomp_init(defaction)
+ if self._ctx == NULL:
+ raise RuntimeError("Library error")
+ _defaction = defaction
+
+ def __init__(self, defaction):
""" Initialize the filter state
Arguments:
@@ -199,10 +199,6 @@ cdef class SyscallFilter:
Description:
Initializes the seccomp filter state to the defaults.
"""
- self._ctx = libseccomp.seccomp_init(defaction)
- if self._ctx == NULL:
- raise RuntimeError("Library error")
- _defaction = defaction
def __dealloc__(self):
""" Destroys the filter state and releases any resources.
diff --git a/tests/.gitignore b/tests/.gitignore
index b393021..9b5a1b2 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -1,4 +1,5 @@
*.bpf
+*.bpfd
*.pfc
util.pyc
00-test.c
diff --git a/tests/01-sim-allow.tests b/tests/01-sim-allow.tests
index e3da13c..9630276 100644
--- a/tests/01-sim-allow.tests
+++ b/tests/01-sim-allow.tests
@@ -14,3 +14,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
01-sim-allow 50
+
+test type: bpf-valgrind
+
+# Testname
+01-sim-allow
diff --git a/tests/02-sim-basic.tests b/tests/02-sim-basic.tests
index 2121d37..07004a4 100644
--- a/tests/02-sim-basic.tests
+++ b/tests/02-sim-basic.tests
@@ -23,3 +23,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
02-sim-basic 50
+
+test type: bpf-valgrind
+
+# Testname
+02-sim-basic
diff --git a/tests/03-sim-basic_chains.tests b/tests/03-sim-basic_chains.tests
index 6692640..ef4353a 100644
--- a/tests/03-sim-basic_chains.tests
+++ b/tests/03-sim-basic_chains.tests
@@ -25,3 +25,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
03-sim-basic_chains 50
+
+test type: bpf-valgrind
+
+# Testname
+03-sim-basic_chains
diff --git a/tests/04-sim-multilevel_chains.tests b/tests/04-sim-multilevel_chains.tests
index 6f50562..cefbc4f 100644
--- a/tests/04-sim-multilevel_chains.tests
+++ b/tests/04-sim-multilevel_chains.tests
@@ -35,3 +35,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
04-sim-multilevel_chains 50
+
+test type: bpf-valgrind
+
+# Testname
+04-sim-multilevel_chains
diff --git a/tests/05-sim-long_jumps.tests b/tests/05-sim-long_jumps.tests
index bbdeaa1..03eb6d9 100644
--- a/tests/05-sim-long_jumps.tests
+++ b/tests/05-sim-long_jumps.tests
@@ -30,3 +30,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
05-sim-long_jumps 50
+
+test type: bpf-valgrind
+
+# Testname
+05-sim-long_jumps
diff --git a/tests/06-sim-actions.tests b/tests/06-sim-actions.tests
index 99d1071..f09f0a0 100644
--- a/tests/06-sim-actions.tests
+++ b/tests/06-sim-actions.tests
@@ -20,3 +20,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
06-sim-actions 50
+
+test type: bpf-valgrind
+
+# Testname
+06-sim-actions
diff --git a/tests/07-sim-db_bug_looping.py b/tests/07-sim-db_bug_looping.py
index 0b6e988..3314a3e 100755
--- a/tests/07-sim-db_bug_looping.py
+++ b/tests/07-sim-db_bug_looping.py
@@ -32,9 +32,9 @@ def test(args):
f = SyscallFilter(KILL)
# the next three seccomp_rule_add_exact() calls for read must go together
# in this order to catch an infinite loop.
- f.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdout))
+ f.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdout.fileno()))
f.add_rule(ALLOW, "read", Arg(1, EQ, 0))
- f.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdin))
+ f.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdin.fileno()))
return f
args = util.get_opt()
diff --git a/tests/07-sim-db_bug_looping.tests b/tests/07-sim-db_bug_looping.tests
index f2b1b84..a7ec72b 100644
--- a/tests/07-sim-db_bug_looping.tests
+++ b/tests/07-sim-db_bug_looping.tests
@@ -16,3 +16,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
07-sim-db_bug_looping 50
+
+test type: bpf-valgrind
+
+# Testname
+07-sim-db_bug_looping
diff --git a/tests/08-sim-subtree_checks.tests b/tests/08-sim-subtree_checks.tests
index d4511bd..ed3ec42 100644
--- a/tests/08-sim-subtree_checks.tests
+++ b/tests/08-sim-subtree_checks.tests
@@ -39,3 +39,9 @@ test type: bpf-sim-fuzz
# Testname StressCount
08-sim-subtree_checks 50
+
+
+test type: bpf-valgrind
+
+# Testname
+08-sim-subtree_checks
diff --git a/tests/09-sim-syscall_priority_pre.tests b/tests/09-sim-syscall_priority_pre.tests
index 3e17bc4..7b7d53f 100644
--- a/tests/09-sim-syscall_priority_pre.tests
+++ b/tests/09-sim-syscall_priority_pre.tests
@@ -19,3 +19,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
09-sim-syscall_priority_pre 50
+
+test type: bpf-valgrind
+
+# Testname
+09-sim-syscall_priority_pre
diff --git a/tests/10-sim-syscall_priority_post.tests b/tests/10-sim-syscall_priority_post.tests
index 6624d2d..aa0389f 100644
--- a/tests/10-sim-syscall_priority_post.tests
+++ b/tests/10-sim-syscall_priority_post.tests
@@ -19,3 +19,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
10-sim-syscall_priority_post 50
+
+test type: bpf-valgrind
+
+# Testname
+10-sim-syscall_priority_post
diff --git a/tests/11-basic-basic_errors.c b/tests/11-basic-basic_errors.c
index c328577..c695e8b 100644
--- a/tests/11-basic-basic_errors.c
+++ b/tests/11-basic-basic_errors.c
@@ -30,7 +30,7 @@ int main(int argc, char *argv[])
scmp_filter_ctx ctx;
/* seccomp_init errors */
- ctx = seccomp_init(SCMP_ACT_ALLOW+1);
+ ctx = seccomp_init(SCMP_ACT_ALLOW + 1);
if (ctx != NULL)
return -1;
@@ -41,7 +41,7 @@ int main(int argc, char *argv[])
ctx = NULL;
/* seccomp_reset error */
- rc = seccomp_reset(ctx, SCMP_ACT_KILL+1);
+ rc = seccomp_reset(ctx, SCMP_ACT_KILL + 1);
if (rc != -EINVAL)
return -1;
rc = seccomp_reset(ctx, SCMP_ACT_KILL);
@@ -82,7 +82,7 @@ int main(int argc, char *argv[])
rc = seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(read), 0);
if (rc != -EPERM)
return -1;
- rc = seccomp_rule_add(ctx, SCMP_ACT_KILL-1, SCMP_SYS(read), 0);
+ rc = seccomp_rule_add(ctx, SCMP_ACT_KILL - 1, SCMP_SYS(read), 0);
if (rc != -EINVAL)
return -1;
rc = seccomp_rule_add(ctx, SCMP_ACT_KILL, SCMP_SYS(read), 6);
@@ -141,7 +141,7 @@ int main(int argc, char *argv[])
if (ctx == NULL)
return -1;
else {
- rc = seccomp_export_pfc(ctx, sysconf(_SC_OPEN_MAX)-1);
+ rc = seccomp_export_pfc(ctx, sysconf(_SC_OPEN_MAX) - 1);
if (rc != EBADF)
return -1;
}
@@ -157,7 +157,7 @@ int main(int argc, char *argv[])
if (ctx == NULL)
return -1;
else {
- rc = seccomp_export_bpf(ctx, sysconf(_SC_OPEN_MAX)-1);
+ rc = seccomp_export_bpf(ctx, sysconf(_SC_OPEN_MAX) - 1);
if (rc != -EBADF)
return -1;
}
diff --git a/tests/12-sim-basic_masked_ops.tests b/tests/12-sim-basic_masked_ops.tests
index 23eada2..20e0f6d 100644
--- a/tests/12-sim-basic_masked_ops.tests
+++ b/tests/12-sim-basic_masked_ops.tests
@@ -35,3 +35,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
12-sim-basic_masked_ops 50
+
+test type: bpf-valgrind
+
+# Testname
+12-sim-basic_masked_ops
diff --git a/tests/13-basic-attrs.c b/tests/13-basic-attrs.c
index 46518a5..99e8dcb 100644
--- a/tests/13-basic-attrs.c
+++ b/tests/13-basic-attrs.c
@@ -29,7 +29,7 @@
int main(int argc, char *argv[])
{
int rc;
- uint32_t val = (uint32_t)-1;
+ uint32_t val = (uint32_t)(-1);
scmp_filter_ctx ctx;
ctx = seccomp_init(SCMP_ACT_ALLOW);
diff --git a/tests/14-sim-reset.tests b/tests/14-sim-reset.tests
index 4c42e3f..da52b2d 100644
--- a/tests/14-sim-reset.tests
+++ b/tests/14-sim-reset.tests
@@ -22,3 +22,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
14-sim-reset 50
+
+test type: bpf-valgrind
+
+# Testname
+14-sim-reset
diff --git a/tests/16-sim-arch_basic.py b/tests/16-sim-arch_basic.py
index 4484ac5..d29a5ff 100755
--- a/tests/16-sim-arch_basic.py
+++ b/tests/16-sim-arch_basic.py
@@ -38,9 +38,9 @@ def test(args):
f.add_arch(Arch.X32)
if not f.exist_arch(Arch.ARM):
f.add_arch(Arch.ARM)
- f.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdin))
- f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stdout))
- f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stderr))
+ f.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdin.fileno()))
+ f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stdout.fileno()))
+ f.add_rule(ALLOW, "write", Arg(0, EQ, sys.stderr.fileno()))
f.add_rule(ALLOW, "close")
f.add_rule(ALLOW, "socket")
f.add_rule(ALLOW, "connect")
diff --git a/tests/16-sim-arch_basic.tests b/tests/16-sim-arch_basic.tests
index 80bd705..39917b5 100644
--- a/tests/16-sim-arch_basic.tests
+++ b/tests/16-sim-arch_basic.tests
@@ -25,3 +25,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
16-sim-arch_basic 50
+
+test type: bpf-valgrind
+
+# Testname
+16-sim-arch_basic
diff --git a/tests/17-sim-arch_merge.py b/tests/17-sim-arch_merge.py
index 0221764..44e9cc4 100755
--- a/tests/17-sim-arch_merge.py
+++ b/tests/17-sim-arch_merge.py
@@ -37,9 +37,9 @@ def test(args):
if not f64.exist_arch(Arch.X86_64):
f64.add_arch(Arch.X86_64)
f64.remove_arch(Arch.NATIVE)
- f32.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdin))
- f32.add_rule(ALLOW, "write", Arg(0, EQ, sys.stdout))
- f32.add_rule(ALLOW, "write", Arg(0, EQ, sys.stderr))
+ f32.add_rule(ALLOW, "read", Arg(0, EQ, sys.stdin.fileno()))
+ f32.add_rule(ALLOW, "write", Arg(0, EQ, sys.stdout.fileno()))
+ f32.add_rule(ALLOW, "write", Arg(0, EQ, sys.stderr.fileno()))
f32.add_rule(ALLOW, "close")
f64.add_rule(ALLOW, "socket")
f64.add_rule(ALLOW, "connect")
diff --git a/tests/17-sim-arch_merge.tests b/tests/17-sim-arch_merge.tests
index c32f5fe..0f27ca9 100644
--- a/tests/17-sim-arch_merge.tests
+++ b/tests/17-sim-arch_merge.tests
@@ -22,3 +22,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
17-sim-arch_merge 50
+
+test type: bpf-valgrind
+
+# Testname
+17-sim-arch_merge
diff --git a/tests/18-sim-basic_whitelist.tests b/tests/18-sim-basic_whitelist.tests
index 2669ec4..d88d0d2 100644
--- a/tests/18-sim-basic_whitelist.tests
+++ b/tests/18-sim-basic_whitelist.tests
@@ -25,3 +25,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
18-sim-basic_whitelist 50
+
+test type: bpf-valgrind
+
+# Testname
+18-sim-basic_whitelist
diff --git a/tests/19-sim-missing_syscalls.tests b/tests/19-sim-missing_syscalls.tests
index 113fc66..5a1f244 100644
--- a/tests/19-sim-missing_syscalls.tests
+++ b/tests/19-sim-missing_syscalls.tests
@@ -14,3 +14,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
19-sim-missing_syscalls 50
+
+test type: bpf-valgrind
+
+# Testname
+19-sim-missing_syscalls
diff --git a/tests/22-sim-basic_chains_array.tests b/tests/22-sim-basic_chains_array.tests
index 6785152..b8867b7 100644
--- a/tests/22-sim-basic_chains_array.tests
+++ b/tests/22-sim-basic_chains_array.tests
@@ -24,3 +24,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
22-sim-basic_chains_array 50
+
+test type: bpf-valgrind
+
+# Testname
+22-sim-basic_chains_array
diff --git a/tests/23-sim-arch_all_basic.tests b/tests/23-sim-arch_all_basic.tests
index 02b3a79..980268f 100644
--- a/tests/23-sim-arch_all_basic.tests
+++ b/tests/23-sim-arch_all_basic.tests
@@ -21,3 +21,8 @@ test type: bpf-sim-fuzz
# Testname StressCount
23-sim-arch_all_basic 50
+
+test type: bpf-valgrind
+
+# Testname
+23-sim-arch_all_basic
diff --git a/tests/24-live-arg_allow.c b/tests/24-live-arg_allow.c
index fd6e289..e071dda 100644
--- a/tests/24-live-arg_allow.c
+++ b/tests/24-live-arg_allow.c
@@ -48,7 +48,7 @@ int main(int argc, char *argv[])
if (rc != 0)
goto out;
- fd = open("/dev/null", O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR);
+ fd = open("/dev/null", O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
if (fd < 0) {
rc = errno;
goto out;
diff --git a/tests/25-sim-multilevel_chains_adv.tests b/tests/25-sim-multilevel_chains_adv.tests
index d47a23b..1bdfa40 100644
--- a/tests/25-sim-multilevel_chains_adv.tests
+++ b/tests/25-sim-multilevel_chains_adv.tests
@@ -8,18 +8,23 @@
test type: bpf-sim
# Testname Arch Syscall Arg0 Arg1 Arg2 Arg3 Arg4 Arg5 Result
-25-sim-multilevel_chains_adv +x86_64 0-9 N N N N N N KILL
-25-sim-multilevel_chains_adv +x86_64 10 0x0000000b 0x00000000 N N N N ALLOW
-25-sim-multilevel_chains_adv +x86_64 10 0x10000000b 0x00000000 N N N N KILL
-25-sim-multilevel_chains_adv +x86_64 10 0x0000000b 0x10000000c N N N N ALLOW
-25-sim-multilevel_chains_adv +x86_64 11-19 N N N N N N KILL
-25-sim-multilevel_chains_adv +x86_64 20 0x00000015 0x00000000 0x00000017 N N N ALLOW
-25-sim-multilevel_chains_adv +x86_64 20 0x00000015 0x00000016 0x00000017 N N N KILL
-25-sim-multilevel_chains_adv +x86_64 20 0x100000015 0x00000000 0x00000017 N N N KILL
-25-sim-multilevel_chains_adv +x86_64 20 0x00000015 0x00000000 0x100000017 N N N KILL
-25-sim-multilevel_chains_adv +x86_64 21-30 N N N N N N KILL
+25-sim-multilevel_chains_adv all 0-9 N N N N N N KILL
+25-sim-multilevel_chains_adv all 10 0x0000000b 0x00000000 N N N N ALLOW
+25-sim-multilevel_chains_adv x86_64 10 0x10000000b 0x00000000 N N N N KILL
+25-sim-multilevel_chains_adv x86_64 10 0x0000000b 0x10000000c N N N N ALLOW
+25-sim-multilevel_chains_adv all 11-19 N N N N N N KILL
+25-sim-multilevel_chains_adv all 20 0x00000015 0x00000000 0x00000017 N N N ALLOW
+25-sim-multilevel_chains_adv all 20 0x00000015 0x00000016 0x00000017 N N N KILL
+25-sim-multilevel_chains_adv x86_64 20 0x100000015 0x00000000 0x00000017 N N N KILL
+25-sim-multilevel_chains_adv x86_64 20 0x00000015 0x00000000 0x100000017 N N N KILL
+25-sim-multilevel_chains_adv all 21-30 N N N N N N KILL
test type: bpf-sim-fuzz
# Testname StressCount
25-sim-multilevel_chains_adv 50
+
+test type: bpf-valgrind
+
+# Testname
+25-sim-multilevel_chains_adv
diff --git a/tests/Makefile b/tests/Makefile
index adb5820..5fdbca7 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -71,7 +71,7 @@ DEPS_TESTS = $(TESTS:%=%.d)
# targets
#
-.PHONY: clean
+.PHONY: check clean
all: $(TESTS) $(OBJS)
@@ -88,5 +88,8 @@ $(TESTS):
$(TEST_PRIVATE): 00-test.c $(OBJS) ../src/libseccomp.a
$(COMPILE_EXEC)
+check: $(TESTS)
+ ./regression
+
clean:
- $(RM) $(DEPS_TESTS) $(DEPS_OBJS) $(TESTS) $(TEST_PRIVATE) $(OBJS)
+ $(RM) $(DEPS_TESTS) $(DEPS_OBJS) $(TESTS) $(TEST_PRIVATE) $(OBJS) *.pyc
diff --git a/tests/regression b/tests/regression
index 6e3d6d1..790d5e1 100755
--- a/tests/regression
+++ b/tests/regression
@@ -31,6 +31,18 @@ GLBL_SYS_SIM="../tools/scmp_bpf_sim"
# functions
#
+# Dependency check
+#
+# Arguments:
+# 1 Dependency to check for
+#
+function check_deps() {
+ [[ -z "$1" ]] && return
+ which "$1" >& /dev/null
+ return $?
+}
+
+#
# Dependency verification
#
# Arguments:
@@ -38,7 +50,7 @@ GLBL_SYS_SIM="../tools/scmp_bpf_sim"
#
function verify_deps() {
[[ -z "$1" ]] && return
- if ! which "$1" >& /dev/null; then
+ if ! check_deps "$1"; then
echo "error: install \"$1\" and include it in your \$PATH"
exit 1
fi
@@ -49,8 +61,8 @@ function verify_deps() {
#
function usage() {
cat << EOF
-usage: regression [-h] [-m MODE] [-a] [-b BATCH_NAME] [-g] [-l <LOG>]
- [-s SINGLE_TEST] [-t <TEMP_DIR>] [-T <TEST_TYPE>] [-v]
+usage: regression [-h] [-v] [-m MODE] [-a] [-b BATCH_NAME] [-l <LOG>]
+ [-s SINGLE_TEST] [-t <TEMP_DIR>] [-T <TEST_TYPE>]
libseccomp regression test automation script
optional arguments:
@@ -58,7 +70,6 @@ optional arguments:
-m MODE specified the test mode [c (default), python]
-a specifies all tests are to be run
-b BATCH_NAME specifies batch of tests to be run
- -g specifies that tests are to be run with valgrind
-l [LOG] specifies log file to write test results to
-s SINGLE_TEST specifies individual test number to be run
-t [TEMP_DIR] specifies directory to create temporary files in
@@ -97,7 +108,7 @@ function generate_test_num() {
# 2 string containing line of test data
#
function print_data() {
- if $verbose; then
+ if [[ -n $verbose ]]; then
printf "Test %s data: %s\n" "$1" "$2" >&$logfd
fi
}
@@ -111,7 +122,7 @@ function print_data() {
# 3 string containing addition details
#
function print_result() {
- if [[ $2 == "INFO" ]] && ! $verbose; then
+ if [[ $2 == "INFO" && -z $verbose ]]; then
return
fi
if [[ $3 == "" ]]; then
@@ -128,8 +139,8 @@ function print_result() {
# 1 string containing generated test number
#
function print_valgrind() {
- if $verbose; then
- printf "Test %s valgrind results:\n" "$1" >&$logfd
+ if [[ -n $verbose ]]; then
+ printf "Test %s valgrind output\n" "$1" >&$logfd
fi
}
@@ -164,26 +175,12 @@ function get_range() {
function run_test_command() {
local cmd
- if $use_valgrind && $verbose; then
- print_valgrind $1
- if [[ $logfd -eq 3 ]]; then
- cmd="/usr/bin/valgrind --log-fd=$logfd ./$2 $3"
- else
- cmd="/usr/bin/valgrind ./$2 $3"
- fi
- elif $use_valgrind; then
- # with -q, valgrind will only print error messages
- if [[ $logfd -eq 3 ]]; then
- cmd="/usr/bin/valgrind -q --log-fd=$logfd ./$2 $3"
- else
- cmd="/usr/bin/valgrind -q ./$2 $3"
- fi
- elif [[ $mode == "python" ]]; then
+ if [[ $mode == "python" ]]; then
cmd="PYTHONPATH=$PYTHONPATH"
cmd="$cmd:$(cd $(pwd)/../src/python/build/lib.*; pwd)"
- cmd="$cmd /usr/bin/env python ./$2.py $3"
+ cmd="$cmd /usr/bin/env python $2.py $3"
else
- cmd="./$2 $3"
+ cmd="$2 $3"
fi
# setup the stdout/stderr redirects
@@ -278,7 +275,7 @@ function run_test_bpf_sim_fuzz() {
# run the test command and put the BPF filter in a temp file
exec 4>$tmpfile
- run_test_command "$testnumstr" "$testname" "-b" 4 "" ""
+ run_test_command "$testnumstr" "./$testname" "-b" 4 ""
rc=$?
exec 4>&-
if [[ $rc -ne 0 ]]; then
@@ -464,7 +461,7 @@ function run_test_bpf_sim() {
# run the test command and put the BPF in a temp file
exec 4>$tmpfile
- run_test_command "$testnumstr" "$testname" "-b" 4 ""
+ run_test_command "$testnumstr" "./$testname" "-b" 4 ""
rc=$?
exec 4>&-
if [[ $rc -ne 0 ]]; then
@@ -523,7 +520,7 @@ function run_test_basic() {
print_data "$1" "$2"
# run the command
- run_test_command "$1" "$2" "" "" ""
+ run_test_command "$1" "./$2" "" "" ""
rc=$?
if [[ $rc -ne 0 ]]; then
print_result $1 "FAILURE" "$2 rc=$rc"
@@ -536,6 +533,60 @@ function run_test_basic() {
}
#
+# Run the specified "bpf-valgrind" test
+#
+# Tests that belong to the "bpf-valgrind" test type generate a BPF filter
+# while running under valgrind to detect any memory errors.
+#
+# Arguments:
+# 1 value of test number from batch file
+# 2 string containing line of test data from batch file
+#
+function run_test_bpf_valgrind() {
+ local rc
+ local testcmd
+
+ # we only support the native/c test mode here
+ if [[ $mode != "c" ]]; then
+ stats_skipped=$(($stats_skipped+1))
+ return
+ fi
+
+ # print out the input test data to the log file
+ print_data "$1" "$2"
+
+ # build the command
+ testcmd="$2"
+ testvalgrind="valgrind \
+ --tool=memcheck \
+ --error-exitcode=1 \
+ --leak-check=full \
+ --read-var-info=yes \
+ --track-origins=yes"
+ if [[ -n $logfile ]]; then
+ testvalgrind+=" --log-fd=$logfd"
+ fi
+ if [[ -z $verbose ]]; then
+ testvalgrind+=" --quiet --log-fd=4"
+ fi
+
+ # run the command
+ exec 4>/dev/null
+ print_valgrind "$1"
+ run_test_command "$1" "$testvalgrind --" "./$testcmd -b" 4 2
+ rc=$?
+ exec 4>&-
+ if [[ $rc -ne 0 ]]; then
+ print_result $1 "FAILURE" "$2 rc=$rc"
+ stats_failure=$(($stats_failure+1))
+ else
+ print_result $1 "SUCCESS" ""
+ stats_success=$(($stats_success+1))
+ fi
+ stats_all=$(($stats_all+1))
+}
+
+#
# Run the specified "live" test
#
# Tests that belong to the "live" test type will attempt to run a live test
@@ -560,7 +611,7 @@ function run_test_live() {
# run the command
exec 4>/dev/null
- run_test_command "$1" "$line_cmd" "$line_act" "" 4
+ run_test_command "$1" "./$line_cmd" "$line_act" "" 4
rc=$?
exec 4>&-
@@ -603,7 +654,7 @@ function run_test_live() {
#
function run_test() {
# generate the test number string for the line of batch test data
- local testnumstr=$(generate_test_num "$1" $2 0)
+ local testnumstr=$(generate_test_num "$1" $2 1)
# ensure we only run tests which match the specified type
[[ -n $type && "$4" != "$type" ]] && return
@@ -615,6 +666,13 @@ function run_test() {
run_test_bpf_sim "$1" $2 "$3"
elif [[ "$4" == "bpf-sim-fuzz" ]]; then
run_test_bpf_sim_fuzz "$1" $2 "$3"
+ elif [[ "$4" == "bpf-valgrind" ]]; then
+ # only run this test if valgrind is installed
+ if check_deps valgrind; then
+ run_test_bpf_valgrind "$testnumstr" "$3"
+ else
+ stats_skipped=$(($stats_skipped+1))
+ fi
elif [[ "$4" == "live" ]]; then
# only run this test if explicitly requested
if [[ -n $type ]]; then
@@ -712,13 +770,12 @@ batch_count=0
logfile=
logfd=
mode_list=""
-runall=false
+runall=
singlecount=0
tmpfile=""
tmpdir=""
type=
-use_valgrind=false
-verbose=false
+verbose=
stats_all=0
stats_skipped=0
stats_success=0
@@ -728,16 +785,12 @@ stats_error=0
while getopts "ab:gl:m:s:t:T:vh" opt; do
case $opt in
a)
- runall=true
+ runall=1
;;
b)
batch_list[batch_count]="$OPTARG"
batch_count=$(($batch_count+1))
;;
- g)
- verify_deps valgrind
- use_valgrind=true
- ;;
l)
logfile="$OPTARG"
;;
@@ -766,7 +819,7 @@ while getopts "ab:gl:m:s:t:T:vh" opt; do
type="$OPTARG"
;;
v)
- verbose=true
+ verbose=1
;;
h|*)
usage
@@ -782,11 +835,11 @@ fi
# default to all tests if batch or single tests not requested
if [[ -z $batch_list ]] && [[ -z $single_list ]]; then
- runall=true
+ runall=1
fi
# drop any requested batch and single tests if all tests were requested
-if $runall; then
+if [[ -n $runall ]]; then
batch_list=()
single_list=()
fi
@@ -825,4 +878,8 @@ echo "============================================================" >&$logfd
# cleanup and exit
rm -f $tmpfile
-exit 0
+rc=0
+[[ $stats_failure -gt 0 ]] && rc=$(($rc + 2))
+[[ $stats_error -gt 0 ]] && rc=$(($rc + 4))
+
+exit $rc
diff --git a/tests/testdiff b/tests/testdiff
new file mode 100755
index 0000000..a6ddbac
--- /dev/null
+++ b/tests/testdiff
@@ -0,0 +1,126 @@
+#!/bin/bash
+
+#
+# libseccomp test diff generator
+#
+# Copyright (c) 2013 Red Hat <pmoore@redhat.com>
+# Author: Paul Moore <pmoore@redhat.com>
+#
+
+#
+# This library is free software; you can redistribute it and/or modify it
+# under the terms of version 2.1 of the GNU Lesser General Public License as
+# published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+# for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library; if not, see <http://www.gnu.org/licenses>.
+#
+
+####
+# functions
+
+#
+# Print out script usage details
+#
+function usage() {
+cat << EOF
+usage: testdiff [-h] LABEL_1 LABEL_2
+
+libseccomp test diff generator script
+optional arguments:
+ -h show this help message and exit
+EOF
+}
+
+#
+# Print the test header
+#
+# Arguments:
+# 1 string containing generated test number
+#
+function print_test() {
+ printf "Test %s comparison:\n" "$1"
+}
+
+#
+# Compare the tests
+#
+# Arguments:
+# 1 string containing first test label
+# 2 string containing second test label
+#
+function diff_tests() {
+ local batch_name
+ local label_a
+ local label_b
+ local file_a
+ local file_b
+
+ if [[ -n $1 ]]; then
+ label_a=".$1"
+ else
+ label_a=""
+ fi
+
+ if [[ -n $2 ]]; then
+ label_b=".$2"
+ else
+ label_b=""
+ fi
+
+ for file in *-sim-*.tests; do
+ # extract the batch name from the file name
+ batch_name=$(basename $file .tests)
+
+ print_test "$batch_name"
+
+ file_a="${batch_name}${label_a}"
+ file_b="${batch_name}${label_b}"
+
+ if [[ -r "$file_a.pfc" && -r "$file_b.pfc" ]]; then
+ diff -pu "$file_a.pfc" "$file_b.pfc"
+ fi
+
+ if [[ -r "$file_a.bpf" && -r "$file_b.bpf" ]]; then
+ diff -pu "$file_a.bpf" "$file_b.bpf"
+ fi
+
+ if [[ -r "$file_a.bpfd" && -r "$file_b.bpfd" ]]; then
+ diff -pu "$file_a.bpfd" "$file_b.bpfd"
+ fi
+ done
+
+ return
+}
+
+####
+# main
+
+opt_label=
+opt_disasm=0
+
+while getopts "h" opt; do
+ case $opt in
+ h|*)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+stats_all=0
+stats_failure=0
+
+# display the test output and run the requested tests
+echo "=============== $(date) ==============="
+echo "Comparing Test Output (\"testdiff $*\")"
+diff_tests "$1" "$2"
+echo "============================================================"
+
+# exit
+exit 0
diff --git a/tests/testgen b/tests/testgen
new file mode 100755
index 0000000..0da599d
--- /dev/null
+++ b/tests/testgen
@@ -0,0 +1,206 @@
+#!/bin/bash
+
+#
+# libseccomp test output generator
+#
+# Copyright (c) 2013 Red Hat <pmoore@redhat.com>
+# Author: Paul Moore <pmoore@redhat.com>
+#
+
+#
+# This library is free software; you can redistribute it and/or modify it
+# under the terms of version 2.1 of the GNU Lesser General Public License as
+# published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+# for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library; if not, see <http://www.gnu.org/licenses>.
+#
+
+####
+# functions
+
+#
+# Dependency verification
+#
+# Arguments:
+# 1 Dependency to check for
+#
+function verify_deps() {
+ [[ -z "$1" ]] && return
+ if ! which "$1" >& /dev/null; then
+ echo "error: install \"$1\" and include it in your \$PATH"
+ exit 1
+ fi
+}
+
+#
+# Print out script usage details
+#
+function usage() {
+cat << EOF
+usage: testgen [-h] [-b] [-d] [-p] [-v] [-l LABEL]
+
+libseccomp test output generator script
+optional arguments:
+ -h show this help message and exit
+ -b generate BPF output
+ -d generate disassembled BPF output
+ -p generate PFC output
+ -v perform valgrind checks
+ -l [LABEL] specifies label for the test output
+EOF
+}
+
+#
+# Print the test result
+#
+# Arguments:
+# 1 string containing generated test number
+# 2 string containing the test result
+#
+function print_result() {
+ printf "Test %s result: %s\n" "$1" "$2"
+}
+
+#
+# Run the tests
+#
+# Arguments:
+# 1 string containing output label
+#
+function run_tests() {
+ local batch_name
+ local label
+ local rc
+
+ if [[ -n $1 ]]; then
+ label=".$1"
+ else
+ label=""
+ fi
+
+ for file in *-sim-*.tests; do
+ # extract the batch name from the file name
+ batch_name=$(basename $file .tests)
+
+ if [[ -x "$batch_name" ]]; then
+ if [[ $opt_pfc -eq 1 ]]; then
+ ./$batch_name > ${batch_name}${label}.pfc
+ rc=$?
+ stats_all=$(($stats_all + 1))
+ if [[ $rc -eq 0 ]]; then
+ print_result "$batch_name [pfc]" "SUCCESS"
+ else
+ stats_failure=$(($stats_failure + 1))
+ print_result "$batch_name [pfc]" "FAILURE"
+ fi
+ fi
+
+ if [[ $opt_bpf -eq 1 ]]; then
+ ./$batch_name -b > ${batch_name}${label}.bpf
+ rc=$?
+ stats_all=$(($stats_all + 1))
+ if [[ $rc -eq 0 ]]; then
+ print_result "$batch_name [bpf]" "SUCCESS"
+ else
+ stats_failure=$(($stats_failure + 1))
+ print_result "$batch_name [bpf]" "FAILURE"
+ fi
+ fi
+
+ if [[ $opt_disasm -eq 1 ]]; then
+ ./$batch_name -b | \
+ ../tools/scmp_bpf_disasm > ${batch_name}${label}.bpfd
+ rc=$?
+ stats_all=$(($stats_all + 1))
+ if [[ $rc -eq 0 ]]; then
+ print_result "$batch_name [bpfd]" "SUCCESS"
+ else
+ stats_failure=$(($stats_failure + 1))
+ print_result "$batch_name [bpfd]" "FAILURE"
+ fi
+ fi
+
+ if [[ $opt_valgrind -eq 1 ]]; then
+ valgrind --tool=memcheck \
+ --quiet --error-exitcode=1 \
+ --leak-check=full \
+ --read-var-info=yes \
+ --track-origins=yes \
+ -- ./$batch_name -b > /dev/null
+ rc=$?
+ stats_all=$(($stats_all + 1))
+ if [[ $rc -eq 0 ]]; then
+ print_result "$batch_name [valgrind]" "SUCCESS"
+ else
+ stats_failure=$(($stats_failure + 1))
+ print_result "$batch_name [valgrind]" "FAILURE"
+ fi
+ fi
+ else
+ stats_failure=$(($stats_failure + 1))
+ print_result "$batch_name" "FAILURE"
+ fi
+ done
+
+ return
+}
+
+####
+# main
+
+opt_label=
+opt_bpf=0
+opt_disasm=0
+opt_pfc=0
+opt_valgrind=0
+
+while getopts "bphdl:v" opt; do
+ case $opt in
+ b)
+ opt_bpf=1
+ ;;
+ d)
+ opt_disasm=1
+ ;;
+ l)
+ opt_label="$OPTARG"
+ ;;
+ p)
+ opt_pfc=1
+ ;;
+ v)
+ opt_valgrind=1
+ ;;
+ h|*)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# verify valgrind
+[[ $opt_valgrind -eq 1 ]] && verify_deps valgrind
+
+stats_all=0
+stats_failure=0
+
+# display the test output and run the requested tests
+echo "=============== $(date) ==============="
+echo "Collecting Test Output (\"testgen $*\")"
+run_tests "$opt_label"
+echo "Test Summary"
+echo " tests run: $stats_all"
+echo " tests failed: $stats_failure"
+echo "============================================================"
+
+# cleanup and exit
+rc=0
+[[ $stats_failure -gt 0 ]] && rc=$(($rc + 2))
+
+exit $rc
diff --git a/tests/util.c b/tests/util.c
index 02b0043..9c069d6 100644
--- a/tests/util.c
+++ b/tests/util.c
@@ -194,7 +194,7 @@ int util_file_write(const char *path)
const char buf[] = "testing";
ssize_t buf_len = strlen(buf);
- fd = open(path, O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR);
+ fd = open(path, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
if (fd < 0)
return errno;
if (write(fd, buf, buf_len) < buf_len) {
diff --git a/tools/check-syntax b/tools/check-syntax
new file mode 100755
index 0000000..f356a46
--- /dev/null
+++ b/tools/check-syntax
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+#
+# libseccomp code syntax checking tool
+#
+# Copyright (c) 2013 Red Hat <pmoore@redhat.com>
+# Author: Paul Moore <pmoore@redhat.com>
+#
+
+#
+# This library is free software; you can redistribute it and/or modify it
+# under the terms of version 2.1 of the GNU Lesser General Public License as
+# published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+# for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library; if not, see <http://www.gnu.org/licenses>.
+#
+
+CHK_C_LIST="include/seccomp.h.in \
+ src/*.c src/*.h \
+ tests/*.c tests/*.h \
+ tools/*.c tools/*.h"
+CHK_C_EXCLUDE=""
+
+####
+# functions
+
+#
+# Dependency verification
+#
+# Arguments:
+# 1 Dependency to check for
+#
+function verify_deps() {
+ [[ -z "$1" ]] && return
+ if ! which "$1" >& /dev/null; then
+ echo "error: install \"$1\" and include it in your \$PATH"
+ exit 1
+ fi
+}
+
+#
+# Print out script usage details
+#
+function usage() {
+cat << EOF
+usage: check-syntax [-h]
+
+libseccomp code syntax checking tool
+optional arguments:
+ -h show this help message and exit
+EOF
+}
+
+#
+# Check the formatting on a C source/header file
+#
+# Arguments:
+# 1 File to check
+#
+function tool_c_style() {
+ [[ -z "$1" || ! -r "$1" ]] && return
+
+ astyle --options=none --lineend=linux --mode=c \
+ --style=linux \
+ --indent=force-tab=8 \
+ --indent-preprocessor \
+ --indent-col1-comments \
+ --min-conditional-indent=0 \
+ --max-instatement-indent=80 \
+ --pad-oper \
+ --align-pointer=name \
+ --align-reference=name \
+ --max-code-length=80 \
+ --break-after-logical < "$1" \
+ | diff -pu --label="$1" "$1" --label="$1 [CORRECTED]" -
+}
+
+#
+# Perform all known syntax checks for the configured C sources/headers
+#
+function check_c() {
+ for i in $CHK_C_LIST; do
+ echo "$CHK_C_EXCLUDE" | grep -q "$i" && continue
+ echo "Differences for $i"
+ tool_c_style "$i"
+ done
+}
+
+####
+# main
+
+verify_deps astyle
+
+while getopts "h" opt; do
+ case $opt in
+ h|*)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# display the results
+echo "=============== $(date) ==============="
+echo "Code Syntax Check Results (\"check-syntax $*\")"
+check_c
+echo "============================================================"
+
+# exit
+exit 0
diff --git a/tools/scmp_arch_detect.c b/tools/scmp_arch_detect.c
index 0aca5a8..e90f92b 100644
--- a/tools/scmp_arch_detect.c
+++ b/tools/scmp_arch_detect.c
@@ -51,7 +51,7 @@ int main(int argc, char *argv[])
uint32_t arch;
/* parse the command line */
- while ((opt = getopt(argc, argv, "ht"))> 0) {
+ while ((opt = getopt(argc, argv, "ht")) > 0) {
switch (opt) {
case 't':
token = 1;
diff --git a/tools/scmp_bpf_disasm.c b/tools/scmp_bpf_disasm.c
index 5b63fe8..8474eb1 100644
--- a/tools/scmp_bpf_disasm.c
+++ b/tools/scmp_bpf_disasm.c
@@ -44,125 +44,125 @@
static void bpf_decode_op(const bpf_instr_raw *bpf)
{
switch (bpf->code) {
- case BPF_LD+BPF_W+BPF_IMM:
- case BPF_LD+BPF_W+BPF_ABS:
- case BPF_LD+BPF_W+BPF_IND:
- case BPF_LD+BPF_W+BPF_MEM:
- case BPF_LD+BPF_W+BPF_LEN:
- case BPF_LD+BPF_W+BPF_MSH:
- printf(_OP_FMT, "ld");
- break;
- case BPF_LD+BPF_H+BPF_IMM:
- case BPF_LD+BPF_H+BPF_ABS:
- case BPF_LD+BPF_H+BPF_IND:
- case BPF_LD+BPF_H+BPF_MEM:
- case BPF_LD+BPF_H+BPF_LEN:
- case BPF_LD+BPF_H+BPF_MSH:
- printf(_OP_FMT, "ldh");
- break;
- case BPF_LD+BPF_B+BPF_IMM:
- case BPF_LD+BPF_B+BPF_ABS:
- case BPF_LD+BPF_B+BPF_IND:
- case BPF_LD+BPF_B+BPF_MEM:
- case BPF_LD+BPF_B+BPF_LEN:
- case BPF_LD+BPF_B+BPF_MSH:
- printf(_OP_FMT, "ldb");
- break;
- case BPF_LDX+BPF_W+BPF_IMM:
- case BPF_LDX+BPF_W+BPF_ABS:
- case BPF_LDX+BPF_W+BPF_IND:
- case BPF_LDX+BPF_W+BPF_MEM:
- case BPF_LDX+BPF_W+BPF_LEN:
- case BPF_LDX+BPF_W+BPF_MSH:
- case BPF_LDX+BPF_H+BPF_IMM:
- case BPF_LDX+BPF_H+BPF_ABS:
- case BPF_LDX+BPF_H+BPF_IND:
- case BPF_LDX+BPF_H+BPF_MEM:
- case BPF_LDX+BPF_H+BPF_LEN:
- case BPF_LDX+BPF_H+BPF_MSH:
- case BPF_LDX+BPF_B+BPF_IMM:
- case BPF_LDX+BPF_B+BPF_ABS:
- case BPF_LDX+BPF_B+BPF_IND:
- case BPF_LDX+BPF_B+BPF_MEM:
- case BPF_LDX+BPF_B+BPF_LEN:
- case BPF_LDX+BPF_B+BPF_MSH:
- printf(_OP_FMT, "ldx");
- break;
- case BPF_ST:
- printf(_OP_FMT, "st");
- break;
- case BPF_STX:
- printf(_OP_FMT, "stx");
- break;
- case BPF_ALU+BPF_ADD+BPF_K:
- case BPF_ALU+BPF_ADD+BPF_X:
- printf(_OP_FMT, "add");
- break;
- case BPF_ALU+BPF_SUB+BPF_K:
- case BPF_ALU+BPF_SUB+BPF_X:
- printf(_OP_FMT, "sub");
- break;
- case BPF_ALU+BPF_MUL+BPF_K:
- case BPF_ALU+BPF_MUL+BPF_X:
- printf(_OP_FMT, "mul");
- break;
- case BPF_ALU+BPF_DIV+BPF_K:
- case BPF_ALU+BPF_DIV+BPF_X:
- printf(_OP_FMT, "div");
- break;
- case BPF_ALU+BPF_OR+BPF_K:
- case BPF_ALU+BPF_OR+BPF_X:
- printf(_OP_FMT, "or");
- break;
- case BPF_ALU+BPF_AND+BPF_K:
- case BPF_ALU+BPF_AND+BPF_X:
- printf(_OP_FMT, "and");
- break;
- case BPF_ALU+BPF_LSH+BPF_K:
- case BPF_ALU+BPF_LSH+BPF_X:
- printf(_OP_FMT, "lsh");
- break;
- case BPF_ALU+BPF_RSH+BPF_K:
- case BPF_ALU+BPF_RSH+BPF_X:
- printf(_OP_FMT, "rsh");
- break;
- case BPF_ALU+BPF_NEG+BPF_K:
- case BPF_ALU+BPF_NEG+BPF_X:
- printf(_OP_FMT, "neg");
- break;
- case BPF_JMP+BPF_JA+BPF_K:
- case BPF_JMP+BPF_JA+BPF_X:
- printf(_OP_FMT, "jmp");
- break;
- case BPF_JMP+BPF_JEQ+BPF_K:
- case BPF_JMP+BPF_JEQ+BPF_X:
- printf(_OP_FMT, "jeq");
- break;
- case BPF_JMP+BPF_JGT+BPF_K:
- case BPF_JMP+BPF_JGT+BPF_X:
- printf(_OP_FMT, "jgt");
- break;
- case BPF_JMP+BPF_JGE+BPF_K:
- case BPF_JMP+BPF_JGE+BPF_X:
- printf(_OP_FMT, "jge");
- break;
- case BPF_JMP+BPF_JSET+BPF_K:
- case BPF_JMP+BPF_JSET+BPF_X:
- printf(_OP_FMT, "jset");
- break;
- case BPF_RET+BPF_K:
- case BPF_RET+BPF_X:
- case BPF_RET+BPF_A:
- printf(_OP_FMT, "ret");
- break;
- case BPF_MISC+BPF_TAX:
- printf(_OP_FMT, "tax");
- break;
- case BPF_MISC+BPF_TXA:
- printf(_OP_FMT, "txa");
- break;
- default:
- printf(_OP_FMT, "???");
+ case BPF_LD+BPF_W+BPF_IMM:
+ case BPF_LD+BPF_W+BPF_ABS:
+ case BPF_LD+BPF_W+BPF_IND:
+ case BPF_LD+BPF_W+BPF_MEM:
+ case BPF_LD+BPF_W+BPF_LEN:
+ case BPF_LD+BPF_W+BPF_MSH:
+ printf(_OP_FMT, "ld");
+ break;
+ case BPF_LD+BPF_H+BPF_IMM:
+ case BPF_LD+BPF_H+BPF_ABS:
+ case BPF_LD+BPF_H+BPF_IND:
+ case BPF_LD+BPF_H+BPF_MEM:
+ case BPF_LD+BPF_H+BPF_LEN:
+ case BPF_LD+BPF_H+BPF_MSH:
+ printf(_OP_FMT, "ldh");
+ break;
+ case BPF_LD+BPF_B+BPF_IMM:
+ case BPF_LD+BPF_B+BPF_ABS:
+ case BPF_LD+BPF_B+BPF_IND:
+ case BPF_LD+BPF_B+BPF_MEM:
+ case BPF_LD+BPF_B+BPF_LEN:
+ case BPF_LD+BPF_B+BPF_MSH:
+ printf(_OP_FMT, "ldb");
+ break;
+ case BPF_LDX+BPF_W+BPF_IMM:
+ case BPF_LDX+BPF_W+BPF_ABS:
+ case BPF_LDX+BPF_W+BPF_IND:
+ case BPF_LDX+BPF_W+BPF_MEM:
+ case BPF_LDX+BPF_W+BPF_LEN:
+ case BPF_LDX+BPF_W+BPF_MSH:
+ case BPF_LDX+BPF_H+BPF_IMM:
+ case BPF_LDX+BPF_H+BPF_ABS:
+ case BPF_LDX+BPF_H+BPF_IND:
+ case BPF_LDX+BPF_H+BPF_MEM:
+ case BPF_LDX+BPF_H+BPF_LEN:
+ case BPF_LDX+BPF_H+BPF_MSH:
+ case BPF_LDX+BPF_B+BPF_IMM:
+ case BPF_LDX+BPF_B+BPF_ABS:
+ case BPF_LDX+BPF_B+BPF_IND:
+ case BPF_LDX+BPF_B+BPF_MEM:
+ case BPF_LDX+BPF_B+BPF_LEN:
+ case BPF_LDX+BPF_B+BPF_MSH:
+ printf(_OP_FMT, "ldx");
+ break;
+ case BPF_ST:
+ printf(_OP_FMT, "st");
+ break;
+ case BPF_STX:
+ printf(_OP_FMT, "stx");
+ break;
+ case BPF_ALU+BPF_ADD+BPF_K:
+ case BPF_ALU+BPF_ADD+BPF_X:
+ printf(_OP_FMT, "add");
+ break;
+ case BPF_ALU+BPF_SUB+BPF_K:
+ case BPF_ALU+BPF_SUB+BPF_X:
+ printf(_OP_FMT, "sub");
+ break;
+ case BPF_ALU+BPF_MUL+BPF_K:
+ case BPF_ALU+BPF_MUL+BPF_X:
+ printf(_OP_FMT, "mul");
+ break;
+ case BPF_ALU+BPF_DIV+BPF_K:
+ case BPF_ALU+BPF_DIV+BPF_X:
+ printf(_OP_FMT, "div");
+ break;
+ case BPF_ALU+BPF_OR+BPF_K:
+ case BPF_ALU+BPF_OR+BPF_X:
+ printf(_OP_FMT, "or");
+ break;
+ case BPF_ALU+BPF_AND+BPF_K:
+ case BPF_ALU+BPF_AND+BPF_X:
+ printf(_OP_FMT, "and");
+ break;
+ case BPF_ALU+BPF_LSH+BPF_K:
+ case BPF_ALU+BPF_LSH+BPF_X:
+ printf(_OP_FMT, "lsh");
+ break;
+ case BPF_ALU+BPF_RSH+BPF_K:
+ case BPF_ALU+BPF_RSH+BPF_X:
+ printf(_OP_FMT, "rsh");
+ break;
+ case BPF_ALU+BPF_NEG+BPF_K:
+ case BPF_ALU+BPF_NEG+BPF_X:
+ printf(_OP_FMT, "neg");
+ break;
+ case BPF_JMP+BPF_JA+BPF_K:
+ case BPF_JMP+BPF_JA+BPF_X:
+ printf(_OP_FMT, "jmp");
+ break;
+ case BPF_JMP+BPF_JEQ+BPF_K:
+ case BPF_JMP+BPF_JEQ+BPF_X:
+ printf(_OP_FMT, "jeq");
+ break;
+ case BPF_JMP+BPF_JGT+BPF_K:
+ case BPF_JMP+BPF_JGT+BPF_X:
+ printf(_OP_FMT, "jgt");
+ break;
+ case BPF_JMP+BPF_JGE+BPF_K:
+ case BPF_JMP+BPF_JGE+BPF_X:
+ printf(_OP_FMT, "jge");
+ break;
+ case BPF_JMP+BPF_JSET+BPF_K:
+ case BPF_JMP+BPF_JSET+BPF_X:
+ printf(_OP_FMT, "jset");
+ break;
+ case BPF_RET+BPF_K:
+ case BPF_RET+BPF_X:
+ case BPF_RET+BPF_A:
+ printf(_OP_FMT, "ret");
+ break;
+ case BPF_MISC+BPF_TAX:
+ printf(_OP_FMT, "tax");
+ break;
+ case BPF_MISC+BPF_TXA:
+ printf(_OP_FMT, "txa");
+ break;
+ default:
+ printf(_OP_FMT, "???");
}
}
@@ -178,80 +178,80 @@ static void bpf_decode_op(const bpf_instr_raw *bpf)
static void bpf_decode_args(const bpf_instr_raw *bpf, unsigned int line)
{
switch (BPF_CLASS(bpf->code)) {
- case BPF_LD:
- case BPF_LDX:
- switch (BPF_MODE(bpf->code)) {
- case BPF_ABS:
- printf("$data[%u]", bpf->k);
- break;
- case BPF_MEM:
- printf("$temp[%u]", bpf->k);
- break;
- }
+ case BPF_LD:
+ case BPF_LDX:
+ switch (BPF_MODE(bpf->code)) {
+ case BPF_ABS:
+ printf("$data[%u]", bpf->k);
break;
- case BPF_ST:
- case BPF_STX:
+ case BPF_MEM:
printf("$temp[%u]", bpf->k);
break;
- case BPF_ALU:
- if (BPF_SRC(bpf->code) == BPF_K) {
- switch (BPF_OP(bpf->code)) {
- case BPF_OR:
- case BPF_AND:
- printf("0x%.8x", bpf->k);
- break;
- default:
- printf("%u", bpf->k);
- }
- } else
+ }
+ break;
+ case BPF_ST:
+ case BPF_STX:
+ printf("$temp[%u]", bpf->k);
+ break;
+ case BPF_ALU:
+ if (BPF_SRC(bpf->code) == BPF_K) {
+ switch (BPF_OP(bpf->code)) {
+ case BPF_OR:
+ case BPF_AND:
+ printf("0x%.8x", bpf->k);
+ break;
+ default:
printf("%u", bpf->k);
- break;
- case BPF_JMP:
- if (BPF_OP(bpf->code) == BPF_JA) {
- printf("%.4u", (line + 1) + bpf->k);
- } else {
- printf("%-4u true:%.4u false:%.4u",
- bpf->k,
- (line + 1) + bpf->jt,
- (line + 1) + bpf->jf);
}
- break;
- case BPF_RET:
- if (BPF_RVAL(bpf->code) == BPF_A) {
- /* XXX - accumulator? */
- printf("$acc");
- } else if (BPF_SRC(bpf->code) == BPF_K) {
- uint32_t act = bpf->k & SECCOMP_RET_ACTION;
- uint32_t data = bpf->k & SECCOMP_RET_DATA;
+ } else
+ printf("%u", bpf->k);
+ break;
+ case BPF_JMP:
+ if (BPF_OP(bpf->code) == BPF_JA) {
+ printf("%.4u", (line + 1) + bpf->k);
+ } else {
+ printf("%-4u true:%.4u false:%.4u",
+ bpf->k,
+ (line + 1) + bpf->jt,
+ (line + 1) + bpf->jf);
+ }
+ break;
+ case BPF_RET:
+ if (BPF_RVAL(bpf->code) == BPF_A) {
+ /* XXX - accumulator? */
+ printf("$acc");
+ } else if (BPF_SRC(bpf->code) == BPF_K) {
+ uint32_t act = bpf->k & SECCOMP_RET_ACTION;
+ uint32_t data = bpf->k & SECCOMP_RET_DATA;
- switch (act) {
- case SECCOMP_RET_KILL:
- printf("KILL");
- break;
- case SECCOMP_RET_TRAP:
- printf("TRAP");
- break;
- case SECCOMP_RET_ERRNO:
- printf("ERRNO(%u)", data);
- break;
- case SECCOMP_RET_TRACE:
- printf("TRACE(%u)", data);
- break;
- case SECCOMP_RET_ALLOW:
- printf("ALLOW");
- break;
- default:
- printf("0x%.8x", bpf->k);
- }
- } else if (BPF_SRC(bpf->code) == BPF_X) {
- /* XXX - any idea? */
- printf("???");
+ switch (act) {
+ case SECCOMP_RET_KILL:
+ printf("KILL");
+ break;
+ case SECCOMP_RET_TRAP:
+ printf("TRAP");
+ break;
+ case SECCOMP_RET_ERRNO:
+ printf("ERRNO(%u)", data);
+ break;
+ case SECCOMP_RET_TRACE:
+ printf("TRACE(%u)", data);
+ break;
+ case SECCOMP_RET_ALLOW:
+ printf("ALLOW");
+ break;
+ default:
+ printf("0x%.8x", bpf->k);
}
- break;
- case BPF_MISC:
- break;
- default:
+ } else if (BPF_SRC(bpf->code) == BPF_X) {
+ /* XXX - any idea? */
printf("???");
+ }
+ break;
+ case BPF_MISC:
+ break;
+ default:
+ printf("???");
}
}
diff --git a/tools/scmp_bpf_sim.c b/tools/scmp_bpf_sim.c
index 94c6648..1b46cc9 100644
--- a/tools/scmp_bpf_sim.c
+++ b/tools/scmp_bpf_sim.c
@@ -59,9 +59,10 @@ static unsigned int opt_verbose = 0;
*/
static void exit_usage(const char *program)
{
- fprintf(stderr, "usage: %s -f <bpf_file> [-v]"
- " -a <arch> -s <syscall_num> [-0 <a0>] ... [-5 <a5>]\n",
- program);
+ fprintf(stderr,
+ "usage: %s -f <bpf_file> [-v]"
+ " -a <arch> -s <syscall_num> [-0 <a0>] ... [-5 <a5>]\n",
+ program);
exit(EINVAL);
}
diff --git a/tools/scmp_sys_resolver.c b/tools/scmp_sys_resolver.c
index 7e627d4..e86b400 100644
--- a/tools/scmp_sys_resolver.c
+++ b/tools/scmp_sys_resolver.c
@@ -59,7 +59,7 @@ int main(int argc, char *argv[])
const char *sys_name;
/* parse the command line */
- while ((opt = getopt(argc, argv, "a:ht"))> 0) {
+ while ((opt = getopt(argc, argv, "a:ht")) > 0) {
switch (opt) {
case 'a':
if (strcmp(optarg, "x86") == 0)