| author | David S. Miller <davem@davemloft.net> | 2018-08-13 10:07:23 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2018-08-13 10:07:23 -0700 |
| commit | c1617fb4c5eea2ad38b5ffb937a732dbb137117f (patch) | |
| tree | 7331bdc1fd66799533d2339816c688ded60e0475 /tools | |
| parent | 961d9735357ec59be3e3e2c37c0ca6494f04461b (diff) | |
| parent | 2ce3206b9eb3943de09f3bf4ec9134568420d8b9 (diff) | |
| download | linux-c1617fb4c5eea2ad38b5ffb937a732dbb137117f.tar.gz | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2018-08-13
The following pull-request contains BPF updates for your *net-next* tree.
The main changes are:
1) Add driver XDP support for veth. This can be used in conjunction with
XDP redirect from another XDP program, e.g. one sitting on the NIC, so
the xdp_frame can be forwarded to the peer veth directly without
modification, from Toshiaki.
2) Add a new BPF map type REUSEPORT_SOCKARRAY and prog type SK_REUSEPORT
in order to provide more control and visibility on where a SO_REUSEPORT
sk should be located; the latter enables a program to directly select a
sk from the BPF map. This also enables map-in-map for application
migration use cases, from Martin. A minimal usage sketch follows the
commit message.
3) Add a new BPF helper bpf_skb_ancestor_cgroup_id() that returns the id
of the cgroup v2 ancestor, at the given ancestor_level, of the cgroup
associated with the skb, from Andrey. A second sketch, below the
diffstat, illustrates the helper.
4) Implement BPF fs map pretty-print support based on BTF data for regular
hash table and LRU map, from Yonghong.
5) Decouple the ability to attach BTF for a map from the key and value
pretty-printer in BPF fs, and extend BTF support to per-cpu and LPM
trie maps, from Daniel.
6) Implement a better BPF sample of using XDP's CPU redirect feature for
load balancing SKB processing to a remote CPU. The sample implements
the same XDP load balancing as Suricata does: a symmetric hash based
on the IP addresses and L4 protocol, from Jesper. A minimal cpumap
sketch closes this page.
7) Revert adding NULL pointer check with WARN_ON_ONCE() in __xdp_return()'s
critical path as it is ensured that the allocator is present, from Björn.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
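
As a concrete illustration of item 2, the sketch below shows the shape of an SK_REUSEPORT program built on the pieces this series adds (the sk_reuseport_md context, the REUSEPORT_SOCKARRAY map type, and the bpf_sk_select_reuseport() helper). It is a minimal sketch, not code from this series: the map name, section name, and fixed index 0 are illustrative, and the selftest-style bpf_helpers.h header is assumed.

```c
// SPDX-License-Identifier: GPL-2.0
/* Minimal SK_REUSEPORT sketch: always steer to the sk stored at
 * index 0 of a REUSEPORT_SOCKARRAY. Names and the fixed index are
 * illustrative; see test_select_reuseport_kern.c in this series
 * for a complete program.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") reuseport_array = {
	.type = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),	/* updated from user space with a socket fd */
	.max_entries = 1,
};

SEC("sk_reuseport")
int select_sk(struct sk_reuseport_md *reuse_md)
{
	__u32 index = 0;

	/* On success, the sk at reuseport_array[index] receives the packet. */
	if (bpf_sk_select_reuseport(reuse_md, &reuseport_array, &index, 0))
		return SK_DROP;

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";
```

User space would attach the program with setsockopt(SO_ATTACH_REUSEPORT_EBPF) on one member of the reuseport group and populate the array via bpf_map_update_elem() with listening socket fds, as the new test_select_reuseport.c below does.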
Diffstat (limited to 'tools')
19 files changed, 1615 insertions(+), 41 deletions(-)
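
Similarly for item 3, a hypothetical tc egress program using bpf_skb_ancestor_cgroup_id() might look like the following. This is a sketch under assumptions: ALLOWED_CGROUP_ID is a placeholder value, and the selftest-style bpf_helpers.h is assumed; the actual test program added below (test_skb_cgroup_id_kern.c) logs the ids instead of filtering on them.

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch: drop egress skbs whose level-2 cgroup v2 ancestor is not
 * the allowed one. ALLOWED_CGROUP_ID is a hypothetical placeholder.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

#define ALLOWED_CGROUP_ID 0xbeefULL	/* placeholder, not a real id */

SEC("cgroup_ancestor_filter")
int filter_by_ancestor(struct __sk_buff *skb)
{
	/* Level 0 is the root cgroup; each step down increments the level. */
	if (bpf_skb_ancestor_cgroup_id(skb, 2) != ALLOWED_CGROUP_ID)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
```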
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index dd5758dc35d3..66917a4eba27 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -126,6 +126,7 @@ enum bpf_map_type { BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, }; enum bpf_prog_type { @@ -150,6 +151,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, + BPF_PROG_TYPE_SK_REUSEPORT, }; enum bpf_attach_type { @@ -2091,6 +2093,24 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * + * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) + * Description + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *skb* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *skb*, then return value will be same as that + * of **bpf_skb_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *skb*. + * + * The format of returned id and helper limitations are same as in + * **bpf_skb_cgroup_id**\ (). + * Return + * The id is returned or 0 in case the id could not be retrieved. + * * u64 bpf_get_current_cgroup_id(void) * Return * A 64-bit integer containing the current cgroup id based @@ -2113,6 +2133,14 @@ union bpf_attr { * the shared data. * Return * Pointer to the local storage area. + * + * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * Description + * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map + * It checks the selected sk is matching the incoming + * request in the skb. + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2196,7 +2224,9 @@ union bpf_attr { FN(rc_keydown), \ FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ - FN(get_local_storage), + FN(get_local_storage), \ + FN(sk_select_reuseport), \ + FN(skb_ancestor_cgroup_id), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2413,6 +2443,30 @@ struct sk_msg_md { __u32 local_port; /* stored in host byte order */ }; +struct sk_reuseport_md { + /* + * Start of directly accessible data. It begins from + * the tcp/udp header. + */ + void *data; + void *data_end; /* End of directly accessible data */ + /* + * Total length of packet (starting from the tcp/udp header). + * Note that the directly accessible bytes (data_end - data) + * could be less than this "len". Those bytes could be + * indirectly read by a helper "bpf_skb_load_bytes()". + */ + __u32 len; + /* + * Eth protocol in the mac header (network byte order). e.g. + * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) + */ + __u32 eth_protocol; + __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ + __u32 bind_inany; /* Is sock bound to an INANY address? 
*/ + __u32 hash; /* A hash of the packet 4 tuples */ +}; + #define BPF_TAG_SIZE 8 struct bpf_prog_info { diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 9ddc89dae962..60aa4ca8b2c5 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -92,6 +92,7 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) attr.btf_key_type_id = create_attr->btf_key_type_id; attr.btf_value_type_id = create_attr->btf_value_type_id; attr.map_ifindex = create_attr->map_ifindex; + attr.inner_map_fd = create_attr->inner_map_fd; return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 0639a30a457d..6f38164b2618 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -39,6 +39,7 @@ struct bpf_create_map_attr { __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 map_ifindex; + __u32 inner_map_fd; }; int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 40211b51427a..2abd0f112627 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1501,6 +1501,7 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) case BPF_PROG_TYPE_SK_MSG: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_LIRC_MODE2: + case BPF_PROG_TYPE_SK_REUSEPORT: return false; case BPF_PROG_TYPE_UNSPEC: case BPF_PROG_TYPE_KPROBE: diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 17a7a5818ee1..fff7fb1285fc 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -23,7 +23,7 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ - test_socket_cookie test_cgroup_storage + test_socket_cookie test_cgroup_storage test_select_reuseport TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ @@ -34,7 +34,8 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \ test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \ test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \ - get_cgroup_id_kern.o socket_cookie_prog.o + get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \ + test_skb_cgroup_id_kern.o # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ @@ -45,10 +46,11 @@ TEST_PROGS := test_kmod.sh \ test_sock_addr.sh \ test_tunnel.sh \ test_lwt_seg6local.sh \ - test_lirc_mode2.sh + test_lirc_mode2.sh \ + test_skb_cgroup_id.sh # Compile but not part of 'make run_tests' -TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr +TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user include ../lib.mk @@ -59,6 +61,7 @@ $(TEST_GEN_PROGS): $(BPFOBJ) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c +$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c $(OUTPUT)/test_sock: cgroup_helpers.c $(OUTPUT)/test_sock_addr: cgroup_helpers.c $(OUTPUT)/test_socket_cookie: cgroup_helpers.c diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 9ba1c72d7cf5..e4be7730222d 100644 
--- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -111,6 +111,8 @@ static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) = static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state, int size, int flags) = (void *) BPF_FUNC_skb_get_xfrm_state; +static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) = + (void *) BPF_FUNC_sk_select_reuseport; static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) = (void *) BPF_FUNC_get_stack; static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, @@ -137,6 +139,10 @@ static unsigned long long (*bpf_get_current_cgroup_id)(void) = (void *) BPF_FUNC_get_current_cgroup_id; static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) = (void *) BPF_FUNC_get_local_storage; +static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) = + (void *) BPF_FUNC_skb_cgroup_id; +static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = + (void *) BPF_FUNC_skb_ancestor_cgroup_id; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions @@ -173,6 +179,8 @@ struct bpf_map_def { static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = (void *) BPF_FUNC_skb_load_bytes; +static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) = + (void *) BPF_FUNC_skb_load_bytes_relative; static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = (void *) BPF_FUNC_skb_store_bytes; static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) = diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index d0811b3d6a6f..315a44fa32af 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -44,4 +44,8 @@ static inline unsigned int bpf_num_possible_cpus(void) name[bpf_num_possible_cpus()] #define bpf_percpu(name, cpu) name[(cpu)].v +#ifndef ARRAY_SIZE +# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + #endif /* __BPF_UTIL__ */ diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index 6b1b302310fe..5f377ec53f2f 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -18,10 +18,7 @@ #include "../../../include/linux/filter.h" #include "bpf_rlimit.h" - -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_util.h" #define MAX_INSNS 512 #define MAX_MATCHES 16 diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index ffdd27737c9e..6b5cfeb7a9cc 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -19,6 +19,7 @@ #include <bpf/btf.h> #include "bpf_rlimit.h" +#include "bpf_util.h" static uint32_t pass_cnt; static uint32_t error_cnt; @@ -93,10 +94,6 @@ static int __base_pr(const char *format, ...) 
#define MAX_NR_RAW_TYPES 1024 #define BTF_LOG_BUF_SIZE 65535 -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - static struct args { unsigned int raw_test_num; unsigned int file_test_num; @@ -131,6 +128,8 @@ struct btf_raw_test { __u32 max_entries; bool btf_load_err; bool map_create_err; + bool ordered_map; + bool lossless_map; int hdr_len_delta; int type_off_delta; int str_off_delta; @@ -2093,8 +2092,7 @@ struct pprint_mapv { } aenum; }; -static struct btf_raw_test pprint_test = { - .descr = "BTF pretty print test #1", +static struct btf_raw_test pprint_test_template = { .raw_types = { /* unsighed char */ /* [1] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), @@ -2146,8 +2144,6 @@ static struct btf_raw_test pprint_test = { }, .str_sec = "\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum", .str_sec_size = sizeof("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), - .map_type = BPF_MAP_TYPE_ARRAY, - .map_name = "pprint_test", .key_size = sizeof(unsigned int), .value_size = sizeof(struct pprint_mapv), .key_type_id = 3, /* unsigned int */ @@ -2155,6 +2151,40 @@ static struct btf_raw_test pprint_test = { .max_entries = 128 * 1024, }; +static struct btf_pprint_test_meta { + const char *descr; + enum bpf_map_type map_type; + const char *map_name; + bool ordered_map; + bool lossless_map; +} pprint_tests_meta[] = { +{ + .descr = "BTF pretty print array", + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "pprint_test_array", + .ordered_map = true, + .lossless_map = true, +}, + +{ + .descr = "BTF pretty print hash", + .map_type = BPF_MAP_TYPE_HASH, + .map_name = "pprint_test_hash", + .ordered_map = false, + .lossless_map = true, +}, + +{ + .descr = "BTF pretty print lru hash", + .map_type = BPF_MAP_TYPE_LRU_HASH, + .map_name = "pprint_test_lru_hash", + .ordered_map = false, + .lossless_map = false, +}, + +}; + + static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i) { v->ui32 = i; @@ -2166,10 +2196,12 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i) v->aenum = i & 0x03; } -static int test_pprint(void) +static int do_test_pprint(void) { - const struct btf_raw_test *test = &pprint_test; + const struct btf_raw_test *test = &pprint_test_template; struct bpf_create_map_attr create_attr = {}; + unsigned int key, nr_read_elems; + bool ordered_map, lossless_map; int map_fd = -1, btf_fd = -1; struct pprint_mapv mapv = {}; unsigned int raw_btf_size; @@ -2178,7 +2210,6 @@ static int test_pprint(void) char pin_path[255]; size_t line_len = 0; char *line = NULL; - unsigned int key; uint8_t *raw_btf; ssize_t nread; int err, ret; @@ -2251,14 +2282,18 @@ static int test_pprint(void) goto done; } - key = 0; + nr_read_elems = 0; + ordered_map = test->ordered_map; + lossless_map = test->lossless_map; do { ssize_t nexpected_line; + unsigned int next_key; - set_pprint_mapv(&mapv, key); + next_key = ordered_map ? 
nr_read_elems : atoi(line); + set_pprint_mapv(&mapv, next_key); nexpected_line = snprintf(expected_line, sizeof(expected_line), "%u: {%u,0,%d,0x%x,0x%x,0x%x,{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", - key, + next_key, mapv.ui32, mapv.si32, mapv.unused_bits2a, mapv.bits28, mapv.unused_bits2b, mapv.ui64, @@ -2281,11 +2316,12 @@ static int test_pprint(void) } nread = getline(&line, &line_len, pin_file); - } while (++key < test->max_entries && nread > 0); + } while (++nr_read_elems < test->max_entries && nread > 0); - if (CHECK(key < test->max_entries, - "Unexpected EOF. key:%u test->max_entries:%u", - key, test->max_entries)) { + if (lossless_map && + CHECK(nr_read_elems < test->max_entries, + "Unexpected EOF. nr_read_elems:%u test->max_entries:%u", + nr_read_elems, test->max_entries)) { err = -1; goto done; } @@ -2314,6 +2350,24 @@ done: return err; } +static int test_pprint(void) +{ + unsigned int i; + int err = 0; + + for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) { + pprint_test_template.descr = pprint_tests_meta[i].descr; + pprint_test_template.map_type = pprint_tests_meta[i].map_type; + pprint_test_template.map_name = pprint_tests_meta[i].map_name; + pprint_test_template.ordered_map = pprint_tests_meta[i].ordered_map; + pprint_test_template.lossless_map = pprint_tests_meta[i].lossless_map; + + err |= count_result(do_test_pprint()); + } + + return err; +} + static void usage(const char *cmd) { fprintf(stderr, "Usage: %s [-l] [[-r test_num (1 - %zu)] | [-g test_num (1 - %zu)] | [-f test_num (1 - %zu)] | [-p]]\n", @@ -2409,7 +2463,7 @@ int main(int argc, char **argv) err |= test_file(); if (args.pprint_test) - err |= count_result(test_pprint()); + err |= test_pprint(); if (args.raw_test || args.get_info_test || args.file_test || args.pprint_test) diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 6c253343a6f9..4b7c74f5faa7 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -17,7 +17,8 @@ #include <stdlib.h> #include <sys/wait.h> - +#include <sys/socket.h> +#include <netinet/in.h> #include <linux/bpf.h> #include <bpf/bpf.h> @@ -26,8 +27,21 @@ #include "bpf_util.h" #include "bpf_rlimit.h" +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + static int map_flags; +#define CHECK(condition, tag, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \ + printf(format); \ + exit(-1); \ + } \ +}) + static void test_hashmap(int task, void *data) { long long key, next_key, first_key, value; @@ -1150,6 +1164,250 @@ static void test_map_wronly(void) assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM); } +static void prepare_reuseport_grp(int type, int map_fd, + __s64 *fds64, __u64 *sk_cookies, + unsigned int n) +{ + socklen_t optlen, addrlen; + struct sockaddr_in6 s6; + const __u32 index0 = 0; + const int optval = 1; + unsigned int i; + u64 sk_cookie; + __s64 fd64; + int err; + + s6.sin6_family = AF_INET6; + s6.sin6_addr = in6addr_any; + s6.sin6_port = 0; + addrlen = sizeof(s6); + optlen = sizeof(sk_cookie); + + for (i = 0; i < n; i++) { + fd64 = socket(AF_INET6, type, 0); + CHECK(fd64 == -1, "socket()", + "sock_type:%d fd64:%lld errno:%d\n", + type, fd64, errno); + + err = setsockopt(fd64, SOL_SOCKET, SO_REUSEPORT, + &optval, sizeof(optval)); + CHECK(err == -1, "setsockopt(SO_REUSEEPORT)", + "err:%d errno:%d\n", err, errno); + + /* reuseport_array does not allow unbound sk */ + err = bpf_map_update_elem(map_fd, &index0, &fd64, + BPF_ANY); + CHECK(err != -1 || errno != EINVAL, + "reuseport array update unbound sk", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + err = bind(fd64, (struct sockaddr *)&s6, sizeof(s6)); + CHECK(err == -1, "bind()", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + if (i == 0) { + err = getsockname(fd64, (struct sockaddr *)&s6, + &addrlen); + CHECK(err == -1, "getsockname()", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + } + + err = getsockopt(fd64, SOL_SOCKET, SO_COOKIE, &sk_cookie, + &optlen); + CHECK(err == -1, "getsockopt(SO_COOKIE)", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + if (type == SOCK_STREAM) { + /* + * reuseport_array does not allow + * non-listening tcp sk. + */ + err = bpf_map_update_elem(map_fd, &index0, &fd64, + BPF_ANY); + CHECK(err != -1 || errno != EINVAL, + "reuseport array update non-listening sk", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + err = listen(fd64, 0); + CHECK(err == -1, "listen()", + "sock_type:%d, err:%d errno:%d\n", + type, err, errno); + } + + fds64[i] = fd64; + sk_cookies[i] = sk_cookie; + } +} + +static void test_reuseport_array(void) +{ +#define REUSEPORT_FD_IDX(err, last) ({ (err) ? 
last : !last; }) + + const __u32 array_size = 4, index0 = 0, index3 = 3; + int types[2] = { SOCK_STREAM, SOCK_DGRAM }, type; + __u64 grpa_cookies[2], sk_cookie, map_cookie; + __s64 grpa_fds64[2] = { -1, -1 }, fd64 = -1; + const __u32 bad_index = array_size; + int map_fd, err, t, f; + __u32 fds_idx = 0; + int fd; + + map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + sizeof(__u32), sizeof(__u64), array_size, 0); + CHECK(map_fd == -1, "reuseport array create", + "map_fd:%d, errno:%d\n", map_fd, errno); + + /* Test lookup/update/delete with invalid index */ + err = bpf_map_delete_elem(map_fd, &bad_index); + CHECK(err != -1 || errno != E2BIG, "reuseport array del >=max_entries", + "err:%d errno:%d\n", err, errno); + + err = bpf_map_update_elem(map_fd, &bad_index, &fd64, BPF_ANY); + CHECK(err != -1 || errno != E2BIG, + "reuseport array update >=max_entries", + "err:%d errno:%d\n", err, errno); + + err = bpf_map_lookup_elem(map_fd, &bad_index, &map_cookie); + CHECK(err != -1 || errno != ENOENT, + "reuseport array update >=max_entries", + "err:%d errno:%d\n", err, errno); + + /* Test lookup/delete non existence elem */ + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err != -1 || errno != ENOENT, + "reuseport array lookup not-exist elem", + "err:%d errno:%d\n", err, errno); + err = bpf_map_delete_elem(map_fd, &index3); + CHECK(err != -1 || errno != ENOENT, + "reuseport array del not-exist elem", + "err:%d errno:%d\n", err, errno); + + for (t = 0; t < ARRAY_SIZE(types); t++) { + type = types[t]; + + prepare_reuseport_grp(type, map_fd, grpa_fds64, + grpa_cookies, ARRAY_SIZE(grpa_fds64)); + + /* Test BPF_* update flags */ + /* BPF_EXIST failure case */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_EXIST); + CHECK(err != -1 || errno != ENOENT, + "reuseport array update empty elem BPF_EXIST", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_NOEXIST success case */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_NOEXIST); + CHECK(err == -1, + "reuseport array update empty elem BPF_NOEXIST", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_EXIST success case. 
*/ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_EXIST); + CHECK(err == -1, + "reuseport array update same elem BPF_EXIST", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_NOEXIST failure case */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_NOEXIST); + CHECK(err != -1 || errno != EEXIST, + "reuseport array update non-empty elem BPF_NOEXIST", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_ANY case (always succeed) */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_ANY); + CHECK(err == -1, + "reuseport array update same sk with BPF_ANY", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + fd64 = grpa_fds64[fds_idx]; + sk_cookie = grpa_cookies[fds_idx]; + + /* The same sk cannot be added to reuseport_array twice */ + err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_ANY); + CHECK(err != -1 || errno != EBUSY, + "reuseport array update same sk with same index", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + err = bpf_map_update_elem(map_fd, &index0, &fd64, BPF_ANY); + CHECK(err != -1 || errno != EBUSY, + "reuseport array update same sk with different index", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + /* Test delete elem */ + err = bpf_map_delete_elem(map_fd, &index3); + CHECK(err == -1, "reuseport array delete sk", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + /* Add it back with BPF_NOEXIST */ + err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST); + CHECK(err == -1, + "reuseport array re-add with BPF_NOEXIST after del", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + /* Test cookie */ + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err == -1 || sk_cookie != map_cookie, + "reuseport array lookup re-added sk", + "sock_type:%d err:%d errno:%d sk_cookie:0x%llx map_cookie:0x%llxn", + type, err, errno, sk_cookie, map_cookie); + + /* Test elem removed by close() */ + for (f = 0; f < ARRAY_SIZE(grpa_fds64); f++) + close(grpa_fds64[f]); + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err != -1 || errno != ENOENT, + "reuseport array lookup after close()", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + } + + /* Test SOCK_RAW */ + fd64 = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP); + CHECK(fd64 == -1, "socket(SOCK_RAW)", "err:%d errno:%d\n", + err, errno); + err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST); + CHECK(err != -1 || errno != ENOTSUPP, "reuseport array update SOCK_RAW", + "err:%d errno:%d\n", err, errno); + close(fd64); + + /* Close the 64 bit value map */ + close(map_fd); + + /* Test 32 bit fd */ + map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + sizeof(__u32), sizeof(__u32), array_size, 0); + CHECK(map_fd == -1, "reuseport array create", + "map_fd:%d, errno:%d\n", map_fd, errno); + prepare_reuseport_grp(SOCK_STREAM, map_fd, &fd64, &sk_cookie, 1); + fd = fd64; + err = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST); + CHECK(err == -1, "reuseport array update 32 bit fd", + "err:%d errno:%d\n", err, errno); + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err != -1 || errno != ENOSPC, + "reuseport array lookup 32 bit fd", + "err:%d errno:%d\n", err, errno); + close(fd); + close(map_fd); +} + static void run_all_tests(void) { test_hashmap(0, NULL); @@ -1170,6 +1428,8 @@ static void run_all_tests(void) test_map_rdonly(); 
test_map_wronly(); + + test_reuseport_array(); } int main(void) diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c new file mode 100644 index 000000000000..75646d9b34aa --- /dev/null +++ b/tools/testing/selftests/bpf/test_select_reuseport.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Facebook */ + +#include <stdlib.h> +#include <unistd.h> +#include <stdbool.h> +#include <string.h> +#include <errno.h> +#include <assert.h> +#include <fcntl.h> +#include <linux/bpf.h> +#include <linux/err.h> +#include <linux/types.h> +#include <linux/if_ether.h> +#include <sys/types.h> +#include <sys/epoll.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include "bpf_rlimit.h" +#include "bpf_util.h" +#include "test_select_reuseport_common.h" + +#define MIN_TCPHDR_LEN 20 +#define UDPHDR_LEN 8 + +#define TCP_SYNCOOKIE_SYSCTL "/proc/sys/net/ipv4/tcp_syncookies" +#define TCP_FO_SYSCTL "/proc/sys/net/ipv4/tcp_fastopen" +#define REUSEPORT_ARRAY_SIZE 32 + +static int result_map, tmp_index_ovr_map, linum_map, data_check_map; +static enum result expected_results[NR_RESULTS]; +static int sk_fds[REUSEPORT_ARRAY_SIZE]; +static int reuseport_array, outer_map; +static int select_by_skb_data_prog; +static int saved_tcp_syncookie; +static struct bpf_object *obj; +static int saved_tcp_fo; +static __u32 index_zero; +static int epfd; + +static union sa46 { + struct sockaddr_in6 v6; + struct sockaddr_in v4; + sa_family_t family; +} srv_sa; + +#define CHECK(condition, tag, format...) ({ \ + int __ret = !!(condition); \ + if (__ret) { \ + printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \ + printf(format); \ + exit(-1); \ + } \ +}) + +static void create_maps(void) +{ + struct bpf_create_map_attr attr = {}; + + /* Creating reuseport_array */ + attr.name = "reuseport_array"; + attr.map_type = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; + attr.key_size = sizeof(__u32); + attr.value_size = sizeof(__u32); + attr.max_entries = REUSEPORT_ARRAY_SIZE; + + reuseport_array = bpf_create_map_xattr(&attr); + CHECK(reuseport_array == -1, "creating reuseport_array", + "reuseport_array:%d errno:%d\n", reuseport_array, errno); + + /* Creating outer_map */ + attr.name = "outer_map"; + attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS; + attr.key_size = sizeof(__u32); + attr.value_size = sizeof(__u32); + attr.max_entries = 1; + attr.inner_map_fd = reuseport_array; + outer_map = bpf_create_map_xattr(&attr); + CHECK(outer_map == -1, "creating outer_map", + "outer_map:%d errno:%d\n", outer_map, errno); +} + +static void prepare_bpf_obj(void) +{ + struct bpf_program *prog; + struct bpf_map *map; + int err; + struct bpf_object_open_attr attr = { + .file = "test_select_reuseport_kern.o", + .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, + }; + + obj = bpf_object__open_xattr(&attr); + CHECK(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o", + "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj)); + + prog = bpf_program__next(NULL, obj); + CHECK(!prog, "get first bpf_program", "!prog\n"); + bpf_program__set_type(prog, attr.prog_type); + + map = bpf_object__find_map_by_name(obj, "outer_map"); + CHECK(!map, "find outer_map", "!map\n"); + err = bpf_map__reuse_fd(map, outer_map); + CHECK(err, "reuse outer_map", "err:%d\n", err); + + err = bpf_object__load(obj); + CHECK(err, "load bpf_object", "err:%d\n", err); + + select_by_skb_data_prog = bpf_program__fd(prog); + CHECK(select_by_skb_data_prog == -1, "get prog fd", + 
"select_by_skb_data_prog:%d\n", select_by_skb_data_prog); + + map = bpf_object__find_map_by_name(obj, "result_map"); + CHECK(!map, "find result_map", "!map\n"); + result_map = bpf_map__fd(map); + CHECK(result_map == -1, "get result_map fd", + "result_map:%d\n", result_map); + + map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map"); + CHECK(!map, "find tmp_index_ovr_map", "!map\n"); + tmp_index_ovr_map = bpf_map__fd(map); + CHECK(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd", + "tmp_index_ovr_map:%d\n", tmp_index_ovr_map); + + map = bpf_object__find_map_by_name(obj, "linum_map"); + CHECK(!map, "find linum_map", "!map\n"); + linum_map = bpf_map__fd(map); + CHECK(linum_map == -1, "get linum_map fd", + "linum_map:%d\n", linum_map); + + map = bpf_object__find_map_by_name(obj, "data_check_map"); + CHECK(!map, "find data_check_map", "!map\n"); + data_check_map = bpf_map__fd(map); + CHECK(data_check_map == -1, "get data_check_map fd", + "data_check_map:%d\n", data_check_map); +} + +static void sa46_init_loopback(union sa46 *sa, sa_family_t family) +{ + memset(sa, 0, sizeof(*sa)); + sa->family = family; + if (sa->family == AF_INET6) + sa->v6.sin6_addr = in6addr_loopback; + else + sa->v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); +} + +static void sa46_init_inany(union sa46 *sa, sa_family_t family) +{ + memset(sa, 0, sizeof(*sa)); + sa->family = family; + if (sa->family == AF_INET6) + sa->v6.sin6_addr = in6addr_any; + else + sa->v4.sin_addr.s_addr = INADDR_ANY; +} + +static int read_int_sysctl(const char *sysctl) +{ + char buf[16]; + int fd, ret; + + fd = open(sysctl, 0); + CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n", + sysctl, fd, errno); + + ret = read(fd, buf, sizeof(buf)); + CHECK(ret <= 0, "read(sysctl)", "sysctl:%s ret:%d errno:%d\n", + sysctl, ret, errno); + close(fd); + + return atoi(buf); +} + +static void write_int_sysctl(const char *sysctl, int v) +{ + int fd, ret, size; + char buf[16]; + + fd = open(sysctl, O_RDWR); + CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n", + sysctl, fd, errno); + + size = snprintf(buf, sizeof(buf), "%d", v); + ret = write(fd, buf, size); + CHECK(ret != size, "write(sysctl)", + "sysctl:%s ret:%d size:%d errno:%d\n", sysctl, ret, size, errno); + close(fd); +} + +static void restore_sysctls(void) +{ + write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo); + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie); +} + +static void enable_fastopen(void) +{ + int fo; + + fo = read_int_sysctl(TCP_FO_SYSCTL); + write_int_sysctl(TCP_FO_SYSCTL, fo | 7); +} + +static void enable_syncookie(void) +{ + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2); +} + +static void disable_syncookie(void) +{ + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0); +} + +static __u32 get_linum(void) +{ + __u32 linum; + int err; + + err = bpf_map_lookup_elem(linum_map, &index_zero, &linum); + CHECK(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n", + err, errno); + + return linum; +} + +static void check_data(int type, sa_family_t family, const struct cmd *cmd, + int cli_fd) +{ + struct data_check expected = {}, result; + union sa46 cli_sa; + socklen_t addrlen; + int err; + + addrlen = sizeof(cli_sa); + err = getsockname(cli_fd, (struct sockaddr *)&cli_sa, + &addrlen); + CHECK(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n", + err, errno); + + err = bpf_map_lookup_elem(data_check_map, &index_zero, &result); + CHECK(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n", + err, errno); + + if (type == SOCK_STREAM) { + expected.len = MIN_TCPHDR_LEN; + 
expected.ip_protocol = IPPROTO_TCP; + } else { + expected.len = UDPHDR_LEN; + expected.ip_protocol = IPPROTO_UDP; + } + + if (family == AF_INET6) { + expected.eth_protocol = htons(ETH_P_IPV6); + expected.bind_inany = !srv_sa.v6.sin6_addr.s6_addr32[3] && + !srv_sa.v6.sin6_addr.s6_addr32[2] && + !srv_sa.v6.sin6_addr.s6_addr32[1] && + !srv_sa.v6.sin6_addr.s6_addr32[0]; + + memcpy(&expected.skb_addrs[0], cli_sa.v6.sin6_addr.s6_addr32, + sizeof(cli_sa.v6.sin6_addr)); + memcpy(&expected.skb_addrs[4], &in6addr_loopback, + sizeof(in6addr_loopback)); + expected.skb_ports[0] = cli_sa.v6.sin6_port; + expected.skb_ports[1] = srv_sa.v6.sin6_port; + } else { + expected.eth_protocol = htons(ETH_P_IP); + expected.bind_inany = !srv_sa.v4.sin_addr.s_addr; + + expected.skb_addrs[0] = cli_sa.v4.sin_addr.s_addr; + expected.skb_addrs[1] = htonl(INADDR_LOOPBACK); + expected.skb_ports[0] = cli_sa.v4.sin_port; + expected.skb_ports[1] = srv_sa.v4.sin_port; + } + + if (memcmp(&result, &expected, offsetof(struct data_check, + equal_check_end))) { + printf("unexpected data_check\n"); + printf(" result: (0x%x, %u, %u)\n", + result.eth_protocol, result.ip_protocol, + result.bind_inany); + printf("expected: (0x%x, %u, %u)\n", + expected.eth_protocol, expected.ip_protocol, + expected.bind_inany); + CHECK(1, "data_check result != expected", + "bpf_prog_linum:%u\n", get_linum()); + } + + CHECK(!result.hash, "data_check result.hash empty", + "result.hash:%u", result.hash); + + expected.len += cmd ? sizeof(*cmd) : 0; + if (type == SOCK_STREAM) + CHECK(expected.len > result.len, "expected.len > result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%u\n", + expected.len, result.len, get_linum()); + else + CHECK(expected.len != result.len, "expected.len != result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%u\n", + expected.len, result.len, get_linum()); +} + +static void check_results(void) +{ + __u32 results[NR_RESULTS]; + __u32 i, broken = 0; + int err; + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_lookup_elem(result_map, &i, &results[i]); + CHECK(err == -1, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); + } + + for (i = 0; i < NR_RESULTS; i++) { + if (results[i] != expected_results[i]) { + broken = i; + break; + } + } + + if (i == NR_RESULTS) + return; + + printf("unexpected result\n"); + printf(" result: ["); + printf("%u", results[0]); + for (i = 1; i < NR_RESULTS; i++) + printf(", %u", results[i]); + printf("]\n"); + + printf("expected: ["); + printf("%u", expected_results[0]); + for (i = 1; i < NR_RESULTS; i++) + printf(", %u", expected_results[i]); + printf("]\n"); + + CHECK(expected_results[broken] != results[broken], + "unexpected result", + "expected_results[%u] != results[%u] bpf_prog_linum:%u\n", + broken, broken, get_linum()); +} + +static int send_data(int type, sa_family_t family, void *data, size_t len, + enum result expected) +{ + union sa46 cli_sa; + int fd, err; + + fd = socket(family, type, 0); + CHECK(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno); + + sa46_init_loopback(&cli_sa, family); + err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa)); + CHECK(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno); + + err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa, + sizeof(srv_sa)); + CHECK(err != len && expected >= PASS, + "sendto()", "family:%u err:%d errno:%d expected:%d\n", + family, err, errno, expected); + + return fd; +} + +static void do_test(int type, sa_family_t family, struct cmd *cmd, + enum result expected) +{ + 
int nev, srv_fd, cli_fd; + struct epoll_event ev; + struct cmd rcv_cmd; + ssize_t nread; + + cli_fd = send_data(type, family, cmd, cmd ? sizeof(*cmd) : 0, + expected); + nev = epoll_wait(epfd, &ev, 1, expected >= PASS ? 5 : 0); + CHECK((nev <= 0 && expected >= PASS) || + (nev > 0 && expected < PASS), + "nev <> expected", + "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n", + nev, expected, type, family, + cmd ? cmd->reuseport_index : -1, + cmd ? cmd->pass_on_failure : -1); + check_results(); + check_data(type, family, cmd, cli_fd); + + if (expected < PASS) + return; + + CHECK(expected != PASS_ERR_SK_SELECT_REUSEPORT && + cmd->reuseport_index != ev.data.u32, + "check cmd->reuseport_index", + "cmd:(%u, %u) ev.data.u32:%u\n", + cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32); + + srv_fd = sk_fds[ev.data.u32]; + if (type == SOCK_STREAM) { + int new_fd = accept(srv_fd, NULL, 0); + + CHECK(new_fd == -1, "accept(srv_fd)", + "ev.data.u32:%u new_fd:%d errno:%d\n", + ev.data.u32, new_fd, errno); + + nread = recv(new_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); + CHECK(nread != sizeof(rcv_cmd), + "recv(new_fd)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); + + close(new_fd); + } else { + nread = recv(srv_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); + CHECK(nread != sizeof(rcv_cmd), + "recv(sk_fds)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); + } + + close(cli_fd); +} + +static void test_err_inner_map(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = 0, + .pass_on_failure = 0, + }; + + printf("%s: ", __func__); + expected_results[DROP_ERR_INNER_MAP]++; + do_test(type, family, &cmd, DROP_ERR_INNER_MAP); + printf("OK\n"); +} + +static void test_err_skb_data(int type, sa_family_t family) +{ + printf("%s: ", __func__); + expected_results[DROP_ERR_SKB_DATA]++; + do_test(type, family, NULL, DROP_ERR_SKB_DATA); + printf("OK\n"); +} + +static void test_err_sk_select_port(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = REUSEPORT_ARRAY_SIZE, + .pass_on_failure = 0, + }; + + printf("%s: ", __func__); + expected_results[DROP_ERR_SK_SELECT_REUSEPORT]++; + do_test(type, family, &cmd, DROP_ERR_SK_SELECT_REUSEPORT); + printf("OK\n"); +} + +static void test_pass(int type, sa_family_t family) +{ + struct cmd cmd; + int i; + + printf("%s: ", __func__); + cmd.pass_on_failure = 0; + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { + expected_results[PASS]++; + cmd.reuseport_index = i; + do_test(type, family, &cmd, PASS); + } + printf("OK\n"); +} + +static void test_syncookie(int type, sa_family_t family) +{ + int err, tmp_index = 1; + struct cmd cmd = { + .reuseport_index = 0, + .pass_on_failure = 0, + }; + + if (type != SOCK_STREAM) + return; + + printf("%s: ", __func__); + /* + * +1 for TCP-SYN and + * +1 for the TCP-ACK (ack the syncookie) + */ + expected_results[PASS] += 2; + enable_syncookie(); + /* + * Simulate TCP-SYN and TCP-ACK are handled by two different sk: + * TCP-SYN: select sk_fds[tmp_index = 1] tmp_index is from the + * tmp_index_ovr_map + * TCP-ACK: select sk_fds[reuseport_index = 0] reuseport_index + * is from the cmd.reuseport_index + */ + err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, + &tmp_index, BPF_ANY); + CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)", + "err:%d errno:%d\n", err, errno); + do_test(type, family, &cmd, PASS); + err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero, + 
&tmp_index); + CHECK(err == -1 || tmp_index != -1, + "lookup_elem(tmp_index_ovr_map)", + "err:%d errno:%d tmp_index:%d\n", + err, errno, tmp_index); + disable_syncookie(); + printf("OK\n"); +} + +static void test_pass_on_err(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = REUSEPORT_ARRAY_SIZE, + .pass_on_failure = 1, + }; + + printf("%s: ", __func__); + expected_results[PASS_ERR_SK_SELECT_REUSEPORT] += 1; + do_test(type, family, &cmd, PASS_ERR_SK_SELECT_REUSEPORT); + printf("OK\n"); +} + +static void prepare_sk_fds(int type, sa_family_t family, bool inany) +{ + const int first = REUSEPORT_ARRAY_SIZE - 1; + int i, err, optval = 1; + struct epoll_event ev; + socklen_t addrlen; + + if (inany) + sa46_init_inany(&srv_sa, family); + else + sa46_init_loopback(&srv_sa, family); + addrlen = sizeof(srv_sa); + + /* + * The sk_fds[] is filled from the back such that the order + * is exactly opposite to the (struct sock_reuseport *)reuse->socks[]. + */ + for (i = first; i >= 0; i--) { + sk_fds[i] = socket(family, type, 0); + CHECK(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n", + i, sk_fds[i], errno); + err = setsockopt(sk_fds[i], SOL_SOCKET, SO_REUSEPORT, + &optval, sizeof(optval)); + CHECK(err == -1, "setsockopt(SO_REUSEPORT)", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + + if (i == first) { + err = setsockopt(sk_fds[i], SOL_SOCKET, + SO_ATTACH_REUSEPORT_EBPF, + &select_by_skb_data_prog, + sizeof(select_by_skb_data_prog)); + CHECK(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)", + "err:%d errno:%d\n", err, errno); + } + + err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen); + CHECK(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + + if (type == SOCK_STREAM) { + err = listen(sk_fds[i], 10); + CHECK(err == -1, "listen()", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + } + + err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i], + BPF_NOEXIST); + CHECK(err == -1, "update_elem(reuseport_array)", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + + if (i == first) { + socklen_t addrlen = sizeof(srv_sa); + + err = getsockname(sk_fds[i], (struct sockaddr *)&srv_sa, + &addrlen); + CHECK(err == -1, "getsockname()", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + } + } + + epfd = epoll_create(1); + CHECK(epfd == -1, "epoll_create(1)", + "epfd:%d errno:%d\n", epfd, errno); + + ev.events = EPOLLIN; + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { + ev.data.u32 = i; + err = epoll_ctl(epfd, EPOLL_CTL_ADD, sk_fds[i], &ev); + CHECK(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i); + } +} + +static void setup_per_test(int type, unsigned short family, bool inany) +{ + int ovr = -1, err; + + prepare_sk_fds(type, family, inany); + err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr, + BPF_ANY); + CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)", + "err:%d errno:%d\n", err, errno); +} + +static void cleanup_per_test(void) +{ + int i, err; + + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) + close(sk_fds[i]); + close(epfd); + + err = bpf_map_delete_elem(outer_map, &index_zero); + CHECK(err == -1, "delete_elem(outer_map)", + "err:%d errno:%d\n", err, errno); +} + +static void cleanup(void) +{ + close(outer_map); + close(reuseport_array); + bpf_object__close(obj); +} + +static void test_all(void) +{ + /* Extra SOCK_STREAM to test bind_inany==true */ + const int types[] = { SOCK_STREAM, SOCK_DGRAM, SOCK_STREAM }; + const char * const type_strings[] = { "TCP", "UDP", "TCP" }; + const char * const family_strings[] = { 
"IPv6", "IPv4" }; + const unsigned short families[] = { AF_INET6, AF_INET }; + const bool bind_inany[] = { false, false, true }; + int t, f, err; + + for (f = 0; f < ARRAY_SIZE(families); f++) { + unsigned short family = families[f]; + + for (t = 0; t < ARRAY_SIZE(types); t++) { + bool inany = bind_inany[t]; + int type = types[t]; + + printf("######## %s/%s %s ########\n", + family_strings[f], type_strings[t], + inany ? " INANY " : "LOOPBACK"); + + setup_per_test(type, family, inany); + + test_err_inner_map(type, family); + + /* Install reuseport_array to the outer_map */ + err = bpf_map_update_elem(outer_map, &index_zero, + &reuseport_array, BPF_ANY); + CHECK(err == -1, "update_elem(outer_map)", + "err:%d errno:%d\n", err, errno); + + test_err_skb_data(type, family); + test_err_sk_select_port(type, family); + test_pass(type, family); + test_syncookie(type, family); + test_pass_on_err(type, family); + + cleanup_per_test(); + printf("\n"); + } + } +} + +int main(int argc, const char **argv) +{ + create_maps(); + prepare_bpf_obj(); + saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL); + saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL); + enable_fastopen(); + disable_syncookie(); + atexit(restore_sysctls); + + test_all(); + + cleanup(); + return 0; +} diff --git a/tools/testing/selftests/bpf/test_select_reuseport_common.h b/tools/testing/selftests/bpf/test_select_reuseport_common.h new file mode 100644 index 000000000000..08eb2a9f145f --- /dev/null +++ b/tools/testing/selftests/bpf/test_select_reuseport_common.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Facebook */ + +#ifndef __TEST_SELECT_REUSEPORT_COMMON_H +#define __TEST_SELECT_REUSEPORT_COMMON_H + +#include <linux/types.h> + +enum result { + DROP_ERR_INNER_MAP, + DROP_ERR_SKB_DATA, + DROP_ERR_SK_SELECT_REUSEPORT, + DROP_MISC, + PASS, + PASS_ERR_SK_SELECT_REUSEPORT, + NR_RESULTS, +}; + +struct cmd { + __u32 reuseport_index; + __u32 pass_on_failure; +}; + +struct data_check { + __u32 ip_protocol; + __u32 skb_addrs[8]; + __u16 skb_ports[2]; + __u16 eth_protocol; + __u8 bind_inany; + __u8 equal_check_end[0]; + + __u32 len; + __u32 hash; +}; + +#endif diff --git a/tools/testing/selftests/bpf/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/test_select_reuseport_kern.c new file mode 100644 index 000000000000..5b54ec637ada --- /dev/null +++ b/tools/testing/selftests/bpf/test_select_reuseport_kern.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Facebook */ + +#include <stdlib.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/bpf.h> +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "bpf_endian.h" +#include "bpf_helpers.h" +#include "test_select_reuseport_common.h" + +int _version SEC("version") = 1; + +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif + +struct bpf_map_def SEC("maps") outer_map = { + .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") result_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = NR_RESULTS, +}; + +struct bpf_map_def SEC("maps") tmp_index_ovr_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(int), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") linum_map = { + .type = 
BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") data_check_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct data_check), + .max_entries = 1, +}; + +#define GOTO_DONE(_result) ({ \ + result = (_result); \ + linum = __LINE__; \ + goto done; \ +}) + +SEC("select_by_skb_data") +int _select_by_skb_data(struct sk_reuseport_md *reuse_md) +{ + __u32 linum, index = 0, flags = 0, index_zero = 0; + __u32 *result_cnt, *linum_value; + struct data_check data_check = {}; + struct cmd *cmd, cmd_copy; + void *data, *data_end; + void *reuseport_array; + enum result result; + int *index_ovr; + int err; + + data = reuse_md->data; + data_end = reuse_md->data_end; + data_check.len = reuse_md->len; + data_check.eth_protocol = reuse_md->eth_protocol; + data_check.ip_protocol = reuse_md->ip_protocol; + data_check.hash = reuse_md->hash; + data_check.bind_inany = reuse_md->bind_inany; + if (data_check.eth_protocol == bpf_htons(ETH_P_IP)) { + if (bpf_skb_load_bytes_relative(reuse_md, + offsetof(struct iphdr, saddr), + data_check.skb_addrs, 8, + BPF_HDR_START_NET)) + GOTO_DONE(DROP_MISC); + } else { + if (bpf_skb_load_bytes_relative(reuse_md, + offsetof(struct ipv6hdr, saddr), + data_check.skb_addrs, 32, + BPF_HDR_START_NET)) + GOTO_DONE(DROP_MISC); + } + + /* + * The ip_protocol could be a compile time decision + * if the bpf_prog.o is dedicated to either TCP or + * UDP. + * + * Otherwise, reuse_md->ip_protocol or + * the protocol field in the iphdr can be used. + */ + if (data_check.ip_protocol == IPPROTO_TCP) { + struct tcphdr *th = data; + + if (th + 1 > data_end) + GOTO_DONE(DROP_MISC); + + data_check.skb_ports[0] = th->source; + data_check.skb_ports[1] = th->dest; + + if ((th->doff << 2) + sizeof(*cmd) > data_check.len) + GOTO_DONE(DROP_ERR_SKB_DATA); + if (bpf_skb_load_bytes(reuse_md, th->doff << 2, &cmd_copy, + sizeof(cmd_copy))) + GOTO_DONE(DROP_MISC); + cmd = &cmd_copy; + } else if (data_check.ip_protocol == IPPROTO_UDP) { + struct udphdr *uh = data; + + if (uh + 1 > data_end) + GOTO_DONE(DROP_MISC); + + data_check.skb_ports[0] = uh->source; + data_check.skb_ports[1] = uh->dest; + + if (sizeof(struct udphdr) + sizeof(*cmd) > data_check.len) + GOTO_DONE(DROP_ERR_SKB_DATA); + if (data + sizeof(struct udphdr) + sizeof(*cmd) > data_end) { + if (bpf_skb_load_bytes(reuse_md, sizeof(struct udphdr), + &cmd_copy, sizeof(cmd_copy))) + GOTO_DONE(DROP_MISC); + cmd = &cmd_copy; + } else { + cmd = data + sizeof(struct udphdr); + } + } else { + GOTO_DONE(DROP_MISC); + } + + reuseport_array = bpf_map_lookup_elem(&outer_map, &index_zero); + if (!reuseport_array) + GOTO_DONE(DROP_ERR_INNER_MAP); + + index = cmd->reuseport_index; + index_ovr = bpf_map_lookup_elem(&tmp_index_ovr_map, &index_zero); + if (!index_ovr) + GOTO_DONE(DROP_MISC); + + if (*index_ovr != -1) { + index = *index_ovr; + *index_ovr = -1; + } + err = bpf_sk_select_reuseport(reuse_md, reuseport_array, &index, + flags); + if (!err) + GOTO_DONE(PASS); + + if (cmd->pass_on_failure) + GOTO_DONE(PASS_ERR_SK_SELECT_REUSEPORT); + else + GOTO_DONE(DROP_ERR_SK_SELECT_REUSEPORT); + +done: + result_cnt = bpf_map_lookup_elem(&result_map, &result); + if (!result_cnt) + return SK_DROP; + + bpf_map_update_elem(&linum_map, &index_zero, &linum, BPF_ANY); + bpf_map_update_elem(&data_check_map, &index_zero, &data_check, BPF_ANY); + + (*result_cnt)++; + return result < PASS ? 
SK_DROP : SK_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh new file mode 100755 index 000000000000..42544a969abc --- /dev/null +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh @@ -0,0 +1,62 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2018 Facebook + +set -eu + +wait_for_ip() +{ + local _i + echo -n "Wait for testing link-local IP to become available " + for _i in $(seq ${MAX_PING_TRIES}); do + echo -n "." + if ping -6 -q -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then + echo " OK" + return + fi + sleep 1 + done + echo 1>&2 "ERROR: Timeout waiting for test IP to become available." + exit 1 +} + +setup() +{ + # Create testing interfaces not to interfere with current environment. + ip link add dev ${TEST_IF} type veth peer name ${TEST_IF_PEER} + ip link set ${TEST_IF} up + ip link set ${TEST_IF_PEER} up + + wait_for_ip + + tc qdisc add dev ${TEST_IF} clsact + tc filter add dev ${TEST_IF} egress bpf obj ${BPF_PROG_OBJ} \ + sec ${BPF_PROG_SECTION} da + + BPF_PROG_ID=$(tc filter show dev ${TEST_IF} egress | \ + awk '/ id / {sub(/.* id /, "", $0); print($1)}') +} + +cleanup() +{ + ip link del ${TEST_IF} 2>/dev/null || : + ip link del ${TEST_IF_PEER} 2>/dev/null || : +} + +main() +{ + trap cleanup EXIT 2 3 6 15 + setup + ${PROG} ${TEST_IF} ${BPF_PROG_ID} +} + +DIR=$(dirname $0) +TEST_IF="test_cgid_1" +TEST_IF_PEER="test_cgid_2" +MAX_PING_TRIES=5 +BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.o" +BPF_PROG_SECTION="cgroup_id_logger" +BPF_PROG_ID=0 +PROG="${DIR}/test_skb_cgroup_id_user" + +main diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c new file mode 100644 index 000000000000..68cf9829f5a7 --- /dev/null +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <linux/bpf.h> +#include <linux/pkt_cls.h> + +#include <string.h> + +#include "bpf_helpers.h" + +#define NUM_CGROUP_LEVELS 4 + +struct bpf_map_def SEC("maps") cgroup_ids = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u64), + .max_entries = NUM_CGROUP_LEVELS, +}; + +static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level) +{ + __u64 id; + + /* [1] &level passed to external function that may change it, it's + * incompatible with loop unroll. + */ + id = bpf_skb_ancestor_cgroup_id(skb, level); + bpf_map_update_elem(&cgroup_ids, &level, &id, 0); +} + +SEC("cgroup_id_logger") +int log_cgroup_id(struct __sk_buff *skb) +{ + /* Loop unroll can't be used here due to [1]. Unrolling manually. + * Number of calls should be in sync with NUM_CGROUP_LEVELS. 
+ */ + log_nth_level(skb, 0); + log_nth_level(skb, 1); + log_nth_level(skb, 2); + log_nth_level(skb, 3); + + return TC_ACT_OK; +} + +int _version SEC("version") = 1; + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c new file mode 100644 index 000000000000..c121cc59f314 --- /dev/null +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include <arpa/inet.h> +#include <net/if.h> +#include <netinet/in.h> +#include <sys/socket.h> +#include <sys/types.h> + + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include "bpf_rlimit.h" +#include "cgroup_helpers.h" + +#define CGROUP_PATH "/skb_cgroup_test" +#define NUM_CGROUP_LEVELS 4 + +/* RFC 4291, Section 2.7.1 */ +#define LINKLOCAL_MULTICAST "ff02::1" + +static int mk_dst_addr(const char *ip, const char *iface, + struct sockaddr_in6 *dst) +{ + memset(dst, 0, sizeof(*dst)); + + dst->sin6_family = AF_INET6; + dst->sin6_port = htons(1025); + + if (inet_pton(AF_INET6, ip, &dst->sin6_addr) != 1) { + log_err("Invalid IPv6: %s", ip); + return -1; + } + + dst->sin6_scope_id = if_nametoindex(iface); + if (!dst->sin6_scope_id) { + log_err("Failed to get index of iface: %s", iface); + return -1; + } + + return 0; +} + +static int send_packet(const char *iface) +{ + struct sockaddr_in6 dst; + char msg[] = "msg"; + int err = 0; + int fd = -1; + + if (mk_dst_addr(LINKLOCAL_MULTICAST, iface, &dst)) + goto err; + + fd = socket(AF_INET6, SOCK_DGRAM, 0); + if (fd == -1) { + log_err("Failed to create UDP socket"); + goto err; + } + + if (sendto(fd, &msg, sizeof(msg), 0, (const struct sockaddr *)&dst, + sizeof(dst)) == -1) { + log_err("Failed to send datagram"); + goto err; + } + + goto out; +err: + err = -1; +out: + if (fd >= 0) + close(fd); + return err; +} + +int get_map_fd_by_prog_id(int prog_id) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + __u32 map_ids[1]; + int prog_fd = -1; + int map_fd = -1; + + prog_fd = bpf_prog_get_fd_by_id(prog_id); + if (prog_fd < 0) { + log_err("Failed to get fd by prog id %d", prog_id); + goto err; + } + + info.nr_map_ids = 1; + info.map_ids = (__u64) (unsigned long) map_ids; + + if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) { + log_err("Failed to get info by prog fd %d", prog_fd); + goto err; + } + + if (!info.nr_map_ids) { + log_err("No maps found for prog fd %d", prog_fd); + goto err; + } + + map_fd = bpf_map_get_fd_by_id(map_ids[0]); + if (map_fd < 0) + log_err("Failed to get fd by map id %d", map_ids[0]); +err: + if (prog_fd >= 0) + close(prog_fd); + return map_fd; +} + +int check_ancestor_cgroup_ids(int prog_id) +{ + __u64 actual_ids[NUM_CGROUP_LEVELS], expected_ids[NUM_CGROUP_LEVELS]; + __u32 level; + int err = 0; + int map_fd; + + expected_ids[0] = 0x100000001; /* root cgroup */ + expected_ids[1] = get_cgroup_id(""); + expected_ids[2] = get_cgroup_id(CGROUP_PATH); + expected_ids[3] = 0; /* non-existent cgroup */ + + map_fd = get_map_fd_by_prog_id(prog_id); + if (map_fd < 0) + goto err; + + for (level = 0; level < NUM_CGROUP_LEVELS; ++level) { + if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) { + log_err("Failed to lookup key %d", level); + goto err; + } + if (actual_ids[level] != expected_ids[level]) { + log_err("%llx (actual) != %llx (expected), level: %u\n", + actual_ids[level], expected_ids[level], 
level); + goto err; + } + } + + goto out; +err: + err = -1; +out: + if (map_fd >= 0) + close(map_fd); + return err; +} + +int main(int argc, char **argv) +{ + int cgfd = -1; + int err = 0; + + if (argc < 3) { + fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]); + exit(EXIT_FAILURE); + } + + if (setup_cgroup_environment()) + goto err; + + cgfd = create_and_get_cgroup(CGROUP_PATH); + if (!cgfd) + goto err; + + if (join_cgroup(CGROUP_PATH)) + goto err; + + if (send_packet(argv[1])) + goto err; + + if (check_ancestor_cgroup_ids(atoi(argv[2]))) + goto err; + + goto out; +err: + err = -1; +out: + close(cgfd); + cleanup_cgroup_environment(); + printf("[%s]\n", err ? "FAIL" : "PASS"); + return err; +} diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index f4d99fabc56d..b8ebe2f58074 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -14,10 +14,7 @@ #include "cgroup_helpers.h" #include "bpf_rlimit.h" - -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_util.h" #define CG_PATH "/foo" #define MAX_INSNS 512 diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 2e45c92d1111..aeeb76a54d63 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -20,15 +20,12 @@ #include "cgroup_helpers.h" #include "bpf_rlimit.h" +#include "bpf_util.h" #ifndef ENOTSUPP # define ENOTSUPP 524 #endif -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - #define CG_PATH "/foo" #define CONNECT4_PROG_PATH "./connect4_prog.o" #define CONNECT6_PROG_PATH "./connect6_prog.o" diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 452cf5c6c784..67c412d19c09 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -42,12 +42,9 @@ #endif #include "bpf_rlimit.h" #include "bpf_rand.h" +#include "bpf_util.h" #include "../../../include/linux/filter.h" -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - #define MAX_INSNS BPF_MAXINSNS #define MAX_FIXUPS 8 #define MAX_NR_MAPS 8 |
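
Finally, item 6's CPU redirect sample builds on the cpumap machinery already in the tree; a stripped-down XDP program in that style is sketched below. It hard-codes CPU 0 where the real sample computes a symmetric hash over the IP addresses and L4 protocol; the map and section names are illustrative, not taken from this series.

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch of XDP CPU redirect: steer frames to a remote CPU through
 * a BPF_MAP_TYPE_CPUMAP. The destination CPU is hard-coded here;
 * the sample in this series derives it from a symmetric hash.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") cpu_map = {
	.type = BPF_MAP_TYPE_CPUMAP,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),	/* per-CPU queue size, set from user space */
	.max_entries = 64,
};

SEC("xdp_cpu_redirect")
int xdp_prog(struct xdp_md *ctx)
{
	__u32 cpu = 0;	/* real code: symmetric hash of IPs + L4 protocol */

	/* Returns XDP_REDIRECT on success, XDP_ABORTED if no entry exists. */
	return bpf_redirect_map(&cpu_map, cpu, 0);
}

char _license[] SEC("license") = "GPL";
```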