author    Jarno Rajahalme <jrajahalme@nicira.com>    2015-08-25 13:55:03 -0700
committer Jarno Rajahalme <jrajahalme@nicira.com>    2015-08-26 15:37:22 -0700
commit    5fcff47b0b56b91e2a7096b2f015426a43d73ed9 (patch)
tree      e2e1f9fca6c9ce55e454419d000675604ba98203 /lib/flow.c
parent    fa2fdbf8d0c1da808230ad904e94199eafddd926 (diff)
download  openvswitch-5fcff47b0b56b91e2a7096b2f015426a43d73ed9.tar.gz
flow: Add struct flowmap.
Struct miniflow is now sometimes used just as a map. Define a new struct flowmap for that purpose. The flowmap is defined as an array of maps, and it is automatically sized according to the size of struct flow, so it will be easier to maintain in the future.

It would have been tempting to use the existing struct bitmap for this purpose. The main reason this is not feasible at the moment is that some flowmap algorithms are simpler when it can be assumed that no struct flow member requires more bits than fit in a single map unit. The tunnel member already requires more than 32 bits, so the map unit needs to be 64 bits wide.

Performance-critical algorithms enumerate the flowmap array units explicitly, as that is easier for the compiler to optimize than the normal iterator. Without this optimization a classifier lookup without wildcard masks would be about 25% slower. With this more general (and maintainable) algorithm the classifier lookups are about 5% slower once struct flow actually becomes big enough to require a second map unit. This negates the performance gained by the "Pre-compute stage masks" patch earlier in the series.

Requested-by: Ben Pfaff <blp@nicira.com>
Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Acked-by: Ben Pfaff <blp@nicira.com>
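The flowmap core that this patch introduces lives in lib/flow.h, so it does not appear in this diff. As a point of reference, below is a minimal, self-contained sketch of the idea built from the identifiers the diff does use (struct flowmap, bits[], MAP_T_BITS, FLOWMAP_UNITS, flowmap_set()). The value of FLOW_U64S and the helper bodies are stand-ins, not the real definitions:

/* flowmap_sketch.c -- a toy model of the flowmap added by this patch.
 * FLOW_U64S below is a stand-in; in OVS it is derived from
 * sizeof(struct flow), which is what makes the map self-sizing. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FLOW_U64S 84                    /* Stand-in: struct flow in 64-bit words. */

typedef uint64_t map_t;                 /* One map unit.  64 bits wide so that no
                                         * struct flow member (the tunnel already
                                         * needs more than 32 bits) spans more
                                         * than one unit. */
#define MAP_T_BITS (sizeof(map_t) * 8)
#define FLOWMAP_UNITS ((FLOW_U64S + MAP_T_BITS - 1) / MAP_T_BITS)

struct flowmap {
    map_t bits[FLOWMAP_UNITS];          /* Sized automatically by struct flow. */
};

/* Sets 'n_bits' consecutive bits starting at bit 'idx'.  Because no field
 * needs more bits than one unit holds (n_bits < MAP_T_BITS), a run crosses
 * at most one unit boundary. */
static void
flowmap_set(struct flowmap *fm, size_t idx, unsigned int n_bits)
{
    map_t n_bits_mask = ((map_t) 1 << n_bits) - 1;
    size_t unit = idx / MAP_T_BITS;

    idx %= MAP_T_BITS;
    fm->bits[unit] |= n_bits_mask << idx;
    if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
        /* The run spills over; set the remaining bits in the next unit. */
        fm->bits[unit + 1] |= n_bits_mask >> (MAP_T_BITS - idx);
    }
}

static bool
flowmap_is_set(const struct flowmap *fm, size_t idx)
{
    return (fm->bits[idx / MAP_T_BITS] >> (idx % MAP_T_BITS)) & 1;
}

int
main(void)
{
    struct flowmap fm = { { 0 } };

    flowmap_set(&fm, 62, 4);            /* A run crossing the unit boundary. */
    assert(flowmap_is_set(&fm, 62) && flowmap_is_set(&fm, 65));
    assert(!flowmap_is_set(&fm, 61) && !flowmap_is_set(&fm, 66));
    printf("FLOWMAP_UNITS = %zu\n", (size_t) FLOWMAP_UNITS);
    return 0;
}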
Diffstat (limited to 'lib/flow.c')
-rw-r--r--  lib/flow.c  357
1 file changed, 152 insertions(+), 205 deletions(-)
diff --git a/lib/flow.c b/lib/flow.c
index 268ae3853..1dbf82f75 100644
--- a/lib/flow.c
+++ b/lib/flow.c
@@ -112,7 +112,7 @@ data_try_pull(const void **datap, size_t *sizep, size_t size)
/* Context for pushing data to a miniflow. */
struct mf_ctx {
- struct miniflow maps;
+ struct flowmap map;
uint64_t *data;
uint64_t * const end;
};
@@ -132,114 +132,93 @@ BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
#define MINIFLOW_ASSERT(X)
#endif
-#define miniflow_set_map(MF, OFS) \
+/* True if 'IDX' and higher bits are not set. */
+#define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
{ \
- unsigned int ofs = (OFS); \
- \
- if (ofs < FLOW_TNL_U64S) { \
- MINIFLOW_ASSERT(!(MF.maps.tnl_map & (UINT64_MAX << ofs)) \
- && !MF.maps.pkt_map); \
- MF.maps.tnl_map |= UINT64_C(1) << ofs; \
- } else { \
- ofs -= FLOW_TNL_U64S; \
- MINIFLOW_ASSERT(!(MF.maps.pkt_map & (UINT64_MAX << ofs))); \
- MF.maps.pkt_map |= UINT64_C(1) << ofs; \
+ MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
+ (FLOWMAP_MAX << ((IDX) % MAP_T_BITS)))); \
+ for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
+ MINIFLOW_ASSERT(!(FM)->bits[i]); \
} \
}
-#define miniflow_assert_in_map(MF, OFS) \
-{ \
- unsigned int ofs = (OFS); \
- \
- if (ofs < FLOW_TNL_U64S) { \
- MINIFLOW_ASSERT(MF.maps.tnl_map & UINT64_C(1) << ofs \
- && !(MF.maps.tnl_map & UINT64_MAX << (ofs + 1)) \
- && !MF.maps.pkt_map); \
- } else { \
- ofs -= FLOW_TNL_U64S; \
- MINIFLOW_ASSERT(MF.maps.pkt_map & UINT64_C(1) << ofs \
- && !(MF.maps.pkt_map & UINT64_MAX << (ofs + 1))); \
- } \
+#define miniflow_set_map(MF, OFS) \
+ { \
+ ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS)); \
+ flowmap_set(&MF.map, (OFS), 1); \
}
-#define miniflow_push_uint64_(MF, OFS, VALUE) \
-{ \
- MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0); \
- *MF.data++ = VALUE; \
- miniflow_set_map(MF, OFS / 8); \
+#define miniflow_assert_in_map(MF, OFS) \
+ MINIFLOW_ASSERT(FLOWMAP_IS_SET(MF.map, (OFS))); \
+ ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
+
+#define miniflow_push_uint64_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0); \
+ *MF.data++ = VALUE; \
+ miniflow_set_map(MF, OFS / 8); \
}
-#define miniflow_push_be64_(MF, OFS, VALUE) \
+#define miniflow_push_be64_(MF, OFS, VALUE) \
miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
-#define miniflow_push_uint32_(MF, OFS, VALUE) \
- { \
- MINIFLOW_ASSERT(MF.data < MF.end); \
- \
- if ((OFS) % 8 == 0) { \
- miniflow_set_map(MF, OFS / 8); \
- *(uint32_t *)MF.data = VALUE; \
- } else if ((OFS) % 8 == 4) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint32_t *)MF.data + 1) = VALUE; \
- MF.data++; \
- } \
+#define miniflow_push_uint32_(MF, OFS, VALUE) \
+ { \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
+ *(uint32_t *)MF.data = VALUE; \
+ } else if ((OFS) % 8 == 4) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint32_t *)MF.data + 1) = VALUE; \
+ MF.data++; \
+ } \
}
#define miniflow_push_be32_(MF, OFS, VALUE) \
miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
-#define miniflow_push_uint16_(MF, OFS, VALUE) \
-{ \
- MINIFLOW_ASSERT(MF.data < MF.end); \
- \
- if ((OFS) % 8 == 0) { \
- miniflow_set_map(MF, OFS / 8); \
- *(uint16_t *)MF.data = VALUE; \
- } else if ((OFS) % 8 == 2) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint16_t *)MF.data + 1) = VALUE; \
- } else if ((OFS) % 8 == 4) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint16_t *)MF.data + 2) = VALUE; \
- } else if ((OFS) % 8 == 6) { \
- miniflow_assert_in_map(MF, OFS / 8); \
- *((uint16_t *)MF.data + 3) = VALUE; \
- MF.data++; \
- } \
-}
-
-#define miniflow_pad_to_64_(MF, OFS) \
-{ \
- MINIFLOW_ASSERT((OFS) % 8 != 0); \
- miniflow_assert_in_map(MF, OFS / 8); \
- \
- memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
- MF.data++; \
+#define miniflow_push_uint16_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
+ *(uint16_t *)MF.data = VALUE; \
+ } else if ((OFS) % 8 == 2) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint16_t *)MF.data + 1) = VALUE; \
+ } else if ((OFS) % 8 == 4) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint16_t *)MF.data + 2) = VALUE; \
+ } else if ((OFS) % 8 == 6) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint16_t *)MF.data + 3) = VALUE; \
+ MF.data++; \
+ } \
+}
+
+#define miniflow_pad_to_64_(MF, OFS) \
+{ \
+ MINIFLOW_ASSERT((OFS) % 8 != 0); \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ \
+ memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
+ MF.data++; \
}
#define miniflow_push_be16_(MF, OFS, VALUE) \
miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
-#define miniflow_set_maps(MF, OFS, N_WORDS) \
-{ \
- unsigned int ofs = (OFS); \
- unsigned int n_words = (N_WORDS); \
- uint64_t n_words_mask = UINT64_MAX >> (64 - n_words); \
- \
- MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end); \
- if (ofs < FLOW_TNL_U64S) { \
- MINIFLOW_ASSERT(!(MF.maps.tnl_map & UINT64_MAX << ofs) \
- && !MF.maps.pkt_map); \
- MF.maps.tnl_map |= n_words_mask << ofs; \
- if (n_words > FLOW_TNL_U64S - ofs) { \
- MF.maps.pkt_map |= n_words_mask >> (FLOW_TNL_U64S - ofs); \
- } \
- } else { \
- ofs -= FLOW_TNL_U64S; \
- MINIFLOW_ASSERT(!(MF.maps.pkt_map & (UINT64_MAX << ofs))); \
- MF.maps.pkt_map |= n_words_mask << ofs; \
- } \
+#define miniflow_set_maps(MF, OFS, N_WORDS) \
+{ \
+ size_t ofs = (OFS); \
+ size_t n_words = (N_WORDS); \
+ \
+ MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end); \
+ ASSERT_FLOWMAP_NOT_SET(&MF.map, ofs); \
+ flowmap_set(&MF.map, ofs, n_words); \
}
/* Data at 'valuep' may be unaligned. */
@@ -464,7 +443,8 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
const void *data = dp_packet_data(packet);
size_t size = dp_packet_size(packet);
uint64_t *values = miniflow_values(dst);
- struct mf_ctx mf = { { 0, 0 }, values, values + FLOW_U64S };
+ struct mf_ctx mf = { FLOWMAP_EMPTY_INITIALIZER, values,
+ values + FLOW_U64S };
const char *l2;
ovs_be16 dl_type;
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
@@ -781,7 +761,7 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
}
}
out:
- *dst = mf.maps;
+ dst->map = mf.map;
}
/* For every bit of a field that is wildcarded in 'wildcards', sets the
@@ -1276,63 +1256,80 @@ void flow_wildcards_init_for_packet(struct flow_wildcards *wc,
*
* This is a less precise version of flow_wildcards_init_for_packet() above. */
void
-flow_wc_map(const struct flow *flow, struct miniflow *map)
+flow_wc_map(const struct flow *flow, struct flowmap *map)
{
/* Update this function whenever struct flow changes. */
BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
- map->tnl_map = 0;
+ flowmap_init(map);
+
if (flow->tunnel.ip_dst) {
- map->tnl_map |= MINIFLOW_TNL_MAP__(tunnel,
- offsetof(struct flow_tnl, metadata));
+ FLOWMAP_SET__(map, tunnel, offsetof(struct flow_tnl, metadata));
if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
if (flow->tunnel.metadata.present.map) {
- map->tnl_map |= MINIFLOW_TNL_MAP(tunnel.metadata);
+ FLOWMAP_SET(map, tunnel.metadata);
}
} else {
- map->tnl_map |= MINIFLOW_TNL_MAP(tunnel.metadata.present.len);
- map->tnl_map |= MINIFLOW_TNL_MAP__(tunnel.metadata.opts.gnv,
- flow->tunnel.metadata.present.len);
+ FLOWMAP_SET(map, tunnel.metadata.present.len);
+ FLOWMAP_SET__(map, tunnel.metadata.opts.gnv,
+ flow->tunnel.metadata.present.len);
}
}
/* Metadata fields that can appear on packet input. */
- map->pkt_map = MINIFLOW_PKT_MAP(skb_priority) | MINIFLOW_PKT_MAP(pkt_mark)
- | MINIFLOW_PKT_MAP(recirc_id) | MINIFLOW_PKT_MAP(dp_hash)
- | MINIFLOW_PKT_MAP(in_port)
- | MINIFLOW_PKT_MAP(dl_dst) | MINIFLOW_PKT_MAP(dl_src)
- | MINIFLOW_PKT_MAP(dl_type) | MINIFLOW_PKT_MAP(vlan_tci);
+ FLOWMAP_SET(map, skb_priority);
+ FLOWMAP_SET(map, pkt_mark);
+ FLOWMAP_SET(map, recirc_id);
+ FLOWMAP_SET(map, dp_hash);
+ FLOWMAP_SET(map, in_port);
+ FLOWMAP_SET(map, dl_dst);
+ FLOWMAP_SET(map, dl_src);
+ FLOWMAP_SET(map, dl_type);
+ FLOWMAP_SET(map, vlan_tci);
/* Ethertype-dependent fields. */
if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
- map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
- | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
- | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
+ FLOWMAP_SET(map, nw_src);
+ FLOWMAP_SET(map, nw_dst);
+ FLOWMAP_SET(map, nw_proto);
+ FLOWMAP_SET(map, nw_frag);
+ FLOWMAP_SET(map, nw_tos);
+ FLOWMAP_SET(map, nw_ttl);
+
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(igmp_group_ip4);
+ FLOWMAP_SET(map, igmp_group_ip4);
} else {
- map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
- | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
+ FLOWMAP_SET(map, tcp_flags);
+ FLOWMAP_SET(map, tp_src);
+ FLOWMAP_SET(map, tp_dst);
}
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(ipv6_src) | MINIFLOW_PKT_MAP(ipv6_dst)
- | MINIFLOW_PKT_MAP(ipv6_label)
- | MINIFLOW_PKT_MAP(nw_proto) | MINIFLOW_PKT_MAP(nw_frag)
- | MINIFLOW_PKT_MAP(nw_tos) | MINIFLOW_PKT_MAP(nw_ttl);
+ FLOWMAP_SET(map, ipv6_src);
+ FLOWMAP_SET(map, ipv6_dst);
+ FLOWMAP_SET(map, ipv6_label);
+ FLOWMAP_SET(map, nw_proto);
+ FLOWMAP_SET(map, nw_frag);
+ FLOWMAP_SET(map, nw_tos);
+ FLOWMAP_SET(map, nw_ttl);
+
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(nd_target)
- | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
+ FLOWMAP_SET(map, nd_target);
+ FLOWMAP_SET(map, arp_sha);
+ FLOWMAP_SET(map, arp_tha);
} else {
- map->pkt_map |= MINIFLOW_PKT_MAP(tcp_flags)
- | MINIFLOW_PKT_MAP(tp_src) | MINIFLOW_PKT_MAP(tp_dst);
+ FLOWMAP_SET(map, tcp_flags);
+ FLOWMAP_SET(map, tp_src);
+ FLOWMAP_SET(map, tp_dst);
}
} else if (eth_type_mpls(flow->dl_type)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(mpls_lse);
+ FLOWMAP_SET(map, mpls_lse);
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
- map->pkt_map |= MINIFLOW_PKT_MAP(nw_src) | MINIFLOW_PKT_MAP(nw_dst)
- | MINIFLOW_PKT_MAP(nw_proto)
- | MINIFLOW_PKT_MAP(arp_sha) | MINIFLOW_PKT_MAP(arp_tha);
+ FLOWMAP_SET(map, nw_src);
+ FLOWMAP_SET(map, nw_dst);
+ FLOWMAP_SET(map, nw_proto);
+ FLOWMAP_SET(map, arp_sha);
+ FLOWMAP_SET(map, arp_tha);
}
}
@@ -1486,11 +1483,13 @@ miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis)
/* Separate loops for better optimization. */
if (dl_type == htons(ETH_TYPE_IPV6)) {
- struct miniflow maps = { 0, MINIFLOW_PKT_MAP(ipv6_src)
- | MINIFLOW_PKT_MAP(ipv6_dst) };
+ struct flowmap map = FLOWMAP_EMPTY_INITIALIZER;
uint64_t value;
- MINIFLOW_FOR_EACH_IN_PKT_MAP(value, flow, maps) {
+ FLOWMAP_SET(&map, ipv6_src);
+ FLOWMAP_SET(&map, ipv6_dst);
+
+ MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, map) {
hash = hash_add64(hash, value);
}
} else {
@@ -2240,9 +2239,8 @@ flow_compose(struct dp_packet *p, const struct flow *flow)
/* Compressed flow. */
/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
- * the caller. The caller must have already computed 'dst->tnl_map' and
- * 'dst->pkt_map' properly to indicate the significant uint64_t elements of
- * 'src'.
+ * the caller. The caller must have already computed 'dst->map' properly to
+ * indicate the significant uint64_t elements of 'src'.
*
* Normally the significant elements are the ones that are non-zero. However,
* when a miniflow is initialized from a (mini)mask, the values can be zeroes,
@@ -2250,12 +2248,11 @@ flow_compose(struct dp_packet *p, const struct flow *flow)
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
- const uint64_t *src_u64 = (const uint64_t *) src;
uint64_t *dst_u64 = miniflow_values(dst);
size_t idx;
- MAPS_FOR_EACH_INDEX(idx, *dst) {
- *dst_u64++ = src_u64[idx];
+ FLOWMAP_FOR_EACH_INDEX(idx, dst->map) {
+ *dst_u64++ = flow_u64_value(src, idx);
}
}
@@ -2263,21 +2260,11 @@ miniflow_init(struct miniflow *dst, const struct flow *src)
void
miniflow_map_init(struct miniflow *flow, const struct flow *src)
{
- const uint64_t *src_u64 = (const uint64_t *) src;
- int i;
-
/* Initialize map, counting the number of nonzero elements. */
- flow->tnl_map = 0;
- for (i = 0; i < FLOW_TNL_U64S; i++) {
- if (src_u64[i]) {
- flow->tnl_map |= UINT64_C(1) << i;
- }
- }
- src_u64 += FLOW_TNL_U64S;
- flow->pkt_map = 0;
- for (i = 0; i < FLOW_U64S - FLOW_TNL_U64S; i++) {
- if (src_u64[i]) {
- flow->pkt_map |= UINT64_C(1) << i;
+ flowmap_init(&flow->map);
+ for (size_t i = 0; i < FLOW_U64S; i++) {
+ if (flow_u64_value(src, i)) {
+ flowmap_set(&flow->map, i, 1);
}
}
}
@@ -2291,7 +2278,7 @@ miniflow_alloc(struct miniflow *dsts[], size_t n, const struct miniflow *src)
size_t n_values = miniflow_n_values(src);
size_t data_size = MINIFLOW_VALUES_SIZE(n_values);
struct miniflow *dst = xmalloc(n * (sizeof *src + data_size));
- unsigned int i;
+ size_t i;
COVERAGE_INC(miniflow_malloc);
@@ -2345,26 +2332,16 @@ miniflow_equal(const struct miniflow *a, const struct miniflow *b)
const uint64_t *ap = miniflow_get_values(a);
const uint64_t *bp = miniflow_get_values(b);
- if (OVS_LIKELY(miniflow_equal_maps(a, b))) {
+ /* This is mostly called after a matching hash, so it is highly likely that
+ * the maps are equal as well. */
+ if (OVS_LIKELY(flowmap_equal(a->map, b->map))) {
return !memcmp(ap, bp, miniflow_n_values(a) * sizeof *ap);
} else {
- uint64_t map;
-
- map = a->tnl_map | b->tnl_map;
- for (; map; map = zero_rightmost_1bit(map)) {
- uint64_t bit = rightmost_1bit(map);
-
- if ((a->tnl_map & bit ? *ap++ : 0)
- != (b->tnl_map & bit ? *bp++ : 0)) {
- return false;
- }
- }
- map = a->pkt_map | b->pkt_map;
- for (; map; map = zero_rightmost_1bit(map)) {
- uint64_t bit = rightmost_1bit(map);
+ size_t idx;
- if ((a->pkt_map & bit ? *ap++ : 0)
- != (b->pkt_map & bit ? *bp++ : 0)) {
+ FLOWMAP_FOR_EACH_INDEX (idx, flowmap_or(a->map, b->map)) {
+ if ((flowmap_is_set(&a->map, idx) ? *ap++ : 0)
+ != (flowmap_is_set(&b->map, idx) ? *bp++ : 0)) {
return false;
}
}
@@ -2382,7 +2359,7 @@ miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
const uint64_t *p = miniflow_get_values(&mask->masks);
size_t idx;
- MAPS_FOR_EACH_INDEX(idx, mask->masks) {
+ FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
return false;
}
@@ -2397,12 +2374,11 @@ bool
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
const struct minimask *mask)
{
- const uint64_t *b_u64 = (const uint64_t *) b;
const uint64_t *p = miniflow_get_values(&mask->masks);
size_t idx;
- MAPS_FOR_EACH_INDEX(idx, mask->masks) {
- if ((miniflow_get(a, idx) ^ b_u64[idx]) & *p++) {
+ FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
+ if ((miniflow_get(a, idx) ^ flow_u64_value(b, idx)) & *p++) {
return false;
}
}
@@ -2439,31 +2415,16 @@ minimask_combine(struct minimask *dst_,
uint64_t *dst_values = storage;
const struct miniflow *a = &a_->masks;
const struct miniflow *b = &b_->masks;
- const uint64_t *ap = miniflow_get_values(a);
- const uint64_t *bp = miniflow_get_values(b);
size_t idx;
- dst->tnl_map = 0;
- MAP_FOR_EACH_INDEX(idx, a->tnl_map & b->tnl_map) {
- /* Both 'a' and 'b' have non-zero data at 'idx'. */
- uint64_t mask = *miniflow_values_get__(ap, a->tnl_map, idx)
- & *miniflow_values_get__(bp, b->tnl_map, idx);
+ flowmap_init(&dst->map);
- if (mask) {
- dst->tnl_map |= UINT64_C(1) << idx;
- *dst_values++ = mask;
- }
- }
- dst->pkt_map = 0;
- ap += count_1bits(a->tnl_map); /* Skip tnl_map values. */
- bp += count_1bits(b->tnl_map); /* Skip tnl_map values. */
- MAP_FOR_EACH_INDEX(idx, a->pkt_map & b->pkt_map) {
+ FLOWMAP_FOR_EACH_INDEX(idx, flowmap_and(a->map, b->map)) {
/* Both 'a' and 'b' have non-zero data at 'idx'. */
- uint64_t mask = *miniflow_values_get__(ap, a->pkt_map, idx)
- & *miniflow_values_get__(bp, b->pkt_map, idx);
+ uint64_t mask = *miniflow_get__(a, idx) & *miniflow_get__(b, idx);
if (mask) {
- dst->pkt_map |= UINT64_C(1) << idx;
+ flowmap_set(&dst->map, idx, 1);
*dst_values++ = mask;
}
}
@@ -2482,10 +2443,8 @@ minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
- return a->masks.tnl_map == b->masks.tnl_map
- && a->masks.pkt_map == b->masks.pkt_map &&
- !memcmp(miniflow_get_values(&a->masks), miniflow_get_values(&b->masks),
- MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
+ return !memcmp(a, b, sizeof *a
+ + MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
}
/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
@@ -2493,28 +2452,16 @@ minimask_equal(const struct minimask *a, const struct minimask *b)
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
- const uint64_t *ap = miniflow_get_values(&a->masks);
const uint64_t *bp = miniflow_get_values(&b->masks);
size_t idx;
- MAP_FOR_EACH_INDEX(idx, b->masks.tnl_map) {
+ FLOWMAP_FOR_EACH_INDEX(idx, b->masks.map) {
uint64_t b_u64 = *bp++;
/* 'b_u64' is non-zero, check if the data in 'a' is either zero
* or misses some of the bits in 'b_u64'. */
- if (!(a->masks.tnl_map & (UINT64_C(1) << idx))
- || ((*miniflow_values_get__(ap, a->masks.tnl_map, idx) & b_u64)
- != b_u64)) {
- return true; /* 'a' wildcards some bits 'b' doesn't. */
- }
- }
- ap += count_1bits(a->masks.tnl_map); /* Skip tnl_map values. */
- MAP_FOR_EACH_INDEX(idx, b->masks.pkt_map) {
- uint64_t b_u64 = *bp++;
-
- if (!(a->masks.pkt_map & (UINT64_C(1) << idx))
- || ((*miniflow_values_get__(ap, a->masks.pkt_map, idx) & b_u64)
- != b_u64)) {
+ if (!MINIFLOW_IN_MAP(&a->masks, idx)
+ || ((*miniflow_get__(&a->masks, idx) & b_u64) != b_u64)) {
return true; /* 'a' wildcards some bits 'b' doesn't. */
}
}
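For reference, the FLOWMAP_FOR_EACH_INDEX() loops used in miniflow_init(), miniflow_equal(), and minimask_has_extra() above can be modeled as follows. This is only a sketch of the explicit unit enumeration the commit message describes, not the real macro from lib/flow.h; __builtin_ctzll() assumes GCC or Clang:

/* Toy model of visiting the index of every 1-bit in a flowmap by
 * enumerating the map units explicitly.  'map__ &= map__ - 1' clears
 * the rightmost 1-bit, the same zero_rightmost_1bit() pattern the old
 * per-map loops in this file used. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FLOWMAP_UNITS 2
#define MAP_T_BITS 64
typedef uint64_t map_t;
struct flowmap { map_t bits[FLOWMAP_UNITS]; };

#define FLOWMAP_FOR_EACH_INDEX(IDX, FM)                                  \
    for (size_t unit__ = 0; unit__ < FLOWMAP_UNITS; unit__++)            \
        for (map_t map__ = (FM).bits[unit__]; map__; map__ &= map__ - 1) \
            if (((IDX) = unit__ * MAP_T_BITS + __builtin_ctzll(map__), 1))

int
main(void)
{
    struct flowmap fm = { { (1ULL << 3) | (1ULL << 62), 1ULL << 1 } };
    size_t idx;

    FLOWMAP_FOR_EACH_INDEX(idx, fm) {
        printf("%zu\n", idx);           /* Prints 3, 62, 65. */
    }
    return 0;
}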