author    Jarno Rajahalme <jrajahalme@nicira.com>    2015-01-06 11:10:42 -0800
committer Jarno Rajahalme <jrajahalme@nicira.com>    2015-01-06 14:47:30 -0800
commit    d70e8c28f992c0d8c2918aa0733b935ce1a0caed (patch)
tree      95442ab72932758e667f9241282ddc69b79a0873 /lib/classifier-private.h
parent    aae7c34f0406569d1eaf574ec6e4e34b15f5bc78 (diff)
download  openvswitch-d70e8c28f992c0d8c2918aa0733b935ce1a0caed.tar.gz
miniflow: Use 64-bit data.
So far the compressed flow data in struct miniflow has been in 32-bit words with a 63-bit map, allowing for a maximum size of struct flow of 252 bytes. With the forthcoming Geneve options this is not sufficient any more.

This patch solves the problem by changing the miniflow data to 64-bit words, doubling the maximum flow size to 504 bytes. Since the word size is doubled, there is some loss in compression efficiency. To counter this, some of the flow fields have been reordered to keep related fields together (e.g., the source and destination IP addresses now share the same 64-bit word).

This change should speed up flow data processing on 64-bit CPUs, which may help counterbalance the impact of making struct flow bigger in the future.

Classifier lookup stage boundaries are also changed to 64-bit alignment, as the current algorithm depends on each miniflow word not being split between ranges. This has resulted in new padding (part of the 'mpls_lse' field).

The 'dp_hash' field is also moved to packet metadata to eliminate padding that would otherwise be needed there. This allows the L4 fields to fit into one 64-bit word, and also makes matches on 'dp_hash' more efficient, as misses can be found already at stage 1.

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Acked-by: Ben Pfaff <blp@nicira.com>
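[Annotation] For orientation, here is a minimal sketch of the compression scheme the message describes; it is not the actual OVS definition (the real struct miniflow lives in lib/flow.h and differs in detail). Bit i of the map says whether 64-bit word i of struct flow is non-zero, and only the non-zero words are stored, packed. Widening the stored words from 32 to 64 bits is what doubles the ceiling from 63 * 4 = 252 to 63 * 8 = 504 bytes:

#include <stdint.h>

/* Hypothetical, simplified model of a miniflow: a 63-bit occupancy map
 * plus a densely packed array holding only the non-zero 64-bit words of
 * the full struct flow. */
struct mini {
    uint64_t map;          /* Bit i set => flow word i is present. */
    uint64_t values[];     /* count_1bits(map) packed words. */
};

/* Fetch flow word 'idx': its packed position is the number of map bits
 * set below bit 'idx'; absent words read as zero. */
static inline uint64_t
mini_get(const struct mini *m, unsigned int idx)
{
    uint64_t bit = UINT64_C(1) << idx;

    return m->map & bit
           ? m->values[__builtin_popcountll(m->map & (bit - 1))]
           : 0;
}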
Diffstat (limited to 'lib/classifier-private.h')
-rw-r--r--  lib/classifier-private.h  60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/lib/classifier-private.h b/lib/classifier-private.h
index 17eed2c9e..cd64fed83 100644
--- a/lib/classifier-private.h
+++ b/lib/classifier-private.h
@@ -42,7 +42,7 @@ struct cls_subtable {
/* These fields are accessed by readers who care about wildcarding. */
const tag_type tag; /* Tag generated from mask for partitioning. */
const uint8_t n_indices; /* How many indices to use. */
- const uint8_t index_ofs[CLS_MAX_INDICES]; /* u32 segment boundaries. */
+ const uint8_t index_ofs[CLS_MAX_INDICES]; /* u64 segment boundaries. */
unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'
* (runtime configurable). */
const int ports_mask_len;
@@ -112,7 +112,7 @@ miniflow_get_map_in_range(const struct miniflow *miniflow,
*offset = count_1bits(map & msk);
map &= ~msk;
}
- if (end < FLOW_U32S) {
+ if (end < FLOW_U64S) {
uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
map &= msk;
}
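[Annotation] The hunk above is the range-trimming helper behind staged lookup: it clears occupancy-map bits outside [start, end) for one stage. Only the upper bound changes here, because the map now indexes 64-bit words (FLOW_U64S) rather than 32-bit ones. A self-contained sketch of just that bit manipulation, simplified from the function above (the real one also returns the packed-array offset of the first surviving word):

#include <stdint.h>

/* Keep only bits [start, end) of a 64-bit occupancy map.  The 'end < 64'
 * test guards the shift; the real code compares against FLOW_U64S. */
static inline uint64_t
trim_map(uint64_t map, unsigned int start, unsigned int end)
{
    if (start > 0) {
        map &= ~((UINT64_C(1) << start) - 1);  /* Clear 'start' LSBs. */
    }
    if (end < 64) {
        map &= (UINT64_C(1) << end) - 1;       /* Keep 'end' LSBs. */
    }
    return map;
}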
@@ -128,18 +128,18 @@ static inline uint32_t
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
uint32_t basis)
{
- const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
- const uint32_t *flow_u32 = (const uint32_t *)flow;
- const uint32_t *p = mask_values;
+ const uint64_t *mask_values = miniflow_get_values(&mask->masks);
+ const uint64_t *flow_u64 = (const uint64_t *)flow;
+ const uint64_t *p = mask_values;
uint32_t hash;
int idx;
hash = basis;
MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
- hash = hash_add(hash, flow_u32[idx] & *p++);
+ hash = hash_add64(hash, flow_u64[idx] & *p++);
}
- return hash_finish(hash, (p - mask_values) * 4);
+ return hash_finish(hash, (p - mask_values) * 8);
}
/* Returns a hash value for the bits of 'flow' where there are 1-bits in
@@ -151,16 +151,16 @@ static inline uint32_t
miniflow_hash_in_minimask(const struct miniflow *flow,
const struct minimask *mask, uint32_t basis)
{
- const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
- const uint32_t *p = mask_values;
+ const uint64_t *mask_values = miniflow_get_values(&mask->masks);
+ const uint64_t *p = mask_values;
uint32_t hash = basis;
- uint32_t flow_u32;
+ uint64_t flow_u64;
- MINIFLOW_FOR_EACH_IN_MAP(flow_u32, flow, mask->masks.map) {
- hash = hash_add(hash, flow_u32 & *p++);
+ MINIFLOW_FOR_EACH_IN_MAP(flow_u64, flow, mask->masks.map) {
+ hash = hash_add64(hash, flow_u64 & *p++);
}
- return hash_finish(hash, (p - mask_values) * 4);
+ return hash_finish(hash, (p - mask_values) * 8);
}
/* Returns a hash value for the bits of range [start, end) in 'flow',
@@ -173,22 +173,22 @@ flow_hash_in_minimask_range(const struct flow *flow,
const struct minimask *mask,
uint8_t start, uint8_t end, uint32_t *basis)
{
- const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
- const uint32_t *flow_u32 = (const uint32_t *)flow;
+ const uint64_t *mask_values = miniflow_get_values(&mask->masks);
+ const uint64_t *flow_u64 = (const uint64_t *)flow;
unsigned int offset;
uint64_t map;
- const uint32_t *p;
+ const uint64_t *p;
uint32_t hash = *basis;
int idx;
map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
p = mask_values + offset;
MAP_FOR_EACH_INDEX(idx, map) {
- hash = hash_add(hash, flow_u32[idx] & *p++);
+ hash = hash_add64(hash, flow_u64[idx] & *p++);
}
*basis = hash; /* Allow continuation from the unfinished value. */
- return hash_finish(hash, (p - mask_values) * 4);
+ return hash_finish(hash, (p - mask_values) * 8);
}
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
@@ -206,16 +206,16 @@ flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
const struct minimask *mask,
uint8_t start, uint8_t end)
{
- uint32_t *dst_u32 = (uint32_t *)&wc->masks;
+ uint64_t *dst_u64 = (uint64_t *)&wc->masks;
unsigned int offset;
uint64_t map;
- const uint32_t *p;
+ const uint64_t *p;
int idx;
map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
- p = miniflow_get_u32_values(&mask->masks) + offset;
+ p = miniflow_get_values(&mask->masks) + offset;
MAP_FOR_EACH_INDEX(idx, map) {
- dst_u32[idx] |= *p++;
+ dst_u64[idx] |= *p++;
}
}
@@ -223,15 +223,15 @@ flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
static inline uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
- const uint32_t *values = miniflow_get_u32_values(flow);
- const uint32_t *p = values;
+ const uint64_t *values = miniflow_get_values(flow);
+ const uint64_t *p = values;
uint32_t hash = basis;
uint64_t hash_map = 0;
uint64_t map;
for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
if (*p) {
- hash = hash_add(hash, *p);
+ hash = hash_add64(hash, *p);
hash_map |= rightmost_1bit(map);
}
p++;
@@ -265,20 +265,20 @@ minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end,
uint32_t *basis)
{
unsigned int offset;
- const uint32_t *p, *q;
+ const uint64_t *p, *q;
uint32_t hash = *basis;
int n, i;
n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
&offset));
- q = miniflow_get_u32_values(&match->mask.masks) + offset;
- p = miniflow_get_u32_values(&match->flow) + offset;
+ q = miniflow_get_values(&match->mask.masks) + offset;
+ p = miniflow_get_values(&match->flow) + offset;
for (i = 0; i < n; i++) {
- hash = hash_add(hash, p[i] & q[i]);
+ hash = hash_add64(hash, p[i] & q[i]);
}
*basis = hash; /* Allow continuation from the unfinished value. */
- return hash_finish(hash, (offset + n) * 4);
+ return hash_finish(hash, (offset + n) * 8);
}
#endif
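[Annotation] A closing note on the recurring '* 4' to '* 8' change: every hash loop above feeds hash_finish() the number of bytes it consumed, i.e. words consumed times word size, so doubling the word width doubles that count. A toy, self-contained version of the pattern follows; the hash routines are stand-ins for illustration only, not the murmur-based hash_add64()/hash_finish() in OVS's lib/hash.h:

#include <stddef.h>
#include <stdint.h>

/* Stand-in mixers, just to make the byte-count bookkeeping concrete. */
static inline uint32_t
toy_hash_add64(uint32_t hash, uint64_t data)
{
    return (hash ^ (uint32_t) (data ^ data >> 32)) * UINT32_C(0x45d9f3b);
}

static inline uint32_t
toy_hash_finish(uint32_t hash, uint32_t n_bytes)
{
    return hash ^ n_bytes;    /* The real finisher mixes much harder. */
}

/* Hash 'n' masked 64-bit words: 8 bytes each, hence 'n * 8' at the end
 * (the 32-bit version of the same loop finished with 'n * 4'). */
static inline uint32_t
toy_hash_masked(const uint64_t *values, const uint64_t *masks, size_t n,
                uint32_t basis)
{
    uint32_t hash = basis;

    for (size_t i = 0; i < n; i++) {
        hash = toy_hash_add64(hash, values[i] & masks[i]);
    }
    return toy_hash_finish(hash, n * 8);
}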