author     Jarno Rajahalme <jarno@ovn.org>    2016-07-29 16:52:02 -0700
committer  Jarno Rajahalme <jarno@ovn.org>    2016-07-29 16:52:02 -0700
commit     7697323773ca35fad42a0742ef4ebd5e9e0e27b6 (patch)
tree       9dc553b40146430dbe712d8e634a663e69729d24 /ofproto
parent     db88b35c622681dff7d6e945429c7db888613e04 (diff)
download   openvswitch-7697323773ca35fad42a0742ef4ebd5e9e0e27b6.tar.gz
ofproto: Take group references only when needed.
Avoid unnecessary references when RCU protection suffices.  This makes
group lookup memory management more like flow lookup memory management.

Signed-off-by: Jarno Rajahalme <jarno@ovn.org>
Acked-by: Ben Pfaff <blp@ovn.org>
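The idea behind the change can be sketched in a few lines of self-contained C. The sketch below is hypothetical and simplified, not the OVS code: the real implementation is ofproto_group_lookup() / ofproto_group_try_ref() in the hunks further down and uses OVS's own RCU and refcount helpers, while here plain C11 atomics and invented names (group_lookup, group_try_ref, group_find) stand in for them. A reference is taken only when the caller passes take_ref because it intends to keep the pointer beyond the RCU read-side section; otherwise the RCU-protected lookup alone is enough.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct group {
    atomic_uint ref_cnt;        /* Reaches 0 only when being destroyed. */
    unsigned int group_id;
};

/* Tries to take a reference; fails if the last reference was just dropped. */
bool
group_try_ref(struct group *g)
{
    unsigned int old = atomic_load(&g->ref_cnt);

    do {
        if (old == 0) {
            return false;       /* Another thread released the last ref. */
        }
    } while (!atomic_compare_exchange_weak(&g->ref_cnt, &old, old + 1));
    return true;
}

/* Releases a reference taken via group_try_ref(). */
void
group_unref(struct group *g)
{
    if (g && atomic_fetch_sub(&g->ref_cnt, 1) == 1) {
        /* Last reference gone; real code would defer destruction via RCU. */
    }
}

/* Stand-in for an RCU-protected hash map lookup. */
static struct group group_table[] = { { .ref_cnt = 1, .group_id = 1 } };

static struct group *
group_find(unsigned int group_id)
{
    for (size_t i = 0; i < sizeof group_table / sizeof group_table[0]; i++) {
        if (group_table[i].group_id == group_id) {
            return &group_table[i];
        }
    }
    return NULL;
}

/* Looks up a group; takes a reference only if 'take_ref' is set. */
struct group *
group_lookup(unsigned int group_id, bool take_ref)
{
    struct group *g = group_find(group_id);

    if (g && take_ref) {
        /* Not holding a lock, so the last reference may disappear
         * concurrently; treat that as a failed lookup. */
        return group_try_ref(g) ? g : NULL;
    }
    return g;       /* RCU protection suffices for short-lived use. */
}

The sketch only shows the shape of the change; in the actual patch it is the presence of the xlate cache that decides whether a reference is needed.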
Diffstat (limited to 'ofproto')
-rw-r--r--  ofproto/ofproto-dpif-xlate.c | 22
-rw-r--r--  ofproto/ofproto-dpif.c       |  6
-rw-r--r--  ofproto/ofproto-dpif.h       |  2
-rw-r--r--  ofproto/ofproto-provider.h   |  2
-rw-r--r--  ofproto/ofproto.c            |  7
5 files changed, 23 insertions(+), 16 deletions(-)
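On the caller side the pattern looks roughly like the sketch below, which continues the hypothetical example above (group_lookup(), group_unref() and the cache/bucket stand-ins are invented names; the real callers are group_is_alive() and the xlate_*_group() functions in the hunks that follow). A purely read-only caller never takes a reference; a caller that may store the pointer in a longer-lived cache takes one at lookup time and must release it on the paths where the pointer is not actually stored.

#include <stdbool.h>
#include <stddef.h>

struct group;                                       /* From the sketch above. */
struct group *group_lookup(unsigned int group_id, bool take_ref);
void group_unref(struct group *);

/* Trivial placeholder for bucket selection in the real code. */
static bool
bucket_available(const struct group *g)
{
    return g != NULL;
}

/* Read-only check: the pointer never escapes the RCU read section, so no
 * reference is needed (compare group_is_alive() below). */
bool
group_is_usable(unsigned int group_id)
{
    return group_lookup(group_id, false) != NULL;
}

/* Hypothetical cache that outlives the RCU read section. */
struct cache {
    struct group *group;
};

/* Caching caller: a reference is taken only if a cache exists, and it is
 * either consumed by the cache entry or released again (compare
 * xlate_group_action() and xlate_ff_group() below). */
void
translate_group(struct cache *cache, unsigned int group_id)
{
    struct group *g = group_lookup(group_id, cache != NULL);

    if (!g) {
        return;
    }
    if (bucket_available(g) && cache) {
        cache->group = g;           /* The cache entry consumes the ref. */
    } else if (cache) {
        group_unref(g);             /* No entry added: drop the unused ref. */
    }
}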
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 519e211d5..ca468c62d 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -1503,13 +1503,9 @@ group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
 {
     struct group_dpif *group;
 
-    group = group_dpif_lookup(ctx->xbridge->ofproto, group_id);
+    group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, false);
     if (group) {
-        struct ofputil_bucket *bucket;
-
-        bucket = group_first_live_bucket(ctx, group, depth);
-        group_dpif_unref(group);
-        return bucket != NULL;
+        return group_first_live_bucket(ctx, group, depth) != NULL;
     }
 
     return false;
@@ -3377,6 +3373,7 @@ xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
     }
 }
 
+/* Consumes the group reference, which is only taken if xcache exists. */
 static void
 xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
                   struct ofputil_bucket *bucket)
@@ -3388,7 +3385,7 @@ xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
         struct xc_entry *entry;
 
         entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
-        entry->u.group.group = group_dpif_ref(group);
+        entry->u.group.group = group;
         entry->u.group.bucket = bucket;
     }
 }
@@ -3466,6 +3463,8 @@ xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
     if (bucket) {
         xlate_group_bucket(ctx, bucket);
         xlate_group_stats(ctx, group, bucket);
+    } else if (ctx->xin->xcache) {
+        group_dpif_unref(group);
     }
 }
@@ -3482,6 +3481,8 @@ xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
     if (bucket) {
         xlate_group_bucket(ctx, bucket);
         xlate_group_stats(ctx, group, bucket);
+    } else if (ctx->xin->xcache) {
+        group_dpif_unref(group);
     }
 }
@@ -3551,6 +3552,8 @@ xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
     if (bucket) {
         xlate_group_bucket(ctx, bucket);
         xlate_group_stats(ctx, group, bucket);
+    } else if (ctx->xin->xcache) {
+        group_dpif_unref(group);
     }
 }
@@ -3596,7 +3599,6 @@ xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
     default:
         OVS_NOT_REACHED();
     }
-    group_dpif_unref(group);
 
     ctx->in_group = was_in_group;
 }
@@ -3607,7 +3609,9 @@ xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
     if (xlate_resubmit_resource_check(ctx)) {
         struct group_dpif *group;
 
-        group = group_dpif_lookup(ctx->xbridge->ofproto, group_id);
+        /* Take ref only if xcache exists. */
+        group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
+                                  ctx->xin->xcache);
         if (!group) {
             /* XXX: Should set ctx->error ? */
             return true;
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 681a12e66..53ba3b2a1 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -4367,9 +4367,11 @@ group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
  * Make sure to call group_dpif_unref() after no longer needing to maintain
  * a reference to the group. */
 struct group_dpif *
-group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id)
+group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
+                  bool take_ref)
 {
-    struct ofgroup *ofgroup = ofproto_group_lookup(&ofproto->up, group_id);
+    struct ofgroup *ofgroup = ofproto_group_lookup(&ofproto->up, group_id,
+                                                   take_ref);
 
     return ofgroup ? group_dpif_cast(ofgroup) : NULL;
 }
diff --git a/ofproto/ofproto-dpif.h b/ofproto/ofproto-dpif.h
index 18a90b2f3..4c6f08871 100644
--- a/ofproto/ofproto-dpif.h
+++ b/ofproto/ofproto-dpif.h
@@ -137,7 +137,7 @@ void group_dpif_credit_stats(struct group_dpif *,
                              struct ofputil_bucket *,
                              const struct dpif_flow_stats *);
 struct group_dpif *group_dpif_lookup(struct ofproto_dpif *ofproto,
-                                     uint32_t group_id);
+                                     uint32_t group_id, bool take_ref);
 const struct ovs_list *group_dpif_get_buckets(const struct group_dpif *group);
 enum ofp11_group_type group_dpif_get_type(const struct group_dpif *group);
diff --git a/ofproto/ofproto-provider.h b/ofproto/ofproto-provider.h
index 3cda68a9f..dee817ec4 100644
--- a/ofproto/ofproto-provider.h
+++ b/ofproto/ofproto-provider.h
@@ -524,7 +524,7 @@ struct ofgroup {
 };
 
 struct ofgroup *ofproto_group_lookup(const struct ofproto *ofproto,
-                                     uint32_t group_id);
+                                     uint32_t group_id, bool take_ref);
 void ofproto_group_ref(struct ofgroup *);
 bool ofproto_group_try_ref(struct ofgroup *);
diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index bdca1e7db..1c09f617b 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -6215,17 +6215,18 @@ ofproto_group_lookup__(const struct ofproto *ofproto, uint32_t group_id)
  * Make sure to call ofproto_group_unref() after no longer needing to maintain
  * a reference to the group. */
 struct ofgroup *
-ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id)
+ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id,
+                     bool take_ref)
 {
     struct ofgroup *group;
 
     group = ofproto_group_lookup__(ofproto, group_id);
-    if (group) {
+    if (group && take_ref) {
         /* Not holding a lock, so it is possible that another thread releases
          * the last reference just before we manage to get one. */
         return ofproto_group_try_ref(group) ? group : NULL;
     }
-    return NULL;
+    return group;
 }
 
 /* Caller should hold 'ofproto->groups_rwlock' if it is important that the