path: root/ofproto/ofproto-dpif.c
author     Daniele Di Proietto <diproiettod@vmware.com>    2016-06-01 19:01:10 -0700
committer  Daniele Di Proietto <diproiettod@vmware.com>    2016-06-02 13:20:56 -0700
commit     f5857865770efb5a5383fa7aa886c0ff15f2038a (patch)
tree       ddc9ff55fc3cfea72ff734e985bed7cfb5a0b615 /ofproto/ofproto-dpif.c
parent     68da36feee8ad2eccb9371542018c6b08b5a553d (diff)
download   openvswitch-f5857865770efb5a5383fa7aa886c0ff15f2038a.tar.gz
ofproto-dpif: Cache result of time_msec() for rule_expire().
In the run() function of ofproto-dpif we call rule_expire() for every possible flow that has a timeout, and rule_expire() calls time_msec().  Calling time_msec() repeatedly can be pretty expensive, even though most of the time it involves only a vdso call.

This commit calls time_msec() only once in run(), to reduce the workload.

Keeping the flows ordered by expiration in some kind of heap or timing wheel data structure could help make this process more efficient, if rule_expire() turns out to be a bottleneck.

VMware-BZ: #1655122
Signed-off-by: Daniele Di Proietto <diproiettod@vmware.com>
Acked-by: Ben Pfaff <blp@ovn.org>
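The gist of the change is simply to read the clock once per scan and pass the value down to each call.  As a rough, self-contained sketch of that pattern (the struct and function names here are hypothetical, not the actual OVS code; only time_msec() mirrors the OVS helper):

/* Standalone sketch: sample the clock once per scan instead of once per rule. */
#include <stdbool.h>
#include <stddef.h>
#include <time.h>

static long long
time_msec(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long) ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

struct my_rule {
    long long expires_at;       /* Absolute expiration time, in msec. */
    bool removed;
    struct my_rule *next;
};

/* The caller samples the clock and passes it in, so the per-rule work
 * contains no clock read at all. */
static void
rule_check_expiry(struct my_rule *rule, long long now)
{
    if (now >= rule->expires_at) {
        rule->removed = true;
    }
}

static void
expire_all(struct my_rule *head)
{
    long long now = time_msec();    /* One vdso call for the whole scan. */

    for (struct my_rule *r = head; r; r = r->next) {
        rule_check_expiry(r, now);
    }
}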
Diffstat (limited to 'ofproto/ofproto-dpif.c')
-rw-r--r--  ofproto/ofproto-dpif.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
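The commit message also floats keeping flows ordered by expiration (a heap or timing wheel) as a possible follow-up if rule_expire() ever becomes a bottleneck.  Purely as an illustrative sketch of that idea (not something this patch or the OVS tree implements), a binary min-heap keyed on the deadline would let the expiration pass touch only the rules that are actually due:

/* Hypothetical sketch of the heap-based alternative mentioned above; not part
 * of this patch.  Deadlines sit in a binary min-heap, so expiration pops only
 * entries whose deadline has passed instead of scanning every expirable rule. */
#include <stddef.h>

struct deadline_heap {
    long long *deadline;        /* deadline[0..n-1], min-heap ordered. */
    size_t n;
};

static void
heap_sift_down(struct deadline_heap *h, size_t i)
{
    for (;;) {
        size_t l = 2 * i + 1;
        size_t r = l + 1;
        size_t m = i;

        if (l < h->n && h->deadline[l] < h->deadline[m]) {
            m = l;
        }
        if (r < h->n && h->deadline[r] < h->deadline[m]) {
            m = r;
        }
        if (m == i) {
            return;
        }
        long long tmp = h->deadline[i];
        h->deadline[i] = h->deadline[m];
        h->deadline[m] = tmp;
        i = m;
    }
}

/* Pop every deadline that is already due and return how many expired.  A real
 * version would store a rule pointer next to each deadline and delete it. */
static size_t
expire_due(struct deadline_heap *h, long long now)
{
    size_t expired = 0;

    while (h->n > 0 && h->deadline[0] <= now) {
        h->deadline[0] = h->deadline[--h->n];
        heap_sift_down(h, 0);
        expired++;
    }
    return expired;
}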
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index df4a63297..91529fe26 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -109,7 +109,7 @@ BUILD_ASSERT_DECL(offsetof(struct rule_dpif, up) == 0);
 static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
                            long long int *used);
 static struct rule_dpif *rule_dpif_cast(const struct rule *);
-static void rule_expire(struct rule_dpif *);
+static void rule_expire(struct rule_dpif *, long long now);
 
 struct group_dpif {
     struct ofgroup up;
@@ -1545,6 +1545,7 @@ run(struct ofproto *ofproto_)
     new_dump_seq = seq_read(udpif_dump_seq(ofproto->backer->udpif));
     if (ofproto->dump_seq != new_dump_seq) {
         struct rule *rule, *next_rule;
+        long long now = time_msec();
 
         /* We know stats are relatively fresh, so now is a good time to do some
          * periodic work. */
@@ -1555,7 +1556,7 @@
         ovs_mutex_lock(&ofproto_mutex);
         LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
                             &ofproto->up.expirable) {
-            rule_expire(rule_dpif_cast(rule));
+            rule_expire(rule_dpif_cast(rule), now);
         }
         ovs_mutex_unlock(&ofproto_mutex);
@@ -3621,11 +3622,10 @@ port_is_lacp_current(const struct ofport *ofport_)
 /* If 'rule' is an OpenFlow rule, that has expired according to OpenFlow rules,
  * then delete it entirely. */
 static void
-rule_expire(struct rule_dpif *rule)
+rule_expire(struct rule_dpif *rule, long long now)
     OVS_REQUIRES(ofproto_mutex)
 {
     uint16_t hard_timeout, idle_timeout;
-    long long int now = time_msec();
     int reason = -1;
 
     hard_timeout = rule->up.hard_timeout;