summary | refs | log | tree | commit | diff
path: root/lib
diff options
context:
space:
mode:
authorDavid Marchand <david.marchand@redhat.com>2019-07-09 18:19:58 +0200
committerIan Stokes <ian.stokes@intel.com>2019-07-10 11:51:09 +0100
commit68a0625b78cfed6fcc479fc97b024dc28e538ab2 (patch)
treeb5c2978a3b974ac9345cc5bea516337088605a26 /lib
parente2cafa8692f62afe85acc49f11475ca2756f87b0 (diff)
downloadopenvswitch-68a0625b78cfed6fcc479fc97b024dc28e538ab2.tar.gz
dpif-netdev: Catch reloads faster.
Looking at the reload flag only every 1024 loops can be a long time under load, since we might be handling 32 packets per rxq, per iteration, which means up to poll_cnt * 32 * 1024 packets. Look at the flag every loop; no major performance impact seen.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Acked-by: Ilya Maximets <i.maximets@samsung.com>
Signed-off-by: Ian Stokes <ian.stokes@intel.com>
Diffstat (limited to 'lib')
-rw-r--r--lib/dpif-netdev.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index b4384b472..647a8ee4b 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -5480,7 +5480,6 @@ reload:
poll_block();
}
}
- lc = UINT_MAX;
}
pmd->intrvl_tsc_prev = 0;
@@ -5529,11 +5528,6 @@ reload:
emc_cache_slow_sweep(&((pmd->flow_cache).emc_cache));
}
- atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
- if (reload) {
- break;
- }
-
for (i = 0; i < poll_cnt; i++) {
uint64_t current_seq =
netdev_get_change_seq(poll_list[i].rxq->port->netdev);
@@ -5544,6 +5538,12 @@ reload:
}
}
}
+
+ atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
+ if (OVS_UNLIKELY(reload)) {
+ break;
+ }
+
pmd_perf_end_iteration(s, rx_packets, tx_packets,
pmd_perf_metrics_enabled(pmd));
}