author     Nitin Katiyar <nitin.katiyar@ericsson.com>    2019-08-22 22:23:30 +0530
committer  Ilya Maximets <i.maximets@ovn.org>            2020-07-07 17:23:54 +0200
commit     81ac8b3b194c602e7c9b4095f5bc1e214ae72a32 (patch)
tree       f5cf47eedf27601b022a27b58fe591505acfac6d /lib
parent     9ed69557e5fae4f33f34a985c380e7e040b09548 (diff)
download   openvswitch-81ac8b3b194c602e7c9b4095f5bc1e214ae72a32.tar.gz
dpif-netdev: Do RCU synchronization at fixed interval in PMD main loop.
Each PMD updates the global sequence number for RCU synchronization with the other OVS threads. This is currently done at every 1025th iteration of the PMD main loop. If the PMD thread is responsible for polling a large number of queues that are carrying traffic, it spends a lot of time processing packets, and this results in a significant delay in performing the housekeeping activities.

If the OVS main thread is waiting to synchronize with the PMD threads and those threads delay the housekeeping for more than 3 seconds, LACP processing is impacted and LACP flaps occur. Other control protocols run by the OVS main thread are affected in the same way.

For example, a PMD thread polling 200 ports/queues, with an average of 1600 processing cycles per packet and a batch size of 32, may take 10,240,000 (200 * 1600 * 32) cycles per iteration. On a system with a 2.0 GHz CPU this is more than 5 ms per iteration, so 1024 iterations take more than 5 seconds to complete. The situation gets worse when there are also lightly loaded PMD threads: they reduce the chance of a heavily loaded PMD getting the mutex lock in ovsrcu_try_quiesce(), and its next attempt to quiesce only comes after another 1024 iterations.

With this patch, PMD RCU synchronization is performed at a fixed time interval instead of after a fixed number of iterations. This ensures that even under a high packet processing load the RCU synchronization is not delayed for long.

Co-authored-by: Anju Thomas <anju.thomas@ericsson.com>
Signed-off-by: Anju Thomas <anju.thomas@ericsson.com>
Signed-off-by: Nitin Katiyar <nitin.katiyar@ericsson.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
Diffstat (limited to 'lib')
-rw-r--r--   lib/dpif-netdev.c   22
1 file changed, 22 insertions, 0 deletions
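For illustration, the following is a minimal, self-contained C sketch of the behaviour introduced by this patch, not OVS code: a busy polling loop that quiesces whenever a wall-clock deadline passes, in addition to the old every-1024-iterations path. The helpers now_usec(), try_quiesce() and process_one_iteration(), and the loop bound, are placeholders invented for this sketch; in the real patch the equivalents are pmd->ctx.now, ovsrcu_try_quiesce() and the PMD rx processing shown in the diff below.

#define _POSIX_C_SOURCE 199309L
#include <time.h>

#define RCU_QUIESCE_INTERVAL_US 10000LL  /* 10 ms, mirrors PMD_RCU_QUIESCE_INTERVAL. */

/* Monotonic time in microseconds (placeholder for pmd->ctx.now). */
static long long
now_usec(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long) ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Stand-in for ovsrcu_try_quiesce(); returns 0 on success. */
static int
try_quiesce(void)
{
    return 0;
}

/* Stand-in for one iteration of rx polling and packet processing. */
static void
process_one_iteration(void)
{
}

int
main(void)
{
    long long next_quiesce = now_usec() + RCU_QUIESCE_INTERVAL_US;
    int lc = 0;

    for (int i = 0; i < 100000; i++) {
        process_one_iteration();

        /* Time-based check: quiesce as soon as the deadline has passed,
         * however expensive each iteration turns out to be. */
        long long now = now_usec();
        if (now > next_quiesce && !try_quiesce()) {
            next_quiesce = now + RCU_QUIESCE_INTERVAL_US;
        }

        /* The old iteration-count path is kept for other housekeeping;
         * a successful quiesce here also pushes the deadline forward. */
        if (lc++ > 1024) {
            lc = 0;
            if (!try_quiesce()) {
                next_quiesce = now_usec() + RCU_QUIESCE_INTERVAL_US;
            }
        }
    }
    return 0;
}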
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index cd349c4a4..e037eab2a 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -240,6 +240,9 @@ struct dfc_cache {
* and used during rxq to pmd assignment. */
#define PMD_RXQ_INTERVAL_MAX 6
+/* Time in microseconds to try RCU quiescing. */
+#define PMD_RCU_QUIESCE_INTERVAL 10000LL
+
struct dpcls {
struct cmap_node node; /* Within dp_netdev_pmd_thread.classifiers */
odp_port_t in_port;
@@ -787,6 +790,9 @@ struct dp_netdev_pmd_thread {
/* Set to true if the pmd thread needs to be reloaded. */
bool need_reload;
+
+ /* Next time when PMD should try RCU quiescing. */
+ long long next_rcu_quiesce;
};
/* Interface to netdev-based datapath. */
@@ -5730,6 +5736,9 @@ reload:
pmd->intrvl_tsc_prev = 0;
atomic_store_relaxed(&pmd->intrvl_cycles, 0);
cycles_counter_update(s);
+
+ pmd->next_rcu_quiesce = pmd->ctx.now + PMD_RCU_QUIESCE_INTERVAL;
+
/* Protect pmd stats from external clearing while polling. */
ovs_mutex_lock(&pmd->perf_stats.stats_mutex);
for (;;) {
@@ -5764,6 +5773,16 @@ reload:
tx_packets = dp_netdev_pmd_flush_output_packets(pmd, false);
}
+ /* Do RCU synchronization at fixed interval. This ensures that
+ * synchronization would not be delayed long even at high load of
+ * packet processing. */
+ if (pmd->ctx.now > pmd->next_rcu_quiesce) {
+ if (!ovsrcu_try_quiesce()) {
+ pmd->next_rcu_quiesce =
+ pmd->ctx.now + PMD_RCU_QUIESCE_INTERVAL;
+ }
+ }
+
if (lc++ > 1024) {
lc = 0;
@@ -5771,6 +5790,8 @@ reload:
dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
if (!ovsrcu_try_quiesce()) {
emc_cache_slow_sweep(&((pmd->flow_cache).emc_cache));
+ pmd->next_rcu_quiesce =
+ pmd->ctx.now + PMD_RCU_QUIESCE_INTERVAL;
}
for (i = 0; i < poll_cnt; i++) {
@@ -6244,6 +6265,7 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
pmd->ctx.last_rxq = NULL;
pmd_thread_ctx_time_update(pmd);
pmd->next_optimization = pmd->ctx.now + DPCLS_OPTIMIZATION_INTERVAL;
+ pmd->next_rcu_quiesce = pmd->ctx.now + PMD_RCU_QUIESCE_INTERVAL;
pmd->rxq_next_cycle_store = pmd->ctx.now + PMD_RXQ_INTERVAL_LEN;
hmap_init(&pmd->poll_list);
hmap_init(&pmd->tx_ports);