author      Yi-Hung Wei <yihung.wei@gmail.com>    2019-08-28 15:14:26 -0700
committer   Justin Pettit <jpettit@ovn.org>       2019-09-26 13:50:17 -0700
commit      993cae678bca283cf0ef553bed5df99c7bb15b13 (patch)
tree        f176ac607a473d56a9384d7e176f26cfe6698532 /ofproto/ofproto-dpif.c
parent      6a16962cda0ea757de88b0fa9e737dfb303387d6 (diff)
download    openvswitch-993cae678bca283cf0ef553bed5df99c7bb15b13.tar.gz
ofproto-dpif: Consume CT_Zone and CT_Timeout_Policy tables
This patch consumes the CT_Zone and CT_Timeout_Policy tables and maintains the zone-based configuration in vswitchd. Whenever there is a database change, vswitchd reads the Datapath, CT_Zone, and CT_Timeout_Policy tables from OVSDB, builds an internal snapshot of the database configuration in bridge.c, and pushes the changes down into the ofproto and dpif layers. If a new zone-based timeout policy is added, vswitchd updates the zone-to-timeout-policy mapping in the per-datapath-type datapath structure in the dpif-backer and pushes the timeout policy down into the datapath via the dpif interface. If a timeout policy is no longer used, vswitchd may not be able to remove it from the kernel datapath immediately, since datapath flows may still reference the to-be-deleted policy. Thus, we keep a timeout policy kill list that vswitchd revisits periodically, retrying removal of the unused timeout policies.

Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
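For orientation, here is a minimal sketch of how a caller (e.g., the bridge layer) might exercise the two hooks registered at the bottom of this diff. The direct calls to the static functions and the timeout attribute names are illustrative assumptions; in the real series the calls arrive through ofproto wrappers driven by the OVSDB snapshot.

    /* Hypothetical caller sketch; attribute names and the direct use of
     * the static functions below are assumptions for illustration. */
    struct simap tp = SIMAP_INITIALIZER(&tp);

    simap_put(&tp, "icmp_first", 60);
    simap_put(&tp, "icmp_reply", 60);
    ct_set_zone_timeout_policy("system", 5, &tp);   /* Zone 5, kernel dp. */
    /* ... later, when the CT_Zone row is removed from the database ... */
    ct_del_zone_timeout_policy("system", 5);
    simap_destroy(&tp);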
Diffstat (limited to 'ofproto/ofproto-dpif.c')
-rw-r--r--    ofproto/ofproto-dpif.c | 293
1 file changed, 293 insertions(+), 0 deletions(-)
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 7bb0f7bdb..e120d9ecd 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -156,6 +156,31 @@ struct ofport_dpif {
size_t n_qdscp;
};
+struct ct_timeout_policy {
+ int ref_count; /* The number of ct zones that use this
+ * timeout policy. */
+ uint32_t tp_id; /* Timeout policy id in the datapath. */
+ struct simap tp; /* A map from timeout policy attribute to
+ * timeout value. */
+ struct hmap_node node; /* Element in struct dpif_backer's "ct_tps"
+ * hmap. */
+ struct ovs_list list_node; /* Element in struct dpif_backer's
+ * "ct_tp_kill_list" list. */
+};
+
+/* Periodically try to purge deleted timeout policies from the datapath. Retry
+ * may be necessary if the kernel datapath has a non-zero datapath flow
+ * reference count for the timeout policy. */
+#define TIMEOUT_POLICY_CLEANUP_INTERVAL (20000) /* 20 seconds. */
+static long long int timeout_policy_cleanup_timer = LLONG_MIN;
+
+struct ct_zone {
+ uint16_t zone_id;
+ struct ct_timeout_policy *ct_tp;
+ struct cmap_node node; /* Element in struct dpif_backer's
+ * "ct_zones" cmap. */
+};
+
static odp_port_t ofp_port_to_odp_port(const struct ofproto_dpif *,
ofp_port_t);
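A note on the concurrency model implied by the structures above (my reading; the patch does not spell it out): 'ct_zones' is a cmap, so it can be read locklessly while being updated, which is why teardown later in this diff defers frees with ovsrcu_postpone() instead of calling free() directly. A minimal sketch of that idiom, with a hypothetical reader built on ct_zone_lookup() from this patch:

    /* Hypothetical lockless reader: safe against concurrent
     * cmap_insert()/cmap_remove() because removed nodes are freed only
     * after an RCU grace period (via ovsrcu_postpone()). */
    static struct ct_timeout_policy *
    ct_zone_timeout_policy_get(const struct cmap *ct_zones, uint16_t zone_id)
    {
        struct ct_zone *ct_zone = ct_zone_lookup(ct_zones, zone_id);
        return ct_zone ? ct_zone->ct_tp : NULL;
    }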
@@ -196,6 +221,9 @@ static struct hmap all_ofproto_dpifs_by_uuid =
static bool ofproto_use_tnl_push_pop = true;
static void ofproto_unixctl_init(void);
+static void ct_zone_config_init(struct dpif_backer *backer);
+static void ct_zone_config_uninit(struct dpif_backer *backer);
+static void ct_zone_timeout_policy_sweep(struct dpif_backer *backer);
static inline struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
@@ -488,6 +516,7 @@ type_run(const char *type)
}
process_dpif_port_changes(backer);
+ ct_zone_timeout_policy_sweep(backer);
return 0;
}
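type_run() is invoked on every main-loop iteration, so the sweep (defined later in this diff) self-throttles using the file-scope 'timeout_policy_cleanup_timer' declared above. The general pattern, sketched here with a static local instead of the file-scope timer, assuming time_msec() from lib/timeval.h:

    /* Self-throttling pattern: do the work at most once per interval,
     * no matter how often the surrounding loop runs. */
    static void
    sweep_example(void)
    {
        static long long int next_sweep = LLONG_MIN;  /* Fires on first call. */

        if (time_msec() >= next_sweep) {
            /* ... attempt the deferred cleanup here ... */
            next_sweep = time_msec() + TIMEOUT_POLICY_CLEANUP_INTERVAL;
        }
    }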
@@ -683,6 +712,7 @@ close_dpif_backer(struct dpif_backer *backer, bool del)
}
dpif_close(backer->dpif);
id_pool_destroy(backer->meter_ids);
+ ct_zone_config_uninit(backer);
free(backer);
}
@@ -811,6 +841,8 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
backer->meter_ids = NULL;
}
+ ct_zone_config_init(backer);
+
/* Make a pristine snapshot of 'support' into 'boottime_support'.
* 'boottime_support' can be checked to prevent 'support' to be changed
* beyond the datapath capabilities. In case 'support' is changed by
@@ -5087,6 +5119,265 @@ ct_flush(const struct ofproto *ofproto_, const uint16_t *zone)
ct_dpif_flush(ofproto->backer->dpif, zone, NULL);
}
+static struct ct_timeout_policy *
+ct_timeout_policy_lookup(const struct hmap *ct_tps, struct simap *tp)
+{
+ struct ct_timeout_policy *ct_tp;
+
+ HMAP_FOR_EACH_WITH_HASH (ct_tp, node, simap_hash(tp), ct_tps) {
+ if (simap_equal(&ct_tp->tp, tp)) {
+ return ct_tp;
+ }
+ }
+ return NULL;
+}
+
+static struct ct_timeout_policy *
+ct_timeout_policy_alloc__(void)
+{
+ struct ct_timeout_policy *ct_tp = xzalloc(sizeof *ct_tp);
+ simap_init(&ct_tp->tp);
+ return ct_tp;
+}
+
+static struct ct_timeout_policy *
+ct_timeout_policy_alloc(struct simap *tp, struct id_pool *tp_ids)
+{
+ struct simap_node *node;
+
+ struct ct_timeout_policy *ct_tp = ct_timeout_policy_alloc__();
+ SIMAP_FOR_EACH (node, tp) {
+ simap_put(&ct_tp->tp, node->name, node->data);
+ }
+
+ if (!id_pool_alloc_id(tp_ids, &ct_tp->tp_id)) {
+ VLOG_ERR_RL(&rl, "failed to allocate timeout policy id.");
+ simap_destroy(&ct_tp->tp);
+ free(ct_tp);
+ return NULL;
+ }
+
+ return ct_tp;
+}
+
+static void
+ct_timeout_policy_destroy__(struct ct_timeout_policy *ct_tp)
+{
+ simap_destroy(&ct_tp->tp);
+ free(ct_tp);
+}
+
+static void
+ct_timeout_policy_destroy(struct ct_timeout_policy *ct_tp,
+ struct id_pool *tp_ids)
+{
+ id_pool_free_id(tp_ids, ct_tp->tp_id);
+ ovsrcu_postpone(ct_timeout_policy_destroy__, ct_tp);
+}
+
+static void
+ct_timeout_policy_unref(struct dpif_backer *backer,
+ struct ct_timeout_policy *ct_tp)
+{
+ if (ct_tp) {
+ ct_tp->ref_count--;
+
+ if (!ct_tp->ref_count) {
+ hmap_remove(&backer->ct_tps, &ct_tp->node);
+ ovs_list_push_back(&backer->ct_tp_kill_list, &ct_tp->list_node);
+ }
+ }
+}
+
+static struct ct_zone *
+ct_zone_lookup(const struct cmap *ct_zones, uint16_t zone_id)
+{
+ struct ct_zone *ct_zone;
+
+ CMAP_FOR_EACH_WITH_HASH (ct_zone, node, hash_int(zone_id, 0), ct_zones) {
+ if (ct_zone->zone_id == zone_id) {
+ return ct_zone;
+ }
+ }
+ return NULL;
+}
+
+static struct ct_zone *
+ct_zone_alloc(uint16_t zone_id)
+{
+ struct ct_zone *ct_zone = xzalloc(sizeof *ct_zone);
+ ct_zone->zone_id = zone_id;
+ return ct_zone;
+}
+
+static void
+ct_zone_destroy(struct ct_zone *ct_zone)
+{
+ ovsrcu_postpone(free, ct_zone);
+}
+
+static void
+ct_zone_remove_and_destroy(struct dpif_backer *backer, struct ct_zone *ct_zone)
+{
+ cmap_remove(&backer->ct_zones, &ct_zone->node,
+ hash_int(ct_zone->zone_id, 0));
+ ct_zone_destroy(ct_zone);
+}
+
+static void
+ct_add_timeout_policy_to_dpif(struct dpif *dpif,
+ struct ct_timeout_policy *ct_tp)
+{
+ struct ct_dpif_timeout_policy cdtp;
+ struct simap_node *node;
+
+ cdtp.id = ct_tp->tp_id;
+ SIMAP_FOR_EACH (node, &ct_tp->tp) {
+ ct_dpif_set_timeout_policy_attr_by_name(&cdtp, node->name, node->data);
+ }
+
+ int err = ct_dpif_set_timeout_policy(dpif, &cdtp);
+ if (err) {
+ VLOG_ERR_RL(&rl, "failed to set timeout policy %"PRIu32" (%s)",
+ ct_tp->tp_id, ovs_strerror(err));
+ }
+}
+
+static void
+clear_existing_ct_timeout_policies(struct dpif_backer *backer)
+{
+ /* With the kernel datapath, there may be pre-existing timeout policies
+ * in the kernel when OVS starts. To avoid reassigning the same timeout
+ * policy ids, we dump all the pre-existing timeout policies and keep
+ * their ids in the pool. Since OVS will not use those timeout policies
+ * for new datapath flows, we add them to the kill list and remove
+ * them later on. */
+ struct ct_dpif_timeout_policy cdtp;
+ void *state;
+
+ if (ct_dpif_timeout_policy_dump_start(backer->dpif, &state)) {
+ return;
+ }
+
+ while (!ct_dpif_timeout_policy_dump_next(backer->dpif, state, &cdtp)) {
+ struct ct_timeout_policy *ct_tp = ct_timeout_policy_alloc__();
+ ct_tp->tp_id = cdtp.id;
+ id_pool_add(backer->tp_ids, cdtp.id);
+ ovs_list_push_back(&backer->ct_tp_kill_list, &ct_tp->list_node);
+ }
+
+ ct_dpif_timeout_policy_dump_done(backer->dpif, state);
+}
+
+#define MAX_TIMEOUT_POLICY_ID UINT32_MAX
+
+static void
+ct_zone_config_init(struct dpif_backer *backer)
+{
+ backer->tp_ids = id_pool_create(0, MAX_TIMEOUT_POLICY_ID);
+ cmap_init(&backer->ct_zones);
+ hmap_init(&backer->ct_tps);
+ ovs_list_init(&backer->ct_tp_kill_list);
+ clear_existing_ct_timeout_policies(backer);
+}
+
+static void
+ct_zone_config_uninit(struct dpif_backer *backer)
+{
+ struct ct_zone *ct_zone;
+ CMAP_FOR_EACH (ct_zone, node, &backer->ct_zones) {
+ ct_zone_remove_and_destroy(backer, ct_zone);
+ }
+
+ struct ct_timeout_policy *ct_tp;
+ HMAP_FOR_EACH_POP (ct_tp, node, &backer->ct_tps) {
+ ct_timeout_policy_destroy(ct_tp, backer->tp_ids);
+ }
+
+ LIST_FOR_EACH_POP (ct_tp, list_node, &backer->ct_tp_kill_list) {
+ ct_timeout_policy_destroy(ct_tp, backer->tp_ids);
+ }
+
+ id_pool_destroy(backer->tp_ids);
+ cmap_destroy(&backer->ct_zones);
+ hmap_destroy(&backer->ct_tps);
+}
+
+static void
+ct_zone_timeout_policy_sweep(struct dpif_backer *backer)
+{
+ if (!ovs_list_is_empty(&backer->ct_tp_kill_list)
+ && time_msec() >= timeout_policy_cleanup_timer) {
+ struct ct_timeout_policy *ct_tp, *next;
+
+ LIST_FOR_EACH_SAFE (ct_tp, next, list_node, &backer->ct_tp_kill_list) {
+ if (!ct_dpif_del_timeout_policy(backer->dpif, ct_tp->tp_id)) {
+ ovs_list_remove(&ct_tp->list_node);
+ ct_timeout_policy_destroy(ct_tp, backer->tp_ids);
+ } else {
+ /* Removal failed; an INFO log is raised by the 'dpif' layer.
+ * Keep the policy on the kill list and retry on the next sweep. */
+ }
+ }
+ timeout_policy_cleanup_timer = time_msec() +
+ TIMEOUT_POLICY_CLEANUP_INTERVAL;
+ }
+}
+
+static void
+ct_set_zone_timeout_policy(const char *datapath_type, uint16_t zone_id,
+ struct simap *timeout_policy)
+{
+ struct dpif_backer *backer = shash_find_data(&all_dpif_backers,
+ datapath_type);
+ if (!backer) {
+ return;
+ }
+
+ struct ct_timeout_policy *ct_tp = ct_timeout_policy_lookup(&backer->ct_tps,
+ timeout_policy);
+ if (!ct_tp) {
+ ct_tp = ct_timeout_policy_alloc(timeout_policy, backer->tp_ids);
+ if (ct_tp) {
+ hmap_insert(&backer->ct_tps, &ct_tp->node, simap_hash(&ct_tp->tp));
+ ct_add_timeout_policy_to_dpif(backer->dpif, ct_tp);
+ } else {
+ return;
+ }
+ }
+
+ struct ct_zone *ct_zone = ct_zone_lookup(&backer->ct_zones, zone_id);
+ if (ct_zone) {
+ if (ct_zone->ct_tp != ct_tp) {
+ /* Update the zone timeout policy. */
+ ct_timeout_policy_unref(backer, ct_zone->ct_tp);
+ ct_zone->ct_tp = ct_tp;
+ ct_tp->ref_count++;
+ }
+ } else {
+ struct ct_zone *new_ct_zone = ct_zone_alloc(zone_id);
+ new_ct_zone->ct_tp = ct_tp;
+ cmap_insert(&backer->ct_zones, &new_ct_zone->node,
+ hash_int(zone_id, 0));
+ ct_tp->ref_count++;
+ }
+}
+
+static void
+ct_del_zone_timeout_policy(const char *datapath_type, uint16_t zone_id)
+{
+ struct dpif_backer *backer = shash_find_data(&all_dpif_backers,
+ datapath_type);
+ if (!backer) {
+ return;
+ }
+
+ struct ct_zone *ct_zone = ct_zone_lookup(&backer->ct_zones, zone_id);
+ if (ct_zone) {
+ ct_timeout_policy_unref(backer, ct_zone->ct_tp);
+ ct_zone_remove_and_destroy(backer, ct_zone);
+ }
+}
+
static bool
set_frag_handling(struct ofproto *ofproto_,
enum ofputil_frag_handling frag_handling)
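Looking back at clear_existing_ct_timeout_policies() above: pre-existing ids are reserved in 'tp_ids' with id_pool_add() so that later id_pool_alloc_id() calls cannot hand them out again, while the stale policies themselves are queued on the kill list for eventual deletion. A minimal sketch of the reservation behavior, assuming the lib/id-pool.h allocator hands out the lowest free id:

    /* Sketch: reserving a pre-existing id up front keeps the allocator
     * from reusing it. */
    struct id_pool *ids = id_pool_create(0, MAX_TIMEOUT_POLICY_ID);
    uint32_t new_id;

    id_pool_add(ids, 0);                 /* Id 0 already lives in the kernel. */
    id_pool_alloc_id(ids, &new_id);      /* Yields 1, not 0. */
    id_pool_destroy(ids);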
@@ -6191,4 +6482,6 @@ const struct ofproto_class ofproto_dpif_class = {
get_datapath_version, /* get_datapath_version */
type_set_config,
ct_flush, /* ct_flush */
+ ct_set_zone_timeout_policy,
+ ct_del_zone_timeout_policy,
};
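Finally, the two methods registered above are reached through the ofproto layer rather than called directly. A hedged sketch of what such a dispatch wrapper might look like; the real wrapper lands in a companion ofproto.c patch, so the name and shape here are assumptions:

    /* Hypothetical ofproto.c-style wrapper; the function name and the use
     * of ofproto_class_find__() here are assumptions. */
    void
    ofproto_ct_set_zone_timeout_policy(const char *datapath_type,
                                       uint16_t zone_id,
                                       struct simap *timeout_policy)
    {
        datapath_type = ofproto_normalize_type(datapath_type);
        const struct ofproto_class *class = ofproto_class_find__(datapath_type);

        if (class && class->ct_set_zone_timeout_policy) {
            class->ct_set_zone_timeout_policy(datapath_type, zone_id,
                                              timeout_policy);
        }
    }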