author     Guoshuai Li <ligs@dtdream.com>              2018-01-24 20:39:09 +0800
committer  Ben Pfaff <blp@ovn.org>                     2018-01-24 12:33:35 -0800
commit     7605c992e11ef9fee5964feed3d6fffb4e38c455 (patch)
tree       07d2f50c3b12571f098c6ce8173e204e6732a2f9
parent     2cabfd27e72e37be3594d6901dbb327b668fb25f (diff)
download   openvswitch-7605c992e11ef9fee5964feed3d6fffb4e38c455.tar.gz
ovn: OVN Support QoS meter
This feature is used to limit the bandwidth of flows, such as floating IPs.

ovn-northd changes:
1. Add a bandwidth column to the northbound QoS table.
2. Add QOS_METER stages to the logical switch ingress and egress pipelines.
3. Add a set_meter() action to the southbound Logical_Flow table.

ovn-controller changes: add a meter_table so that set_meter() actions can be
translated into entries in the OpenFlow meter table.

Currently, this feature is only supported with the DPDK (userspace) datapath.

Signed-off-by: Guoshuai Li <ligs@dtdream.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
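A minimal usage sketch (the switch lsw0, port lp1, and $qos_id below follow the
test added in this commit and are illustrative): the bandwidth limit is set as
a map on a northbound QoS row, and ovn-controller translates it into an
OpenFlow meter on the integration bridge.

    # Create a QoS rule on logical switch lsw0 and give it a bandwidth limit (kbps).
    qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS priority=100 \
                 action=dscp=48 match="inport\=\=\"lp1\"" direction="from-lport" \
                 -- set Logical_Switch lsw0 qos_rules=@lp1-qos)
    ovn-nbctl --wait=hv set QoS $qos_id bandwidth=rate=100,burst=1000

    # The controller installs a matching meter and a flow that references it.
    ovs-ofctl -O OpenFlow13 dump-meters br-int
    ovs-ofctl -O OpenFlow13 dump-flows br-int | grep meter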
-rw-r--r--  NEWS                               |   1
-rw-r--r--  include/ovn/actions.h              |  13
-rw-r--r--  ovn/controller/lflow.c             |  10
-rw-r--r--  ovn/controller/lflow.h             |   1
-rw-r--r--  ovn/controller/ofctrl.c            |  96
-rw-r--r--  ovn/controller/ofctrl.h            |   3
-rw-r--r--  ovn/controller/ovn-controller.c    |  11
-rw-r--r--  ovn/lib/actions.c                  |  83
-rw-r--r--  ovn/northd/ovn-northd.8.xml        |  54
-rw-r--r--  ovn/northd/ovn-northd.c            | 116
-rw-r--r--  ovn/ovn-nb.ovsschema               |  14
-rw-r--r--  ovn/ovn-nb.xml                     |  16
-rw-r--r--  ovn/ovn-sb.xml                     |  15
-rw-r--r--  ovn/utilities/ovn-trace.c          |   4
-rw-r--r--  tests/ovn.at                       |  52
-rw-r--r--  tests/test-ovn.c                   |   5
16 files changed, 418 insertions, 76 deletions
diff --git a/NEWS b/NEWS
index c02faad01..734932697 100644
--- a/NEWS
+++ b/NEWS
@@ -114,6 +114,7 @@ v2.8.0 - 31 Aug 2017
gateway.
* Add support for ACL logging.
* ovn-northd now has native support for active-standby high availability.
+     * Add support for QoS bandwidth limit with DPDK.
- Tracing with ofproto/trace now traces through recirculation.
- OVSDB:
* New support for role-based access control (see ovsdb-server(1)).
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index ea90dbb2a..9554a395d 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -75,7 +75,8 @@ struct ovn_extend_table;
OVNACT(DNS_LOOKUP, ovnact_dns_lookup) \
OVNACT(LOG, ovnact_log) \
OVNACT(PUT_ND_RA_OPTS, ovnact_put_opts) \
- OVNACT(ND_NS, ovnact_nest)
+ OVNACT(ND_NS, ovnact_nest) \
+ OVNACT(SET_METER, ovnact_set_meter)
/* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
enum OVS_PACKED_ENUM ovnact_type {
@@ -281,6 +282,13 @@ struct ovnact_log {
char *name;
};
+/* OVNACT_SET_METER. */
+struct ovnact_set_meter {
+ struct ovnact ovnact;
+ uint64_t rate; /* rate field, in kbps. */
+ uint64_t burst; /* burst rate field, in kbps. */
+};
+
/* Internal use by the helpers below. */
void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
@@ -490,6 +498,9 @@ struct ovnact_encode_params {
/* A struct to figure out the group_id for group actions. */
struct ovn_extend_table *group_table;
+ /* A struct to figure out the meter_id for meter actions. */
+ struct ovn_extend_table *meter_table;
+
/* OVN maps each logical flow table (ltable), one-to-one, onto a physical
* OpenFlow flow table (ptable). A number of parameters describe this
* mapping and data related to flow tables:
diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
index 3d990c49c..1e79a5355 100644
--- a/ovn/controller/lflow.c
+++ b/ovn/controller/lflow.c
@@ -27,6 +27,7 @@
#include "ovn/expr.h"
#include "ovn/lib/ovn-l7.h"
#include "ovn/lib/ovn-sb-idl.h"
+#include "ovn/lib/extend-table.h"
#include "packets.h"
#include "physical.h"
#include "simap.h"
@@ -62,6 +63,7 @@ static void consider_logical_flow(struct controller_ctx *ctx,
const struct sbrec_logical_flow *lflow,
const struct hmap *local_datapaths,
struct ovn_extend_table *group_table,
+ struct ovn_extend_table *meter_table,
const struct sbrec_chassis *chassis,
struct hmap *dhcp_opts,
struct hmap *dhcpv6_opts,
@@ -144,6 +146,7 @@ add_logical_flows(struct controller_ctx *ctx,
const struct chassis_index *chassis_index,
const struct hmap *local_datapaths,
struct ovn_extend_table *group_table,
+ struct ovn_extend_table *meter_table,
const struct sbrec_chassis *chassis,
const struct shash *addr_sets,
struct hmap *flow_table,
@@ -174,7 +177,7 @@ add_logical_flows(struct controller_ctx *ctx,
SBREC_LOGICAL_FLOW_FOR_EACH (lflow, ctx->ovnsb_idl) {
consider_logical_flow(ctx, chassis_index,
lflow, local_datapaths,
- group_table, chassis,
+ group_table, meter_table, chassis,
&dhcp_opts, &dhcpv6_opts, &nd_ra_opts,
&conj_id_ofs, addr_sets, flow_table,
active_tunnels, local_lport_ids);
@@ -191,6 +194,7 @@ consider_logical_flow(struct controller_ctx *ctx,
const struct sbrec_logical_flow *lflow,
const struct hmap *local_datapaths,
struct ovn_extend_table *group_table,
+ struct ovn_extend_table *meter_table,
const struct sbrec_chassis *chassis,
struct hmap *dhcp_opts,
struct hmap *dhcpv6_opts,
@@ -263,6 +267,7 @@ consider_logical_flow(struct controller_ctx *ctx,
.is_switch = is_switch(ldp),
.is_gateway_router = is_gateway_router(ldp, local_datapaths),
.group_table = group_table,
+ .meter_table = meter_table,
.pipeline = ingress ? OVNACT_P_INGRESS : OVNACT_P_EGRESS,
.ingress_ptable = OFTABLE_LOG_INGRESS_PIPELINE,
@@ -435,13 +440,14 @@ lflow_run(struct controller_ctx *ctx,
const struct chassis_index *chassis_index,
const struct hmap *local_datapaths,
struct ovn_extend_table *group_table,
+ struct ovn_extend_table *meter_table,
const struct shash *addr_sets,
struct hmap *flow_table,
struct sset *active_tunnels,
struct sset *local_lport_ids)
{
add_logical_flows(ctx, chassis_index, local_datapaths,
- group_table, chassis, addr_sets, flow_table,
+ group_table, meter_table, chassis, addr_sets, flow_table,
active_tunnels, local_lport_ids);
add_neighbor_flows(ctx, flow_table);
}
diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
index 087b0ed8d..22bf5341a 100644
--- a/ovn/controller/lflow.h
+++ b/ovn/controller/lflow.h
@@ -67,6 +67,7 @@ void lflow_run(struct controller_ctx *,
const struct chassis_index *,
const struct hmap *local_datapaths,
struct ovn_extend_table *group_table,
+ struct ovn_extend_table *meter_table,
const struct shash *addr_sets,
struct hmap *flow_table,
struct sset *active_tunnels,
diff --git a/ovn/controller/ofctrl.c b/ovn/controller/ofctrl.c
index 0fa7f76cc..fc4d4d928 100644
--- a/ovn/controller/ofctrl.c
+++ b/ovn/controller/ofctrl.c
@@ -134,6 +134,9 @@ static struct hmap installed_flows;
/* A reference to the group_table. */
static struct ovn_extend_table *groups;
+/* A reference to the meter_table. */
+static struct ovn_extend_table *meters;
+
/* MFF_* field ID for our Geneve option. In S_TLV_TABLE_MOD_SENT, this is
* the option we requested (we don't know whether we obtained it yet). In
* S_CLEAR_FLOWS or S_UPDATE_FLOWS, this is really the option we have. */
@@ -145,13 +148,16 @@ static struct ofpbuf *encode_flow_mod(struct ofputil_flow_mod *);
static struct ofpbuf *encode_group_mod(const struct ofputil_group_mod *);
+static struct ofpbuf *encode_meter_mod(const struct ofputil_meter_mod *);
+
static void ovn_flow_table_clear(struct hmap *flow_table);
static void ovn_flow_table_destroy(struct hmap *flow_table);
static void ofctrl_recv(const struct ofp_header *, enum ofptype);
void
-ofctrl_init(struct ovn_extend_table *group_table)
+ofctrl_init(struct ovn_extend_table *group_table,
+ struct ovn_extend_table *meter_table)
{
swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP13_VERSION);
tx_counter = rconn_packet_counter_create();
@@ -159,6 +165,7 @@ ofctrl_init(struct ovn_extend_table *group_table)
ovs_list_init(&flow_updates);
ovn_init_symtab(&symtab);
groups = group_table;
+ meters = meter_table;
}
/* S_NEW, for a new connection.
@@ -389,6 +396,18 @@ run_S_CLEAR_FLOWS(void)
ovn_extend_table_clear(groups, true);
}
+ /* Send a meter_mod to delete all meters. */
+ struct ofputil_meter_mod mm;
+ memset(&mm, 0, sizeof mm);
+ mm.command = OFPMC13_DELETE;
+ mm.meter.meter_id = OFPM13_ALL;
+ queue_msg(encode_meter_mod(&mm));
+
+ /* Clear existing meters, to match the state of the switch. */
+ if (meters) {
+ ovn_extend_table_clear(meters, true);
+ }
+
/* All flow updates are irrelevant now. */
struct ofctrl_flow_update *fup, *next;
LIST_FOR_EACH_SAFE (fup, next, list_node, &flow_updates) {
@@ -760,6 +779,20 @@ add_group_mod(const struct ofputil_group_mod *gm, struct ovs_list *msgs)
struct ofpbuf *msg = encode_group_mod(gm);
ovs_list_push_back(msgs, &msg->list_node);
}
+
+
+static struct ofpbuf *
+encode_meter_mod(const struct ofputil_meter_mod *mm)
+{
+ return ofputil_encode_meter_mod(OFP13_VERSION, mm);
+}
+
+static void
+add_meter_mod(const struct ofputil_meter_mod *mm, struct ovs_list *msgs)
+{
+ struct ofpbuf *msg = encode_meter_mod(mm);
+ ovs_list_push_back(msgs, &msg->list_node);
+}
static void
add_ct_flush_zone(uint16_t zone_id, struct ovs_list *msgs)
@@ -790,11 +823,10 @@ ofctrl_can_put(void)
/* Replaces the flow table on the switch, if possible, by the flows added
* with ofctrl_add_flow().
*
- * Replaces the group table on the switch, if possible, by the contents of
- * 'groups->desired_groups'. Regardless of whether the group table
- * is updated, this deletes all the groups from the
- * 'groups->desired_groups' and frees them. (The hmap itself isn't
- * destroyed.)
+ * Replaces the group table and meter table on the switch, if possible, by the
+ * contents of 'groups->desired' and 'meters->desired'. Regardless of whether
+ * the group table is updated, this deletes all the groups from
+ * 'groups->desired' and frees them. (The hmap itself isn't destroyed.)
*
* Sends conntrack flush messages to each zone in 'pending_ct_zones' that
* is in the CT_ZONE_OF_QUEUED state and then moves the zone into the
@@ -808,6 +840,7 @@ ofctrl_put(struct hmap *flow_table, struct shash *pending_ct_zones,
if (!ofctrl_can_put()) {
ovn_flow_table_clear(flow_table);
ovn_extend_table_clear(groups, false);
+ ovn_extend_table_clear(meters, false);
return;
}
@@ -848,6 +881,28 @@ ofctrl_put(struct hmap *flow_table, struct shash *pending_ct_zones,
ofputil_uninit_group_mod(&gm);
}
+ /* Iterate through all the desired meters. If there are new ones,
+ * add them to the switch. */
+ struct ovn_extend_table_info *m_desired;
+ EXTEND_TABLE_FOR_EACH_UNINSTALLED (m_desired, meters) {
+ /* Create and install new meter. */
+ struct ofputil_meter_mod mm;
+ enum ofputil_protocol usable_protocols;
+ char *meter_string = xasprintf("meter=%"PRIu32",%s",
+ m_desired->table_id,
+ ds_cstr(&m_desired->info));
+ char *error = parse_ofp_meter_mod_str(&mm, meter_string, OFPMC13_ADD,
+ &usable_protocols);
+ if (!error) {
+ add_meter_mod(&mm, &msgs);
+ } else {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ VLOG_ERR_RL(&rl, "new meter %s %s", error, meter_string);
+ free(error);
+ }
+ free(meter_string);
+ }
+
/* Iterate through all of the installed flows. If any of them are no
* longer desired, delete them; if any of them should have different
* actions, update them. */
@@ -936,7 +991,7 @@ ofctrl_put(struct hmap *flow_table, struct shash *pending_ct_zones,
} else {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
VLOG_ERR_RL(&rl, "Error deleting group %d: %s",
- installed->table_id, error);
+ installed->table_id, error);
free(error);
}
free(group_string);
@@ -947,6 +1002,33 @@ ofctrl_put(struct hmap *flow_table, struct shash *pending_ct_zones,
/* Move the contents of groups->desired to groups->existing. */
ovn_extend_table_move(groups);
+ /* Iterate through the installed meters from previous runs. If they
+     * are no longer needed, delete them. */
+ struct ovn_extend_table_info *m_installed, *next_meter;
+ EXTEND_TABLE_FOR_EACH_INSTALLED (m_installed, next_meter, meters) {
+ /* Delete the meter. */
+ struct ofputil_meter_mod mm;
+ enum ofputil_protocol usable_protocols;
+ char *meter_string = xasprintf("meter=%"PRIu32"",
+ m_installed->table_id);
+ char *error = parse_ofp_meter_mod_str(&mm, meter_string,
+ OFPMC13_DELETE,
+ &usable_protocols);
+ if (!error) {
+ add_meter_mod(&mm, &msgs);
+ } else {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ VLOG_ERR_RL(&rl, "Error deleting meter %"PRIu32": %s",
+ m_installed->table_id, error);
+ free(error);
+ }
+ free(meter_string);
+ ovn_extend_table_remove(meters, m_installed);
+ }
+
+ /* Move the contents of meters->desired to meters->existing. */
+ ovn_extend_table_move(meters);
+
if (!ovs_list_is_empty(&msgs)) {
/* Add a barrier to the list of messages. */
struct ofpbuf *barrier = ofputil_encode_barrier_request(OFP13_VERSION);
diff --git a/ovn/controller/ofctrl.h b/ovn/controller/ofctrl.h
index 9b5eab1f4..125f9a4c2 100644
--- a/ovn/controller/ofctrl.h
+++ b/ovn/controller/ofctrl.h
@@ -31,7 +31,8 @@ struct ovsrec_bridge;
struct shash;
/* Interface for OVN main loop. */
-void ofctrl_init(struct ovn_extend_table *group_table);
+void ofctrl_init(struct ovn_extend_table *group_table,
+ struct ovn_extend_table *meter_table);
enum mf_field_id ofctrl_run(const struct ovsrec_bridge *br_int,
struct shash *pending_ct_zones);
bool ofctrl_can_put(void);
diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-controller.c
index c486887a5..7592bda25 100644
--- a/ovn/controller/ovn-controller.c
+++ b/ovn/controller/ovn-controller.c
@@ -597,9 +597,13 @@ main(int argc, char *argv[])
struct ovn_extend_table group_table;
ovn_extend_table_init(&group_table);
+ /* Initialize meter ids for QoS. */
+ struct ovn_extend_table meter_table;
+ ovn_extend_table_init(&meter_table);
+
daemonize_complete();
- ofctrl_init(&group_table);
+ ofctrl_init(&group_table, &meter_table);
pinctrl_init();
lflow_init();
@@ -709,8 +713,8 @@ main(int argc, char *argv[])
struct hmap flow_table = HMAP_INITIALIZER(&flow_table);
lflow_run(&ctx, chassis,
&chassis_index, &local_datapaths, &group_table,
- &addr_sets, &flow_table, &active_tunnels,
- &local_lport_ids);
+ &meter_table, &addr_sets, &flow_table,
+ &active_tunnels, &local_lport_ids);
if (chassis_id) {
bfd_run(&ctx, br_int, chassis, &local_datapaths,
@@ -844,6 +848,7 @@ main(int argc, char *argv[])
shash_destroy(&pending_ct_zones);
ovn_extend_table_destroy(&group_table);
+ ovn_extend_table_destroy(&meter_table);
ovsdb_idl_loop_destroy(&ovs_idl_loop);
ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c
index cf1476bab..a6977d8ee 100644
--- a/ovn/lib/actions.c
+++ b/ovn/lib/actions.c
@@ -2095,6 +2095,87 @@ ovnact_log_free(struct ovnact_log *log)
free(log->name);
}
+static void
+parse_set_meter_action(struct action_context *ctx)
+{
+ uint64_t rate = 0;
+ uint64_t burst = 0;
+
+ lexer_force_match(ctx->lexer, LEX_T_LPAREN); /* Skip '('. */
+ if (ctx->lexer->token.type == LEX_T_INTEGER
+ && ctx->lexer->token.format == LEX_F_DECIMAL) {
+ rate = ntohll(ctx->lexer->token.value.integer);
+ }
+ lexer_get(ctx->lexer);
+ if (lexer_match(ctx->lexer, LEX_T_COMMA)) { /* Skip ','. */
+ if (ctx->lexer->token.type == LEX_T_INTEGER
+ && ctx->lexer->token.format == LEX_F_DECIMAL) {
+ burst = ntohll(ctx->lexer->token.value.integer);
+ }
+ lexer_get(ctx->lexer);
+ }
+ lexer_force_match(ctx->lexer, LEX_T_RPAREN); /* Skip ')'. */
+
+ if (!rate) {
+ lexer_error(ctx->lexer,
+                    "Rate %"PRId64" for set_meter is not valid.",
+ rate);
+ return;
+ }
+
+ struct ovnact_set_meter *cl = ovnact_put_SET_METER(ctx->ovnacts);
+ cl->rate = rate;
+ cl->burst = burst;
+}
+
+static void
+format_SET_METER(const struct ovnact_set_meter *cl, struct ds *s)
+{
+ if (cl->burst) {
+ ds_put_format(s, "set_meter(%"PRId64", %"PRId64");",
+ cl->rate, cl->burst);
+ } else {
+ ds_put_format(s, "set_meter(%"PRId64");", cl->rate);
+ }
+}
+
+static void
+encode_SET_METER(const struct ovnact_set_meter *cl,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ uint32_t table_id;
+ struct ofpact_meter *om;
+
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ if (cl->burst) {
+ ds_put_format(&ds,
+ "kbps burst stats bands=type=drop rate=%"PRId64" "
+ "burst_size=%"PRId64"",
+ cl->rate, cl->burst);
+ } else {
+ ds_put_format(&ds, "kbps stats bands=type=drop rate=%"PRId64"",
+ cl->rate);
+ }
+
+ table_id = ovn_extend_table_assign_id(ep->meter_table, &ds);
+ if (table_id == EXT_TABLE_ID_INVALID) {
+ ds_destroy(&ds);
+ return;
+ }
+
+ ds_destroy(&ds);
+
+ /* Create an action to set the meter. */
+ om = ofpact_put_METER(ofpacts);
+ om->meter_id = table_id;
+}
+
+static void
+ovnact_set_meter_free(struct ovnact_set_meter *ct OVS_UNUSED)
+{
+}
+
/* Parses an assignment or exchange or put_dhcp_opts action. */
static void
parse_set_action(struct action_context *ctx)
@@ -2182,6 +2263,8 @@ parse_action(struct action_context *ctx)
parse_SET_QUEUE(ctx);
} else if (lexer_match_id(ctx->lexer, "log")) {
parse_LOG(ctx);
+ } else if (lexer_match_id(ctx->lexer, "set_meter")) {
+ parse_set_meter_action(ctx);
} else {
lexer_syntax_error(ctx->lexer, "expecting action");
}
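For orientation, a sketch of how the pieces above fit together (the meter id 1
is illustrative; ids are assigned from the meter_table): a logical flow action
set_meter(100, 1000) is encoded as the OpenFlow action meter:1, and ofctrl_put()
builds the following string and hands it to parse_ofp_meter_mod_str() to
install the meter itself:

    # Hypothetical meter_mod string for set_meter(100, 1000) with assigned id 1:
    meter=1,kbps burst stats bands=type=drop rate=100 burst_size=1000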
diff --git a/ovn/northd/ovn-northd.8.xml b/ovn/northd/ovn-northd.8.xml
index 41fba1199..ee9cf8c47 100644
--- a/ovn/northd/ovn-northd.8.xml
+++ b/ovn/northd/ovn-northd.8.xml
@@ -364,7 +364,28 @@
</li>
</ul>
- <h3>Ingress Table 8: LB</h3>
+ <h3>Ingress Table 8: <code>from-lport</code> QoS meter</h3>
+
+ <p>
+ Logical flows in this table closely reproduce those in the
+ <code>QoS</code> table <code>bandwidth</code> column in the
+ <code>OVN_Northbound</code> database for the <code>from-lport</code>
+ direction.
+ </p>
+
+ <ul>
+ <li>
+      For every entry in a logical switch's <code>qos_rules</code> with a
+      <code>bandwidth</code> configured, a flow is added at the priority
+      specified in the QoS table.
+ </li>
+
+ <li>
+ One priority-0 fallback flow that matches all packets and advances to
+ the next table.
+ </li>
+ </ul>
+
+ <h3>Ingress Table 9: LB</h3>
<p>
It contains a priority-0 flow that simply moves traffic to the next
@@ -377,7 +398,7 @@
connection.)
</p>
- <h3>Ingress Table 9: Stateful</h3>
+ <h3>Ingress Table 10: Stateful</h3>
<ul>
<li>
@@ -422,7 +443,7 @@
</li>
</ul>
- <h3>Ingress Table 10: ARP/ND responder</h3>
+ <h3>Ingress Table 11: ARP/ND responder</h3>
<p>
This table implements ARP/ND responder in a logical switch for known
@@ -572,7 +593,7 @@ nd_na {
</li>
</ul>
- <h3>Ingress Table 11: DHCP option processing</h3>
+ <h3>Ingress Table 12: DHCP option processing</h3>
<p>
This table adds the DHCPv4 options to a DHCPv4 packet from the
@@ -632,7 +653,7 @@ next;
</li>
</ul>
- <h3>Ingress Table 12: DHCP responses</h3>
+ <h3>Ingress Table 13: DHCP responses</h3>
<p>
This table implements DHCP responder for the DHCP replies generated by
@@ -714,7 +735,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 13 DNS Lookup</h3>
+ <h3>Ingress Table 14 DNS Lookup</h3>
<p>
This table looks up and resolves the DNS names to the corresponding
@@ -743,7 +764,7 @@ reg0[4] = dns_lookup(); next;
</li>
</ul>
- <h3>Ingress Table 14 DNS Responses</h3>
+ <h3>Ingress Table 15 DNS Responses</h3>
<p>
This table implements DNS responder for the DNS replies generated by
@@ -778,7 +799,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 15 Destination Lookup</h3>
+ <h3>Ingress Table 16 Destination Lookup</h3>
<p>
This table implements switching behavior. It contains these logical
@@ -880,7 +901,14 @@ output;
<code>to-lport</code> qos rules.
</p>
- <h3>Egress Table 6: Stateful</h3>
+ <h3>Egress Table 6: <code>to-lport</code> QoS meter</h3>
+
+ <p>
+ This is similar to ingress table <code>QoS meter</code> except for
+ <code>to-lport</code> qos rules.
+ </p>
+
+ <h3>Egress Table 7: Stateful</h3>
<p>
This is similar to ingress table <code>Stateful</code> except that
@@ -895,18 +923,18 @@ output;
A priority 34000 logical flow is added for each logical port which
has DHCPv4 options defined to allow the DHCPv4 reply packet and which has
DHCPv6 options defined to allow the DHCPv6 reply packet from the
- <code>Ingress Table 12: DHCP responses</code>.
+ <code>Ingress Table 13: DHCP responses</code>.
</li>
<li>
A priority 34000 logical flow is added for each logical switch datapath
configured with DNS records with the match <code>udp.dst = 53</code>
to allow the DNS reply packet from the
- <code>Ingress Table 14:DNS responses</code>.
+ <code>Ingress Table 15:DNS responses</code>.
</li>
</ul>
- <h3>Egress Table 7: Egress Port Security - IP</h3>
+ <h3>Egress Table 8: Egress Port Security - IP</h3>
<p>
This is similar to the port security logic in table
@@ -916,7 +944,7 @@ output;
<code>ip4.src</code> and <code>ip6.src</code>
</p>
- <h3>Egress Table 8: Egress Port Security - L2</h3>
+ <h3>Egress Table 9: Egress Port Security - L2</h3>
<p>
This is similar to the ingress port security logic in ingress table
diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
index 902392f84..f4edd8ff5 100644
--- a/ovn/northd/ovn-northd.c
+++ b/ovn/northd/ovn-northd.c
@@ -108,25 +108,27 @@ enum ovn_stage {
PIPELINE_STAGE(SWITCH, IN, PRE_STATEFUL, 5, "ls_in_pre_stateful") \
PIPELINE_STAGE(SWITCH, IN, ACL, 6, "ls_in_acl") \
PIPELINE_STAGE(SWITCH, IN, QOS_MARK, 7, "ls_in_qos_mark") \
- PIPELINE_STAGE(SWITCH, IN, LB, 8, "ls_in_lb") \
- PIPELINE_STAGE(SWITCH, IN, STATEFUL, 9, "ls_in_stateful") \
- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 10, "ls_in_arp_rsp") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 11, "ls_in_dhcp_options") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 12, "ls_in_dhcp_response") \
- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 13, "ls_in_dns_lookup") \
- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 14, "ls_in_dns_response") \
- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 15, "ls_in_l2_lkup") \
- \
- /* Logical switch egress stages. */ \
- PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 0, "ls_out_pre_lb") \
- PIPELINE_STAGE(SWITCH, OUT, PRE_ACL, 1, "ls_out_pre_acl") \
- PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful") \
- PIPELINE_STAGE(SWITCH, OUT, LB, 3, "ls_out_lb") \
+ PIPELINE_STAGE(SWITCH, IN, QOS_METER, 8, "ls_in_qos_meter") \
+ PIPELINE_STAGE(SWITCH, IN, LB, 9, "ls_in_lb") \
+ PIPELINE_STAGE(SWITCH, IN, STATEFUL, 10, "ls_in_stateful") \
+ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 11, "ls_in_arp_rsp") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 12, "ls_in_dhcp_options") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 13, "ls_in_dhcp_response") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 14, "ls_in_dns_lookup") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 15, "ls_in_dns_response") \
+ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 16, "ls_in_l2_lkup") \
+ \
+ /* Logical switch egress stages. */ \
+ PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 0, "ls_out_pre_lb") \
+ PIPELINE_STAGE(SWITCH, OUT, PRE_ACL, 1, "ls_out_pre_acl") \
+ PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful") \
+ PIPELINE_STAGE(SWITCH, OUT, LB, 3, "ls_out_lb") \
PIPELINE_STAGE(SWITCH, OUT, ACL, 4, "ls_out_acl") \
PIPELINE_STAGE(SWITCH, OUT, QOS_MARK, 5, "ls_out_qos_mark") \
- PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 6, "ls_out_stateful") \
- PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP, 7, "ls_out_port_sec_ip") \
- PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2, 8, "ls_out_port_sec_l2") \
+ PIPELINE_STAGE(SWITCH, OUT, QOS_METER, 6, "ls_out_qos_meter") \
+ PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 7, "ls_out_stateful") \
+ PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP, 8, "ls_out_port_sec_ip") \
+ PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2, 9, "ls_out_port_sec_l2") \
\
/* Logical router ingress stages. */ \
PIPELINE_STAGE(ROUTER, IN, ADMISSION, 0, "lr_in_admission") \
@@ -3389,21 +3391,57 @@ static void
build_qos(struct ovn_datapath *od, struct hmap *lflows) {
ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_MARK, 0, "1", "next;");
ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_MARK, 0, "1", "next;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_METER, 0, "1", "next;");
+ ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_METER, 0, "1", "next;");
for (size_t i = 0; i < od->nbs->n_qos_rules; i++) {
struct nbrec_qos *qos = od->nbs->qos_rules[i];
bool ingress = !strcmp(qos->direction, "from-lport") ? true :false;
enum ovn_stage stage = ingress ? S_SWITCH_IN_QOS_MARK : S_SWITCH_OUT_QOS_MARK;
+ int64_t rate = 0;
+ int64_t burst = 0;
+
+ for (size_t j = 0; j < qos->n_action; j++) {
+ if (!strcmp(qos->key_action[j], "dscp")) {
+ struct ds dscp_action = DS_EMPTY_INITIALIZER;
+
+ ds_put_format(&dscp_action, "ip.dscp = %"PRId64"; next;",
+ qos->value_action[j]);
+ ovn_lflow_add(lflows, od, stage,
+ qos->priority,
+ qos->match, ds_cstr(&dscp_action));
+ ds_destroy(&dscp_action);
+ }
+ }
- if (!strcmp(qos->key_action, "dscp")) {
- struct ds dscp_action = DS_EMPTY_INITIALIZER;
+ for (size_t n = 0; n < qos->n_bandwidth; n++) {
+ if (!strcmp(qos->key_bandwidth[n], "rate")) {
+ rate = qos->value_bandwidth[n];
+ } else if (!strcmp(qos->key_bandwidth[n], "burst")) {
+ burst = qos->value_bandwidth[n];
+ }
+ }
+ if (rate) {
+ struct ds meter_action = DS_EMPTY_INITIALIZER;
+ stage = ingress ? S_SWITCH_IN_QOS_METER : S_SWITCH_OUT_QOS_METER;
+ if (burst) {
+ ds_put_format(&meter_action,
+ "set_meter(%"PRId64", %"PRId64"); next;",
+ rate, burst);
+ } else {
+ ds_put_format(&meter_action,
+ "set_meter(%"PRId64"); next;",
+ rate);
+ }
- ds_put_format(&dscp_action, "ip.dscp = %d; next;",
- (uint8_t)qos->value_action);
+ /* Ingress and Egress QoS Meter Table.
+ *
+             * We limit the bandwidth of this flow by attaching a meter to it.
+ */
ovn_lflow_add(lflows, od, stage,
qos->priority,
- qos->match, ds_cstr(&dscp_action));
- ds_destroy(&dscp_action);
+ qos->match, ds_cstr(&meter_action));
+ ds_destroy(&meter_action);
}
}
}
@@ -3519,7 +3557,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
struct ds actions = DS_EMPTY_INITIALIZER;
/* Build pre-ACL and ACL tables for both ingress and egress.
- * Ingress tables 3 through 9. Egress tables 0 through 6. */
+ * Ingress tables 3 through 10. Egress tables 0 through 7. */
struct ovn_datapath *od;
HMAP_FOR_EACH (od, key_node, datapaths) {
if (!od->nbs) {
@@ -3602,7 +3640,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_IP, 0, "1", "next;");
}
- /* Ingress table 10: ARP/ND responder, skip requests coming from localnet
+ /* Ingress table 11: ARP/ND responder, skip requests coming from localnet
* and vtep ports. (priority 100); see ovn-northd.8.xml for the
* rationale. */
HMAP_FOR_EACH (op, key_node, ports) {
@@ -3619,7 +3657,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
}
}
- /* Ingress table 10: ARP/ND responder, reply for known IPs.
+ /* Ingress table 11: ARP/ND responder, reply for known IPs.
* (priority 50). */
HMAP_FOR_EACH (op, key_node, ports) {
if (!op->nbsp) {
@@ -3714,7 +3752,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
}
}
- /* Ingress table 10: ARP/ND responder, by default goto next.
+ /* Ingress table 11: ARP/ND responder, by default goto next.
* (priority 0)*/
HMAP_FOR_EACH (od, key_node, datapaths) {
if (!od->nbs) {
@@ -3724,7 +3762,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, "1", "next;");
}
- /* Logical switch ingress table 11 and 12: DHCP options and response
+ /* Logical switch ingress table 12 and 13: DHCP options and response
* priority 100 flows. */
HMAP_FOR_EACH (op, key_node, ports) {
if (!op->nbsp) {
@@ -3826,7 +3864,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
}
}
- /* Logical switch ingress table 13 and 14: DNS lookup and response
+ /* Logical switch ingress table 14 and 15: DNS lookup and response
* priority 100 flows.
*/
HMAP_FOR_EACH (od, key_node, datapaths) {
@@ -3858,9 +3896,9 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
ds_destroy(&action);
}
- /* Ingress table 11 and 12: DHCP options and response, by default goto next.
- * (priority 0).
- * Ingress table 13 and 14: DNS lookup and response, by default goto next.
+ /* Ingress table 12 and 13: DHCP options and response, by default goto
+ * next. (priority 0).
+ * Ingress table 14 and 15: DNS lookup and response, by default goto next.
* (priority 0).*/
HMAP_FOR_EACH (od, key_node, datapaths) {
@@ -3874,7 +3912,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0, "1", "next;");
}
- /* Ingress table 15: Destination lookup, broadcast and multicast handling
+ /* Ingress table 16: Destination lookup, broadcast and multicast handling
* (priority 100). */
HMAP_FOR_EACH (op, key_node, ports) {
if (!op->nbsp) {
@@ -3894,7 +3932,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
"outport = \""MC_FLOOD"\"; output;");
}
- /* Ingress table 13: Destination lookup, unicast handling (priority 50), */
+ /* Ingress table 16: Destination lookup, unicast handling (priority 50), */
HMAP_FOR_EACH (op, key_node, ports) {
if (!op->nbsp) {
continue;
@@ -3994,7 +4032,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
}
}
- /* Ingress table 13: Destination lookup for unknown MACs (priority 0). */
+ /* Ingress table 16: Destination lookup for unknown MACs (priority 0). */
HMAP_FOR_EACH (od, key_node, datapaths) {
if (!od->nbs) {
continue;
@@ -4006,8 +4044,8 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
}
}
- /* Egress tables 6: Egress port security - IP (priority 0)
- * Egress table 7: Egress port security L2 - multicast/broadcast
+ /* Egress tables 8: Egress port security - IP (priority 0)
+ * Egress table 9: Egress port security L2 - multicast/broadcast
* (priority 100). */
HMAP_FOR_EACH (od, key_node, datapaths) {
if (!od->nbs) {
@@ -4019,10 +4057,10 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
"output;");
}
- /* Egress table 6: Egress port security - IP (priorities 90 and 80)
+ /* Egress table 8: Egress port security - IP (priorities 90 and 80)
* if port security enabled.
*
- * Egress table 7: Egress port security - L2 (priorities 50 and 150).
+ * Egress table 9: Egress port security - L2 (priorities 50 and 150).
*
* Priority 50 rules implement port security for enabled logical port.
*
diff --git a/ovn/ovn-nb.ovsschema b/ovn/ovn-nb.ovsschema
index 081ddb54c..32f9d5a39 100644
--- a/ovn/ovn-nb.ovsschema
+++ b/ovn/ovn-nb.ovsschema
@@ -1,7 +1,7 @@
{
"name": "OVN_Northbound",
- "version": "5.9.0",
- "cksum": "1120419033 17249",
+ "version": "5.10.0",
+ "cksum": "626737541 17810",
"tables": {
"NB_Global": {
"columns": {
@@ -164,7 +164,15 @@
"enum": ["set", ["dscp"]]},
"value": {"type": "integer",
"minInteger": 0,
- "maxInteger": 63}}},
+ "maxInteger": 63},
+ "min": 0, "max": "unlimited"}},
+ "bandwidth": {"type": {"key": {"type": "string",
+ "enum": ["set", ["rate",
+ "burst"]]},
+ "value": {"type": "integer",
+ "minInteger": 1,
+ "maxInteger": 4294967295},
+ "min": 0, "max": "unlimited"}},
"external_ids": {
"type": {"key": "string", "value": "string",
"min": 0, "max": "unlimited"}}},
diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml
index 4447c16b4..b7a5b6bf2 100644
--- a/ovn/ovn-nb.xml
+++ b/ovn/ovn-nb.xml
@@ -1264,6 +1264,22 @@
</ul>
</column>
+ <column name="bandwidth">
+ <p>
+      The bandwidth limit to be applied to the matched packets.  This is
+      currently supported only by the userspace (DPDK) datapath.
+ </p>
+ <ul>
+ <li>
+        <code>rate</code>: The value of the rate limit, in kbps.
+      </li>
+      <li>
+        <code>burst</code>: The value of the burst rate limit, in kbps.  This
+        is optional and takes effect only if <code>rate</code> is also
+        specified.
+ </li>
+ </ul>
+ </column>
+
<column name="external_ids">
See <em>External IDs</em> at the beginning of this document.
</column>
diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
index 4a75135dc..f000b166c 100644
--- a/ovn/ovn-sb.xml
+++ b/ovn/ovn-sb.xml
@@ -1642,6 +1642,21 @@
</code>
</p>
</dd>
+
+ <dt><code>set_meter(<var>rate</var>);</code></dt>
+ <dt><code>set_meter(<var>rate</var>, <var>burst</var>);</code></dt>
+ <dd>
+ <p>
+      <b>Parameters</b>: integer rate limit <var>rate</var> in kbps, and an
+      optional integer burst rate limit <var>burst</var> in kbps.
+ </p>
+
+ <p>
+ This action sets the rate limit for a flow.
+ </p>
+
+ <p><b>Example:</b> <code>set_meter(100, 1000);</code></p>
+ </dd>
</dl>
<dl>
diff --git a/ovn/utilities/ovn-trace.c b/ovn/utilities/ovn-trace.c
index 7ff4a2682..06d4ddf8e 100644
--- a/ovn/utilities/ovn-trace.c
+++ b/ovn/utilities/ovn-trace.c
@@ -1888,6 +1888,10 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
case OVNACT_LOG:
execute_log(ovnact_get_LOG(a), uflow, super);
break;
+
+ case OVNACT_SET_METER:
+ /* Nothing to do. */
+ break;
}
}
diff --git a/tests/ovn.at b/tests/ovn.at
index 3d1df1051..d02915e82 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -1082,6 +1082,18 @@ reg1[0] = dns_lookup();
reg1[0] = dns_lookup("foo");
dns_lookup doesn't take any parameters
+# set_meter
+set_meter(0);
+    Rate 0 for set_meter is not valid.
+set_meter(1);
+ encodes as meter:1
+set_meter(100, 1000);
+ encodes as meter:2
+set_meter(100, 1000, );
+ Syntax error at `,' expecting `)'.
+set_meter(4294967295, 4294967295);
+ encodes as meter:3
+
# put_nd_ra_opts
reg1[0] = put_nd_ra_opts(addr_mode = "slaac", mtu = 1500, prefix = aef0::/64, slla = ae:01:02:03:04:05);
encodes as controller(userdata=00.00.00.08.00.00.00.00.00.01.de.10.00.00.00.40.86.00.00.00.ff.00.ff.ff.00.00.00.00.00.00.00.00.05.01.00.00.00.00.05.dc.03.04.40.c0.ff.ff.ff.ff.ff.ff.ff.ff.00.00.00.00.ae.f0.00.00.00.00.00.00.00.00.00.00.00.00.00.00.01.01.ae.01.02.03.04.05,pause)
@@ -5953,7 +5965,7 @@ OVN_CLEANUP([hv])
AT_CLEANUP
-AT_SETUP([ovn -- DSCP marking check])
+AT_SETUP([ovn -- DSCP marking and meter check])
AT_KEYWORDS([ovn])
ovn_start
@@ -6023,10 +6035,32 @@ check_tos 0
qos_id=$(ovn-nbctl --wait=hv -- --id=@lp1-qos create QoS priority=100 action=dscp=48 match="inport\=\=\"lp1\"" direction="from-lport" -- set Logical_Switch lsw0 qos_rules=@lp1-qos)
check_tos 48
+# Check that no QoS meter flow is installed on the hypervisor.
+AT_CHECK([as hv ovs-ofctl dump-flows br-int -O OpenFlow13 | grep meter | wc -l], [0], [0
+])
+
+# Update the meter rate
+ovn-nbctl --wait=hv set QoS $qos_id bandwidth=rate=100
+
+# Check that the hypervisor now has a QoS meter and a flow that uses it.
+AT_CHECK([as hv ovs-ofctl dump-meters br-int -O OpenFlow13 | grep rate=100 | wc -l], [0], [1
+])
+AT_CHECK([as hv ovs-ofctl dump-flows br-int -O OpenFlow13 | grep meter | wc -l], [0], [1
+])
+
# Update the DSCP marking
ovn-nbctl --wait=hv set QoS $qos_id action=dscp=63
check_tos 63
+# Update the meter rate
+ovn-nbctl --wait=hv set QoS $qos_id bandwidth=rate=4294967295,burst=4294967295
+
+# Check that the hypervisor has the updated QoS meter and the meter flow.
+AT_CHECK([as hv ovs-ofctl dump-meters br-int -O OpenFlow13 | grep burst_size=4294967295 | wc -l], [0], [1
+])
+AT_CHECK([as hv ovs-ofctl dump-flows br-int -O OpenFlow13 | grep meter | wc -l], [0], [1
+])
+
ovn-nbctl --wait=hv set QoS $qos_id match="outport\=\=\"lp2\"" direction="to-lport"
check_tos 63
@@ -6034,6 +6068,10 @@ check_tos 63
ovn-nbctl --wait=hv clear Logical_Switch lsw0 qos_rules
check_tos 0
+# Check that the QoS meter flow has been removed from the hypervisor.
+AT_CHECK([as hv ovs-ofctl dump-flows br-int -O OpenFlow13 | grep meter | wc -l], [0], [0
+])
+
OVN_CLEANUP([hv])
AT_CLEANUP
@@ -8502,9 +8540,9 @@ AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=32 | grep active_backup | gre
sleep 3 # let BFD sessions settle so we get the right flows on the right chassis
# make sure that flows for handling the outside router port reside on gw1
-AT_CHECK([as gw1 ovs-ofctl dump-flows br-int table=23 | grep 00:00:02:01:02:04 | wc -l], [0], [[1
+AT_CHECK([as gw1 ovs-ofctl dump-flows br-int table=24 | grep 00:00:02:01:02:04 | wc -l], [0], [[1
]])
-AT_CHECK([as gw2 ovs-ofctl dump-flows br-int table=23 | grep 00:00:02:01:02:04 | wc -l], [0], [[0
+AT_CHECK([as gw2 ovs-ofctl dump-flows br-int table=24 | grep 00:00:02:01:02:04 | wc -l], [0], [[0
]])
# make sure ARP responder flows for outside router port reside on gw1 too
@@ -8594,9 +8632,9 @@ AT_CHECK([ovs-vsctl --bare --columns bfd find Interface name=ovn-hv1-0],[0],
sleep 3 # let BFD sessions settle so we get the right flows on the right chassis
# make sure that flows for handling the outside router port reside on gw2 now
-AT_CHECK([as gw2 ovs-ofctl dump-flows br-int table=23 | grep 00:00:02:01:02:04 | wc -l], [0], [[1
+AT_CHECK([as gw2 ovs-ofctl dump-flows br-int table=24 | grep 00:00:02:01:02:04 | wc -l], [0], [[1
]])
-AT_CHECK([as gw1 ovs-ofctl dump-flows br-int table=23 | grep 00:00:02:01:02:04 | wc -l], [0], [[0
+AT_CHECK([as gw1 ovs-ofctl dump-flows br-int table=24 | grep 00:00:02:01:02:04 | wc -l], [0], [[0
]])
# disconnect GW2 from the network, GW1 should take over
@@ -8608,9 +8646,9 @@ sleep 4
bfd_dump
# make sure that flows for handling the outside router port reside on gw2 now
-AT_CHECK([as gw1 ovs-ofctl dump-flows br-int table=23 | grep 00:00:02:01:02:04 | wc -l], [0], [[1
+AT_CHECK([as gw1 ovs-ofctl dump-flows br-int table=24 | grep 00:00:02:01:02:04 | wc -l], [0], [[1
]])
-AT_CHECK([as gw2 ovs-ofctl dump-flows br-int table=23 | grep 00:00:02:01:02:04 | wc -l], [0], [[0
+AT_CHECK([as gw2 ovs-ofctl dump-flows br-int table=24 | grep 00:00:02:01:02:04 | wc -l], [0], [[0
]])
# check that the chassis redirect port has been reclaimed by the gw1 chassis
diff --git a/tests/test-ovn.c b/tests/test-ovn.c
index 4f65ee9d1..997e778f6 100644
--- a/tests/test-ovn.c
+++ b/tests/test-ovn.c
@@ -1211,6 +1211,10 @@ test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
struct ovn_extend_table group_table;
ovn_extend_table_init(&group_table);
+ /* Initialize meter ids for QoS. */
+ struct ovn_extend_table meter_table;
+ ovn_extend_table_init(&meter_table);
+
simap_init(&ports);
simap_put(&ports, "eth0", 5);
simap_put(&ports, "eth1", 6);
@@ -1250,6 +1254,7 @@ test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
.aux = &ports,
.is_switch = true,
.group_table = &group_table,
+ .meter_table = &meter_table,
.pipeline = OVNACT_P_INGRESS,
.ingress_ptable = 8,