author    Gurucharan Shetty <guru@ovn.org>    2016-07-03 05:31:37 -0700
committer Gurucharan Shetty <guru@ovn.org>    2016-07-03 15:52:51 -0700
commit    467085fd7e316bb88cca83932f854e59fb01e9b5 (patch)
tree      a8b97bbbf33a61948b1de59fa4fc9fd321f5c056 /ovn
parent    52c0fc3921489c13a4bab87e36cd37bbd747e41b (diff)
ovn-controller: Add support for load balancing.
ovn-controller now supports two new logical actions:

1. ct_lb;

   Sends the packet through the conntrack zone to NAT packets. Packets
   that are part of an established connection will automatically get
   NATed based on the NAT arguments supplied to conntrack when the first
   packet was committed.

2. ct_lb(192.168.1.2, 192.168.1.3);
   ct_lb(192.168.1.2:80, 192.168.1.3:80);

   Creates an OpenFlow group with multiple buckets and equal weights
   that statefully changes the destination IP address (and port number)
   of the packet to one of the options provided inside the parentheses.

Signed-off-by: Gurucharan Shetty <guru@ovn.org>
Signed-off-by: Ben Pfaff <blp@ovn.org>
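As a sketch of what this produces (the addresses are the commit's own examples; the group id, recirculation table, and conntrack-zone register below are placeholders, not output from this patch), a logical flow whose actions are

    ct_lb(192.168.1.2:80, 192.168.1.3:80);

is compiled into an OpenFlow select group roughly like

    group_id=1,type=select,
    bucket=bucket_id=1,weight:100,actions=ct(nat(dst=192.168.1.2:80),commit,table=<next>,zone=NXM_NX_REG<X>[0..15]),
    bucket=bucket_id=2,weight:100,actions=ct(nat(dst=192.168.1.3:80),commit,table=<next>,zone=NXM_NX_REG<X>[0..15])

while bare ct_lb; compiles to a ct(nat) action that recirculates the packet to the next table, letting conntrack apply whichever NAT was chosen when the connection was first committed.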
Diffstat (limited to 'ovn')
-rw-r--r--ovn/controller/lflow.c9
-rw-r--r--ovn/controller/lflow.h2
-rw-r--r--ovn/controller/ofctrl.c163
-rw-r--r--ovn/controller/ofctrl.h3
-rw-r--r--ovn/controller/ovn-controller.c25
-rw-r--r--ovn/lib/actions.c152
-rw-r--r--ovn/lib/actions.h21
-rw-r--r--ovn/ovn-sb.xml27
8 files changed, 394 insertions, 8 deletions
diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
index c7a38e773..05e1eaf79 100644
--- a/ovn/controller/lflow.c
+++ b/ovn/controller/lflow.c
@@ -319,6 +319,7 @@ static void consider_logical_flow(const struct lport_index *lports,
const struct sbrec_logical_flow *lflow,
const struct hmap *local_datapaths,
const struct hmap *patched_datapaths,
+ struct group_table *group_table,
const struct simap *ct_zones,
struct hmap *dhcp_opts_p,
uint32_t *conj_id_ofs_p,
@@ -359,6 +360,7 @@ add_logical_flows(struct controller_ctx *ctx, const struct lport_index *lports,
const struct mcgroup_index *mcgroups,
const struct hmap *local_datapaths,
const struct hmap *patched_datapaths,
+ struct group_table *group_table,
const struct simap *ct_zones, struct hmap *flow_table)
{
uint32_t conj_id_ofs = 1;
@@ -373,7 +375,7 @@ add_logical_flows(struct controller_ctx *ctx, const struct lport_index *lports,
const struct sbrec_logical_flow *lflow;
SBREC_LOGICAL_FLOW_FOR_EACH (lflow, ctx->ovnsb_idl) {
consider_logical_flow(lports, mcgroups, lflow, local_datapaths,
- patched_datapaths, ct_zones,
+ patched_datapaths, group_table, ct_zones,
&dhcp_opts, &conj_id_ofs, flow_table);
}
@@ -386,6 +388,7 @@ consider_logical_flow(const struct lport_index *lports,
const struct sbrec_logical_flow *lflow,
const struct hmap *local_datapaths,
const struct hmap *patched_datapaths,
+ struct group_table *group_table,
const struct simap *ct_zones,
struct hmap *dhcp_opts_p,
uint32_t *conj_id_ofs_p,
@@ -464,6 +467,7 @@ consider_logical_flow(const struct lport_index *lports,
.lookup_port = lookup_port_cb,
.aux = &aux,
.ct_zones = ct_zones,
+ .group_table = group_table,
.n_tables = LOG_PIPELINE_LEN,
.first_ptable = first_ptable,
@@ -622,11 +626,12 @@ lflow_run(struct controller_ctx *ctx, const struct lport_index *lports,
const struct mcgroup_index *mcgroups,
const struct hmap *local_datapaths,
const struct hmap *patched_datapaths,
+ struct group_table *group_table,
const struct simap *ct_zones, struct hmap *flow_table)
{
update_address_sets(ctx);
add_logical_flows(ctx, lports, mcgroups, local_datapaths,
- patched_datapaths, ct_zones, flow_table);
+ patched_datapaths, group_table, ct_zones, flow_table);
add_neighbor_flows(ctx, lports, flow_table);
}
diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
index a3fc50c13..e96a24b3b 100644
--- a/ovn/controller/lflow.h
+++ b/ovn/controller/lflow.h
@@ -36,6 +36,7 @@
#include <stdint.h>
struct controller_ctx;
+struct group_table;
struct hmap;
struct lport_index;
struct mcgroup_index;
@@ -63,6 +64,7 @@ void lflow_run(struct controller_ctx *, const struct lport_index *,
const struct mcgroup_index *,
const struct hmap *local_datapaths,
const struct hmap *patched_datapaths,
+ struct group_table *group_table,
const struct simap *ct_zones,
struct hmap *flow_table);
void lflow_destroy(void);
diff --git a/ovn/controller/ofctrl.c b/ovn/controller/ofctrl.c
index f537bc008..4c410da1c 100644
--- a/ovn/controller/ofctrl.c
+++ b/ovn/controller/ofctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015 Nicira, Inc.
+/* Copyright (c) 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
*/
#include <config.h>
+#include "bitmap.h"
#include "byte-order.h"
#include "dirs.h"
#include "hash.h"
@@ -24,11 +25,13 @@
#include "openvswitch/match.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/ofp-msgs.h"
+#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vlog.h"
#include "ovn-controller.h"
+#include "ovn/lib/actions.h"
#include "physical.h"
#include "rconn.h"
#include "socket-util.h"
@@ -63,6 +66,8 @@ static void queue_flow_mod(struct ofputil_flow_mod *);
/* OpenFlow connection to the switch. */
static struct rconn *swconn;
+static void queue_group_mod(struct ofputil_group_mod *);
+
/* Last seen sequence number for 'swconn'. When this differs from
* rconn_get_connection_seqno(rconn), 'swconn' has reconnected. */
static unsigned int seqno;
@@ -95,6 +100,9 @@ static struct rconn_packet_counter *tx_counter;
* installed in the switch. */
static struct hmap installed_flows;
+/* A reference to the group_table. */
+static struct group_table *groups;
+
/* MFF_* field ID for our Geneve option. In S_TLV_TABLE_MOD_SENT, this is
* the option we requested (we don't know whether we obtained it yet). In
* S_CLEAR_FLOWS or S_UPDATE_FLOWS, this is really the option we have. */
@@ -103,6 +111,9 @@ static enum mf_field_id mff_ovn_geneve;
static void ovn_flow_table_clear(struct hmap *flow_table);
static void ovn_flow_table_destroy(struct hmap *flow_table);
+static void ovn_group_table_clear(struct group_table *group_table,
+ bool existing);
+
static void ofctrl_recv(const struct ofp_header *, enum ofptype);
void
@@ -312,9 +323,23 @@ run_S_CLEAR_FLOWS(void)
queue_flow_mod(&fm);
VLOG_DBG("clearing all flows");
+ struct ofputil_group_mod gm;
+ memset(&gm, 0, sizeof gm);
+ gm.command = OFPGC11_DELETE;
+ gm.group_id = OFPG_ALL;
+ gm.command_bucket_id = OFPG15_BUCKET_ALL;
+ ovs_list_init(&gm.buckets);
+ queue_group_mod(&gm);
+ ofputil_bucket_list_destroy(&gm.buckets);
+
/* Clear installed_flows, to match the state of the switch. */
ovn_flow_table_clear(&installed_flows);
+ /* Clear existing groups, to match the state of the switch. */
+ if (groups) {
+ ovn_group_table_clear(groups, true);
+ }
+
state = S_UPDATE_FLOWS;
}
@@ -591,16 +616,70 @@ queue_flow_mod(struct ofputil_flow_mod *fm)
queue_msg(ofputil_encode_flow_mod(fm, OFPUTIL_P_OF13_OXM));
}
+
+/* group_table. */
+
+/* Finds and returns a group_info in 'existing_groups' whose key is identical
+ * to 'target''s key, or NULL if there is none. */
+static struct group_info *
+ovn_group_lookup(struct hmap *existing_groups,
+ const struct group_info *target)
+{
+ struct group_info *e;
+
+ HMAP_FOR_EACH_WITH_HASH(e, hmap_node, target->hmap_node.hash,
+ existing_groups) {
+ if (e->group_id == target->group_id) {
+ return e;
+ }
+ }
+ return NULL;
+}
+
+/* Clear either desired_groups or existing_groups in group_table. */
+static void
+ovn_group_table_clear(struct group_table *group_table, bool existing)
+{
+ struct group_info *g, *next;
+ struct hmap *target_group = existing
+ ? &group_table->existing_groups
+ : &group_table->desired_groups;
+
+ HMAP_FOR_EACH_SAFE (g, next, hmap_node, target_group) {
+ hmap_remove(target_group, &g->hmap_node);
+ bitmap_set0(group_table->group_ids, g->group_id);
+ ds_destroy(&g->group);
+ free(g);
+ }
+}
+
+static void
+queue_group_mod(struct ofputil_group_mod *gm)
+{
+ queue_msg(ofputil_encode_group_mod(OFP13_VERSION, gm));
+}
+
+
/* Replaces the flow table on the switch, if possible, by the flows in
* 'flow_table', which should have been added with ofctrl_add_flow().
* Regardless of whether the flow table is updated, this deletes all of the
* flows from 'flow_table' and frees them. (The hmap itself isn't
* destroyed.)
*
+ * Replaces the group table on the switch, if possible, by the groups in
+ * 'group_table->desired_groups'. Regardless of whether the group table
+ * is updated, this deletes all the groups from the
+ * 'group_table->desired_groups' and frees them. (The hmap itself isn't
+ * destroyed.)
+ *
* This should be called after ofctrl_run() within the main loop. */
void
-ofctrl_put(struct hmap *flow_table)
+ofctrl_put(struct hmap *flow_table, struct group_table *group_table)
{
+ if (!groups) {
+ groups = group_table;
+ }
+
/* The flow table can be updated if the connection to the switch is up and
* in the correct state and not backlogged with existing flow_mods. (Our
* criteria for being backlogged appear very conservative, but the socket
@@ -610,9 +689,39 @@ ofctrl_put(struct hmap *flow_table)
if (state != S_UPDATE_FLOWS
|| rconn_packet_counter_n_packets(tx_counter)) {
ovn_flow_table_clear(flow_table);
+ ovn_group_table_clear(group_table, false);
return;
}
+ /* Iterate through all the desired groups. If there are new ones,
+ * add them to the switch. */
+ struct group_info *desired;
+ HMAP_FOR_EACH(desired, hmap_node, &group_table->desired_groups) {
+ if (!ovn_group_lookup(&group_table->existing_groups, desired)) {
+ /* Create and install new group. */
+ struct ofputil_group_mod gm;
+ enum ofputil_protocol usable_protocols;
+ char *error;
+ struct ds group_string = DS_EMPTY_INITIALIZER;
+ ds_put_format(&group_string, "group_id=%u,%s",
+ desired->group_id, ds_cstr(&desired->group));
+
+ error = parse_ofp_group_mod_str(&gm, OFPGC11_ADD,
+ ds_cstr(&group_string),
+ &usable_protocols);
+ if (!error) {
+ queue_group_mod(&gm);
+ } else {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ VLOG_ERR_RL(&rl, "new group %s %s", error,
+ ds_cstr(&group_string));
+ free(error);
+ }
+ ds_destroy(&group_string);
+ ofputil_bucket_list_destroy(&gm.buckets);
+ }
+ }
+
/* Iterate through all of the installed flows. If any of them are no
* longer desired, delete them; if any of them should have different
* actions, update them. */
@@ -682,4 +791,54 @@ ofctrl_put(struct hmap *flow_table)
hmap_remove(flow_table, &d->hmap_node);
hmap_insert(&installed_flows, &d->hmap_node, d->hmap_node.hash);
}
+
+ /* Iterate through the installed groups from previous runs. If they
+ * are no longer needed, delete them. */
+ struct group_info *installed, *next_group;
+ HMAP_FOR_EACH_SAFE(installed, next_group, hmap_node,
+ &group_table->existing_groups) {
+ if (!ovn_group_lookup(&group_table->desired_groups, installed)) {
+ /* Delete the group. */
+ struct ofputil_group_mod gm;
+ enum ofputil_protocol usable_protocols;
+ char *error;
+ struct ds group_string = DS_EMPTY_INITIALIZER;
+ ds_put_format(&group_string, "group_id=%u", installed->group_id);
+
+ error = parse_ofp_group_mod_str(&gm, OFPGC11_DELETE,
+ ds_cstr(&group_string),
+ &usable_protocols);
+ if (!error) {
+ queue_group_mod(&gm);
+ } else {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ VLOG_ERR_RL(&rl, "Error deleting group %d: %s",
+ installed->group_id, error);
+ free(error);
+ }
+ ds_destroy(&group_string);
+ ofputil_bucket_list_destroy(&gm.buckets);
+
+ /* Remove 'installed' from 'group_table->existing_groups' */
+ hmap_remove(&group_table->existing_groups, &installed->hmap_node);
+ ds_destroy(&installed->group);
+
+ /* Dealloc group_id. */
+ bitmap_set0(group_table->group_ids, installed->group_id);
+ free(installed);
+ }
+ }
+
+ /* Move the contents of desired_groups to existing_groups. */
+ HMAP_FOR_EACH_SAFE(desired, next_group, hmap_node,
+ &group_table->desired_groups) {
+ hmap_remove(&group_table->desired_groups, &desired->hmap_node);
+ if (!ovn_group_lookup(&group_table->existing_groups, desired)) {
+ hmap_insert(&group_table->existing_groups, &desired->hmap_node,
+ desired->hmap_node.hash);
+ } else {
+ ds_destroy(&desired->group);
+ free(desired);
+ }
+ }
}
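Taken together, the new code in ofctrl_put() is a desired-state versus installed-state reconciliation over the two hmaps. A minimal sketch of the pattern, condensed from the diff above ('gt' abbreviates the 'struct group_table *' argument; send_group_mod() is a hypothetical stand-in for the parse_ofp_group_mod_str()/queue_group_mod() sequence):

    struct group_info *g, *next;

    /* 1. Install desired groups that are not yet on the switch. */
    HMAP_FOR_EACH (g, hmap_node, &gt->desired_groups) {
        if (!ovn_group_lookup(&gt->existing_groups, g)) {
            send_group_mod(OFPGC11_ADD, g);      /* hypothetical helper */
        }
    }

    /* 2. Delete installed groups that are no longer desired and
     *    release their group ids. */
    HMAP_FOR_EACH_SAFE (g, next, hmap_node, &gt->existing_groups) {
        if (!ovn_group_lookup(&gt->desired_groups, g)) {
            send_group_mod(OFPGC11_DELETE, g);   /* hypothetical helper */
            hmap_remove(&gt->existing_groups, &g->hmap_node);
            bitmap_set0(gt->group_ids, g->group_id);
            ds_destroy(&g->group);
            free(g);
        }
    }

    /* 3. Promote the surviving desired groups to existing_groups. */
    HMAP_FOR_EACH_SAFE (g, next, hmap_node, &gt->desired_groups) {
        hmap_remove(&gt->desired_groups, &g->hmap_node);
        if (!ovn_group_lookup(&gt->existing_groups, g)) {
            hmap_insert(&gt->existing_groups, &g->hmap_node,
                        g->hmap_node.hash);
        } else {
            ds_destroy(&g->group);
            free(g);
        }
    }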
diff --git a/ovn/controller/ofctrl.h b/ovn/controller/ofctrl.h
index bc9cfbab1..bf5dfd5c8 100644
--- a/ovn/controller/ofctrl.h
+++ b/ovn/controller/ofctrl.h
@@ -26,11 +26,12 @@ struct hmap;
struct match;
struct ofpbuf;
struct ovsrec_bridge;
+struct group_table;
/* Interface for OVN main loop. */
void ofctrl_init(void);
enum mf_field_id ofctrl_run(const struct ovsrec_bridge *br_int);
-void ofctrl_put(struct hmap *flows);
+void ofctrl_put(struct hmap *flows, struct group_table *group_table);
void ofctrl_wait(void);
void ofctrl_destroy(void);
diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-controller.c
index 3257eab48..8471f64e3 100644
--- a/ovn/controller/ovn-controller.c
+++ b/ovn/controller/ovn-controller.c
@@ -39,6 +39,7 @@
#include "ofctrl.h"
#include "openvswitch/vconn.h"
#include "openvswitch/vlog.h"
+#include "ovn/lib/actions.h"
#include "ovn/lib/ovn-sb-idl.h"
#include "ovn/lib/ovn-util.h"
#include "patch.h"
@@ -327,6 +328,13 @@ main(int argc, char *argv[])
}
unixctl_command_register("exit", "", 0, 0, ovn_controller_exit, &exiting);
+ /* Initialize group ids for load balancing. */
+ struct group_table group_table;
+ group_table.group_ids = bitmap_allocate(MAX_OVN_GROUPS);
+ bitmap_set1(group_table.group_ids, 0); /* Group id 0 is invalid. */
+ hmap_init(&group_table.desired_groups);
+ hmap_init(&group_table.existing_groups);
+
daemonize_complete();
ovsrec_init();
@@ -435,13 +443,14 @@ main(int argc, char *argv[])
struct hmap flow_table = HMAP_INITIALIZER(&flow_table);
lflow_run(&ctx, &lports, &mcgroups, &local_datapaths,
- &patched_datapaths, &ct_zones, &flow_table);
+ &patched_datapaths, &group_table, &ct_zones,
+ &flow_table);
if (chassis_id) {
physical_run(&ctx, mff_ovn_geneve,
br_int, chassis_id, &ct_zones, &flow_table,
&local_datapaths, &patched_datapaths);
}
- ofctrl_put(&flow_table);
+ ofctrl_put(&flow_table, &group_table);
hmap_destroy(&flow_table);
}
@@ -501,6 +510,18 @@ main(int argc, char *argv[])
simap_destroy(&ct_zones);
+ bitmap_free(group_table.group_ids);
+ hmap_destroy(&group_table.desired_groups);
+
+ struct group_info *installed, *next_group;
+ HMAP_FOR_EACH_SAFE(installed, next_group, hmap_node,
+ &group_table.existing_groups) {
+ hmap_remove(&group_table.existing_groups, &installed->hmap_node);
+ ds_destroy(&installed->group);
+ free(installed);
+ }
+ hmap_destroy(&group_table.existing_groups);
+
ovsdb_idl_loop_destroy(&ovs_idl_loop);
ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c
index c757747c8..3d10d611e 100644
--- a/ovn/lib/actions.c
+++ b/ovn/lib/actions.c
@@ -18,10 +18,13 @@
#include <stdarg.h>
#include <stdbool.h>
#include "actions.h"
+#include "bitmap.h"
#include "byte-order.h"
#include "compiler.h"
#include "ovn-dhcp.h"
#include "expr.h"
+#include "hash.h"
+#include "hmap.h"
#include "lex.h"
#include "logical-fields.h"
#include "nx-match.h"
@@ -624,6 +627,153 @@ parse_put_dhcp_opts_action(struct action_context *ctx,
finish_controller_op(ctx->ofpacts, oc_offset);
}
+static bool
+action_parse_port(struct action_context *ctx, uint16_t *port)
+{
+ if (lexer_is_int(ctx->lexer)) {
+ int value = ntohll(ctx->lexer->token.value.integer);
+ if (value <= UINT16_MAX) {
+ *port = value;
+ lexer_get(ctx->lexer);
+ return true;
+ }
+ }
+ action_syntax_error(ctx, "expecting port number");
+ return false;
+}
+
+static void
+parse_ct_lb_action(struct action_context *ctx)
+{
+ uint8_t recirc_table;
+ if (ctx->ap->cur_ltable < ctx->ap->n_tables) {
+ recirc_table = ctx->ap->first_ptable + ctx->ap->cur_ltable + 1;
+ } else {
+ action_error(ctx, "\"ct_lb\" action not allowed in last table.");
+ return;
+ }
+
+ if (!lexer_match(ctx->lexer, LEX_T_LPAREN)) {
+ /* ct_lb without parentheses means that this is an established
+ * connection and we just need to do a NAT. */
+ const size_t ct_offset = ctx->ofpacts->size;
+ ofpbuf_pull(ctx->ofpacts, ct_offset);
+
+ struct ofpact_conntrack *ct = ofpact_put_CT(ctx->ofpacts);
+ struct ofpact_nat *nat;
+ size_t nat_offset;
+ ct->zone_src.field = mf_from_id(MFF_LOG_CT_ZONE);
+ ct->zone_src.ofs = 0;
+ ct->zone_src.n_bits = 16;
+ ct->flags = 0;
+ ct->recirc_table = recirc_table;
+ ct->alg = 0;
+
+ add_prerequisite(ctx, "ip");
+
+ nat_offset = ctx->ofpacts->size;
+ ofpbuf_pull(ctx->ofpacts, nat_offset);
+
+ nat = ofpact_put_NAT(ctx->ofpacts);
+ nat->flags = 0;
+ nat->range_af = AF_UNSPEC;
+
+ ctx->ofpacts->header = ofpbuf_push_uninit(ctx->ofpacts, nat_offset);
+ ct = ctx->ofpacts->header;
+ ofpact_finish(ctx->ofpacts, &ct->ofpact);
+ ofpbuf_push_uninit(ctx->ofpacts, ct_offset);
+ return;
+ }
+
+ uint32_t group_id = 0, bucket_id = 0, hash;
+ struct group_info *group_info;
+ struct ofpact_group *og;
+
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ ds_put_format(&ds, "type=select");
+
+ BUILD_ASSERT(MFF_LOG_CT_ZONE >= MFF_REG0);
+ BUILD_ASSERT(MFF_LOG_CT_ZONE < MFF_REG0 + FLOW_N_REGS);
+ do {
+ if (ctx->lexer->token.type != LEX_T_INTEGER
+ || mf_subvalue_width(&ctx->lexer->token.value) > 32) {
+ action_syntax_error(ctx, "expecting IPv4 address");
+ ds_destroy(&ds);
+ return;
+ }
+ ovs_be32 ip = ctx->lexer->token.value.ipv4;
+ lexer_get(ctx->lexer);
+
+ uint16_t port = 0;
+ if (lexer_match(ctx->lexer, LEX_T_COLON)
+ && !action_parse_port(ctx, &port)) {
+ ds_destroy(&ds);
+ return;
+ }
+
+ bucket_id++;
+ ds_put_format(&ds, ",bucket=bucket_id=%u,weight:100,actions="
+ "ct(nat(dst="IP_FMT, bucket_id, IP_ARGS(ip));
+ if (port) {
+ ds_put_format(&ds, ":%"PRIu16, port);
+ }
+ ds_put_format(&ds, "),commit,table=%d,zone=NXM_NX_REG%d[0..15])",
+ recirc_table, MFF_LOG_CT_ZONE - MFF_REG0);
+
+ lexer_match(ctx->lexer, LEX_T_COMMA);
+ } while (!lexer_match(ctx->lexer, LEX_T_RPAREN));
+ add_prerequisite(ctx, "ip");
+
+ hash = hash_string(ds_cstr(&ds), 0);
+
+ /* Check whether we have a non-installed but already allocated group_id. */
+ HMAP_FOR_EACH_WITH_HASH (group_info, hmap_node, hash,
+ &ctx->ap->group_table->desired_groups) {
+ if (!strcmp(ds_cstr(&group_info->group), ds_cstr(&ds))) {
+ group_id = group_info->group_id;
+ break;
+ }
+ }
+
+ if (!group_id) {
+ /* Check whether we already have an installed entry for this
+ * combination. */
+ HMAP_FOR_EACH_WITH_HASH (group_info, hmap_node, hash,
+ &ctx->ap->group_table->existing_groups) {
+ if (!strcmp(ds_cstr(&group_info->group), ds_cstr(&ds))) {
+ group_id = group_info->group_id;
+ }
+ }
+
+ if (!group_id) {
+ /* Reserve a new group_id. */
+ group_id = bitmap_scan(ctx->ap->group_table->group_ids, 0, 1,
+ MAX_OVN_GROUPS + 1);
+ }
+
+ if (group_id == MAX_OVN_GROUPS + 1) {
+ ds_destroy(&ds);
+ action_error(ctx, "out of group ids.");
+ return;
+ }
+ bitmap_set1(ctx->ap->group_table->group_ids, group_id);
+
+ group_info = xmalloc(sizeof *group_info);
+ group_info->group = ds;
+ group_info->group_id = group_id;
+ group_info->hmap_node.hash = hash;
+
+ hmap_insert(&ctx->ap->group_table->desired_groups,
+ &group_info->hmap_node, group_info->hmap_node.hash);
+ } else {
+ ds_destroy(&ds);
+ }
+
+ /* Create an action to set the group. */
+ og = ofpact_put_GROUP(ctx->ofpacts);
+ og->group_id = group_id;
+}
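Group ids here come from a simple bitmap allocator: bit N set means id N is in use, and id 0 is reserved as invalid. A minimal sketch of that scheme, assuming OVS's lib/bitmap.h (allocate_group_id() is a hypothetical helper, not part of this patch):

    #include "bitmap.h"   /* OVS lib/bitmap.h */

    /* Returns a free group id in [1, MAX_OVN_GROUPS] and marks it
     * allocated, or 0 (the reserved, invalid id) if none is free. */
    static uint32_t
    allocate_group_id(unsigned long *group_ids)
    {
        /* bitmap_scan() returns its 'end' argument when no clear bit
         * exists in [start, end). */
        size_t id = bitmap_scan(group_ids, 0, 1, MAX_OVN_GROUPS + 1);
        if (id == MAX_OVN_GROUPS + 1) {
            return 0;                    /* Out of group ids. */
        }
        bitmap_set1(group_ids, id);      /* Reserve it. */
        return id;
    }

The matching release is the bitmap_set0() call in ovn_group_table_clear() and in the group-deletion path of ofctrl_put().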
+
static void
emit_ct(struct action_context *ctx, bool recirc_next, bool commit,
int *ct_mark, int *ct_mark_mask,
@@ -908,6 +1058,8 @@ parse_action(struct action_context *ctx)
parse_ct_nat(ctx, false);
} else if (lexer_match_id(ctx->lexer, "ct_snat")) {
parse_ct_nat(ctx, true);
+ } else if (lexer_match_id(ctx->lexer, "ct_lb")) {
+ parse_ct_lb_action(ctx);
} else if (lexer_match_id(ctx->lexer, "arp")) {
parse_nested_action(ctx, ACTION_OPCODE_ARP, "ip4");
} else if (lexer_match_id(ctx->lexer, "na")) {
diff --git a/ovn/lib/actions.h b/ovn/lib/actions.h
index 6b9cd248e..48f014089 100644
--- a/ovn/lib/actions.h
+++ b/ovn/lib/actions.h
@@ -20,6 +20,8 @@
#include <stdbool.h>
#include <stdint.h>
#include "compiler.h"
+#include "hmap.h"
+#include "openvswitch/dynamic-string.h"
#include "util.h"
struct expr;
@@ -28,6 +30,22 @@ struct ofpbuf;
struct shash;
struct simap;
+#define MAX_OVN_GROUPS 65535
+
+struct group_table {
+ unsigned long *group_ids; /* Used as a bitmap with value set
+ * for allocated group ids in either
+ * desired_groups or existing_groups. */
+ struct hmap desired_groups;
+ struct hmap existing_groups;
+};
+
+struct group_info {
+ struct hmap_node hmap_node;
+ struct ds group;
+ uint32_t group_id;
+};
+
enum action_opcode {
/* "arp { ...actions... }".
*
@@ -86,6 +104,9 @@ struct action_params {
/* A map from a port name to its connection tracking zone. */
const struct simap *ct_zones;
+ /* A struct to figure out the group_id for group actions. */
+ struct group_table *group_table;
+
/* OVN maps each logical flow table (ltable), one-to-one, onto a physical
* OpenFlow flow table (ptable). A number of parameters describe this
* mapping and data related to flow tables:
diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
index bb8552824..759513f84 100644
--- a/ovn/ovn-sb.xml
+++ b/ovn/ovn-sb.xml
@@ -1000,7 +1000,7 @@
<p>
<code>ct_dnat(<var>IP</var>)</code> sends the packet through the
DNAT zone to change the destination IP address of the packet to
- the one provided inside the parenthesis and commits the connection.
+ the one provided inside the parentheses and commits the connection.
The packet is then automatically sent to the next tables as if
followed by <code>next;</code> action. The next tables will see
the changes in the packet caused by the connection tracker.
@@ -1183,6 +1183,31 @@
</code>
</p>
</dd>
+
+ <dt><code>ct_lb;</code></dt>
+ <dt><code>ct_lb(</code><var>ip</var>[<code>:</code><var>port</var>]...<code>);</code></dt>
+ <dd>
+ <p>
+ With one or more arguments, <code>ct_lb</code> commits the packet
+ to the connection tracking table and DNATs the packet's destination
+ IP address (and port) to the IP address or addresses (and optional
+ ports) specified in the string. If multiple comma-separated IP
+ addresses are specified, each is given equal weight for picking the
+ DNAT address. Processing automatically moves on to the next table,
+ as if <code>next;</code> were specified, and later tables act on
+ the packet as modified by the connection tracker. Connection
+ tracking state is scoped by the logical port, so overlapping
+ addresses may be used.
+ </p>
+ <p>
+ Without arguments, <code>ct_lb</code> sends the packet to the
connection tracking table to NAT the packet. If the packet is
+ part of an established connection that was previously committed to
+ the connection tracker via <code>ct_lb(</code>...<code>)</code>, it
+ will automatically get DNATed to the same IP address as the first
+ packet in that connection.
+ </p>
+ </dd>
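A hypothetical end-to-end usage (backend addresses invented for illustration): a load-balancer pipeline stage handling new connections might specify <code>ct_lb(192.168.1.2:80, 192.168.1.3:80);</code>, while the stage that sees subsequent packets of the same connections applies plain <code>ct_lb;</code>, so each later packet is DNATed to whichever backend conntrack picked for the connection's first packet.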
</dl>
<p>