author     Justin Pettit <jpettit@ovn.org>          2017-07-05 15:17:52 -0700
committer  Justin Pettit <jpettit@ovn.org>          2018-01-10 16:42:00 -0800
commit     d39ec23de38464ee35b3098b9f6c5f06d5191015 (patch)
tree       2f9d5eb33373d01dcd6d8c84107a7a588bd9e8b9 /ofproto
parent     fcb9579be3c7717744e63a343a86a0dbcf0d3d78 (diff)
ofproto-dpif: Don't slow-path controller actions.
Controller actions have become more commonly used for purposes other than just making forwarding decisions (e.g., packet logging). A packet that needs to be copied to the controller and forwarded would always be sent to ovs-vswitchd to be handled, which could negatively affect performance and cause heavier CPU utilization in ovs-vswitchd.

This commit changes the behavior so that OpenFlow controller actions become userspace datapath actions, while packet forwarding and manipulation continue to be handled directly by the datapath.

This patch still slow-paths controller actions with the "pause" flag set. A future patch will stop slow-pathing these pause actions as well.

Signed-off-by: Justin Pettit <jpettit@ovn.org>
Acked-by: Ben Pfaff <blp@ovn.org>
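For readers following the translation-side change, the condensed sketch below mirrors the new xlate_controller_action() added in ofproto-dpif-xlate.c by this patch: instead of marking the flow SLOW_CONTROLLER, it freezes the relevant translation state under a fresh recirculation id, builds a USER_ACTION_COOKIE_CONTROLLER cookie, and appends a datapath userspace action (wrapped in a sample/meter clone when a controller meter is configured). All helper names and signatures are taken from the diff itself; error handling, some frozen_state fields, and the OVS-internal headers are omitted, so treat this as an illustrative sketch rather than standalone compilable code.

static void
xlate_controller_action_sketch(struct xlate_ctx *ctx, int max_len,
                               enum ofp_packet_in_reason reason,
                               uint16_t controller_id,
                               const uint8_t *userdata, size_t userdata_len)
{
    /* 1. Freeze the translation state (now including the controller
     *    userdata) under a fresh recirculation id so the upcall handler
     *    can later rebuild the packet-in metadata. */
    struct frozen_state state = {
        .table_id = ctx->table_id,
        .ofproto_uuid = ctx->xbridge->ofproto->uuid,
        .userdata = CONST_CAST(uint8_t *, userdata),
        .userdata_len = userdata_len,
        /* stack, mirrors, conntracked, metadata, ... as in the diff. */
    };
    uint32_t recirc_id = recirc_alloc_id_ctx(&state);

    /* 2. Build the controller cookie carried by the userspace action. */
    struct user_action_cookie cookie;
    memset(&cookie, 0, sizeof cookie);
    cookie.type = USER_ACTION_COOKIE_CONTROLLER;
    cookie.ofp_in_port = OFPP_NONE;
    cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
    cookie.controller.reason = reason;
    cookie.controller.recirc_id = recirc_id;
    cookie.controller.max_len = max_len;
    cookie.controller.controller_id = controller_id;
    put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);

    /* 3. Emit the userspace action.  When a controller meter is
     *    configured, wrap it as sample(100%, meter, userspace) so the
     *    meter applies only to the copy sent toward the controller. */
    uint32_t meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
    size_t offset = 0, ac_offset = 0;
    if (meter_id != UINT32_MAX) {
        offset = nl_msg_start_nested(ctx->odp_actions,
                                     OVS_ACTION_ATTR_SAMPLE);
        nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
                       UINT32_MAX);
        ac_offset = nl_msg_start_nested(ctx->odp_actions,
                                        OVS_SAMPLE_ATTR_ACTIONS);
        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
    }

    odp_port_t odp_port = ofp_port_to_odp_port(
        ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
    uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
                                     flow_hash_5tuple(&ctx->xin->flow, 0));
    odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
                             false, ctx->odp_actions);

    if (meter_id != UINT32_MAX) {
        nl_msg_end_nested(ctx->odp_actions, ac_offset);
        nl_msg_end_nested(ctx->odp_actions, offset);
    }
}

The upcall side (process_upcall() in ofproto-dpif-upcall.c, also in this diff) reverses the process: it classifies the USER_ACTION_COOKIE_CONTROLLER cookie as a CONTROLLER_UPCALL, looks up the frozen state by recirc_id, and builds the OAM_PACKET_IN message from that state rather than from a slow-pathed translation.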
Diffstat (limited to 'ofproto')
-rw-r--r--  ofproto/ofproto-dpif-rid.c     |  10
-rw-r--r--  ofproto/ofproto-dpif-rid.h     |   4
-rw-r--r--  ofproto/ofproto-dpif-upcall.c  |  95
-rw-r--r--  ofproto/ofproto-dpif-xlate.c   | 297
-rw-r--r--  ofproto/ofproto-unixctl.man    |   6
5 files changed, 176 insertions(+), 236 deletions(-)
diff --git a/ofproto/ofproto-dpif-rid.c b/ofproto/ofproto-dpif-rid.c
index fc5700489..83278d82b 100644
--- a/ofproto/ofproto-dpif-rid.c
+++ b/ofproto/ofproto-dpif-rid.c
@@ -142,6 +142,9 @@ frozen_state_hash(const struct frozen_state *state)
hash = hash_bytes64(ALIGNED_CAST(const uint64_t *, state->ofpacts),
state->ofpacts_len, hash);
}
+ if (state->userdata && state->userdata_len) {
+ hash = hash_bytes(state->userdata, state->userdata_len, hash);
+ }
return hash;
}
@@ -158,7 +161,8 @@ frozen_state_equal(const struct frozen_state *a, const struct frozen_state *b)
&& ofpacts_equal(a->ofpacts, a->ofpacts_len,
b->ofpacts, b->ofpacts_len)
&& ofpacts_equal(a->action_set, a->action_set_len,
- b->action_set, b->action_set_len));
+ b->action_set, b->action_set_len)
+ && !memcmp(a->userdata, b->userdata, a->userdata_len));
}
/* Lockless RCU protected lookup. If node is needed accross RCU quiescent
@@ -203,6 +207,9 @@ frozen_state_clone(struct frozen_state *new, const struct frozen_state *old)
new->action_set = (new->action_set_len
? xmemdup(new->action_set, new->action_set_len)
: NULL);
+ new->userdata = (new->userdata_len
+ ? xmemdup(new->userdata, new->userdata_len)
+ : NULL);
}
static void
@@ -211,6 +218,7 @@ frozen_state_free(struct frozen_state *state)
free(state->stack);
free(state->ofpacts);
free(state->action_set);
+ free(state->userdata);
}
/* Allocate a unique recirculation id for the given set of flow metadata.
diff --git a/ofproto/ofproto-dpif-rid.h b/ofproto/ofproto-dpif-rid.h
index 19fc27c7c..858b02e39 100644
--- a/ofproto/ofproto-dpif-rid.h
+++ b/ofproto/ofproto-dpif-rid.h
@@ -149,6 +149,10 @@ struct frozen_state {
size_t ofpacts_len; /* Size of 'ofpacts', in bytes. */
struct ofpact *action_set;
size_t action_set_len; /* Size of 'action_set', in bytes. */
+
+ /* User data for controller userspace cookie. */
+ uint8_t *userdata;
+ size_t userdata_len;
};
/* This maps a recirculation ID to saved state that flow translation can
diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index bd7dc9e7b..c7bfa472f 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -187,7 +187,8 @@ enum upcall_type {
SLOW_PATH_UPCALL, /* Slow path upcall. */
SFLOW_UPCALL, /* sFlow sample. */
FLOW_SAMPLE_UPCALL, /* Per-flow sampling. */
- IPFIX_UPCALL /* Per-bridge sampling. */
+ IPFIX_UPCALL, /* Per-bridge sampling. */
+ CONTROLLER_UPCALL /* Destined for the controller. */
};
enum reval_result {
@@ -1011,6 +1012,8 @@ classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata,
return FLOW_SAMPLE_UPCALL;
} else if (cookie->type == USER_ACTION_COOKIE_IPFIX) {
return IPFIX_UPCALL;
+ } else if (cookie->type == USER_ACTION_COOKIE_CONTROLLER) {
+ return CONTROLLER_UPCALL;
} else {
VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
" and size %"PRIuSIZE, cookie->type, userdata_len);
@@ -1024,8 +1027,8 @@ static void
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
const struct flow *flow,
odp_port_t odp_in_port, ofp_port_t ofp_in_port,
- struct ofpbuf *buf, uint32_t slowpath_meter_id,
- uint32_t controller_meter_id, struct uuid *ofproto_uuid)
+ struct ofpbuf *buf, uint32_t meter_id,
+ struct uuid *ofproto_uuid)
{
struct user_action_cookie cookie;
odp_port_t port;
@@ -1043,9 +1046,6 @@ compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
size_t offset;
size_t ac_offset;
- uint32_t meter_id = xout->slow & SLOW_CONTROLLER ? controller_meter_id
- : slowpath_meter_id;
-
if (meter_id != UINT32_MAX) {
/* If slowpath meter is configured, generate clone(meter, userspace)
* action. */
@@ -1178,12 +1178,11 @@ upcall_xlate(struct udpif *udpif, struct upcall *upcall,
ofpbuf_use_const(&upcall->put_actions,
odp_actions->data, odp_actions->size);
} else {
- uint32_t smid = upcall->ofproto->up.slowpath_meter_id;
- uint32_t cmid = upcall->ofproto->up.controller_meter_id;
/* upcall->put_actions already initialized by upcall_receive(). */
compose_slow_path(udpif, &upcall->xout, upcall->flow,
upcall->flow->in_port.odp_port, upcall->ofp_in_port,
- &upcall->put_actions, smid, cmid,
+ &upcall->put_actions,
+ upcall->ofproto->up.slowpath_meter_id,
&upcall->ofproto->uuid);
}
@@ -1340,6 +1339,7 @@ dpif_read_actions(struct udpif *udpif, struct upcall *upcall,
case BAD_UPCALL:
case MISS_UPCALL:
case SLOW_PATH_UPCALL:
+ case CONTROLLER_UPCALL:
default:
break;
}
@@ -1409,6 +1409,68 @@ process_upcall(struct udpif *udpif, struct upcall *upcall,
}
break;
+ case CONTROLLER_UPCALL:
+ {
+ struct user_action_cookie *cookie = &upcall->cookie;
+
+ if (cookie->controller.dont_send) {
+ return 0;
+ }
+
+ uint32_t recirc_id = cookie->controller.recirc_id;
+ if (!recirc_id) {
+ break;
+ }
+
+ const struct recirc_id_node *recirc_node
+ = recirc_id_node_find(recirc_id);
+ if (!recirc_node) {
+ break;
+ }
+
+ struct ofproto_async_msg *am = xmalloc(sizeof *am);
+ *am = (struct ofproto_async_msg) {
+ .controller_id = cookie->controller.controller_id,
+ .oam = OAM_PACKET_IN,
+ .pin = {
+ .up = {
+ .base = {
+ .packet = xmemdup(dp_packet_data(packet),
+ dp_packet_size(packet)),
+ .packet_len = dp_packet_size(packet),
+ .reason = cookie->controller.reason,
+ .table_id = recirc_node->state.table_id,
+ .cookie = get_32aligned_be64(
+ &cookie->controller.rule_cookie),
+ .userdata = (recirc_node->state.userdata_len
+ ? xmemdup(recirc_node->state.userdata,
+ recirc_node->state.userdata_len)
+ : NULL),
+ .userdata_len = recirc_node->state.userdata_len,
+ },
+ },
+ .max_len = cookie->controller.max_len,
+ },
+ };
+
+ /* We don't want to use the upcall 'flow', since it may be
+ * more specific than the point at which the "controller"
+ * action was specified. */
+ struct flow frozen_flow;
+
+ frozen_flow = *flow;
+ if (!recirc_node->state.conntracked) {
+ flow_clear_conntrack(&frozen_flow);
+ }
+
+ frozen_metadata_to_flow(&recirc_node->state.metadata,
+ &frozen_flow);
+ flow_get_metadata(&frozen_flow, &am->pin.up.base.flow_metadata);
+
+ ofproto_dpif_send_async_msg(upcall->ofproto, am);
+ }
+ break;
+
case BAD_UPCALL:
break;
}
@@ -1430,11 +1492,11 @@ handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
* translation is what processes received packets for these
* protocols.
*
- * - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
- * controller.
- *
* - For SLOW_ACTION, translation executes the actions directly.
*
+ * - For SLOW_PAUSE, translation needs to handle a pause request
+ * from the controller.
+ *
* The loop fills 'ops' with an array of operations to execute in the
* datapath. */
n_ops = 0;
@@ -2042,15 +2104,12 @@ revalidate_ukey__(struct udpif *udpif, const struct udpif_key *ukey,
struct ofproto_dpif *ofproto;
ofp_port_t ofp_in_port;
- ofproto = xlate_lookup_ofproto(udpif->backer, &ctx.flow,
- &ofp_in_port);
- uint32_t smid = ofproto ? ofproto->up.slowpath_meter_id : UINT32_MAX;
- uint32_t cmid = ofproto ? ofproto->up.controller_meter_id : UINT32_MAX;
+ ofproto = xlate_lookup_ofproto(udpif->backer, &ctx.flow, &ofp_in_port);
ofpbuf_clear(odp_actions);
compose_slow_path(udpif, xoutp, &ctx.flow, ctx.flow.in_port.odp_port,
- ofp_in_port, odp_actions, smid, cmid,
- &ofproto->uuid);
+ ofp_in_port, odp_actions,
+ ofproto->up.slowpath_meter_id, &ofproto->uuid);
}
if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, &dp_mask, &ctx.flow)
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 625063a31..f1a9e37d5 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -4355,174 +4355,13 @@ flood_packets(struct xlate_ctx *ctx, bool all, bool is_last_action)
ctx->nf_output_iface = NF_OUT_FLOOD;
}
-/* Copy and reformat a partially xlated odp actions to a new
- * odp actions list in 'b', so that the new actions list
- * can be executed by odp_execute_actions.
- *
- * When xlate using nested odp actions, such as sample and clone,
- * the nested action created by nl_msg_start_nested() may not
- * have been properly closed yet, thus can not be executed
- * directly.
- *
- * Since unclosed nested action has to be last action, it can be
- * fixed by skipping the outer header, and treating the actions within
- * as if they are outside the nested attribute since the effect
- * of executing them on packet is the same.
- *
- * As an optimization, a fully closed 'sample' or 'clone' action
- * is skipped since their execution has no effect to the packet.
- *
- * Returns true if success. 'b' contains the new actions list.
- * The caller is responsible for disposing 'b'.
- *
- * Returns false if error, 'b' has been freed already. */
-static bool
-xlate_fixup_actions(struct ofpbuf *b, const struct nlattr *actions,
- size_t actions_len)
-{
- const struct nlattr *a;
- unsigned int left;
-
- NL_ATTR_FOR_EACH_UNSAFE (a, left, actions, actions_len) {
- int type = nl_attr_type(a);
-
- switch ((enum ovs_action_attr) type) {
- case OVS_ACTION_ATTR_HASH:
- case OVS_ACTION_ATTR_PUSH_VLAN:
- case OVS_ACTION_ATTR_POP_VLAN:
- case OVS_ACTION_ATTR_PUSH_MPLS:
- case OVS_ACTION_ATTR_POP_MPLS:
- case OVS_ACTION_ATTR_SET:
- case OVS_ACTION_ATTR_SET_MASKED:
- case OVS_ACTION_ATTR_TRUNC:
- case OVS_ACTION_ATTR_OUTPUT:
- case OVS_ACTION_ATTR_TUNNEL_PUSH:
- case OVS_ACTION_ATTR_TUNNEL_POP:
- case OVS_ACTION_ATTR_USERSPACE:
- case OVS_ACTION_ATTR_RECIRC:
- case OVS_ACTION_ATTR_CT:
- case OVS_ACTION_ATTR_PUSH_ETH:
- case OVS_ACTION_ATTR_POP_ETH:
- case OVS_ACTION_ATTR_PUSH_NSH:
- case OVS_ACTION_ATTR_POP_NSH:
- case OVS_ACTION_ATTR_METER:
- ofpbuf_put(b, a, nl_attr_len_pad(a, left));
- break;
-
- case OVS_ACTION_ATTR_CLONE:
- /* If the clone action has been fully xlated, it can
- * be skipped, since any actions executed within clone
- * do not affect the current packet.
- *
- * When xlating actions within clone, the clone action,
- * because it is an nested netlink attribute, do not have
- * a valid 'nla_len'; it will be zero instead. Skip
- * the clone header to find the start of the actions
- * enclosed. Treat those actions as if they are written
- * outside of clone. */
- if (!a->nla_len) {
- bool ok;
- if (left < NLA_HDRLEN) {
- goto error;
- }
-
- ok = xlate_fixup_actions(b, nl_attr_get_unspec(a, 0),
- left - NLA_HDRLEN);
- if (!ok) {
- goto error;
- }
- }
- break;
-
- case OVS_ACTION_ATTR_SAMPLE:
- if (!a->nla_len) {
- bool ok;
- if (left < NLA_HDRLEN) {
- goto error;
- }
- const struct nlattr *attr = nl_attr_get_unspec(a, 0);
- left -= NLA_HDRLEN;
-
- while (left > 0 &&
- nl_attr_type(attr) != OVS_SAMPLE_ATTR_ACTIONS) {
- /* Only OVS_SAMPLE_ATTR_ACTIONS can have unclosed
- * nested netlink attribute. */
- if (!attr->nla_len) {
- goto error;
- }
-
- left -= NLA_ALIGN(attr->nla_len);
- attr = nl_attr_next(attr);
- }
-
- if (left < NLA_HDRLEN) {
- goto error;
- }
-
- ok = xlate_fixup_actions(b, nl_attr_get_unspec(attr, 0),
- left - NLA_HDRLEN);
- if (!ok) {
- goto error;
- }
- }
- break;
-
- case OVS_ACTION_ATTR_UNSPEC:
- case __OVS_ACTION_ATTR_MAX:
- OVS_NOT_REACHED();
- }
- }
-
- return true;
-
-error:
- ofpbuf_delete(b);
- return false;
-}
-
-static bool
-xlate_execute_odp_actions(struct dp_packet *packet,
- const struct nlattr *actions, int actions_len)
-{
- struct dp_packet_batch batch;
- struct ofpbuf *b = ofpbuf_new(actions_len);
-
- if (!xlate_fixup_actions(b, actions, actions_len)) {
- return false;
- }
-
- dp_packet_batch_init_packet(&batch, packet);
- odp_execute_actions(NULL, &batch, false, b->data, b->size, NULL);
- ofpbuf_delete(b);
-
- return true;
-}
-
static void
-execute_controller_action(struct xlate_ctx *ctx, int len,
- enum ofp_packet_in_reason reason,
- uint16_t controller_id,
- const uint8_t *userdata, size_t userdata_len)
+xlate_controller_action(struct xlate_ctx *ctx, int len,
+ enum ofp_packet_in_reason reason,
+ uint16_t controller_id,
+ const uint8_t *userdata, size_t userdata_len)
{
- struct dp_packet *packet;
-
- ctx->xout->slow |= SLOW_CONTROLLER;
xlate_commit_actions(ctx);
- if (!ctx->xin->packet) {
- return;
- }
-
- if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
- return;
- }
-
- packet = dp_packet_clone(ctx->xin->packet);
- if (!xlate_execute_odp_actions(packet, ctx->odp_actions->data,
- ctx->odp_actions->size)) {
- xlate_report_error(ctx, "Failed to execute controller action");
- dp_packet_delete(packet);
- return;
- }
/* A packet sent by an action in a table-miss rule is considered an
* explicit table miss. OpenFlow before 1.3 doesn't have that concept so
@@ -4532,44 +4371,74 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
reason = OFPR_EXPLICIT_MISS;
}
- size_t packet_len = dp_packet_size(packet);
-
- struct ofproto_async_msg *am = xmalloc(sizeof *am);
- *am = (struct ofproto_async_msg) {
- .controller_id = controller_id,
- .oam = OAM_PACKET_IN,
- .pin = {
- .up = {
- .base = {
- .packet = dp_packet_steal_data(packet),
- .packet_len = packet_len,
- .reason = reason,
- .table_id = ctx->table_id,
- .cookie = ctx->rule_cookie,
- .userdata = (userdata_len
- ? xmemdup(userdata, userdata_len)
- : NULL),
- .userdata_len = userdata_len,
- }
- },
- .max_len = len,
- },
+ struct frozen_state state = {
+ .table_id = ctx->table_id,
+ .ofproto_uuid = ctx->xbridge->ofproto->uuid,
+ .stack = ctx->stack.data,
+ .stack_size = ctx->stack.size,
+ .mirrors = ctx->mirrors,
+ .conntracked = ctx->conntracked,
+ .ofpacts = NULL,
+ .ofpacts_len = 0,
+ .action_set = NULL,
+ .action_set_len = 0,
+ .userdata = CONST_CAST(uint8_t *, userdata),
+ .userdata_len = userdata_len,
};
- flow_get_metadata(&ctx->xin->flow, &am->pin.up.base.flow_metadata);
+ frozen_metadata_from_flow(&state.metadata, &ctx->xin->flow);
- /* Async messages are only sent once, so if we send one now, no
- * xlate cache entry is created. */
- if (ctx->xin->allow_side_effects) {
- ofproto_dpif_send_async_msg(ctx->xbridge->ofproto, am);
- } else /* xcache */ {
- struct xc_entry *entry;
+ uint32_t recirc_id = recirc_alloc_id_ctx(&state);
+ if (!recirc_id) {
+ xlate_report_error(ctx, "Failed to allocate recirculation id");
+ ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
+ return;
+ }
+ recirc_refs_add(&ctx->xout->recircs, recirc_id);
- entry = xlate_cache_add_entry(ctx->xin->xcache, XC_CONTROLLER);
- entry->controller.ofproto = ctx->xbridge->ofproto;
- entry->controller.am = am;
+ size_t offset;
+ size_t ac_offset;
+ uint32_t meter_id = ctx->xbridge->ofproto->up.controller_meter_id;
+ if (meter_id != UINT32_MAX) {
+ /* If controller meter is configured, generate clone(meter, userspace)
+ * action. */
+ offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_SAMPLE);
+ nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY,
+ UINT32_MAX);
+ ac_offset = nl_msg_start_nested(ctx->odp_actions,
+ OVS_SAMPLE_ATTR_ACTIONS);
+ nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_METER, meter_id);
}
- dp_packet_delete(packet);
+ struct user_action_cookie cookie;
+
+ memset(&cookie, 0, sizeof cookie);
+ cookie.type = USER_ACTION_COOKIE_CONTROLLER;
+ cookie.ofp_in_port = OFPP_NONE;
+ cookie.ofproto_uuid = ctx->xbridge->ofproto->uuid;
+ cookie.controller.dont_send = false;
+ cookie.controller.reason = reason;
+ cookie.controller.recirc_id = recirc_id;
+ put_32aligned_be64(&cookie.controller.rule_cookie, ctx->rule_cookie);
+ cookie.controller.max_len = len;
+ cookie.controller.controller_id = controller_id;
+
+ /* Generate the datapath flows even if we don't send the packet-in
+ * so that debugging more closely represents normal state. */
+ if (!ctx->xin->allow_side_effects && !ctx->xin->xcache) {
+ cookie.controller.dont_send = true;
+ }
+
+ odp_port_t odp_port = ofp_port_to_odp_port(
+ ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
+ uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
+ flow_hash_5tuple(&ctx->xin->flow, 0));
+ odp_put_userspace_action(pid, &cookie, sizeof cookie, ODPP_NONE,
+ false, ctx->odp_actions);
+
+ if (meter_id != UINT32_MAX) {
+ nl_msg_end_nested(ctx->odp_actions, ac_offset);
+ nl_msg_end_nested(ctx->odp_actions, offset);
+ }
}
static void
@@ -4782,8 +4651,8 @@ compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
size_t i;
for (i = 0; i < ids->n_controllers; i++) {
- execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
- ids->cnt_ids[i], NULL, 0);
+ xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
+ ids->cnt_ids[i], NULL, 0);
}
/* Stop processing for current table. */
@@ -4834,8 +4703,8 @@ compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
return false;
} else {
- execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
- NULL, 0);
+ xlate_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0,
+ NULL, 0);
}
}
@@ -4888,12 +4757,12 @@ xlate_output_action(struct xlate_ctx *ctx, ofp_port_t port,
flood_packets(ctx, true, is_last_action);
break;
case OFPP_CONTROLLER:
- execute_controller_action(ctx, controller_len,
- (ctx->in_packet_out ? OFPR_PACKET_OUT
- : ctx->in_group ? OFPR_GROUP
- : ctx->in_action_set ? OFPR_ACTION_SET
- : OFPR_ACTION),
- 0, NULL, 0);
+ xlate_controller_action(ctx, controller_len,
+ (ctx->in_packet_out ? OFPR_PACKET_OUT
+ : ctx->in_group ? OFPR_GROUP
+ : ctx->in_action_set ? OFPR_ACTION_SET
+ : OFPR_ACTION),
+ 0, NULL, 0);
break;
case OFPP_NONE:
break;
@@ -6251,16 +6120,16 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
controller = ofpact_get_CONTROLLER(a);
if (controller->pause) {
ctx->pause = controller;
- ctx->xout->slow |= SLOW_CONTROLLER;
+ ctx->xout->slow |= SLOW_PAUSE;
*ctx->paused_flow = ctx->xin->flow;
ctx_trigger_freeze(ctx);
a = ofpact_next(a);
} else {
- execute_controller_action(ctx, controller->max_len,
- controller->reason,
- controller->controller_id,
- controller->userdata,
- controller->userdata_len);
+ xlate_controller_action(ctx, controller->max_len,
+ controller->reason,
+ controller->controller_id,
+ controller->userdata,
+ controller->userdata_len);
}
break;
diff --git a/ofproto/ofproto-unixctl.man b/ofproto/ofproto-unixctl.man
index f511c392b..ee1f81fce 100644
--- a/ofproto/ofproto-unixctl.man
+++ b/ofproto/ofproto-unixctl.man
@@ -107,9 +107,9 @@ effects when a packet is specified. If you want side effects to take
place, then you must supply a packet.
.
.IP
-(Output actions are obviously side effects too, but
-the trace commands never execute them, even when one specifies a
-packet.)
+(Side effects when tracing do not have external consequences. Even if a
+packet is specified, a trace will not output a packet or generate sFlow,
+NetFlow or controller events.)
.
.IP "Incomplete information."
Most of the time, Open vSwitch can figure out everything about the