[ovs-dev] [xlate v2 6/6] ofproto-dpif: Revamp xlate_actions() interface.
Ethan Jackson
ethan at nicira.com
Fri May 24 00:15:20 UTC 2013
This patch implements a new interface to xlate_actions() which, aside
from being simpler and more intuitive, achieves several goals. It
pulls all of xlate_actions()'s results into a single structure, which
will be easier to cache and pass around in the future. It also shields
xlate_ctx from the rest of the code, making it possible to refactor
xlate_actions() later.
Signed-off-by: Ethan Jackson <ethan at nicira.com>
---
ofproto/ofproto-dpif.c | 1059 +++++++++++++++++++++++++-----------------------
1 file changed, 543 insertions(+), 516 deletions(-)
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index cc4abd7..cd5b900 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -208,31 +208,29 @@ static int set_stp_port(struct ofport *,
static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
-struct xlate_ctx {
- /* xlate_ctx_init() initializes these members. */
+struct xlate_ctx;
+
+struct xlate_out {
+ tag_type tags; /* Tags associated with actions. */
+ enum slow_path_reason slow; /* 0 if fast path may be used. */
+ bool has_learn; /* Actions include NXAST_LEARN? */
+ bool has_normal; /* Actions output to OFPP_NORMAL? */
+ bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
+ uint16_t nf_output_iface; /* Output interface index for NetFlow. */
+ mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
- /* The ofproto. */
+ uint64_t odp_actions_stub[256 / 8];
+ struct ofpbuf odp_actions;
+};
+
+struct xlate_in {
struct ofproto_dpif *ofproto;
/* Flow to which the OpenFlow actions apply. xlate_actions() will modify
* this flow when actions change header fields. */
struct flow flow;
- /* Flow at the last commit. */
- struct flow base_flow;
-
- /* Tunnel IP destination address as received. This is stored separately
- * as the base_flow.tunnel is cleared on init to reflect the datapath
- * behavior. Used to make sure not to send tunneled output to ourselves,
- * which might lead to an infinite loop. This could happen easily
- * if a tunnel is marked as 'ip_remote=flow', and the flow does not
- * actually set the tun_dst field. */
- ovs_be32 orig_tunnel_ip_dst;
-
- /* stack for the push and pop actions.
- * Each stack element is of the type "union mf_subvalue". */
- struct ofpbuf stack;
- union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
+ const struct initial_vals *initial_vals;
/* The packet corresponding to 'flow', or a null pointer if we are
* revalidating without a packet to refer to. */
@@ -246,9 +244,13 @@ struct xlate_ctx {
* not if we are just revalidating. */
bool may_learn;
- /* The rule that we are currently translating, or NULL. */
+ /* The rule initiating translation or NULL. */
struct rule_dpif *rule;
+ /* The actions to translate. If 'rule' is not NULL, these may be NULL. */
+ const struct ofpact *ofpacts;
+ size_t ofpacts_len;
+
/* Union of the set of TCP flags seen so far in this flow. (Used only by
* NXAST_FIN_TIMEOUT. Set to zero to avoid updating updating rules'
* timeouts.) */
@@ -262,7 +264,7 @@ struct xlate_ctx {
* resubmit or OFPP_TABLE action didn't find a matching rule.
*
* This is normally null so the client has to set it manually after
- * calling xlate_ctx_init(). */
+ * calling xlate_in_init(). */
void (*resubmit_hook)(struct xlate_ctx *, struct rule_dpif *rule);
/* If nonnull, flow translation calls this function to report some
@@ -274,23 +276,35 @@ struct xlate_ctx {
* rule reached through a resubmit or OFPP_TABLE action.
*
* This is normally null so the client has to set it manually after
- * calling xlate_ctx_init(). */
+ * calling xlate_in_init(). */
const struct dpif_flow_stats *resubmit_stats;
+};
-/* xlate_actions() initializes and uses these members. The client might want
- * to look at them after it returns. */
+/* Context used by xlate_actions() and its callees. */
+struct xlate_ctx {
+ struct xlate_in *xin;
+ struct xlate_out *xout;
- struct ofpbuf *odp_actions; /* Datapath actions. */
- tag_type tags; /* Tags associated with actions. */
- enum slow_path_reason slow; /* 0 if fast path may be used. */
- bool has_learn; /* Actions include NXAST_LEARN? */
- bool has_normal; /* Actions output to OFPP_NORMAL? */
- bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
- uint16_t nf_output_iface; /* Output interface index for NetFlow. */
- mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
+ struct ofproto_dpif *ofproto;
+
+ /* Flow at the last commit. */
+ struct flow base_flow;
+
+ /* Tunnel IP destination address as received. This is stored separately
+ * as the base_flow.tunnel is cleared on init to reflect the datapath
+ * behavior. Used to make sure not to send tunneled output to ourselves,
+ * which might lead to an infinite loop. This could happen easily
+ * if a tunnel is marked as 'ip_remote=flow', and the flow does not
+ * actually set the tun_dst field. */
+ ovs_be32 orig_tunnel_ip_dst;
-/* xlate_actions() initializes and uses these members, but the client has no
- * reason to look at them. */
+ /* Stack for the push and pop actions. Each stack element is of type
+ * "union mf_subvalue". */
+ union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
+ struct ofpbuf stack;
+
+ /* The rule that we are currently translating, or NULL. */
+ struct rule_dpif *rule;
int recurse; /* Recursion level, via xlate_table_action. */
bool max_resubmit_trigger; /* Recursed too deeply during translation. */
@@ -317,16 +331,17 @@ struct initial_vals {
ovs_be16 vlan_tci;
};
-static void xlate_ctx_init(struct xlate_ctx *, struct ofproto_dpif *,
- const struct flow *,
- const struct initial_vals *initial_vals,
- struct rule_dpif *, uint8_t tcp_flags,
- const struct ofpbuf *);
-static void xlate_actions(struct xlate_ctx *, const struct ofpact *ofpacts,
- size_t ofpacts_len, struct ofpbuf *odp_actions);
-static void xlate_actions_for_side_effects(struct xlate_ctx *,
- const struct ofpact *ofpacts,
- size_t ofpacts_len);
+static void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
+ const struct flow *, const struct initial_vals *,
+ struct rule_dpif *, uint8_t tcp_flags,
+ const struct ofpbuf *);
+
+static void xlate_out_uninit(struct xlate_out *);
+
+static void xlate_actions(struct xlate_in *, struct xlate_out *);
+
+static void xlate_actions_for_side_effects(struct xlate_in *);
+
static void xlate_table_action(struct xlate_ctx *, uint16_t in_port,
uint8_t table_id, bool may_packet_in);
@@ -465,21 +480,11 @@ struct facet {
struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
uint8_t tcp_flags; /* TCP flags seen for this 'rule'. */
- bool has_learn; /* Actions include NXAST_LEARN? */
- bool has_normal; /* Actions output to OFPP_NORMAL? */
- bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
- tag_type tags; /* Tags that would require revalidation. */
- mirror_mask_t mirrors; /* Bitmap of dependent mirrors. */
-
- /* Datapath actions. */
- struct ofpbuf odp_actions;
- uint64_t odp_actions_stub[256 / 8];
+ struct xlate_out xout;
/* Initial values of the packet that may be needed later. */
struct initial_vals initial_vals;
- enum slow_path_reason slow; /* 0 if fast path may be used. */
-
/* Storage for a single subfacet, to reduce malloc() time and space
* overhead. (A facet always has at least one subfacet and in the common
* case has exactly one subfacet. However, 'one_subfacet' may not
@@ -1013,7 +1018,7 @@ type_run(const char *type)
HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
if (need_revalidate
- || tag_set_intersects(&revalidate_set, facet->tags)) {
+ || tag_set_intersects(&revalidate_set, facet->xout.tags)) {
facet_revalidate(facet);
run_fast_rl();
}
@@ -1604,7 +1609,7 @@ run(struct ofproto *ofproto_)
facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
struct facet, hmap_node);
if (!tag_set_intersects(&ofproto->backer->revalidate_set,
- facet->tags)) {
+ facet->xout.tags)) {
if (!facet_check_consistency(facet)) {
ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
@@ -3491,8 +3496,10 @@ struct flow_miss {
struct flow_miss_op {
struct dpif_op dpif_op;
- void *garbage; /* Pointer to pass to free(), NULL if none. */
- uint64_t stub[1024 / 8]; /* Temporary buffer. */
+
+ uint64_t slow_stub[128 / 8]; /* Buffer for compose_slow_path() */
+ struct xlate_out xout;
+ bool xout_garbage; /* 'xout' needs to be uninitialized? */
};
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
@@ -3585,7 +3592,7 @@ init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
eth_pop_vlan(packet);
}
- op->garbage = NULL;
+ op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_EXECUTE;
op->dpif_op.u.execute.key = miss->key;
op->dpif_op.u.execute.key_len = miss->key_len;
@@ -3652,38 +3659,34 @@ handle_flow_miss_without_facet(struct flow_miss *miss,
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
long long int now = time_msec();
- struct xlate_ctx ctx;
struct ofpbuf *packet;
+ struct xlate_in xin;
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct flow_miss_op *op = &ops[*n_ops];
struct dpif_flow_stats stats;
- struct ofpbuf odp_actions;
COVERAGE_INC(facet_suppress);
- ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
-
dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
rule_credit_stats(rule, &stats);
- xlate_ctx_init(&ctx, ofproto, &miss->flow, &miss->initial_vals, rule,
- stats.tcp_flags, packet);
- ctx.resubmit_stats = &stats;
- xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
- &odp_actions);
+ xlate_in_init(&xin, ofproto, &miss->flow, &miss->initial_vals, rule,
+ stats.tcp_flags, packet);
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &op->xout);
- if (odp_actions.size) {
+ if (op->xout.odp_actions.size) {
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
- execute->actions = odp_actions.data;
- execute->actions_len = odp_actions.size;
- op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
+ execute->actions = op->xout.odp_actions.data;
+ execute->actions_len = op->xout.odp_actions.size;
+ op->xout_garbage = true;
(*n_ops)++;
} else {
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&op->xout);
}
}
}
@@ -3708,7 +3711,7 @@ handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
struct ofpbuf *packet;
subfacet = subfacet_create(facet, miss, now);
- want_path = subfacet->facet->slow ? SF_SLOW_PATH : SF_FAST_PATH;
+ want_path = subfacet->facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct flow_miss_op *op = &ops[*n_ops];
@@ -3717,23 +3720,22 @@ handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
handle_flow_miss_common(facet->rule, packet, &miss->flow);
if (want_path != SF_FAST_PATH) {
- struct xlate_ctx ctx;
+ struct xlate_in xin;
- xlate_ctx_init(&ctx, ofproto, &facet->flow, &facet->initial_vals,
- facet->rule, 0, packet);
- xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
- facet->rule->up.ofpacts_len);
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
+ facet->rule, 0, packet);
+ xlate_actions_for_side_effects(&xin);
}
dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
subfacet_update_stats(subfacet, &stats);
- if (facet->odp_actions.size) {
+ if (facet->xout.odp_actions.size) {
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
- execute->actions = facet->odp_actions.data,
- execute->actions_len = facet->odp_actions.size;
+ execute->actions = facet->xout.odp_actions.data,
+ execute->actions_len = facet->xout.odp_actions.size;
(*n_ops)++;
}
}
@@ -3744,17 +3746,17 @@ handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
subfacet->path = want_path;
- op->garbage = NULL;
+ op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
put->key = miss->key;
put->key_len = miss->key_len;
if (want_path == SF_FAST_PATH) {
- put->actions = facet->odp_actions.data;
- put->actions_len = facet->odp_actions.size;
+ put->actions = facet->xout.odp_actions.data;
+ put->actions_len = facet->xout.odp_actions.size;
} else {
- compose_slow_path(ofproto, &facet->flow, facet->slow,
- op->stub, sizeof op->stub,
+ compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
+ op->slow_stub, sizeof op->slow_stub,
&put->actions, &put->actions_len);
}
put->stats = NULL;
@@ -4048,7 +4050,9 @@ handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
/* Free memory. */
for (i = 0; i < n_ops; i++) {
- free(flow_miss_ops[i].garbage);
+ if (flow_miss_ops[i].xout_garbage) {
+ xlate_out_uninit(&flow_miss_ops[i].xout);
+ }
}
hmap_destroy(&todo);
}
@@ -4537,8 +4541,8 @@ expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
&ofproto->subfacets) {
long long int cutoff;
- cutoff = (subfacet->facet->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP
- | SLOW_STP)
+ cutoff = (subfacet->facet->xout.slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP
+ | SLOW_STP)
? special_cutoff
: normal_cutoff);
if (subfacet->used < cutoff) {
@@ -4612,7 +4616,7 @@ static struct facet *
facet_create(const struct flow_miss *miss, uint32_t hash)
{
struct ofproto_dpif *ofproto = miss->ofproto;
- struct xlate_ctx ctx;
+ struct xlate_in xin;
struct facet *facet;
facet = xzalloc(sizeof *facet);
@@ -4628,20 +4632,11 @@ facet_create(const struct flow_miss *miss, uint32_t hash)
netflow_flow_init(&facet->nf_flow);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
- ofpbuf_use_stub(&facet->odp_actions, &facet->odp_actions_stub,
- sizeof facet->odp_actions_stub);
- xlate_ctx_init(&ctx, ofproto, &facet->flow, &facet->initial_vals,
- facet->rule, 0, NULL);
- ctx.may_learn = true;
- xlate_actions(&ctx, facet->rule->up.ofpacts, facet->rule->up.ofpacts_len,
- &facet->odp_actions);
- facet->tags = ctx.tags;
- facet->has_learn = ctx.has_learn;
- facet->has_normal = ctx.has_normal;
- facet->has_fin_timeout = ctx.has_fin_timeout;
- facet->nf_flow.output_iface = ctx.nf_output_iface;
- facet->mirrors = ctx.mirrors;
- facet->slow = ctx.slow;
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
+ facet->rule, 0, NULL);
+ xin.may_learn = true;
+ xlate_actions(&xin, &facet->xout);
+ facet->nf_flow.output_iface = facet->xout.nf_output_iface;
return facet;
}
@@ -4650,7 +4645,7 @@ static void
facet_free(struct facet *facet)
{
if (facet) {
- ofpbuf_uninit(&facet->odp_actions);
+ xlate_out_uninit(&facet->xout);
free(facet);
}
}
@@ -4719,26 +4714,25 @@ facet_learn(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
long long int now = time_msec();
- struct xlate_ctx ctx;
+ struct xlate_in xin;
- if (!facet->has_fin_timeout && now < facet->learn_rl) {
+ if (!facet->xout.has_fin_timeout && now < facet->learn_rl) {
return;
}
facet->learn_rl = now + 500;
- if (!facet->has_learn
- && !facet->has_normal
- && (!facet->has_fin_timeout
+ if (!facet->xout.has_learn
+ && !facet->xout.has_normal
+ && (!facet->xout.has_fin_timeout
|| !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
return;
}
- xlate_ctx_init(&ctx, ofproto, &facet->flow, &facet->initial_vals,
- facet->rule, facet->tcp_flags, NULL);
- ctx.may_learn = true;
- xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
- facet->rule->up.ofpacts_len);
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
+ facet->rule, facet->tcp_flags, NULL);
+ xin.may_learn = true;
+ xlate_actions_for_side_effects(&xin);
}
static void
@@ -4750,7 +4744,7 @@ facet_account(struct facet *facet)
ovs_be16 vlan_tci;
uint64_t n_bytes;
- if (!facet->has_normal || !ofproto->has_bonded_bundles) {
+ if (!facet->xout.has_normal || !ofproto->has_bonded_bundles) {
return;
}
n_bytes = facet->byte_count - facet->accounted_bytes;
@@ -4764,8 +4758,8 @@ facet_account(struct facet *facet)
* We use the actions from an arbitrary subfacet because they should all
* be equally valid for our purpose. */
vlan_tci = facet->flow.vlan_tci;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->odp_actions.data,
- facet->odp_actions.size) {
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data,
+ facet->xout.odp_actions.size) {
const struct ovs_action_push_vlan *vlan;
struct ofport_dpif *port;
@@ -4886,7 +4880,7 @@ facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
if (facet
&& (ofproto->backer->need_revalidate
|| tag_set_intersects(&ofproto->backer->revalidate_set,
- facet->tags))) {
+ facet->xout.tags))) {
facet_revalidate(facet);
/* facet_revalidate() may have destroyed 'facet'. */
@@ -4904,10 +4898,9 @@ facet_check_consistency(struct facet *facet)
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct ds s = DS_EMPTY_INITIALIZER;
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
+ struct xlate_out xout;
+ struct xlate_in xin;
- struct xlate_ctx ctx;
struct rule_dpif *rule;
bool may_log = false;
bool ok;
@@ -4930,13 +4923,12 @@ facet_check_consistency(struct facet *facet)
}
/* Check the datapath actions for consistency. */
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- xlate_ctx_init(&ctx, ofproto, &facet->flow, &facet->initial_vals, rule, 0,
- NULL);
- xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, rule,
+ 0, NULL);
+ xlate_actions(&xin, &xout);
- if (facet->slow == ctx.slow
- && ofpbuf_equal(&facet->odp_actions, &odp_actions)) {
+ if (facet->xout.slow == xout.slow
+ && ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
goto exit;
}
@@ -4952,18 +4944,18 @@ facet_check_consistency(struct facet *facet)
flow_format(&s, &facet->flow);
ds_put_cstr(&s, ": inconsistency in facet");
- if (!ofpbuf_equal(&facet->odp_actions, &odp_actions)) {
+ if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
ds_put_cstr(&s, " (actions were: ");
- format_odp_actions(&s, facet->odp_actions.data,
- facet->odp_actions.size);
+ format_odp_actions(&s, facet->xout.odp_actions.data,
+ facet->xout.odp_actions.size);
ds_put_cstr(&s, ") (correct actions: ");
- format_odp_actions(&s, odp_actions.data, odp_actions.size);
+ format_odp_actions(&s, xout.odp_actions.data, xout.odp_actions.size);
ds_put_cstr(&s, ")");
}
- if (facet->slow != ctx.slow) {
+ if (facet->xout.slow != xout.slow) {
ds_put_format(&s, " slow path incorrect. should be %s",
- ctx.slow ? "true" : "false");
+ xout.slow ? "true" : "false");
}
exit:
@@ -4971,7 +4963,7 @@ exit:
ds_chomp(&s, '\n');
VLOG_WARN("%s", ds_cstr(&s));
}
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&xout);
ds_destroy(&s);
return ok;
}
@@ -4992,13 +4984,10 @@ static void
facet_revalidate(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
-
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
-
struct rule_dpif *new_rule;
struct subfacet *subfacet;
- struct xlate_ctx ctx;
+ struct xlate_out xout;
+ struct xlate_in xin;
COVERAGE_INC(facet_revalidate);
@@ -5028,44 +5017,44 @@ facet_revalidate(struct facet *facet)
* We do not modify any 'facet' state yet, because we might need to, e.g.,
* emit a NetFlow expiration and, if so, we need to have the old state
* around to properly compose it. */
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- xlate_ctx_init(&ctx, ofproto, &facet->flow, &facet->initial_vals, new_rule,
- 0, NULL);
- xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
- &odp_actions);
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, new_rule,
+ 0, NULL);
+ xlate_actions(&xin, &xout);
/* A facet's slow path reason should only change under dramatic
* circumstances. Rather than try to update everything, it's simpler to
* remove the facet and start over. */
- if (facet->slow != ctx.slow) {
+ if (facet->xout.slow != xout.slow) {
facet_remove(facet);
goto exit;
}
- if (!ofpbuf_equal(&facet->odp_actions, &odp_actions)) {
+ if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
LIST_FOR_EACH(subfacet, list_node, &facet->subfacets) {
if (subfacet->path == SF_FAST_PATH) {
struct dpif_flow_stats stats;
- subfacet_install(subfacet, &odp_actions, &stats);
+ subfacet_install(subfacet, &xout.odp_actions, &stats);
subfacet_update_stats(subfacet, &stats);
}
}
facet_flush_stats(facet);
- ofpbuf_clear(&facet->odp_actions);
- ofpbuf_put(&facet->odp_actions, odp_actions.data, odp_actions.size);
+ ofpbuf_clear(&facet->xout.odp_actions);
+ ofpbuf_put(&facet->xout.odp_actions, xout.odp_actions.data,
+ xout.odp_actions.size);
}
/* Update 'facet' now that we've taken care of all the old state. */
- facet->tags = ctx.tags;
- facet->slow = ctx.slow;
- facet->nf_flow.output_iface = ctx.nf_output_iface;
- facet->has_learn = ctx.has_learn;
- facet->has_normal = ctx.has_normal;
- facet->has_fin_timeout = ctx.has_fin_timeout;
- facet->mirrors = ctx.mirrors;
+ facet->xout.tags = xout.tags;
+ facet->xout.slow = xout.slow;
+ facet->xout.has_learn = xout.has_learn;
+ facet->xout.has_normal = xout.has_normal;
+ facet->xout.has_fin_timeout = xout.has_fin_timeout;
+ facet->xout.nf_output_iface = xout.nf_output_iface;
+ facet->xout.mirrors = xout.mirrors;
+ facet->nf_flow.output_iface = facet->xout.nf_output_iface;
if (facet->rule != new_rule) {
COVERAGE_INC(facet_changed_rule);
@@ -5077,7 +5066,7 @@ facet_revalidate(struct facet *facet)
}
exit:
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&xout);
}
/* Updates 'facet''s used time. Caller is responsible for calling
@@ -5126,7 +5115,8 @@ facet_push_stats(struct facet *facet)
flow_push_stats(facet, &stats);
update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
- facet->mirrors, stats.n_packets, stats.n_bytes);
+ facet->xout.mirrors, stats.n_packets,
+ stats.n_bytes);
}
}
@@ -5175,15 +5165,14 @@ flow_push_stats(struct facet *facet, const struct dpif_flow_stats *stats)
{
struct rule_dpif *rule = facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct xlate_ctx ctx;
+ struct xlate_in xin;
ofproto_rule_update_used(&rule->up, stats->used);
- xlate_ctx_init(&ctx, ofproto, &facet->flow, &facet->initial_vals, rule, 0,
- NULL);
- ctx.resubmit_stats = stats;
- xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
- rule->up.ofpacts_len);
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, rule,
+ 0, NULL);
+ xin.resubmit_stats = stats;
+ xlate_actions_for_side_effects(&xin);
}
/* Subfacets. */
@@ -5330,7 +5319,7 @@ subfacet_install(struct subfacet *subfacet, const struct ofpbuf *odp_actions,
{
struct facet *facet = subfacet->facet;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- enum subfacet_path path = facet->slow ? SF_SLOW_PATH : SF_FAST_PATH;
+ enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
const struct nlattr *actions = odp_actions->data;
size_t actions_len = odp_actions->size;
@@ -5344,7 +5333,7 @@ subfacet_install(struct subfacet *subfacet, const struct ofpbuf *odp_actions,
}
if (path == SF_SLOW_PATH) {
- compose_slow_path(ofproto, &facet->flow, facet->slow,
+ compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
slow_path_stub, sizeof slow_path_stub,
&actions, &actions_len);
}
@@ -5610,24 +5599,22 @@ rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
struct initial_vals initial_vals;
struct dpif_flow_stats stats;
- struct xlate_ctx ctx;
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
+ struct xlate_out xout;
+ struct xlate_in xin;
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
rule_credit_stats(rule, &stats);
initial_vals.vlan_tci = flow->vlan_tci;
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- xlate_ctx_init(&ctx, ofproto, flow, &initial_vals,
- rule, stats.tcp_flags, packet);
- ctx.resubmit_stats = &stats;
- xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
+ xlate_in_init(&xin, ofproto, flow, &initial_vals, rule, stats.tcp_flags,
+ packet);
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &xout);
- execute_odp_actions(ofproto, flow, odp_actions.data,
- odp_actions.size, packet);
+ execute_odp_actions(ofproto, flow, xout.odp_actions.data,
+ xout.odp_actions.size, packet);
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&xout);
}
static enum ofperr
@@ -5659,7 +5646,8 @@ send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
struct dpif_flow_stats stats;
struct odputil_keybuf keybuf;
struct ofpact_output output;
- struct xlate_ctx ctx;
+ struct xlate_out xout;
+ struct xlate_in xin;
struct flow flow;
int error;
@@ -5676,15 +5664,17 @@ send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
output.port = ofport->up.ofp_port;
output.max_len = 0;
- xlate_ctx_init(&ctx, ofproto, &flow, NULL, NULL, 0, packet);
- ctx.resubmit_stats = &stats;
- xlate_actions(&ctx, &output.ofpact, sizeof output, &odp_actions);
+ xlate_in_init(&xin, ofproto, &flow, NULL, NULL, 0, packet);
+ xin.ofpacts_len = sizeof output;
+ xin.ofpacts = &output.ofpact;
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &xout);
error = dpif_execute(ofproto->backer->dpif,
key.data, key.size,
- odp_actions.data, odp_actions.size,
+ xout.odp_actions.data, xout.odp_actions.size,
packet);
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&xout);
if (error) {
VLOG_WARN_RL(&rl, "%s: failed to send packet on port %s (%s)",
@@ -5881,8 +5871,8 @@ static void
add_sflow_action(struct xlate_ctx *ctx)
{
ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
- ctx->odp_actions,
- &ctx->flow, OVSP_NONE);
+ &ctx->xout->odp_actions,
+ &ctx->xin->flow, OVSP_NONE);
ctx->sflow_odp_port = 0;
ctx->sflow_n_outputs = 0;
}
@@ -5892,7 +5882,8 @@ add_sflow_action(struct xlate_ctx *ctx)
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
- compose_ipfix_action(ctx->ofproto, ctx->odp_actions, &ctx->flow);
+ compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions,
+ &ctx->xin->flow);
}
/* Fix SAMPLE action according to data collected while composing ODP actions.
@@ -5908,7 +5899,7 @@ fix_sflow_action(struct xlate_ctx *ctx)
return;
}
- cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
+ cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
sizeof cookie->sflow);
ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
@@ -5944,7 +5935,7 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
if (netdev_vport_is_patch(ofport->up.netdev)) {
struct ofport_dpif *peer = ofport_get_peer(ofport);
- struct flow old_flow = ctx->flow;
+ struct flow old_flow = ctx->xin->flow;
const struct ofproto_dpif *peer_ofproto;
enum slow_path_reason special;
struct ofport_dpif *in_port;
@@ -5961,49 +5952,49 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
}
ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
- ctx->flow.in_port = peer->up.ofp_port;
- ctx->flow.metadata = htonll(0);
- memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
- memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);
-
- in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
- special = process_special(ctx->ofproto, &ctx->flow, in_port,
- ctx->packet);
+ ctx->xin->flow.in_port = peer->up.ofp_port;
+ ctx->xin->flow.metadata = htonll(0);
+ memset(&ctx->xin->flow.tunnel, 0, sizeof ctx->xin->flow.tunnel);
+ memset(ctx->xin->flow.regs, 0, sizeof ctx->xin->flow.regs);
+
+ in_port = get_ofp_port(ctx->ofproto, ctx->xin->flow.in_port);
+ special = process_special(ctx->ofproto, &ctx->xin->flow, in_port,
+ ctx->xin->packet);
if (special) {
- ctx->slow |= special;
+ ctx->xout->slow |= special;
} else if (!in_port || may_receive(in_port, ctx)) {
if (!in_port || stp_forward_in_state(in_port->stp_state)) {
- xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
} else {
/* Forwarding is disabled by STP. Let OFPP_NORMAL and the
* learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
- size_t old_size = ctx->odp_actions->size;
- xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ size_t old_size = ctx->xout->odp_actions.size;
+ xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
ctx->base_flow = old_base_flow;
- ctx->odp_actions->size = old_size;
+ ctx->xout->odp_actions.size = old_size;
}
}
- ctx->flow = old_flow;
+ ctx->xin->flow = old_flow;
ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
- if (ctx->resubmit_stats) {
- netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
- netdev_vport_inc_rx(peer->up.netdev, ctx->resubmit_stats);
+ if (ctx->xin->resubmit_stats) {
+ netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
+ netdev_vport_inc_rx(peer->up.netdev, ctx->xin->resubmit_stats);
}
return;
}
- flow_vlan_tci = ctx->flow.vlan_tci;
- flow_skb_mark = ctx->flow.skb_mark;
- flow_nw_tos = ctx->flow.nw_tos;
+ flow_vlan_tci = ctx->xin->flow.vlan_tci;
+ flow_skb_mark = ctx->xin->flow.skb_mark;
+ flow_nw_tos = ctx->xin->flow.nw_tos;
- pdscp = get_priority(ofport, ctx->flow.skb_priority);
+ pdscp = get_priority(ofport, ctx->xin->flow.skb_priority);
if (pdscp) {
- ctx->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->flow.nw_tos |= pdscp->dscp;
+ ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
+ ctx->xin->flow.nw_tos |= pdscp->dscp;
}
if (ofport->tnl_port) {
@@ -6011,44 +6002,45 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
* the Logical (tunnel) Port are not visible for any further
* matches, while explicit set actions on tunnel metadata are.
*/
- struct flow_tnl flow_tnl = ctx->flow.tunnel;
- odp_port = tnl_port_send(ofport->tnl_port, &ctx->flow);
+ struct flow_tnl flow_tnl = ctx->xin->flow.tunnel;
+ odp_port = tnl_port_send(ofport->tnl_port, &ctx->xin->flow);
if (odp_port == OVSP_NONE) {
xlate_report(ctx, "Tunneling decided against output");
goto out; /* restore flow_nw_tos */
}
- if (ctx->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
+ if (ctx->xin->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
xlate_report(ctx, "Not tunneling to our own address");
goto out; /* restore flow_nw_tos */
}
- if (ctx->resubmit_stats) {
- netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
+ if (ctx->xin->resubmit_stats) {
+ netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
}
out_port = odp_port;
- commit_odp_tunnel_action(&ctx->flow, &ctx->base_flow,
- ctx->odp_actions);
- ctx->flow.tunnel = flow_tnl; /* Restore tunnel metadata */
+ commit_odp_tunnel_action(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions);
+ ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */
} else {
odp_port = ofport->odp_port;
out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
- ctx->flow.vlan_tci);
+ ctx->xin->flow.vlan_tci);
if (out_port != odp_port) {
- ctx->flow.vlan_tci = htons(0);
+ ctx->xin->flow.vlan_tci = htons(0);
}
- ctx->flow.skb_mark &= ~IPSEC_MARK;
+ ctx->xin->flow.skb_mark &= ~IPSEC_MARK;
}
- commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
- nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
+ commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions);
+ nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
ctx->sflow_odp_port = odp_port;
ctx->sflow_n_outputs++;
- ctx->nf_output_iface = ofp_port;
+ ctx->xout->nf_output_iface = ofp_port;
/* Restore flow */
- ctx->flow.vlan_tci = flow_vlan_tci;
- ctx->flow.skb_mark = flow_skb_mark;
+ ctx->xin->flow.vlan_tci = flow_vlan_tci;
+ ctx->xin->flow.skb_mark = flow_skb_mark;
out:
- ctx->flow.nw_tos = flow_nw_tos;
+ ctx->xin->flow.nw_tos = flow_nw_tos;
}
static void
@@ -6066,11 +6058,11 @@ tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
if (table_id > 0 && table_id < N_TABLES) {
struct table_dpif *table = &ofproto->tables[table_id];
if (table->other_table) {
- ctx->tags |= (rule && rule->tag
- ? rule->tag
- : rule_calculate_tag(&ctx->flow,
- &table->other_table->mask,
- table->basis));
+ ctx->xout->tags |= (rule && rule->tag
+ ? rule->tag
+ : rule_calculate_tag(&ctx->xin->flow,
+ &table->other_table->mask,
+ table->basis));
}
}
}
@@ -6080,8 +6072,8 @@ static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
bool may_packet_in)
{
- if (ctx->resubmit_hook) {
- ctx->resubmit_hook(ctx, rule);
+ if (ctx->xin->resubmit_hook) {
+ ctx->xin->resubmit_hook(ctx, rule);
}
if (rule == NULL && may_packet_in) {
/* XXX
@@ -6091,10 +6083,10 @@ ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
* OFPTC_TABLE_MISS_DROP
* When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
*/
- rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->flow);
+ rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
}
- if (rule && ctx->resubmit_stats) {
- rule_credit_stats(rule, ctx->resubmit_stats);
+ if (rule && ctx->xin->resubmit_stats) {
+ rule_credit_stats(rule, ctx->xin->resubmit_stats);
}
return rule;
}
@@ -6105,20 +6097,20 @@ xlate_table_action(struct xlate_ctx *ctx,
{
if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
struct rule_dpif *rule;
- uint16_t old_in_port = ctx->flow.in_port;
+ uint16_t old_in_port = ctx->xin->flow.in_port;
uint8_t old_table_id = ctx->table_id;
ctx->table_id = table_id;
/* Look up a flow with 'in_port' as the input port. */
- ctx->flow.in_port = in_port;
- rule = rule_dpif_lookup__(ctx->ofproto, &ctx->flow, table_id);
+ ctx->xin->flow.in_port = in_port;
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, table_id);
tag_the_flow(ctx, rule);
/* Restore the original input port. Otherwise OFPP_NORMAL and
* OFPP_IN_PORT will have surprising behavior. */
- ctx->flow.in_port = old_in_port;
+ ctx->xin->flow.in_port = old_in_port;
rule = ctx_rule_hooks(ctx, rule, may_packet_in);
@@ -6151,7 +6143,7 @@ xlate_ofpact_resubmit(struct xlate_ctx *ctx,
in_port = resubmit->in_port;
if (in_port == OFPP_IN_PORT) {
- in_port = ctx->flow.in_port;
+ in_port = ctx->xin->flow.in_port;
}
table_id = resubmit->table_id;
@@ -6170,7 +6162,7 @@ flood_packets(struct xlate_ctx *ctx, bool all)
HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
uint16_t ofp_port = ofport->up.ofp_port;
- if (ofp_port == ctx->flow.in_port) {
+ if (ofp_port == ctx->xin->flow.in_port) {
continue;
}
@@ -6181,7 +6173,7 @@ flood_packets(struct xlate_ctx *ctx, bool all)
}
}
- ctx->nf_output_iface = NF_OUT_FLOOD;
+ ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
static void
@@ -6192,12 +6184,12 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
struct ofputil_packet_in pin;
struct ofpbuf *packet;
- ctx->slow |= SLOW_CONTROLLER;
- if (!ctx->packet) {
+ ctx->xout->slow |= SLOW_CONTROLLER;
+ if (!ctx->xin->packet) {
return;
}
- packet = ofpbuf_clone(ctx->packet);
+ packet = ofpbuf_clone(ctx->xin->packet);
if (packet->l2 && packet->l3) {
struct eth_header *eh;
@@ -6206,36 +6198,37 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
eth_pop_vlan(packet);
eh = packet->l2;
- memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
- memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
+ memcpy(eh->eth_src, ctx->xin->flow.dl_src, sizeof eh->eth_src);
+ memcpy(eh->eth_dst, ctx->xin->flow.dl_dst, sizeof eh->eth_dst);
- if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
- eth_push_vlan(packet, ctx->flow.vlan_tci);
+ if (ctx->xin->flow.vlan_tci & htons(VLAN_CFI)) {
+ eth_push_vlan(packet, ctx->xin->flow.vlan_tci);
}
mpls_depth = eth_mpls_depth(packet);
- if (mpls_depth < ctx->flow.mpls_depth) {
- push_mpls(packet, ctx->flow.dl_type, ctx->flow.mpls_lse);
- } else if (mpls_depth > ctx->flow.mpls_depth) {
- pop_mpls(packet, ctx->flow.dl_type);
+ if (mpls_depth < ctx->xin->flow.mpls_depth) {
+ push_mpls(packet, ctx->xin->flow.dl_type, ctx->xin->flow.mpls_lse);
+ } else if (mpls_depth > ctx->xin->flow.mpls_depth) {
+ pop_mpls(packet, ctx->xin->flow.dl_type);
} else if (mpls_depth) {
- set_mpls_lse(packet, ctx->flow.mpls_lse);
+ set_mpls_lse(packet, ctx->xin->flow.mpls_lse);
}
if (packet->l4) {
- if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
- packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
- ctx->flow.nw_tos, ctx->flow.nw_ttl);
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ packet_set_ipv4(packet, ctx->xin->flow.nw_src,
+ ctx->xin->flow.nw_dst, ctx->xin->flow.nw_tos,
+ ctx->xin->flow.nw_ttl);
}
if (packet->l7) {
- if (ctx->flow.nw_proto == IPPROTO_TCP) {
- packet_set_tcp_port(packet, ctx->flow.tp_src,
- ctx->flow.tp_dst);
- } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
- packet_set_udp_port(packet, ctx->flow.tp_src,
- ctx->flow.tp_dst);
+ if (ctx->xin->flow.nw_proto == IPPROTO_TCP) {
+ packet_set_tcp_port(packet, ctx->xin->flow.tp_src,
+ ctx->xin->flow.tp_dst);
+ } else if (ctx->xin->flow.nw_proto == IPPROTO_UDP) {
+ packet_set_udp_port(packet, ctx->xin->flow.tp_src,
+ ctx->xin->flow.tp_dst);
}
}
}
@@ -6249,7 +6242,7 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
pin.send_len = len;
- flow_get_metadata(&ctx->flow, &pin.fmd);
+ flow_get_metadata(&ctx->xin->flow, &pin.fmd);
connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
ofpbuf_delete(packet);
@@ -6261,36 +6254,36 @@ execute_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
ovs_assert(eth_type_mpls(eth_type));
if (ctx->base_flow.mpls_depth) {
- ctx->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
- ctx->flow.mpls_depth++;
+ ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
+ ctx->xin->flow.mpls_depth++;
} else {
ovs_be32 label;
uint8_t tc, ttl;
- if (ctx->flow.dl_type == htons(ETH_TYPE_IPV6)) {
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IPV6)) {
label = htonl(0x2); /* IPV6 Explicit Null. */
} else {
label = htonl(0x0); /* IPV4 Explicit Null. */
}
- tc = (ctx->flow.nw_tos & IP_DSCP_MASK) >> 2;
- ttl = ctx->flow.nw_ttl ? ctx->flow.nw_ttl : 0x40;
- ctx->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
- ctx->flow.mpls_depth = 1;
+ tc = (ctx->xin->flow.nw_tos & IP_DSCP_MASK) >> 2;
+ ttl = ctx->xin->flow.nw_ttl ? ctx->xin->flow.nw_ttl : 0x40;
+ ctx->xin->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
+ ctx->xin->flow.mpls_depth = 1;
}
- ctx->flow.dl_type = eth_type;
+ ctx->xin->flow.dl_type = eth_type;
}
static void
execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
- ovs_assert(eth_type_mpls(ctx->flow.dl_type));
+ ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
ovs_assert(!eth_type_mpls(eth_type));
- if (ctx->flow.mpls_depth) {
- ctx->flow.mpls_depth--;
- ctx->flow.mpls_lse = htonl(0);
- if (!ctx->flow.mpls_depth) {
- ctx->flow.dl_type = eth_type;
+ if (ctx->xin->flow.mpls_depth) {
+ ctx->xin->flow.mpls_depth--;
+ ctx->xin->flow.mpls_lse = htonl(0);
+ if (!ctx->xin->flow.mpls_depth) {
+ ctx->xin->flow.dl_type = eth_type;
}
}
}
@@ -6298,13 +6291,13 @@ execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
static bool
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
- if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
- ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
+ if (ctx->xin->flow.dl_type != htons(ETH_TYPE_IP) &&
+ ctx->xin->flow.dl_type != htons(ETH_TYPE_IPV6)) {
return false;
}
- if (ctx->flow.nw_ttl > 1) {
- ctx->flow.nw_ttl--;
+ if (ctx->xin->flow.nw_ttl > 1) {
+ ctx->xin->flow.nw_ttl--;
return false;
} else {
size_t i;
@@ -6322,26 +6315,26 @@ compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
static bool
execute_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
- if (!eth_type_mpls(ctx->flow.dl_type)) {
+ if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
return true;
}
- set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+ set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
return false;
}
static bool
execute_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
- uint8_t ttl = mpls_lse_to_ttl(ctx->flow.mpls_lse);
+ uint8_t ttl = mpls_lse_to_ttl(ctx->xin->flow.mpls_lse);
- if (!eth_type_mpls(ctx->flow.dl_type)) {
+ if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
return false;
}
if (ttl > 1) {
ttl--;
- set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+ set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
return false;
} else {
execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
@@ -6355,16 +6348,16 @@ static void
xlate_output_action(struct xlate_ctx *ctx,
uint16_t port, uint16_t max_len, bool may_packet_in)
{
- uint16_t prev_nf_output_iface = ctx->nf_output_iface;
+ uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface;
- ctx->nf_output_iface = NF_OUT_DROP;
+ ctx->xout->nf_output_iface = NF_OUT_DROP;
switch (port) {
case OFPP_IN_PORT:
- compose_output_action(ctx, ctx->flow.in_port);
+ compose_output_action(ctx, ctx->xin->flow.in_port);
break;
case OFPP_TABLE:
- xlate_table_action(ctx, ctx->flow.in_port, 0, may_packet_in);
+ xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in);
break;
case OFPP_NORMAL:
xlate_normal(ctx);
@@ -6382,7 +6375,7 @@ xlate_output_action(struct xlate_ctx *ctx,
break;
case OFPP_LOCAL:
default:
- if (port != ctx->flow.in_port) {
+ if (port != ctx->xin->flow.in_port) {
compose_output_action(ctx, port);
} else {
xlate_report(ctx, "skipping output to input port");
@@ -6391,12 +6384,12 @@ xlate_output_action(struct xlate_ctx *ctx,
}
if (prev_nf_output_iface == NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_FLOOD;
- } else if (ctx->nf_output_iface == NF_OUT_DROP) {
- ctx->nf_output_iface = prev_nf_output_iface;
+ ctx->xout->nf_output_iface = NF_OUT_FLOOD;
+ } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
+ ctx->xout->nf_output_iface = prev_nf_output_iface;
} else if (prev_nf_output_iface != NF_OUT_DROP &&
- ctx->nf_output_iface != NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_MULTI;
+ ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->xout->nf_output_iface = NF_OUT_MULTI;
}
}
@@ -6404,7 +6397,7 @@ static void
xlate_output_reg_action(struct xlate_ctx *ctx,
const struct ofpact_output_reg *or)
{
- uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
+ uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
if (port <= UINT16_MAX) {
xlate_output_action(ctx, port, or->max_len, false);
}
@@ -6430,22 +6423,22 @@ xlate_enqueue_action(struct xlate_ctx *ctx,
/* Check output port. */
if (ofp_port == OFPP_IN_PORT) {
- ofp_port = ctx->flow.in_port;
- } else if (ofp_port == ctx->flow.in_port) {
+ ofp_port = ctx->xin->flow.in_port;
+ } else if (ofp_port == ctx->xin->flow.in_port) {
return;
}
/* Add datapath actions. */
- flow_priority = ctx->flow.skb_priority;
- ctx->flow.skb_priority = priority;
+ flow_priority = ctx->xin->flow.skb_priority;
+ ctx->xin->flow.skb_priority = priority;
compose_output_action(ctx, ofp_port);
- ctx->flow.skb_priority = flow_priority;
+ ctx->xin->flow.skb_priority = flow_priority;
/* Update NetFlow output port. */
- if (ctx->nf_output_iface == NF_OUT_DROP) {
- ctx->nf_output_iface = ofp_port;
- } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_MULTI;
+ if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
+ ctx->xout->nf_output_iface = ofp_port;
+ } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->xout->nf_output_iface = NF_OUT_MULTI;
}
}
@@ -6456,7 +6449,7 @@ xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
queue_id, &skb_priority)) {
- ctx->flow.skb_priority = skb_priority;
+ ctx->xin->flow.skb_priority = skb_priority;
} else {
/* Couldn't translate queue to a priority. Nothing to do. A warning
* has already been logged. */
@@ -6491,9 +6484,10 @@ xlate_bundle_action(struct xlate_ctx *ctx,
{
uint16_t port;
- port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
+ port = bundle_execute(bundle, &ctx->xin->flow, slave_enabled_cb,
+ ctx->ofproto);
if (bundle->dst.field) {
- nxm_reg_load(&bundle->dst, port, &ctx->flow);
+ nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
} else {
xlate_output_action(ctx, port, 0, false);
}
@@ -6510,7 +6504,7 @@ xlate_learn_action(struct xlate_ctx *ctx,
int error;
ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
- learn_execute(learn, &ctx->flow, &fm, &ofpacts);
+ learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
if (error && !VLOG_DROP_WARN(&rl)) {
@@ -6535,7 +6529,7 @@ static void
xlate_fin_timeout(struct xlate_ctx *ctx,
const struct ofpact_fin_timeout *oft)
{
- if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
+ if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
struct rule_dpif *rule = ctx->rule;
reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
@@ -6552,18 +6546,20 @@ xlate_sample_action(struct xlate_ctx *ctx,
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
- commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
+ commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
- compose_sample_action(ctx->ofproto, ctx->odp_actions, &ctx->flow,
+ compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions, &ctx->xin->flow,
probability, &cookie, sizeof cookie.flow_sample);
}
static bool
may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
{
- if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
+ if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
+ eth_addr_stp)
? OFPUTIL_PC_NO_RECV_STP
: OFPUTIL_PC_NO_RECV)) {
return false;
@@ -6585,14 +6581,14 @@ static bool
tunnel_ecn_ok(struct xlate_ctx *ctx)
{
if (is_ip_any(&ctx->base_flow)
- && (ctx->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
" but is not ECN capable");
return false;
} else {
/* Set the ECN CE value in the tunneled packet. */
- ctx->flow.nw_tos |= IP_ECN_CE;
+ ctx->xin->flow.nw_tos |= IP_ECN_CE;
}
}
@@ -6639,66 +6635,69 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_SET_VLAN_VID:
- ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
- ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
- | htons(VLAN_CFI));
+ ctx->xin->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
+ ctx->xin->flow.vlan_tci |=
+ (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
+ | htons(VLAN_CFI));
break;
case OFPACT_SET_VLAN_PCP:
- ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
- ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
- << VLAN_PCP_SHIFT)
- | VLAN_CFI);
+ ctx->xin->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
+ ctx->xin->flow.vlan_tci |=
+ htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
+ | VLAN_CFI);
break;
case OFPACT_STRIP_VLAN:
- ctx->flow.vlan_tci = htons(0);
+ ctx->xin->flow.vlan_tci = htons(0);
break;
case OFPACT_PUSH_VLAN:
/* XXX 802.1AD(QinQ) */
- ctx->flow.vlan_tci = htons(VLAN_CFI);
+ ctx->xin->flow.vlan_tci = htons(VLAN_CFI);
break;
case OFPACT_SET_ETH_SRC:
- memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
+ memcpy(ctx->xin->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
ETH_ADDR_LEN);
break;
case OFPACT_SET_ETH_DST:
- memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
+ memcpy(ctx->xin->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
ETH_ADDR_LEN);
break;
case OFPACT_SET_IPV4_SRC:
- if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
- ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
}
break;
case OFPACT_SET_IPV4_DST:
- if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
- ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
}
break;
case OFPACT_SET_IPV4_DSCP:
/* OpenFlow 1.0 only supports IPv4. */
- if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
- ctx->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
+ ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
}
break;
case OFPACT_SET_L4_SRC_PORT:
- if (is_ip_any(&ctx->flow)) {
- ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
+ if (is_ip_any(&ctx->xin->flow)) {
+ ctx->xin->flow.tp_src =
+ htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
}
break;
case OFPACT_SET_L4_DST_PORT:
- if (is_ip_any(&ctx->flow)) {
- ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
+ if (is_ip_any(&ctx->xin->flow)) {
+ ctx->xin->flow.tp_dst =
+ htons(ofpact_get_SET_L4_DST_PORT(a)->port);
}
break;
@@ -6707,7 +6706,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_SET_TUNNEL:
- ctx->flow.tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
+ ctx->xin->flow.tunnel.tun_id =
+ htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
break;
case OFPACT_SET_QUEUE:
@@ -6715,24 +6715,24 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_POP_QUEUE:
- ctx->flow.skb_priority = ctx->orig_skb_priority;
+ ctx->xin->flow.skb_priority = ctx->orig_skb_priority;
break;
case OFPACT_REG_MOVE:
- nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
+ nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow);
break;
case OFPACT_REG_LOAD:
- nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
+ nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->xin->flow);
break;
case OFPACT_STACK_PUSH:
- nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->flow,
+ nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow,
&ctx->stack);
break;
case OFPACT_STACK_POP:
- nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->flow,
+ nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->xin->flow,
&ctx->stack);
break;
@@ -6745,7 +6745,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_SET_MPLS_TTL:
- if (execute_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl)) {
+ if (execute_set_mpls_ttl_action(ctx,
+ ofpact_get_SET_MPLS_TTL(a)->ttl)) {
goto out;
}
break;
@@ -6767,7 +6768,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_MULTIPATH:
- multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
+ multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow);
break;
case OFPACT_BUNDLE:
@@ -6780,8 +6781,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_LEARN:
- ctx->has_learn = true;
- if (ctx->may_learn) {
+ ctx->xout->has_learn = true;
+ if (ctx->xin->may_learn) {
xlate_learn_action(ctx, ofpact_get_LEARN(a));
}
break;
@@ -6791,7 +6792,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_FIN_TIMEOUT:
- ctx->has_fin_timeout = true;
+ ctx->xout->has_fin_timeout = true;
xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
break;
@@ -6805,8 +6806,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
case OFPACT_WRITE_METADATA:
metadata = ofpact_get_WRITE_METADATA(a);
- ctx->flow.metadata &= ~metadata->mask;
- ctx->flow.metadata |= metadata->metadata & metadata->mask;
+ ctx->xin->flow.metadata &= ~metadata->mask;
+ ctx->xin->flow.metadata |= metadata->metadata & metadata->mask;
break;
case OFPACT_GOTO_TABLE: {
@@ -6819,7 +6820,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
ctx->table_id = ogt->table_id;
/* Look up a flow from the new table. */
- rule = rule_dpif_lookup__(ctx->ofproto, &ctx->flow, ctx->table_id);
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, ctx->table_id);
tag_the_flow(ctx, rule);
@@ -6854,12 +6855,53 @@ out:
}
static void
-xlate_ctx_init(struct xlate_ctx *ctx, struct ofproto_dpif *ofproto,
- const struct flow *flow,
- const struct initial_vals *initial_vals,
- struct rule_dpif *rule, uint8_t tcp_flags,
- const struct ofpbuf *packet)
+xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
+ const struct flow *flow,
+ const struct initial_vals *initial_vals,
+ struct rule_dpif *rule, uint8_t tcp_flags,
+ const struct ofpbuf *packet)
{
+ xin->ofproto = ofproto;
+ xin->flow = *flow;
+ xin->initial_vals = initial_vals;
+ xin->packet = packet;
+ xin->may_learn = packet != NULL;
+ xin->rule = rule;
+ xin->ofpacts = NULL;
+ xin->ofpacts_len = 0;
+ xin->tcp_flags = tcp_flags;
+ xin->resubmit_hook = NULL;
+ xin->report_hook = NULL;
+ xin->resubmit_stats = NULL;
+}
+
+static void
+xlate_out_uninit(struct xlate_out *xout)
+{
+ if (xout) {
+ ofpbuf_uninit(&xout->odp_actions);
+ }
+}
+
+/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
+ * into datapath actions in 'odp_actions', using 'ctx'. */
+static void
+xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+{
+ /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
+ * that in the future we always keep a copy of the original flow for
+ * tracing purposes. */
+ static bool hit_resubmit_limit;
+
+ enum slow_path_reason special;
+ const struct ofpact *ofpacts;
+ struct ofport_dpif *in_port;
+ struct flow orig_flow;
+ struct xlate_ctx ctx;
+ size_t ofpacts_len;
+
+ COVERAGE_INC(ofproto_dpif_xlate);
+
/* Flow initialization rules:
* - 'base_flow' must match the kernel's view of the packet at the
* time that action processing starts. 'flow' represents any
@@ -6881,73 +6923,61 @@ xlate_ctx_init(struct xlate_ctx *ctx, struct ofproto_dpif *ofproto,
* kernel does. If we wish to maintain the original values an action
* needs to be generated. */
- ctx->ofproto = ofproto;
- ctx->flow = *flow;
- ctx->base_flow = ctx->flow;
- memset(&ctx->base_flow.tunnel, 0, sizeof ctx->base_flow.tunnel);
- ctx->orig_tunnel_ip_dst = flow->tunnel.ip_dst;
- ctx->rule = rule;
- ctx->packet = packet;
- ctx->may_learn = packet != NULL;
- ctx->tcp_flags = tcp_flags;
- ctx->resubmit_hook = NULL;
- ctx->report_hook = NULL;
- ctx->resubmit_stats = NULL;
-
- if (initial_vals) {
- ctx->base_flow.vlan_tci = initial_vals->vlan_tci;
+ ctx.xin = xin;
+ ctx.xout = xout;
+
+ ctx.ofproto = xin->ofproto;
+ ctx.rule = xin->rule;
+
+ ctx.base_flow = ctx.xin->flow;
+ if (xin->initial_vals) {
+ ctx.base_flow.vlan_tci = xin->initial_vals->vlan_tci;
+ }
+ memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
+ ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst;
+
+ ctx.xout->tags = 0;
+ ctx.xout->slow = 0;
+ ctx.xout->has_learn = false;
+ ctx.xout->has_normal = false;
+ ctx.xout->has_fin_timeout = false;
+ ctx.xout->nf_output_iface = NF_OUT_DROP;
+ ctx.xout->mirrors = 0;
+
+ ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
+ sizeof ctx.xout->odp_actions_stub);
+ ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
+
+ ctx.recurse = 0;
+ ctx.max_resubmit_trigger = false;
+ ctx.orig_skb_priority = ctx.xin->flow.skb_priority;
+ ctx.table_id = 0;
+ ctx.exit = false;
+
+ if (xin->ofpacts) {
+ ofpacts = xin->ofpacts;
+ ofpacts_len = xin->ofpacts_len;
+ } else if (xin->rule) {
+ ofpacts = xin->rule->up.ofpacts;
+ ofpacts_len = xin->rule->up.ofpacts_len;
+ } else {
+ NOT_REACHED();
}
-}
-
-/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
- * into datapath actions in 'odp_actions', using 'ctx'. */
-static void
-xlate_actions(struct xlate_ctx *ctx,
- const struct ofpact *ofpacts, size_t ofpacts_len,
- struct ofpbuf *odp_actions)
-{
- /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
- * that in the future we always keep a copy of the original flow for
- * tracing purposes. */
- static bool hit_resubmit_limit;
- enum slow_path_reason special;
- struct ofport_dpif *in_port;
- struct flow orig_flow;
+ ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
- COVERAGE_INC(ofproto_dpif_xlate);
-
- ofpbuf_clear(odp_actions);
- ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);
-
- ctx->odp_actions = odp_actions;
- ctx->tags = 0;
- ctx->slow = 0;
- ctx->has_learn = false;
- ctx->has_normal = false;
- ctx->has_fin_timeout = false;
- ctx->nf_output_iface = NF_OUT_DROP;
- ctx->mirrors = 0;
- ctx->recurse = 0;
- ctx->max_resubmit_trigger = false;
- ctx->orig_skb_priority = ctx->flow.skb_priority;
- ctx->table_id = 0;
- ctx->exit = false;
-
- ofpbuf_use_stub(&ctx->stack, ctx->init_stack, sizeof ctx->init_stack);
-
- if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
+ if (ctx.ofproto->has_mirrors || hit_resubmit_limit) {
/* Do this conditionally because the copy is expensive enough that it
* shows up in profiles. */
- orig_flow = ctx->flow;
+ orig_flow = ctx.xin->flow;
}
- if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
- switch (ctx->ofproto->up.frag_handling) {
+ if (ctx.xin->flow.nw_frag & FLOW_NW_FRAG_ANY) {
+ switch (ctx.ofproto->up.frag_handling) {
case OFPC_FRAG_NORMAL:
/* We must pretend that transport ports are unavailable. */
- ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
- ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
+ ctx.xin->flow.tp_src = ctx.base_flow.tp_src = htons(0);
+ ctx.xin->flow.tp_dst = ctx.base_flow.tp_dst = htons(0);
break;
case OFPC_FRAG_DROP:
@@ -6965,33 +6995,34 @@ xlate_actions(struct xlate_ctx *ctx,
}
}
- in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
- special = process_special(ctx->ofproto, &ctx->flow, in_port, ctx->packet);
+ in_port = get_ofp_port(ctx.ofproto, ctx.xin->flow.in_port);
+ special = process_special(ctx.ofproto, &ctx.xin->flow, in_port,
+ ctx.xin->packet);
if (special) {
- ctx->slow |= special;
+ ctx.xout->slow |= special;
} else {
static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
struct initial_vals initial_vals;
size_t sample_actions_len;
uint32_t local_odp_port;
- initial_vals.vlan_tci = ctx->base_flow.vlan_tci;
+ initial_vals.vlan_tci = ctx.base_flow.vlan_tci;
- add_sflow_action(ctx);
- add_ipfix_action(ctx);
- sample_actions_len = ctx->odp_actions->size;
+ add_sflow_action(&ctx);
+ add_ipfix_action(&ctx);
+ sample_actions_len = ctx.xout->odp_actions.size;
- if (tunnel_ecn_ok(ctx) && (!in_port || may_receive(in_port, ctx))) {
- do_xlate_actions(ofpacts, ofpacts_len, ctx);
+ if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
+ do_xlate_actions(ofpacts, ofpacts_len, &ctx);
/* We've let OFPP_NORMAL and the learning action look at the
* packet, so drop it now if forwarding is disabled. */
if (in_port && !stp_forward_in_state(in_port->stp_state)) {
- ctx->odp_actions->size = sample_actions_len;
+ ctx.xout->odp_actions.size = sample_actions_len;
}
}
- if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
+ if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
if (!hit_resubmit_limit) {
/* We didn't record the original flow. Make sure we do from
* now on. */
@@ -6999,7 +7030,7 @@ xlate_actions(struct xlate_ctx *ctx,
} else if (!VLOG_DROP_ERR(&trace_rl)) {
struct ds ds = DS_EMPTY_INITIALIZER;
- ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
+ ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet,
&initial_vals, &ds);
VLOG_ERR("Trace triggered by excessive resubmit "
"recursion:\n%s", ds_cstr(&ds));
@@ -7007,42 +7038,38 @@ xlate_actions(struct xlate_ctx *ctx,
}
}
- local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL);
- if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
+ local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL);
+ if (!connmgr_may_set_up_flow(ctx.ofproto->up.connmgr, &ctx.xin->flow,
local_odp_port,
- ctx->odp_actions->data,
- ctx->odp_actions->size)) {
- compose_output_action(ctx, OFPP_LOCAL);
+ ctx.xout->odp_actions.data,
+ ctx.xout->odp_actions.size)) {
+ compose_output_action(&ctx, OFPP_LOCAL);
}
- if (ctx->ofproto->has_mirrors) {
- add_mirror_actions(ctx, &orig_flow);
+ if (ctx.ofproto->has_mirrors) {
+ add_mirror_actions(&ctx, &orig_flow);
}
- fix_sflow_action(ctx);
+ fix_sflow_action(&ctx);
}
- ofpbuf_uninit(&ctx->stack);
+ ofpbuf_uninit(&ctx.stack);
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
* into datapath actions, using 'ctx', and discards the datapath actions. */
static void
-xlate_actions_for_side_effects(struct xlate_ctx *ctx,
- const struct ofpact *ofpacts,
- size_t ofpacts_len)
+xlate_actions_for_side_effects(struct xlate_in *xin)
{
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
+ struct xlate_out xout;
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
- ofpbuf_uninit(&odp_actions);
+ xlate_actions(xin, &xout);
+ xlate_out_uninit(&xout);
}
static void
xlate_report(struct xlate_ctx *ctx, const char *s)
{
- if (ctx->report_hook) {
- ctx->report_hook(ctx, s);
+ if (ctx->xin->report_hook) {
+ ctx->xin->report_hook(ctx, s);
}
}
@@ -7171,26 +7198,26 @@ output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
if (!out_bundle->bond) {
port = ofbundle_get_a_port(out_bundle);
} else {
- port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
- vid, &ctx->tags);
+ port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
+ vid, &ctx->xout->tags);
if (!port) {
/* No slaves enabled, so drop packet. */
return;
}
}
- old_tci = ctx->flow.vlan_tci;
+ old_tci = ctx->xin->flow.vlan_tci;
tci = htons(vid);
if (tci || out_bundle->use_priority_tags) {
- tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
+ tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK);
if (tci) {
tci |= htons(VLAN_CFI);
}
}
- ctx->flow.vlan_tci = tci;
+ ctx->xin->flow.vlan_tci = tci;
compose_output_action(ctx, port->up.ofp_port);
- ctx->flow.vlan_tci = old_tci;
+ ctx->xin->flow.vlan_tci = old_tci;
}
static int
@@ -7239,7 +7266,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
size_t left;
in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
- ctx->packet != NULL, NULL);
+ ctx->xin->packet != NULL, NULL);
if (!in_bundle) {
return;
}
@@ -7247,7 +7274,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
/* Drop frames on bundles reserved for mirroring. */
if (in_bundle->mirror_out) {
- if (ctx->packet != NULL) {
+ if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
"%s, which is reserved exclusively for mirroring",
@@ -7258,15 +7285,15 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
/* Check VLAN. */
vid = vlan_tci_to_vid(orig_flow->vlan_tci);
- if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
+ if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
return;
}
vlan = input_vid_to_vlan(in_bundle, vid);
/* Look at the output ports to check for destination selections. */
- NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
- ctx->odp_actions->size) {
+ NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data,
+ ctx->xout->odp_actions.size) {
enum ovs_action_attr type = nl_attr_type(a);
struct ofport_dpif *ofport;
@@ -7285,7 +7312,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
}
/* Restore the original packet before adding the mirror actions. */
- ctx->flow = *orig_flow;
+ ctx->xin->flow = *orig_flow;
while (mirrors) {
struct ofmirror *m;
@@ -7298,7 +7325,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
}
mirrors &= ~m->dup_mirrors;
- ctx->mirrors |= m->dup_mirrors;
+ ctx->xout->mirrors |= m->dup_mirrors;
if (m->out) {
output_normal(ctx, m->out, vlan);
} else if (vlan != m->out_vlan
@@ -7460,7 +7487,7 @@ is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
uint16_t vlan)
{
struct ofproto_dpif *ofproto = ctx->ofproto;
- struct flow *flow = &ctx->flow;
+ struct flow *flow = &ctx->xin->flow;
struct ofbundle *in_bundle = in_port->bundle;
/* Drop frames for reserved multicast addresses
@@ -7474,7 +7501,7 @@ is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
struct mac_entry *mac;
switch (bond_check_admissibility(in_bundle->bond, in_port,
- flow->dl_dst, &ctx->tags)) {
+ flow->dl_dst, &ctx->xout->tags)) {
case BV_ACCEPT:
break;
@@ -7507,19 +7534,19 @@ xlate_normal(struct xlate_ctx *ctx)
uint16_t vlan;
uint16_t vid;
- ctx->has_normal = true;
+ ctx->xout->has_normal = true;
- in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
- ctx->packet != NULL, &in_port);
+ in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port,
+ ctx->xin->packet != NULL, &in_port);
if (!in_bundle) {
xlate_report(ctx, "no input bundle, dropping");
return;
}
/* Drop malformed frames. */
- if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
- !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
- if (ctx->packet != NULL) {
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_VLAN) &&
+ !(ctx->xin->flow.vlan_tci & htons(VLAN_CFI))) {
+ if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
"VLAN tag received on port %s",
@@ -7531,7 +7558,7 @@ xlate_normal(struct xlate_ctx *ctx)
/* Drop frames on bundles reserved for mirroring. */
if (in_bundle->mirror_out) {
- if (ctx->packet != NULL) {
+ if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
"%s, which is reserved exclusively for mirroring",
@@ -7542,8 +7569,8 @@ xlate_normal(struct xlate_ctx *ctx)
}
/* Check VLAN. */
- vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
- if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
+ vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci);
+ if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
return;
}
@@ -7555,13 +7582,13 @@ xlate_normal(struct xlate_ctx *ctx)
}
/* Learn source MAC. */
- if (ctx->may_learn) {
- update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
+ if (ctx->xin->may_learn) {
+ update_learning_table(ctx->ofproto, &ctx->xin->flow, vlan, in_bundle);
}
/* Determine output bundle. */
- mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
- &ctx->tags);
+ mac = mac_learning_lookup(ctx->ofproto->ml, ctx->xin->flow.dl_dst, vlan,
+ &ctx->xout->tags);
if (mac) {
if (mac->port.p != in_bundle) {
xlate_report(ctx, "forwarding to learned port");
@@ -7581,7 +7608,7 @@ xlate_normal(struct xlate_ctx *ctx)
output_normal(ctx, bundle, vlan);
}
}
- ctx->nf_output_iface = NF_OUT_FLOOD;
+ ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
}
@@ -7720,12 +7747,10 @@ packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
struct initial_vals initial_vals;
struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
-
+ struct xlate_out xout;
+ struct xlate_in xin;
struct ofpbuf key;
- struct xlate_ctx ctx;
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, flow,
@@ -7734,16 +7759,16 @@ packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
initial_vals.vlan_tci = flow->vlan_tci;
- xlate_ctx_init(&ctx, ofproto, flow, &initial_vals, NULL,
- packet_get_tcp_flags(packet, flow), packet);
- ctx.resubmit_stats = &stats;
+ xlate_in_init(&xin, ofproto, flow, &initial_vals, NULL, stats.tcp_flags,
+ packet);
+ xin.resubmit_stats = &stats;
+ xin.ofpacts_len = ofpacts_len;
+ xin.ofpacts = ofpacts;
- ofpbuf_use_stub(&odp_actions,
- odp_actions_stub, sizeof odp_actions_stub);
- xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
+ xlate_actions(&xin, &xout);
dpif_execute(ofproto->backer->dpif, key.data, key.size,
- odp_actions.data, odp_actions.size, packet);
- ofpbuf_uninit(&odp_actions);
+ xout.odp_actions.data, xout.odp_actions.size, packet);
+ xlate_out_uninit(&xout);
return 0;
}
@@ -7789,7 +7814,7 @@ send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
if (subfacet->path == SF_FAST_PATH) {
struct dpif_flow_stats stats;
- subfacet_install(subfacet, &facet->odp_actions, &stats);
+ subfacet_install(subfacet, &facet->xout.odp_actions, &stats);
subfacet_update_stats(subfacet, &stats);
}
}
@@ -7875,7 +7900,8 @@ ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
}
struct trace_ctx {
- struct xlate_ctx ctx;
+ struct xlate_out xout;
+ struct xlate_in xin;
struct flow flow;
struct ds *result;
};
@@ -7903,15 +7929,15 @@ trace_format_rule(struct ds *result, uint8_t table_id, int level,
static void
trace_format_flow(struct ds *result, int level, const char *title,
- struct trace_ctx *trace)
+ struct trace_ctx *trace)
{
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
- if (flow_equal(&trace->ctx.flow, &trace->flow)) {
+ if (flow_equal(&trace->xin.flow, &trace->flow)) {
ds_put_cstr(result, "unchanged");
} else {
- flow_format(result, &trace->ctx.flow);
- trace->flow = trace->ctx.flow;
+ flow_format(result, &trace->xin.flow);
+ trace->flow = trace->xin.flow;
}
ds_put_char(result, '\n');
}
@@ -7934,7 +7960,7 @@ static void
trace_format_odp(struct ds *result, int level, const char *title,
struct trace_ctx *trace)
{
- struct ofpbuf *odp_actions = trace->ctx.odp_actions;
+ struct ofpbuf *odp_actions = &trace->xout.odp_actions;
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
@@ -7945,7 +7971,7 @@ trace_format_odp(struct ds *result, int level, const char *title,
static void
trace_resubmit(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
- struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
+ struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
struct ds *result = trace->result;
ds_put_char(result, '\n');
@@ -7958,7 +7984,7 @@ trace_resubmit(struct xlate_ctx *ctx, struct rule_dpif *rule)
static void
trace_report(struct xlate_ctx *ctx, const char *s)
{
- struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
+ struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
struct ds *result = trace->result;
ds_put_char_multiple(result, '\t', ctx->recurse);
@@ -8118,25 +8144,24 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
trace.flow = *flow;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
- xlate_ctx_init(&trace.ctx, ofproto, flow, initial_vals, rule,
- tcp_flags, packet);
- trace.ctx.resubmit_hook = trace_resubmit;
- trace.ctx.report_hook = trace_report;
- xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
- &odp_actions);
+ xlate_in_init(&trace.xin, ofproto, flow, initial_vals, rule, tcp_flags,
+ packet);
+ trace.xin.resubmit_hook = trace_resubmit;
+ trace.xin.report_hook = trace_report;
+ xlate_actions(&trace.xin, &trace.xout);
ds_put_char(ds, '\n');
trace_format_flow(ds, 0, "Final flow", &trace);
ds_put_cstr(ds, "Datapath actions: ");
- format_odp_actions(ds, odp_actions.data, odp_actions.size);
- ofpbuf_uninit(&odp_actions);
+ format_odp_actions(ds, trace.xout.odp_actions.data,
+ trace.xout.odp_actions.size);
- if (trace.ctx.slow) {
+ if (trace.xout.slow) {
enum slow_path_reason slow;
ds_put_cstr(ds, "\nThis flow is handled by the userspace "
"slow path because it:");
- for (slow = trace.ctx.slow; slow; ) {
+ for (slow = trace.xout.slow; slow; ) {
enum slow_path_reason bit = rightmost_1bit(slow);
switch (bit) {
@@ -8166,6 +8191,8 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
"the special slow-path processing.");
}
}
+
+ xlate_out_uninit(&trace.xout);
}
}
@@ -8443,18 +8470,18 @@ ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
}
ds_put_cstr(&ds, ", actions:");
- if (facet->slow) {
+ if (facet->xout.slow) {
uint64_t slow_path_stub[128 / 8];
const struct nlattr *actions;
size_t actions_len;
- compose_slow_path(ofproto, &facet->flow, facet->slow,
+ compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
slow_path_stub, sizeof slow_path_stub,
&actions, &actions_len);
format_odp_actions(&ds, actions, actions_len);
} else {
- format_odp_actions(&ds, facet->odp_actions.data,
- facet->odp_actions.size);
+ format_odp_actions(&ds, facet->xout.odp_actions.data,
+ facet->xout.odp_actions.size);
}
ds_put_char(&ds, '\n');
}
--
1.7.9.5
More information about the dev
mailing list