[ovs-dev] [PATCH v21 5/8] Refactor port binding processing in physical.c

Ryan Moats rmoats at us.ibm.com
Sun Jul 3 15:35:30 UTC 2016


Extract the body of the SBREC_PORT_BINDING_FOR_EACH loop in
physical_run() into a helper function so that it can be reused
when doing incremental processing.

Side effects:
  - localvif_to_ofport and tunnels are now static at file scope.
  - The is_new parameter is added for use in a later patch set.

Signed-off-by: Ryan Moats <rmoats at us.ibm.com>
---
 ovn/controller/physical.c | 623 ++++++++++++++++++++++++----------------------
 1 file changed, 319 insertions(+), 304 deletions(-)
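
For reviewers: a minimal sketch of how the extracted helper is expected to
be driven per-row once incremental processing lands in a later patch.  The
wrapper name and the way 'binding' and 'is_new' are obtained from the IDL
change tracking are assumptions, not part of this series:

    /* Sketch only: reprocess a single changed Port_Binding row.  How the
     * row and the 'is_new' flag are obtained is left to the later patch. */
    static void
    process_one_port_binding(enum mf_field_id mff_ovn_geneve,
                             const struct simap *ct_zones,
                             struct hmap *local_datapaths,
                             struct hmap *patched_datapaths,
                             const struct sbrec_port_binding *binding,
                             bool is_new)
    {
        struct ofpbuf ofpacts;

        ofpbuf_init(&ofpacts, 0);
        consider_port_binding(mff_ovn_geneve, ct_zones, local_datapaths,
                              patched_datapaths, binding, &ofpacts, is_new);
        ofpbuf_uninit(&ofpacts);
    }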

diff --git a/ovn/controller/physical.c b/ovn/controller/physical.c
index 2b5ef0a..80bd3eb 100644
--- a/ovn/controller/physical.c
+++ b/ovn/controller/physical.c
@@ -54,6 +54,10 @@ physical_register_ovs_idl(struct ovsdb_idl *ovs_idl)
 /* UUID to identify OF flows not associated with ovsdb rows. */
 static struct uuid *hc_uuid = NULL;
 
+static struct simap localvif_to_ofport =
+    SIMAP_INITIALIZER(&localvif_to_ofport);
+static struct hmap tunnels = HMAP_INITIALIZER(&tunnels);
+
 /* Maps from a chassis to the OpenFlow port number of the tunnel that can be
  * used to reach that chassis. */
 struct chassis_tunnel {
@@ -64,11 +68,11 @@ struct chassis_tunnel {
 };
 
 static struct chassis_tunnel *
-chassis_tunnel_find(struct hmap *tunnels, const char *chassis_id)
+chassis_tunnel_find(const char *chassis_id)
 {
     struct chassis_tunnel *tun;
     HMAP_FOR_EACH_WITH_HASH (tun, hmap_node, hash_string(chassis_id, 0),
-                             tunnels) {
+                             &tunnels) {
         if (!strcmp(tun->chassis_id, chassis_id)) {
             return tun;
         }
@@ -149,14 +153,320 @@ get_localnet_port(struct hmap *local_datapaths, int64_t tunnel_key)
     return ld ? ld->localnet_port : NULL;
 }
 
+static void
+consider_port_binding(enum mf_field_id mff_ovn_geneve,
+                      const struct simap *ct_zones,
+                      struct hmap *local_datapaths,
+                      struct hmap *patched_datapaths,
+                      const struct sbrec_port_binding *binding,
+                      struct ofpbuf *ofpacts_p, bool is_new)
+{
+    /* Skip the port binding if the port is on a datapath that is neither
+     * local nor with any logical patch port connected, because local ports
+     * would never need to talk to those ports.
+     *
+     * Even with this approach there could still be unnecessary port
+     * bindings processed. A better approach would be a kind of "flood
+     * fill" algorithm:
+     *
+     *   1. Initialize set S to the logical datapaths that have a port
+     *      located on the hypervisor.
+     *
+     *   2. For each patch port P in a logical datapath in S, add the
+     *      logical datapath of the remote end of P to S.  Iterate
+     *      until S reaches a fixed point.
+     *
+     * This can be implemented in northd, which can generate the sets and
+     * save it on each port-binding record in SB, and ovn-controller can
+     * use the information directly. However, there can be update storms
+     * when a pair of patch ports are added/removed to connect/disconnect
+     * large lrouters and lswitches. This need to be studied further.
+     */
+    uint32_t dp_key = binding->datapath->tunnel_key;
+    uint32_t port_key = binding->tunnel_key;
+    if (!get_local_datapath(local_datapaths, dp_key)
+        && !get_patched_datapath(patched_datapaths, dp_key)) {
+        return;
+    }
+
+    /* Find the OpenFlow port for the logical port, as 'ofport'.  This is
+     * one of:
+     *
+     *     - If the port is a VIF on the chassis we're managing, the
+     *       OpenFlow port for the VIF.  'tun' will be NULL.
+     *
+     *       The same logic handles logical patch ports, as well as
+     *       localnet and L2 gateway patch ports.
+     *
+     *       For a container nested inside a VM and accessible via a VLAN,
+     *       'tag' is the VLAN ID; otherwise 'tag' is 0.
+     *
+     *       For a localnet or L2 gateway patch port, if a VLAN ID was
+     *       configured, 'tag' is set to that VLAN ID; otherwise 'tag' is 0.
+     *
+     *     - If the port is on a remote chassis, the OpenFlow port for a
+     *       tunnel to the VIF's remote chassis.  'tun' identifies that
+     *       tunnel.
+     */
+
+    int tag = 0;
+    ofp_port_t ofport;
+    bool is_remote = false;
+    if (binding->parent_port && *binding->parent_port) {
+        if (!binding->tag) {
+            return;
+        }
+        ofport = u16_to_ofp(simap_get(&localvif_to_ofport,
+                                      binding->parent_port));
+        if (ofport) {
+            tag = *binding->tag;
+        }
+    } else {
+        ofport = u16_to_ofp(simap_get(&localvif_to_ofport,
+                                      binding->logical_port));
+        if ((!strcmp(binding->type, "localnet")
+            || !strcmp(binding->type, "l2gateway"))
+            && ofport && binding->tag) {
+            tag = *binding->tag;
+        }
+    }
+
+    const struct chassis_tunnel *tun = NULL;
+    const struct sbrec_port_binding *localnet_port =
+        get_localnet_port(local_datapaths, dp_key);
+    if (!ofport) {
+        /* It is remote port, may be reached by tunnel or localnet port */
+        is_remote = true;
+        if (!binding->chassis) {
+            return;
+        }
+        if (localnet_port) {
+            ofport = u16_to_ofp(simap_get(&localvif_to_ofport,
+                                          localnet_port->logical_port));
+            if (!ofport) {
+                return;
+            }
+        } else {
+            tun = chassis_tunnel_find(binding->chassis->name);
+            if (!tun) {
+                return;
+            }
+            ofport = tun->ofport;
+        }
+    }
+
+    struct match match;
+    if (!is_remote) {
+        int zone_id = simap_get(ct_zones, binding->logical_port);
+        /* Packets that arrive from a vif can belong to a VM or
+         * to a container located inside that VM. Packets that
+         * arrive from containers have a tag (vlan) associated with them.
+         */
+
+        /* Table 0, Priority 150 and 100.
+         * ==============================
+         *
+         * Priority 150 is for tagged traffic. This may be containers in a
+         * VM or a VLAN on a local network. For such traffic, match on the
+         * tags and then strip the tag.
+         *
+         * Priority 100 is for traffic belonging to VMs or untagged locally
+         * connected networks.
+         *
+         * For both types of traffic: set MFF_LOG_INPORT to the logical
+         * input port, MFF_LOG_DATAPATH to the logical datapath, and
+         * resubmit into the logical ingress pipeline starting at table
+         * 16. */
+        ofpbuf_clear(ofpacts_p);
+        match_init_catchall(&match);
+        match_set_in_port(&match, ofport);
+
+        /* Match a VLAN tag and strip it, including stripping priority tags
+         * (e.g. VLAN ID 0).  In the latter case we'll add a second flow
+         * for frames that lack any 802.1Q header later. */
+        if (tag || !strcmp(binding->type, "localnet")
+            || !strcmp(binding->type, "l2gateway")) {
+            match_set_dl_vlan(&match, htons(tag));
+            ofpact_put_STRIP_VLAN(ofpacts_p);
+        }
+
+        /* Remember the size with just strip vlan added so far,
+         * as we're going to remove this with ofpbuf_pull() later. */
+        uint32_t ofpacts_orig_size = ofpacts_p->size;
+
+        if (zone_id) {
+            put_load(zone_id, MFF_LOG_CT_ZONE, 0, 32, ofpacts_p);
+        }
+
+        int zone_id_dnat, zone_id_snat;
+        char *dnat = alloc_nat_zone_key(binding, "dnat");
+        char *snat = alloc_nat_zone_key(binding, "snat");
+        zone_id_dnat = simap_get(ct_zones, dnat);
+        if (zone_id_dnat) {
+            put_load(zone_id_dnat, MFF_LOG_DNAT_ZONE, 0, 32, ofpacts_p);
+        }
+        free(dnat);
+
+        zone_id_snat = simap_get(ct_zones, snat);
+        if (zone_id_snat) {
+            put_load(zone_id_snat, MFF_LOG_SNAT_ZONE, 0, 32, ofpacts_p);
+        }
+        free(snat);
+
+        /* Set MFF_LOG_DATAPATH and MFF_LOG_INPORT. */
+        put_load(dp_key, MFF_LOG_DATAPATH, 0, 64, ofpacts_p);
+        put_load(port_key, MFF_LOG_INPORT, 0, 32, ofpacts_p);
+
+        /* Resubmit to first logical ingress pipeline table. */
+        put_resubmit(OFTABLE_LOG_INGRESS_PIPELINE, ofpacts_p);
+        ofctrl_add_flow(OFTABLE_PHY_TO_LOG,
+                        tag ? 150 : 100, &match, ofpacts_p,
+                        &binding->header_.uuid, is_new);
+
+        if (!tag && (!strcmp(binding->type, "localnet")
+                     || !strcmp(binding->type, "l2gateway"))) {
+
+            /* Add a second flow for frames that lack any 802.1Q
+             * header.  For these, drop the OFPACT_STRIP_VLAN
+             * action. */
+            ofpbuf_pull(ofpacts_p, ofpacts_orig_size);
+            match_set_dl_tci_masked(&match, 0, htons(VLAN_CFI));
+            ofctrl_add_flow(0, 100, &match, ofpacts_p,
+                        &binding->header_.uuid, is_new);
+        }
+
+        /* Table 33, priority 100.
+         * =======================
+         *
+         * Implements output to local hypervisor.  Each flow matches a
+         * logical output port on the local hypervisor, and resubmits to
+         * table 34.
+         */
+
+        match_init_catchall(&match);
+        ofpbuf_clear(ofpacts_p);
+
+        /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
+        match_set_metadata(&match, htonll(dp_key));
+        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+
+        if (zone_id) {
+            put_load(zone_id, MFF_LOG_CT_ZONE, 0, 32, ofpacts_p);
+        }
+        if (zone_id_dnat) {
+            put_load(zone_id_dnat, MFF_LOG_DNAT_ZONE, 0, 32, ofpacts_p);
+        }
+        if (zone_id_snat) {
+            put_load(zone_id_snat, MFF_LOG_SNAT_ZONE, 0, 32, ofpacts_p);
+        }
+
+        /* Resubmit to table 34. */
+        put_resubmit(OFTABLE_DROP_LOOPBACK, ofpacts_p);
+        ofctrl_add_flow(OFTABLE_LOCAL_OUTPUT, 100, &match, ofpacts_p,
+                        &binding->header_.uuid, is_new);
+
+        /* Table 34, Priority 100.
+         * =======================
+         *
+         * Drop packets whose logical inport and outport are the same. */
+        match_init_catchall(&match);
+        ofpbuf_clear(ofpacts_p);
+        match_set_metadata(&match, htonll(dp_key));
+        match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0, port_key);
+        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+        ofctrl_add_flow(OFTABLE_DROP_LOOPBACK, 100, &match, ofpacts_p,
+                        &binding->header_.uuid, is_new);
+
+        /* Table 64, Priority 100.
+         * =======================
+         *
+         * Deliver the packet to the local vif. */
+        match_init_catchall(&match);
+        ofpbuf_clear(ofpacts_p);
+        match_set_metadata(&match, htonll(dp_key));
+        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+        if (tag) {
+            /* For containers sitting behind a local vif, tag the packets
+             * before delivering them. */
+            struct ofpact_vlan_vid *vlan_vid;
+            vlan_vid = ofpact_put_SET_VLAN_VID(ofpacts_p);
+            vlan_vid->vlan_vid = tag;
+            vlan_vid->push_vlan_if_needed = true;
+
+            /* A packet might need to hair-pin back into its ingress
+             * OpenFlow port (to a different logical port, which we already
+             * checked back in table 34), so set the in_port to zero. */
+            put_stack(MFF_IN_PORT, ofpact_put_STACK_PUSH(ofpacts_p));
+            put_load(0, MFF_IN_PORT, 0, 16, ofpacts_p);
+        }
+        ofpact_put_OUTPUT(ofpacts_p)->port = ofport;
+        if (tag) {
+            /* Revert the tag added to the packets headed to containers
+             * in the previous step. If we don't do this, the packets
+             * that are to be broadcasted to a VM in the same logical
+             * switch will also contain the tag. Also revert the zero'd
+             * in_port. */
+            ofpact_put_STRIP_VLAN(ofpacts_p);
+            put_stack(MFF_IN_PORT, ofpact_put_STACK_POP(ofpacts_p));
+        }
+        ofctrl_add_flow(OFTABLE_LOG_TO_PHY, 100, &match, ofpacts_p,
+                        &binding->header_.uuid, is_new);
+    } else if (!tun) {
+        /* Remote port connected by localnet port */
+        /* Table 33, priority 100.
+         * =======================
+         *
+         * Implements switching to localnet port. Each flow matches a
+         * logical output port on remote hypervisor, switch the output port
+         * to connected localnet port and resubmits to same table.
+         */
+
+        match_init_catchall(&match);
+        ofpbuf_clear(ofpacts_p);
+
+        /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
+        match_set_metadata(&match, htonll(dp_key));
+        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+
+        put_load(localnet_port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, ofpacts_p);
+
+        /* Resubmit to table 33. */
+        put_resubmit(OFTABLE_LOCAL_OUTPUT, ofpacts_p);
+        ofctrl_add_flow(OFTABLE_LOCAL_OUTPUT, 100, &match, ofpacts_p,
+                        &binding->header_.uuid, is_new);
+    } else {
+        /* Remote port connected by tunnel */
+        /* Table 32, priority 100.
+         * =======================
+         *
+         * Implements output to remote hypervisors.  Each flow matches an
+         * output port that includes a logical port on a remote hypervisor,
+         * and tunnels the packet to that hypervisor.
+         */
+
+        match_init_catchall(&match);
+        ofpbuf_clear(ofpacts_p);
+
+        /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
+        match_set_metadata(&match, htonll(dp_key));
+        match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+
+        put_encapsulation(mff_ovn_geneve, tun, binding->datapath,
+                          port_key, ofpacts_p);
+
+        /* Output to tunnel. */
+        ofpact_put_OUTPUT(ofpacts_p)->port = ofport;
+        ofctrl_add_flow(OFTABLE_REMOTE_OUTPUT, 100, &match, ofpacts_p,
+                        &binding->header_.uuid, is_new);
+    }
+}
+
 void
 physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
              const struct ovsrec_bridge *br_int, const char *this_chassis_id,
              const struct simap *ct_zones,
              struct hmap *local_datapaths, struct hmap *patched_datapaths)
 {
-    struct simap localvif_to_ofport = SIMAP_INITIALIZER(&localvif_to_ofport);
-    struct hmap tunnels = HMAP_INITIALIZER(&tunnels);
     if (!hc_uuid) {
         hc_uuid = xmalloc(sizeof(struct uuid));
         uuid_generate(hc_uuid);
@@ -247,303 +557,8 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
      * 64 for logical-to-physical translation. */
     const struct sbrec_port_binding *binding;
     SBREC_PORT_BINDING_FOR_EACH (binding, ctx->ovnsb_idl) {
-        /* Skip the port binding if the port is on a datapath that is neither
-         * local nor with any logical patch port connected, because local ports
-         * would never need to talk to those ports.
-         *
-         * Even with this approach there could still be unnecessary port
-         * bindings processed. A better approach would be a kind of "flood
-         * fill" algorithm:
-         *
-         *   1. Initialize set S to the logical datapaths that have a port
-         *      located on the hypervisor.
-         *
-         *   2. For each patch port P in a logical datapath in S, add the
-         *      logical datapath of the remote end of P to S.  Iterate
-         *      until S reaches a fixed point.
-         *
-         * This can be implemented in northd, which can generate the sets and
-         * save it on each port-binding record in SB, and ovn-controller can
-         * use the information directly. However, there can be update storms
-         * when a pair of patch ports are added/removed to connect/disconnect
-         * large lrouters and lswitches. This need to be studied further.
-         */
-        uint32_t dp_key = binding->datapath->tunnel_key;
-        uint32_t port_key = binding->tunnel_key;
-        if (!get_local_datapath(local_datapaths, dp_key)
-            && !get_patched_datapath(patched_datapaths, dp_key)) {
-            continue;
-        }
-
-        /* Find the OpenFlow port for the logical port, as 'ofport'.  This is
-         * one of:
-         *
-         *     - If the port is a VIF on the chassis we're managing, the
-         *       OpenFlow port for the VIF.  'tun' will be NULL.
-         *
-         *       The same logic handles logical patch ports, as well as
-         *       localnet and L2 gateway patch ports.
-         *
-         *       For a container nested inside a VM and accessible via a VLAN,
-         *       'tag' is the VLAN ID; otherwise 'tag' is 0.
-         *
-         *       For a localnet or L2 gateway patch port, if a VLAN ID was
-         *       configured, 'tag' is set to that VLAN ID; otherwise 'tag' is 0.
-         *
-         *     - If the port is on a remote chassis, the OpenFlow port for a
-         *       tunnel to the VIF's remote chassis.  'tun' identifies that
-         *       tunnel.
-         */
-
-        int tag = 0;
-        ofp_port_t ofport;
-        bool is_remote = false;
-        if (binding->parent_port && *binding->parent_port) {
-            if (!binding->tag) {
-                continue;
-            }
-            ofport = u16_to_ofp(simap_get(&localvif_to_ofport,
-                                          binding->parent_port));
-            if (ofport) {
-                tag = *binding->tag;
-            }
-        } else {
-            ofport = u16_to_ofp(simap_get(&localvif_to_ofport,
-                                          binding->logical_port));
-            if ((!strcmp(binding->type, "localnet")
-                 || !strcmp(binding->type, "l2gateway"))
-                && ofport && binding->tag) {
-                tag = *binding->tag;
-            }
-        }
-
-        const struct chassis_tunnel *tun = NULL;
-        const struct sbrec_port_binding *localnet_port =
-            get_localnet_port(local_datapaths, dp_key);
-        if (!ofport) {
-            /* It is remote port, may be reached by tunnel or localnet port */
-            is_remote = true;
-            if (!binding->chassis) {
-                continue;
-            }
-            if (localnet_port) {
-                ofport = u16_to_ofp(simap_get(&localvif_to_ofport,
-                                              localnet_port->logical_port));
-                if (!ofport) {
-                    continue;
-                }
-            } else {
-                tun = chassis_tunnel_find(&tunnels, binding->chassis->name);
-                if (!tun) {
-                    continue;
-                }
-                ofport = tun->ofport;
-            }
-        }
-
-        struct match match;
-        if (!is_remote) {
-            int zone_id = simap_get(ct_zones, binding->logical_port);
-            /* Packets that arrive from a vif can belong to a VM or
-             * to a container located inside that VM. Packets that
-             * arrive from containers have a tag (vlan) associated with them.
-             */
-
-            /* Table 0, Priority 150 and 100.
-             * ==============================
-             *
-             * Priority 150 is for tagged traffic. This may be containers in a
-             * VM or a VLAN on a local network. For such traffic, match on the
-             * tags and then strip the tag.
-             *
-             * Priority 100 is for traffic belonging to VMs or untagged locally
-             * connected networks.
-             *
-             * For both types of traffic: set MFF_LOG_INPORT to the logical
-             * input port, MFF_LOG_DATAPATH to the logical datapath, and
-             * resubmit into the logical ingress pipeline starting at table
-             * 16. */
-            ofpbuf_clear(&ofpacts);
-            match_init_catchall(&match);
-            match_set_in_port(&match, ofport);
-
-            /* Match a VLAN tag and strip it, including stripping priority tags
-             * (e.g. VLAN ID 0).  In the latter case we'll add a second flow
-             * for frames that lack any 802.1Q header later. */
-            if (tag || !strcmp(binding->type, "localnet")
-                || !strcmp(binding->type, "l2gateway")) {
-                match_set_dl_vlan(&match, htons(tag));
-                ofpact_put_STRIP_VLAN(&ofpacts);
-            }
-
-            /* Remember the size with just strip vlan added so far,
-             * as we're going to remove this with ofpbuf_pull() later. */
-            uint32_t ofpacts_orig_size = ofpacts.size;
-
-            if (zone_id) {
-                put_load(zone_id, MFF_LOG_CT_ZONE, 0, 32, &ofpacts);
-            }
-
-            int zone_id_dnat, zone_id_snat;
-            char *dnat = alloc_nat_zone_key(binding, "dnat");
-            char *snat = alloc_nat_zone_key(binding, "snat");
-            zone_id_dnat = simap_get(ct_zones, dnat);
-            if (zone_id_dnat) {
-                put_load(zone_id_dnat, MFF_LOG_DNAT_ZONE, 0, 32, &ofpacts);
-            }
-            free(dnat);
-
-            zone_id_snat = simap_get(ct_zones, snat);
-            if (zone_id_snat) {
-                put_load(zone_id_snat, MFF_LOG_SNAT_ZONE, 0, 32, &ofpacts);
-            }
-            free(snat);
-
-            /* Set MFF_LOG_DATAPATH and MFF_LOG_INPORT. */
-            put_load(dp_key, MFF_LOG_DATAPATH, 0, 64, &ofpacts);
-            put_load(port_key, MFF_LOG_INPORT, 0, 32, &ofpacts);
-
-            /* Resubmit to first logical ingress pipeline table. */
-            put_resubmit(OFTABLE_LOG_INGRESS_PIPELINE, &ofpacts);
-            ofctrl_add_flow(OFTABLE_PHY_TO_LOG,
-                            tag ? 150 : 100, &match, &ofpacts,
-                            &binding->header_.uuid, true);
-
-            if (!tag && (!strcmp(binding->type, "localnet")
-                         || !strcmp(binding->type, "l2gateway"))) {
-                /* Add a second flow for frames that lack any 802.1Q
-                 * header.  For these, drop the OFPACT_STRIP_VLAN
-                 * action. */
-                ofpbuf_pull(&ofpacts, ofpacts_orig_size);
-                match_set_dl_tci_masked(&match, 0, htons(VLAN_CFI));
-                ofctrl_add_flow(0, 100, &match, &ofpacts,
-                            &binding->header_.uuid, true);
-            }
-
-            /* Table 33, priority 100.
-             * =======================
-             *
-             * Implements output to local hypervisor.  Each flow matches a
-             * logical output port on the local hypervisor, and resubmits to
-             * table 34.
-             */
-
-            match_init_catchall(&match);
-            ofpbuf_clear(&ofpacts);
-
-            /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-            match_set_metadata(&match, htonll(dp_key));
-            match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
-
-            if (zone_id) {
-                put_load(zone_id, MFF_LOG_CT_ZONE, 0, 32, &ofpacts);
-            }
-            if (zone_id_dnat) {
-                put_load(zone_id_dnat, MFF_LOG_DNAT_ZONE, 0, 32, &ofpacts);
-            }
-            if (zone_id_snat) {
-                put_load(zone_id_snat, MFF_LOG_SNAT_ZONE, 0, 32, &ofpacts);
-            }
-
-            /* Resubmit to table 34. */
-            put_resubmit(OFTABLE_DROP_LOOPBACK, &ofpacts);
-            ofctrl_add_flow(OFTABLE_LOCAL_OUTPUT, 100, &match, &ofpacts,
-                            &binding->header_.uuid, true);
-
-            /* Table 34, Priority 100.
-             * =======================
-             *
-             * Drop packets whose logical inport and outport are the same. */
-            match_init_catchall(&match);
-            ofpbuf_clear(&ofpacts);
-            match_set_metadata(&match, htonll(dp_key));
-            match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0, port_key);
-            match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
-            ofctrl_add_flow(OFTABLE_DROP_LOOPBACK, 100, &match, &ofpacts,
-                            &binding->header_.uuid, true);
-
-            /* Table 64, Priority 100.
-             * =======================
-             *
-             * Deliver the packet to the local vif. */
-            match_init_catchall(&match);
-            ofpbuf_clear(&ofpacts);
-            match_set_metadata(&match, htonll(dp_key));
-            match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
-            if (tag) {
-                /* For containers sitting behind a local vif, tag the packets
-                 * before delivering them. */
-                struct ofpact_vlan_vid *vlan_vid;
-                vlan_vid = ofpact_put_SET_VLAN_VID(&ofpacts);
-                vlan_vid->vlan_vid = tag;
-                vlan_vid->push_vlan_if_needed = true;
-
-                /* A packet might need to hair-pin back into its ingress
-                 * OpenFlow port (to a different logical port, which we already
-                 * checked back in table 34), so set the in_port to zero. */
-                put_stack(MFF_IN_PORT, ofpact_put_STACK_PUSH(&ofpacts));
-                put_load(0, MFF_IN_PORT, 0, 16, &ofpacts);
-            }
-            ofpact_put_OUTPUT(&ofpacts)->port = ofport;
-            if (tag) {
-                /* Revert the tag added to the packets headed to containers
-                 * in the previous step. If we don't do this, the packets
-                 * that are to be broadcasted to a VM in the same logical
-                 * switch will also contain the tag. Also revert the zero'd
-                 * in_port. */
-                ofpact_put_STRIP_VLAN(&ofpacts);
-                put_stack(MFF_IN_PORT, ofpact_put_STACK_POP(&ofpacts));
-            }
-            ofctrl_add_flow(OFTABLE_LOG_TO_PHY, 100, &match, &ofpacts,
-                            &binding->header_.uuid, true);
-        } else if (!tun) {
-            /* Remote port connected by localnet port */
-            /* Table 33, priority 100.
-             * =======================
-             *
-             * Implements switching to localnet port. Each flow matches a
-             * logical output port on remote hypervisor, switch the output port
-             * to connected localnet port and resubmits to same table.
-             */
-
-            match_init_catchall(&match);
-            ofpbuf_clear(&ofpacts);
-
-            /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-            match_set_metadata(&match, htonll(dp_key));
-            match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
-
-            put_load(localnet_port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
-
-            /* Resubmit to table 33. */
-            put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
-            ofctrl_add_flow(OFTABLE_LOCAL_OUTPUT, 100, &match, &ofpacts,
-                            &binding->header_.uuid, true);
-        } else {
-            /* Remote port connected by tunnel */
-            /* Table 32, priority 100.
-             * =======================
-             *
-             * Implements output to remote hypervisors.  Each flow matches an
-             * output port that includes a logical port on a remote hypervisor,
-             * and tunnels the packet to that hypervisor.
-             */
-
-            match_init_catchall(&match);
-            ofpbuf_clear(&ofpacts);
-
-            /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-            match_set_metadata(&match, htonll(dp_key));
-            match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
-
-            put_encapsulation(mff_ovn_geneve, tun, binding->datapath,
-                              port_key, &ofpacts);
-
-            /* Output to tunnel. */
-            ofpact_put_OUTPUT(&ofpacts)->port = ofport;
-            ofctrl_add_flow(OFTABLE_REMOTE_OUTPUT, 100, &match, &ofpacts,
-                            &binding->header_.uuid, true);
-        }
+        consider_port_binding(mff_ovn_geneve, ct_zones, local_datapaths,
+                              patched_datapaths, binding, &ofpacts, true);
     }
 
     /* Handle output to multicast groups, in tables 32 and 33. */
@@ -638,7 +653,7 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
             const struct chassis_tunnel *prev = NULL;
             SSET_FOR_EACH (chassis, &remote_chassis) {
                 const struct chassis_tunnel *tun
-                    = chassis_tunnel_find(&tunnels, chassis);
+                    = chassis_tunnel_find(chassis);
                 if (!tun) {
                     continue;
                 }
@@ -767,9 +782,9 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
                     hc_uuid, true);
 
     ofpbuf_uninit(&ofpacts);
-    simap_destroy(&localvif_to_ofport);
+    simap_clear(&localvif_to_ofport);
     HMAP_FOR_EACH_POP (tun, hmap_node, &tunnels) {
         free(tun);
     }
-    hmap_destroy(&tunnels);
+    hmap_clear(&tunnels);
 }
-- 
2.7.4 (Apple Git-66)