[ovs-dev] [PATCH v2 4/4 ovn] OVN: Vlan backed DVR N-S, redirect packet via localnet port

Ankur Sharma ankur.sharma at nutanix.com
Thu Aug 1 21:10:02 UTC 2019


Background:
With c0974331b7a19a87ab8f1f2cec8fbe366af92fa2, we added
support for the E-W workflow for vlan backed DVRs.

This series enables the N-S workflow for vlan backed DVRs.

The key difference between the E-W and N-S traffic flows is that
the N-S flow requires a gateway chassis. A gateway chassis
is responsible for the following:
a. Doing Network Address Translation (NAT).
b. Being the entry and exit point for North->South
   and South->North traffic respectively.

By default, OVN always uses overlay encapsulation to redirect
the packet to the gateway chassis. This series enables
the redirection to the gateway chassis in the absence of encapsulation.
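
For reference, the test added by this series enables this mode on a
gateway chassis attached router port as follows (the port and chassis
names are taken from the test below):

  # Pin the distributed router port to a gateway chassis and request
  # VLAN based (non-encapsulated) redirection for it.
  ovn-nbctl lrp-set-gateway-chassis router-to-underlay hv3
  ovn-nbctl lrp-set-redirect-type router-to-underlay vlan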

This patch:
Achieves the vlan backed redirection by doing the following:

Sender Side:
------------
a. For a remote port of type "chassisredirect" that has
   redirect-type "vlan", do not add the tunnel
   based redirection flow in table=32.

b. In table=33, add a flow with priority=100 that does the following:
   i. Change the metadata to that of the gateway logical switch
      (i.e. the logical switch attached to the gateway logical router port).
  ii. Change REG15 to point to the localnet port of the gateway logical switch.
 iii. Resubmit the packet to table=65.

c. In table=65, the packet hits the existing priority=150 flow, which
   sends it to the physical bridge while attaching the vlan header and
   rewriting the source mac to the chassis mac.

Receiver Side:
--------------
a. No changes needed.
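
As a rough sketch, the table=33 flow added by this patch would look
like the following (the datapath and port tunnel keys here are made up
for illustration; the mac is the router-to-underlay mac used in the
test below). It matches on the router datapath and the chassisredirect
outport, rewrites eth.src to the distributed port's mac, switches the
metadata to the gateway logical switch, points REG15 at that switch's
localnet port, and resubmits to table=65:

  table=33, priority=100, metadata=0x3,reg15=0x5
      actions=set_field:00:00:01:01:02:07->eth_src,
              set_field:0x4->metadata,set_field:0x2->reg15,resubmit(,65)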

OVERALL PACKET FLOW:
Sender Side:
-----------
a. A logical flow in the lr_in_gw_redirect stage ensures that the
   packet's outport is the chassisredirect port.
   For example:
   table=12(lr_in_gw_redirect  ), priority=50   , match=(outport == "router-to-underlay"), action=(outport = "cr-router-to-underlay"; next;)

b. After the ingress pipeline, the packet enters table=32, followed by table=33.

c. Table=33 sends the packet to table=65.

d. Table=65 sends the packet to the uplink bridge
   with the destination mac of the chassisredirect port and the vlan
   id of the peer logical switch.

Receiver Side:
-------------
a. The packet is received by the pipeline of the peer logical switch.
b. Since the destination mac is that of the router port, the packet
   enters the logical router pipeline.
c. The packet then goes through the regular logical router pipeline
   (both ingress and egress).

One caveat with the approach is that the ttl is decremented twice,
since the packet goes through the logical router ingress pipeline
twice (once on the sender chassis and again on the gateway chassis).
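
For example, in the South-to-North case of the test added below, a
packet injected with ttl 64 (0x40 in the IPv4 header) is expected at
the destination VIF with ttl 62 (0x3e), i.e. one decrement per pass
through the router ingress pipeline.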

No changes needed for the reverse path.

Signed-off-by: Ankur Sharma <ankur.sharma at nutanix.com>
---
 controller/physical.c  | 255 +++++++++++++++++++++++++++-------------
 lib/ovn-util.c         |  33 ++++++
 lib/ovn-util.h         |   5 +
 ovn-architecture.7.xml |  64 +++++++++++
 tests/ovn.at           | 307 +++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 581 insertions(+), 83 deletions(-)

diff --git a/controller/physical.c b/controller/physical.c
index aa06b3f..4697821 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -229,6 +229,165 @@ get_zone_ids(const struct sbrec_port_binding *binding,
 }
 
 static void
+put_remote_port_redirect_vlan(const struct
+                              sbrec_port_binding *binding,
+                              const struct hmap *local_datapaths,
+                              struct local_datapath *ld,
+                              struct match *match,
+                              struct ofpbuf *ofpacts_p,
+                              struct ovn_desired_flow_table *flow_table)
+{
+        struct eth_addr binding_mac;
+        uint32_t ls_dp_key = 0;
+
+        if (strcmp(binding->type, "chassisredirect")) {
+            /* VLAN based redirect is only supported for chassisredirect
+             * type remote ports. */
+            return;
+        }
+
+        bool  is_valid_mac = extract_sbrec_binding_first_mac(binding,
+                                                             &binding_mac);
+        if (!is_valid_mac) {
+            return;
+        }
+
+        for (int i = 0; i < ld->n_peer_ports; i++) {
+            const struct sbrec_port_binding *sport_binding = ld->peer_ports[i];
+            const char *sport_peer_name = smap_get(&sport_binding->options,
+                                                   "peer");
+            const char *distributed_port = smap_get(&binding->options,
+                                                    "distributed-port");
+
+            if (!strcmp(sport_peer_name, distributed_port)) {
+                ls_dp_key = sport_binding->datapath->tunnel_key;
+                break;
+            }
+        }
+
+        if (!ls_dp_key) {
+            return;
+        }
+
+        union mf_value value;
+        struct ofpact_mac *src_mac;
+        const struct sbrec_port_binding *ls_localnet_port;
+
+        ls_localnet_port = get_localnet_port(local_datapaths, ls_dp_key);
+
+        src_mac = ofpact_put_SET_ETH_SRC(ofpacts_p);
+        src_mac->mac = binding_mac;
+
+        value.be64 = htonll(ls_dp_key);
+
+        ofpact_put_set_field(ofpacts_p, mf_from_id(MFF_METADATA),
+                             &value, NULL);
+
+        value.be32 = htonl(ls_localnet_port->tunnel_key);
+        ofpact_put_set_field(ofpacts_p, mf_from_id(MFF_REG15),
+                             &value, NULL);
+
+        put_resubmit(OFTABLE_LOG_TO_PHY, ofpacts_p);
+        ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100, 0,
+                        match, ofpacts_p, &binding->header_.uuid);
+
+}
+
+static void
+put_remote_port_redirect_overlay(const struct
+                                 sbrec_port_binding *binding,
+                                 bool is_ha_remote,
+                                 struct ha_chassis_ordered *ha_ch_ordered,
+                                 enum mf_field_id mff_ovn_geneve,
+                                 const struct chassis_tunnel *tun,
+                                 uint32_t port_key,
+                                 struct match *match,
+                                 struct ofpbuf *ofpacts_p,
+                                 struct ovn_desired_flow_table *flow_table)
+{
+    if (!is_ha_remote) {
+        /* Setup encapsulation */
+        const struct chassis_tunnel *rem_tun =
+            get_port_binding_tun(binding);
+        if (!rem_tun) {
+            return;
+        }
+        put_encapsulation(mff_ovn_geneve, tun, binding->datapath,
+                          port_key, ofpacts_p);
+        /* Output to tunnel. */
+        ofpact_put_OUTPUT(ofpacts_p)->port = rem_tun->ofport;
+    } else {
+        /* Make sure all tunnel endpoints use the same encapsulation,
+         * and set it up */
+        for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
+            const struct sbrec_chassis *ch = ha_ch_ordered->ha_ch[i].chassis;
+            if (!ch) {
+                continue;
+            }
+            if (!tun) {
+                tun = chassis_tunnel_find(ch->name, NULL);
+            } else {
+                struct chassis_tunnel *chassis_tunnel =
+                                       chassis_tunnel_find(ch->name, NULL);
+                if (chassis_tunnel &&
+                    tun->type != chassis_tunnel->type) {
+                    static struct vlog_rate_limit rl =
+                                  VLOG_RATE_LIMIT_INIT(1, 1);
+                    VLOG_ERR_RL(&rl, "Port %s has Gateway_Chassis "
+                                "with mixed encapsulations, only "
+                                "uniform encapsulations are "
+                                "supported.", binding->logical_port);
+                    return;
+                }
+            }
+        }
+        if (!tun) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_ERR_RL(&rl, "No tunnel endpoint found for HA chassis in "
+                        "HA chassis group of port %s",
+                        binding->logical_port);
+            return;
+        }
+
+        put_encapsulation(mff_ovn_geneve, tun, binding->datapath,
+                          port_key, ofpacts_p);
+
+        /* Output to tunnels with active/backup */
+        struct ofpact_bundle *bundle = ofpact_put_BUNDLE(ofpacts_p);
+
+        for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
+            const struct sbrec_chassis *ch =
+                ha_ch_ordered->ha_ch[i].chassis;
+            if (!ch) {
+                continue;
+            }
+            tun = chassis_tunnel_find(ch->name, NULL);
+            if (!tun) {
+                continue;
+            }
+            if (bundle->n_slaves >= BUNDLE_MAX_SLAVES) {
+                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+                VLOG_WARN_RL(&rl, "Remote endpoints for port beyond "
+                             "BUNDLE_MAX_SLAVES");
+                break;
+            }
+            ofpbuf_put(ofpacts_p, &tun->ofport, sizeof tun->ofport);
+            bundle = ofpacts_p->header;
+            bundle->n_slaves++;
+        }
+
+        bundle->algorithm = NX_BD_ALG_ACTIVE_BACKUP;
+        /* Although ACTIVE_BACKUP bundle algorithm seems to ignore
+         * the next two fields, those are always set */
+        bundle->basis = 0;
+        bundle->fields = NX_HASH_FIELDS_ETH_SRC;
+        ofpact_finish_BUNDLE(ofpacts_p, &bundle);
+    }
+    ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100, 0,
+                    match, ofpacts_p, &binding->header_.uuid);
+}
+
+static void
 put_replace_router_port_mac_flows(struct ovsdb_idl_index
                                   *sbrec_port_binding_by_name,
                                   const struct
@@ -485,7 +644,8 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 {
     uint32_t dp_key = binding->datapath->tunnel_key;
     uint32_t port_key = binding->tunnel_key;
-    if (!get_local_datapath(local_datapaths, dp_key)) {
+    struct local_datapath *ld;
+    if (!(ld = get_local_datapath(local_datapaths, dp_key))) {
         return;
     }
 
@@ -831,6 +991,10 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100, 0,
                         &match, ofpacts_p, &binding->header_.uuid);
     } else {
+
+        const char *redirect_type = smap_get(&binding->options,
+                                             "redirect-type");
+
         /* Remote port connected by tunnel */
 
         /* Table 32, priority 100.
@@ -847,90 +1011,15 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         match_set_metadata(&match, htonll(dp_key));
         match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
 
-        if (!is_ha_remote) {
-            /* Setup encapsulation */
-            const struct chassis_tunnel *rem_tun =
-                get_port_binding_tun(binding);
-            if (!rem_tun) {
-                goto out;
-            }
-            put_encapsulation(mff_ovn_geneve, tun, binding->datapath,
-                              port_key, ofpacts_p);
-            /* Output to tunnel. */
-            ofpact_put_OUTPUT(ofpacts_p)->port = rem_tun->ofport;
+        if (redirect_type && !strcasecmp(redirect_type, "vlan")) {
+            put_remote_port_redirect_vlan(binding, local_datapaths,
+                                          ld, &match, ofpacts_p, flow_table);
         } else {
-            /* Make sure all tunnel endpoints use the same encapsulation,
-             * and set it up */
-            for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
-                const struct sbrec_chassis *ch =
-                    ha_ch_ordered->ha_ch[i].chassis;
-                if (!ch) {
-                    continue;
-                }
-                if (!tun) {
-                    tun = chassis_tunnel_find(ch->name, NULL);
-                } else {
-                    struct chassis_tunnel *chassis_tunnel =
-                        chassis_tunnel_find(ch->name, NULL);
-                    if (chassis_tunnel &&
-                        tun->type != chassis_tunnel->type) {
-                        static struct vlog_rate_limit rl =
-                            VLOG_RATE_LIMIT_INIT(1, 1);
-                        VLOG_ERR_RL(&rl, "Port %s has Gateway_Chassis "
-                                            "with mixed encapsulations, only "
-                                            "uniform encapsulations are "
-                                            "supported.",
-                                    binding->logical_port);
-                        goto out;
-                    }
-                }
-            }
-            if (!tun) {
-                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-                VLOG_ERR_RL(&rl, "No tunnel endpoint found for HA chassis in "
-                                 "HA chassis group of port %s",
-                            binding->logical_port);
-                goto out;
-            }
-
-            put_encapsulation(mff_ovn_geneve, tun, binding->datapath,
-                              port_key, ofpacts_p);
-
-            /* Output to tunnels with active/backup */
-            struct ofpact_bundle *bundle = ofpact_put_BUNDLE(ofpacts_p);
-
-            for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
-                const struct sbrec_chassis *ch =
-                    ha_ch_ordered->ha_ch[i].chassis;
-                if (!ch) {
-                    continue;
-                }
-                tun = chassis_tunnel_find(ch->name, NULL);
-                if (!tun) {
-                    continue;
-                }
-                if (bundle->n_slaves >= BUNDLE_MAX_SLAVES) {
-                    static struct vlog_rate_limit rl =
-                            VLOG_RATE_LIMIT_INIT(1, 1);
-                    VLOG_WARN_RL(&rl, "Remote endpoints for port beyond "
-                                        "BUNDLE_MAX_SLAVES");
-                    break;
-                }
-                ofpbuf_put(ofpacts_p, &tun->ofport,
-                            sizeof tun->ofport);
-                bundle = ofpacts_p->header;
-                bundle->n_slaves++;
-            }
-
-            bundle->algorithm = NX_BD_ALG_ACTIVE_BACKUP;
-            /* Although ACTIVE_BACKUP bundle algorithm seems to ignore
-             * the next two fields, those are always set */
-            bundle->basis = 0;
-            bundle->fields = NX_HASH_FIELDS_ETH_SRC;
-            ofpact_finish_BUNDLE(ofpacts_p, &bundle);
+            put_remote_port_redirect_overlay(binding, is_ha_remote,
+                                             ha_ch_ordered, mff_ovn_geneve,
+                                             tun, port_key, &match, ofpacts_p,
+                                             flow_table);
         }
-        ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100, 0,
-                        &match, ofpacts_p, &binding->header_.uuid);
     }
 out:
     if (ha_ch_ordered) {
diff --git a/lib/ovn-util.c b/lib/ovn-util.c
index 0f07d80..72e6a34 100644
--- a/lib/ovn-util.c
+++ b/lib/ovn-util.c
@@ -16,6 +16,7 @@
 #include "ovn-util.h"
 #include "dirs.h"
 #include "openvswitch/vlog.h"
+#include "openvswitch/ofp-parse.h"
 #include "ovn/lib/ovn-nb-idl.h"
 #include "ovn/lib/ovn-sb-idl.h"
 
@@ -272,6 +273,38 @@ extract_lrp_networks(const struct nbrec_logical_router_port *lrp,
     return true;
 }
 
+bool
+extract_sbrec_binding_first_mac(const struct sbrec_port_binding *binding,
+                                struct eth_addr *ea)
+{
+    char *save_ptr = NULL;
+    bool ret = false;
+
+    if (!binding->n_mac) {
+        return ret;
+    }
+
+    char *tokstr = xstrdup(binding->mac[0]);
+
+    for (char *token = strtok_r(tokstr, " ", &save_ptr);
+         token != NULL;
+         token = strtok_r(NULL, " ", &save_ptr)) {
+
+        /* Return the first chassis mac. */
+        char *err_str = str_to_mac(token, ea);
+        if (err_str) {
+            free(err_str);
+            continue;
+        }
+
+        ret = true;
+        break;
+    }
+
+    free(tokstr);
+    return ret;
+}
+
 void
 destroy_lport_addresses(struct lport_addresses *laddrs)
 {
diff --git a/lib/ovn-util.h b/lib/ovn-util.h
index 6d5e1df..8461db5 100644
--- a/lib/ovn-util.h
+++ b/lib/ovn-util.h
@@ -21,6 +21,8 @@
 struct nbrec_logical_router_port;
 struct sbrec_logical_flow;
 struct uuid;
+struct eth_addr;
+struct sbrec_port_binding;
 
 struct ipv4_netaddr {
     ovs_be32 addr;            /* 192.168.10.123 */
@@ -61,6 +63,9 @@ bool extract_lsp_addresses(const char *address, struct lport_addresses *);
 bool extract_ip_addresses(const char *address, struct lport_addresses *);
 bool extract_lrp_networks(const struct nbrec_logical_router_port *,
                           struct lport_addresses *);
+bool extract_sbrec_binding_first_mac(const struct sbrec_port_binding *binding,
+                                     struct eth_addr *ea);
+
 void destroy_lport_addresses(struct lport_addresses *);
 
 char *alloc_nat_zone_key(const struct uuid *key, const char *type);
diff --git a/ovn-architecture.7.xml b/ovn-architecture.7.xml
index c4099f2..366c201 100644
--- a/ovn-architecture.7.xml
+++ b/ovn-architecture.7.xml
@@ -1614,6 +1614,70 @@
     </li>
   </ol>
 
+  VLAN based redirection
+
+  As an enhancement to <code>reside-on-redirect-chassis</code>, we support
+  VLAN based redirection as well. By setting <code>options:redirect-type</code>
+  to <code>vlan</code> on a gateway chassis attached router port, the user can
+  enforce that the redirected packet does not use a tunnel port but rather uses
+  the localnet port of the peer logical switch to go out as a vlan packet.
+
+  The following happens for a VLAN based redirection:
+  <ol>
+    <li>
+      On the compute chassis, the packet passes through the logical router's
+      ingress pipeline.
+    </li>
+
+    <li>
+      If the logical outport is a gateway chassis attached router port,
+      then the packet is "redirected" to the gateway chassis using the peer
+      logical switch's localnet port.
+    </li>
+
+    <li>
+      This VLAN backed redirected packet has the router port mac (the one
+      to which the gateway chassis is attached) as its destination mac, and
+      the vlan id is that of the localnet port (of the peer logical switch
+      of the logical router port).
+    </li>
+
+    <li>
+      On the gateway chassis, the packet enters the logical router pipeline
+      again and this time it passes through the egress pipeline as well.
+    </li>
+
+    <li>
+      Reverse traffic packet flow stays the same.
+    </li>
+  </ol>
+
+  Some guidelines and expectations with VLAN based redirection:
+  <ol>
+    <li>
+      Since the router port mac is the destination mac, it has to be ensured
+      that the physical network learns it ONLY from the gateway chassis.
+      This means that <code>ovn-chassis-mac-mappings</code> should be
+      configured on all the compute nodes, so that the physical network
+      never learns the router port mac from compute nodes.
+    </li>
+
+    <li>
+      Since the packet enters the logical router ingress pipeline twice
+      (once on the compute chassis and again on the gateway chassis),
+      the ttl is decremented twice.
+    </li>
+
+    <li>
+      The default redirection type continues to be <code>overlay</code>.
+      The user can switch the redirect-type between <code>vlan</code>
+      and <code>overlay</code> by changing the value of
+      <code>options:redirect-type</code>.
+    </li>
+
+  </ol>
+
+
   <h2>Life Cycle of a VTEP gateway</h2>
 
   <p>
diff --git a/tests/ovn.at b/tests/ovn.at
index 6d9a08a..11b4ff7 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -15013,3 +15013,310 @@ as hv4 ovs-appctl fdb/show br-phys
 OVN_CLEANUP([hv1],[hv2],[hv3],[hv4])
 
 AT_CLEANUP
+
+AT_SETUP([ovn -- 2 HVs, 2 lports/HV, localnet ports, DVR N-S Ping])
+ovn_start
+
+# In this test case we create 3 switches, all connected to the same
+# physical network (through br-phys on each HV). LS1 and LS2 have
+# 1 VIF each. Each HV has 1 VIF port. The first digit
+# of a VIF port name indicates the hypervisor it is bound to, e.g.
+# lp23 means VIF 3 on hv2.
+#
+# All the switches are connected to a logical router "router".
+#
+# Each switch's VLAN tag and their logical switch ports are:
+#   - ls1:
+#       - tagged with VLAN 101
+#       - ports: lp11
+#   - ls2:
+#       - tagged with VLAN 201
+#       - ports: lp22
+#   - ls-underlay:
+#       - tagged with VLAN 1000
+# Note: a localnet port is created for each switch to connect to
+# physical network.
+
+for i in 1 2; do
+    ls_name=ls$i
+    ovn-nbctl ls-add $ls_name
+    ln_port_name=ln$i
+    if test $i -eq 1; then
+        ovn-nbctl lsp-add $ls_name $ln_port_name "" 101
+    elif test $i -eq 2; then
+        ovn-nbctl lsp-add $ls_name $ln_port_name "" 201
+    fi
+    ovn-nbctl lsp-set-addresses $ln_port_name unknown
+    ovn-nbctl lsp-set-type $ln_port_name localnet
+    ovn-nbctl lsp-set-options $ln_port_name network_name=phys
+done
+
+# lsp_to_ls LSP
+#
+# Prints the name of the logical switch that contains LSP.
+lsp_to_ls () {
+    case $1 in dnl (
+        lp?[[11]]) echo ls1 ;; dnl (
+        lp?[[12]]) echo ls2 ;; dnl (
+        *) AT_FAIL_IF([:]) ;;
+    esac
+}
+
+vif_to_hv () {
+    case $1 in dnl (
+        vif[[1]]?) echo hv1 ;; dnl (
+        vif[[2]]?) echo hv2 ;; dnl (
+        vif?[[north]]?) echo hv4 ;; dnl (
+        *) AT_FAIL_IF([:]) ;;
+    esac
+}
+
+ip_to_hex() {
+       printf "%02x%02x%02x%02x" "$@"
+}
+
+net_add n1
+for i in 1 2; do
+    sim_add hv$i
+    as hv$i
+    ovs-vsctl add-br br-phys
+    ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+    ovs-vsctl set open . external-ids:ovn-chassis-mac-mappings="phys:aa:bb:cc:dd:ee:$i$i"
+    ovn_attach n1 br-phys 192.168.0.$i
+
+    ovs-vsctl add-port br-int vif$i$i -- \
+        set Interface vif$i$i external-ids:iface-id=lp$i$i \
+                              options:tx_pcap=hv$i/vif$i$i-tx.pcap \
+                              options:rxq_pcap=hv$i/vif$i$i-rx.pcap \
+                              ofport-request=$i$i
+
+    lsp_name=lp$i$i
+    ls_name=$(lsp_to_ls $lsp_name)
+
+    ovn-nbctl lsp-add $ls_name $lsp_name
+    ovn-nbctl lsp-set-addresses $lsp_name "f0:00:00:00:00:$i$i 192.168.$i.$i"
+    ovn-nbctl lsp-set-port-security $lsp_name f0:00:00:00:00:$i$i
+
+    OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up $lsp_name` = xup])
+
+done
+
+ovn-nbctl ls-add ls-underlay
+ovn-nbctl lsp-add ls-underlay ln3 "" 1000
+ovn-nbctl lsp-set-addresses ln3 unknown
+ovn-nbctl lsp-set-type ln3 localnet
+ovn-nbctl lsp-set-options ln3 network_name=phys
+
+ovn-nbctl ls-add ls-north
+ovn-nbctl lsp-add ls-north ln4 "" 1000
+ovn-nbctl lsp-set-addresses ln4 unknown
+ovn-nbctl lsp-set-type ln4 localnet
+ovn-nbctl lsp-set-options ln4 network_name=phys
+
+# Add a VM on ls-north
+ovn-nbctl lsp-add ls-north lp-north
+ovn-nbctl lsp-set-addresses lp-north "f0:f0:00:00:00:11 172.31.0.10"
+ovn-nbctl lsp-set-port-security lp-north f0:f0:00:00:00:11
+
+# Add 3rd hypervisor
+sim_add hv3
+as hv3 ovs-vsctl add-br br-phys
+as hv3 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+as hv3 ovs-vsctl set open . external-ids:ovn-chassis-mac-mappings="phys:aa:bb:cc:dd:ee:33"
+as hv3 ovn_attach n1 br-phys 192.168.0.3
+
+# Add 4th hypervisor
+sim_add hv4
+as hv4 ovs-vsctl add-br br-phys
+as hv4 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+as hv4 ovs-vsctl set open . external-ids:ovn-chassis-mac-mappings="phys:aa:bb:cc:dd:ee:44"
+as hv4 ovn_attach n1 br-phys 192.168.0.4
+
+as hv4 ovs-vsctl add-port br-int vif-north -- \
+        set Interface vif-north external-ids:iface-id=lp-north \
+                              options:tx_pcap=hv4/vif-north-tx.pcap \
+                              options:rxq_pcap=hv4/vif-north-rx.pcap \
+                              ofport-request=44
+
+ovn-nbctl lr-add router
+ovn-nbctl lrp-add router router-to-ls1 00:00:01:01:02:03 192.168.1.3/24
+ovn-nbctl lrp-add router router-to-ls2 00:00:01:01:02:05 192.168.2.3/24
+ovn-nbctl lrp-add router router-to-underlay 00:00:01:01:02:07 172.31.0.1/24
+
+ovn-nbctl lsp-add ls1 ls1-to-router -- set Logical_Switch_Port ls1-to-router type=router \
+          options:router-port=router-to-ls1 -- lsp-set-addresses ls1-to-router router
+ovn-nbctl lsp-add ls2 ls2-to-router -- set Logical_Switch_Port ls2-to-router type=router \
+          options:router-port=router-to-ls2 -- lsp-set-addresses ls2-to-router router
+ovn-nbctl lsp-add ls-underlay underlay-to-router -- set Logical_Switch_Port \
+                              underlay-to-router type=router \
+                              options:router-port=router-to-underlay \
+                              -- lsp-set-addresses underlay-to-router router
+
+ovn-nbctl lrp-set-gateway-chassis router-to-underlay hv3
+ovn-nbctl lrp-set-redirect-type router-to-underlay vlan
+
+ovn-nbctl --wait=sb sync
+
+sleep 2
+
+OVN_POPULATE_ARP
+
+# lsp_to_ls LSP
+#
+# Prints the name of the logical switch that contains LSP.
+lsp_to_ls () {
+    case $1 in dnl (
+        lp?[[11]]) echo ls1 ;; dnl (
+        lp?[[12]]) echo ls2 ;; dnl (
+        *) AT_FAIL_IF([:]) ;;
+    esac
+}
+
+vif_to_ls () {
+    case $1 in dnl (
+        vif?[[11]]) echo ls1 ;; dnl (
+        vif?[[12]]) echo ls2 ;; dnl (
+        vif-north) echo ls-north ;; dnl (
+        *) AT_FAIL_IF([:]) ;;
+    esac
+}
+
+hv_to_num () {
+    case $1 in dnl (
+        hv1) echo 1 ;; dnl (
+        hv2) echo 2 ;; dnl (
+        hv3) echo 3 ;; dnl (
+        hv4) echo 4 ;; dnl (
+        *) AT_FAIL_IF([:]) ;;
+    esac
+}
+
+vif_to_num () {
+    case $1 in dnl (
+        vif22) echo 22 ;; dnl (
+        vif21) echo 21 ;; dnl (
+        vif11) echo 11 ;; dnl (
+        *) AT_FAIL_IF([:]) ;;
+    esac
+}
+
+vif_to_hv () {
+    case $1 in dnl (
+        vif[[1]]?) echo hv1 ;; dnl (
+        vif[[2]]?) echo hv2 ;; dnl (
+        vif-north) echo hv4 ;; dnl (
+        *) AT_FAIL_IF([:]) ;;
+    esac
+}
+
+vif_to_lrp () {
+    echo router-to-`vif_to_ls $1`
+}
+
+ip_to_hex() {
+       printf "%02x%02x%02x%02x" "$@"
+}
+
+
+test_ip() {
+        # This packet has bad checksums but logical L3 routing doesn't check.
+        local inport=$1 src_mac=$2 dst_mac=$3 src_ip=$4 dst_ip=$5 outport=$6
+        local packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
+        shift; shift; shift; shift; shift
+        hv=`vif_to_hv $inport`
+        as $hv ovs-appctl netdev-dummy/receive $inport $packet
+        in_ls=`vif_to_ls $inport`
+        for outport; do
+            out_ls=`vif_to_ls $outport`
+            if test $in_ls = $out_ls; then
+                # Ports on the same logical switch receive exactly the same packet.
+                echo $packet
+            else
+                # Routing decrements TTL and updates source and dest MAC
+                # (and checksum).
+                out_lrp=`vif_to_lrp $outport`
+                # For North-South, packet will come via gateway chassis, i.e hv3
+                if test $inport = vif-north; then
+                    echo f00000000011aabbccddee3308004500001c000000003f110100${src_ip}${dst_ip}0035111100080000 >> $outport.expected
+                fi
+                if test $outport = vif-north; then
+                    echo f0f00000001100000101020708004500001c000000003e110200${src_ip}${dst_ip}0035111100080000 >> $outport.expected
+                fi
+            fi >> $outport.expected
+        done
+}
+
+# Dump a bunch of info helpful for debugging if there's a failure.
+
+echo "------ OVN dump ------"
+ovn-nbctl show
+ovn-sbctl show
+ovn-sbctl list port_binding
+ovn-sbctl list mac_binding
+
+echo "------ hv1 dump ------"
+as hv1 ovs-vsctl show
+as hv1 ovs-vsctl list Open_Vswitch
+
+echo "------ hv2 dump ------"
+as hv2 ovs-vsctl show
+as hv2 ovs-vsctl list Open_Vswitch
+
+echo "------ hv3 dump ------"
+as hv3 ovs-vsctl show
+as hv3 ovs-vsctl list Open_Vswitch
+
+echo "------ hv4 dump ------"
+as hv4 ovs-vsctl show
+as hv4 ovs-vsctl list Open_Vswitch
+
+echo "Send traffic North to South"
+
+sip=`ip_to_hex 172 31 0 10`
+dip=`ip_to_hex 192 168 1 1`
+test_ip vif-north f0f000000011 000001010207 $sip $dip vif11
+sleep 1
+
+# Confirm that North to South traffic works fine.
+OVN_CHECK_PACKETS([hv1/vif11-tx.pcap], [vif11.expected])
+
+echo "Send traffic South to Nouth"
+sip=`ip_to_hex 192 168 1 1`
+dip=`ip_to_hex 172 31 0 10`
+test_ip vif11 f00000000011 000001010203 $sip $dip vif-north
+sleep 5
+
+# Confirm that South to North traffic works fine.
+OVN_CHECK_PACKETS_REMOVE_BROADCAST([hv4/vif-north-tx.pcap], [vif-north.expected])
+
+# Confirm that packets did not go out via tunnel port.
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=32 | grep NXM_NX_TUN_METADATA0 | grep n_packets=0 | wc -l], [0], [[0
+]])
+
+# Confirm that packet went out via localnet port
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=65 | grep priority=150 | grep src=00:00:01:01:02:07 | grep n_packets=1 | wc -l], [0], [[1
+]])
+
+echo "----------- Post Traffic hv1 dump -----------"
+as hv1 ovs-ofctl dump-flows br-int
+as hv1 ovs-ofctl show br-phys
+as hv1 ovs-appctl fdb/show br-phys
+
+echo "----------- Post Traffic hv2 dump -----------"
+as hv2 ovs-ofctl dump-flows br-int
+as hv2 ovs-ofctl show br-phys
+as hv2 ovs-appctl fdb/show br-phys
+
+echo "----------- Post Traffic hv3 dump -----------"
+as hv3 ovs-ofctl dump-flows br-int
+as hv3 ovs-ofctl show br-phys
+as hv3 ovs-appctl fdb/show br-phys
+
+echo "----------- Post Traffic hv4 dump -----------"
+as hv4 ovs-ofctl dump-flows br-int
+as hv4 ovs-ofctl show br-phys
+as hv4 ovs-appctl fdb/show br-phys
+
+OVN_CLEANUP([hv1],[hv2],[hv3],[hv4])
+
+AT_CLEANUP
-- 
1.8.3.1


