[ovs-dev] [PATCH v2 3/3] OVN localport type support
Daniel Alvarez
dalvarez at redhat.com
Fri May 5 12:54:44 UTC 2017
This patch introduces a new type of OVN ports called "localport".
These ports will be present in every hypervisor and may have the
same IP/MAC addresses. They are not bound to any chassis and traffic
to these ports will never go through a tunnel.
Its main use case is the OpenStack metadata API support, which relies
on a local agent running on every hypervisor and serving metadata to
VMs locally. This service is described in detail at [0].
[0] https://review.openstack.org/#/c/452811/
Signed-off-by: Daniel Alvarez <dalvarez at redhat.com>
---
ovn/controller/binding.c | 39 ++++++++----
ovn/controller/ovn-controller.8.xml | 15 +++++
ovn/controller/physical.c | 34 ++++++++++
ovn/northd/ovn-northd.8.xml | 8 +--
ovn/northd/ovn-northd.c | 6 +-
ovn/ovn-architecture.7.xml | 25 ++++++--
ovn/ovn-nb.xml | 9 +++
ovn/ovn-sb.xml | 14 +++++
tests/ovn.at | 122 ++++++++++++++++++++++++++++++++++++
9 files changed, 249 insertions(+), 23 deletions(-)
diff --git a/ovn/controller/binding.c b/ovn/controller/binding.c
index 95e9deb..2a53b80 100644
--- a/ovn/controller/binding.c
+++ b/ovn/controller/binding.c
@@ -368,19 +368,34 @@ consider_local_datapath(struct controller_ctx *ctx,
= shash_find_data(lport_to_iface, binding_rec->logical_port);
bool our_chassis = false;
- if (iface_rec
- || (binding_rec->parent_port && binding_rec->parent_port[0] &&
- sset_contains(local_lports, binding_rec->parent_port))) {
- if (binding_rec->parent_port && binding_rec->parent_port[0]) {
- /* Add child logical port to the set of all local ports. */
- sset_add(local_lports, binding_rec->logical_port);
- }
- add_local_datapath(ldatapaths, lports, binding_rec->datapath,
- false, local_datapaths);
- if (iface_rec && qos_map && ctx->ovs_idl_txn) {
- get_qos_params(binding_rec, qos_map);
+
+ if (ctx->ovs_idl_txn && iface_rec &&
+ !strcmp(binding_rec->type, "localport")) {
+ /* Make sure localport external_id is present. This will allow us to
+ * identify a localport in physical.c and insert the required flows for
+ * it. */
+ if (!smap_get(&iface_rec->external_ids, "ovn-localport-port")) {
+ struct smap new_ids;
+ smap_clone(&new_ids, &iface_rec->external_ids);
+ smap_replace(&new_ids, "ovn-localport-port",
+ binding_rec->logical_port);
+ ovsrec_interface_verify_external_ids(iface_rec);
+ ovsrec_interface_set_external_ids(iface_rec, &new_ids);
+ smap_destroy(&new_ids);
}
- our_chassis = true;
+ } else if (iface_rec
+ || (binding_rec->parent_port && binding_rec->parent_port[0] &&
+ sset_contains(local_lports, binding_rec->parent_port))) {
+ if (binding_rec->parent_port && binding_rec->parent_port[0]) {
+ /* Add child logical port to the set of all local ports. */
+ sset_add(local_lports, binding_rec->logical_port);
+ }
+ add_local_datapath(ldatapaths, lports, binding_rec->datapath,
+ false, local_datapaths);
+ if (iface_rec && qos_map && ctx->ovs_idl_txn) {
+ get_qos_params(binding_rec, qos_map);
+ }
+ our_chassis = true;
} else if (!strcmp(binding_rec->type, "l2gateway")) {
const char *chassis_id = smap_get(&binding_rec->options,
"l2gateway-chassis");
diff --git a/ovn/controller/ovn-controller.8.xml b/ovn/controller/ovn-controller.8.xml
index d1fcd8a..33bb99e 100644
--- a/ovn/controller/ovn-controller.8.xml
+++ b/ovn/controller/ovn-controller.8.xml
@@ -287,6 +287,21 @@
logical patch port that it implements.
</p>
</dd>
+
+ <dt>
+ <code>external_ids:ovn-localport-port</code> in the
+ <code>Interface</code> table
+ </dt>
+
+ <dd>
+ <p>
+ The presence of this key identifies a port as a
+ <code>localport</code> so that <code>ovn-controller</code> can
+ properly set the right flows to allow only local traffic and
+ drop any packets directed to an external chassis.
+ </p>
+ </dd>
+
</dl>
<h1>Runtime Management Commands</h1>
diff --git a/ovn/controller/physical.c b/ovn/controller/physical.c
index 457fc45..a018126 100644
--- a/ovn/controller/physical.c
+++ b/ovn/controller/physical.c
@@ -59,6 +59,8 @@ physical_register_ovs_idl(struct ovsdb_idl *ovs_idl)
static struct simap localvif_to_ofport =
SIMAP_INITIALIZER(&localvif_to_ofport);
static struct hmap tunnels = HMAP_INITIALIZER(&tunnels);
+static struct simap localport_to_ofport =
+ SIMAP_INITIALIZER(&localport_to_ofport);
/* Maps from a chassis to the OpenFlow port number of the tunnel that can be
* used to reach that chassis. */
@@ -601,6 +603,28 @@ consider_port_binding(enum mf_field_id mff_ovn_geneve,
} else {
/* Remote port connected by tunnel */
+ /* Table 32, priority 150.
+ * =======================
+ *
+ * Drop traffic originated from a localport to a remote destination.
+ */
+ struct simap_node *localport;
+ SIMAP_FOR_EACH (localport, &localport_to_ofport) {
+ unsigned int inport = simap_get(&localport_to_ofport,
+ localport->name);
+ if (inport) {
+ match_init_catchall(&match);
+ ofpbuf_clear(ofpacts_p);
+ /* Match localport in_port. */
+ match_set_in_port(&match, u16_to_ofp(inport));
+ /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
+ match_set_metadata(&match, htonll(dp_key));
+ match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
+ ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
+ &match, ofpacts_p);
+ }
+ }
+
/* Table 32, priority 100.
* =======================
*
@@ -779,6 +803,9 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
SIMAP_INITIALIZER(&new_localvif_to_ofport);
struct simap new_tunnel_to_ofport =
SIMAP_INITIALIZER(&new_tunnel_to_ofport);
+ struct simap new_localport_to_ofport =
+ SIMAP_INITIALIZER(&new_localport_to_ofport);
+
for (int i = 0; i < br_int->n_ports; i++) {
const struct ovsrec_port *port_rec = br_int->ports[i];
if (!strcmp(port_rec->name, br_int->name)) {
@@ -808,6 +835,8 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
continue;
}
+ const char *localport = smap_get(&iface_rec->external_ids,
+ "ovn-localport-port");
/* Record as patch to local net, logical patch port, chassis, or
* local logical port. */
bool is_patch = !strcmp(iface_rec->type, "patch");
@@ -859,6 +888,9 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
"iface-id");
if (iface_id) {
simap_put(&new_localvif_to_ofport, iface_id, ofport);
+ if (localport) {
+ simap_put(&new_localport_to_ofport, localport, ofport);
+ }
}
}
}
@@ -877,6 +909,8 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
/* Capture changed or removed openflow ports. */
physical_map_changed |= update_ofports(&localvif_to_ofport,
&new_localvif_to_ofport);
+ physical_map_changed |= update_ofports(&localport_to_ofport,
+ &new_localport_to_ofport);
if (physical_map_changed) {
/* Reprocess logical flow table immediately. */
poll_immediate_wake();
diff --git a/ovn/northd/ovn-northd.8.xml b/ovn/northd/ovn-northd.8.xml
index c0b4c5e..7ff5245 100644
--- a/ovn/northd/ovn-northd.8.xml
+++ b/ovn/northd/ovn-northd.8.xml
@@ -492,8 +492,8 @@ output;
</pre>
<p>
- These flows are omitted for logical ports (other than router ports)
- that are down.
+ These flows are omitted for logical ports (other than router ports or
+ <code>localport</code> ports) that are down.
</p>
</li>
@@ -519,8 +519,8 @@ nd_na {
</pre>
<p>
- These flows are omitted for logical ports (other than router ports)
- that are down.
+ These flows are omitted for logical ports (other than router ports or
+ <code>localport</code> ports) that are down.
</p>
</li>
diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
index 83db753..a3bd859 100644
--- a/ovn/northd/ovn-northd.c
+++ b/ovn/northd/ovn-northd.c
@@ -3305,9 +3305,11 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
/*
* Add ARP/ND reply flows if either the
* - port is up or
- * - port type is router
+ * - port type is router or
+ * - port type is localport
*/
- if (!lsp_is_up(op->nbsp) && strcmp(op->nbsp->type, "router")) {
+ if (!lsp_is_up(op->nbsp) && strcmp(op->nbsp->type, "router") &&
+ strcmp(op->nbsp->type, "localport")) {
continue;
}
diff --git a/ovn/ovn-architecture.7.xml b/ovn/ovn-architecture.7.xml
index d8114f1..2d2d0cc 100644
--- a/ovn/ovn-architecture.7.xml
+++ b/ovn/ovn-architecture.7.xml
@@ -409,6 +409,20 @@
logical patch ports at each such point of connectivity, one on
each side.
</li>
+ <li>
+ <dfn>Localport ports</dfn> represent the points of local
+ connectivity between logical switches and VIFs. These ports are
+ present in every chassis (not bound to any particular one) and
+ traffic from them will never go through a tunnel. A
+ <code>localport</code> is expected to only generate traffic destined
+ for a local destination, typically in response to a request it
+ received.
+ One use case is how OpenStack Neutron uses a <code>localport</code>
+ port for serving metadata to VM's residing on every hypervisor. A
+ metadata proxy process is attached to this port on every host and all
+ VM's within the same network will reach it at the same IP/MAC address
+ without any traffic being sent over a tunnel.
+ </li>
</ul>
</li>
</ul>
@@ -986,11 +1000,12 @@
hypervisor. Each flow's actions implement sending a packet to the port
it matches. For unicast logical output ports on remote hypervisors,
the actions set the tunnel key to the correct value, then send the
- packet on the tunnel port to the correct hypervisor. (When the remote
- hypervisor receives the packet, table 0 there will recognize it as a
- tunneled packet and pass it along to table 33.) For multicast logical
- output ports, the actions send one copy of the packet to each remote
- hypervisor, in the same way as for unicast destinations. If a
+ packet on the tunnel port to the correct hypervisor (unless the packet
+ comes from a localport, in which case it will be dropped). (When the
+ remote hypervisor receives the packet, table 0 there will recognize it
+ as a tunneled packet and pass it along to table 33.) For multicast
+ logical output ports, the actions send one copy of the packet to each
+ remote hypervisor, in the same way as for unicast destinations. If a
multicast group includes a logical port or ports on the local
hypervisor, then its actions also resubmit to table 33. Table 32 also
includes a fallback flow that resubmits to table 33 if there is no
diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml
index 383b5b7..b51b95d 100644
--- a/ovn/ovn-nb.xml
+++ b/ovn/ovn-nb.xml
@@ -283,6 +283,15 @@
to model direct connectivity to an existing network.
</dd>
+ <dt><code>localport</code></dt>
+ <dd>
+ A connection to a local VIF. Traffic that arrives on a
+ <code>localport</code> is never forwarded over a tunnel to another
+ chassis. These ports are present on every chassis and have the same
+ address in all of them. This is used to model connectivity to local
+ services that run on every hypervisor.
+ </dd>
+
<dt><code>l2gateway</code></dt>
<dd>
A connection to a physical network.
diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
index 387adb8..f3c3212 100644
--- a/ovn/ovn-sb.xml
+++ b/ovn/ovn-sb.xml
@@ -1802,6 +1802,11 @@ tcp.flags = RST;
connectivity to the corresponding physical network.
</dd>
+ <dt>localport</dt>
+ <dd>
+ Always empty. A localport port is present on every chassis.
+ </dd>
+
<dt>l3gateway</dt>
<dd>
The physical location of the L3 gateway. To successfully identify a
@@ -1882,6 +1887,15 @@ tcp.flags = RST;
to model direct connectivity to an existing network.
</dd>
+ <dt><code>localport</code></dt>
+ <dd>
+ A connection to a local VIF. Traffic that arrives on a
+ <code>localport</code> is never forwarded over a tunnel to another
+ chassis. These ports are present on every chassis and have the same
+ address in all of them. This is used to model connectivity to local
+ services that run on every hypervisor.
+ </dd>
+
<dt><code>l2gateway</code></dt>
<dd>
An L2 connection to a physical network. The chassis this
diff --git a/tests/ovn.at b/tests/ovn.at
index e67d33b..59beccf 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -7378,3 +7378,125 @@ OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
OVN_CLEANUP([hv1],[hv2])
AT_CLEANUP
+
+AT_SETUP([ovn -- 2 HVs, 1 lport/HV, localport ports])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+ovn_start
+
+ovn-nbctl ls-add ls1
+
+# Add localport to the switch
+ovn-nbctl lsp-add ls1 lp01
+ovn-nbctl lsp-set-addresses lp01 f0:00:00:00:00:01
+ovn-nbctl lsp-set-type lp01 localport
+
+net_add n1
+
+for i in 1 2; do
+ sim_add hv$i
+ as hv$i
+ ovs-vsctl add-br br-phys
+ ovn_attach n1 br-phys 192.168.0.$i
+ ovs-vsctl add-port br-int vif01 -- \
+ set Interface vif01 external-ids:iface-id=lp01 \
+ options:tx_pcap=hv${i}/vif01-tx.pcap \
+ options:rxq_pcap=hv${i}/vif01-rx.pcap \
+ ofport-request=${i}0
+
+ ovs-vsctl add-port br-int vif${i}1 -- \
+ set Interface vif${i}1 external-ids:iface-id=lp${i}1 \
+ options:tx_pcap=hv${i}/vif${i}1-tx.pcap \
+ options:rxq_pcap=hv${i}/vif${i}1-rx.pcap \
+ ofport-request=${i}1
+
+ ovn-nbctl lsp-add ls1 lp${i}1
+ ovn-nbctl lsp-set-addresses lp${i}1 f0:00:00:00:00:${i}1
+ ovn-nbctl lsp-set-port-security lp${i}1 f0:00:00:00:00:${i}1
+
+ OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up lp${i}1` = xup])
+done
+
+ovn-nbctl --wait=sb sync
+ovn-sbctl dump-flows
+
+ovn_populate_arp
+
+# Given the name of a logical port, prints the name of the hypervisor
+# on which it is located.
+vif_to_hv() {
+ echo hv${1%?}
+}
+#
+# test_packet INPORT DST SRC ETHTYPE EOUT LOUT DEFHV
+#
+# This shell function causes a packet to be received on INPORT. The packet's
+# content has Ethernet destination DST and source SRC (each exactly 12 hex
+# digits) and Ethernet type ETHTYPE (4 hex digits). INPORT is specified as
+# logical switch port numbers, e.g. 11 for vif11.
+#
+# EOUT is the end-to-end output port, that is, where the packet will end up
+# after possibly bouncing through one or more localnet ports. LOUT is the
+# logical output port, which might be a localnet port, as seen by ovn-trace
+# (which doesn't know what localnet ports are connected to and therefore can't
+# figure out the end-to-end answer).
+#
+# DEFHV is the default hypervisor from where the packet is going to be sent
+# if the source port is a localport.
+for i in 1 2; do
+ for j in 0 1; do
+ : > $i$j.expected
+ done
+done
+test_packet() {
+ local inport=$1 dst=$2 src=$3 eth=$4 eout=$5 lout=$6 defhv=$7
+ echo "$@"
+
+ # First try tracing the packet.
+ uflow="inport==\"lp$inport\" && eth.dst==$dst && eth.src==$src && eth.type==0x$eth"
+ if test $lout != drop; then
+ echo "output(\"$lout\");"
+ fi > expout
+ AT_CAPTURE_FILE([trace])
+ AT_CHECK([ovn-trace --all ls1 "$uflow" | tee trace | sed '1,/Minimal trace/d'], [0], [expout])
+
+ # Then actually send a packet, for an end-to-end test.
+ local packet=$(echo $dst$src | sed 's/://g')${eth}
+ hv=`vif_to_hv $inport`
+ # If hypervisor 0 (localport) use the defhv parameter
+ if test $hv == hv0; then
+ hv=$defhv
+ fi
+ vif=vif$inport
+ as $hv ovs-appctl netdev-dummy/receive $vif $packet
+ if test $eout != drop; then
+ echo $packet >> ${eout#lp}.expected
+ fi
+}
+
+
+# lp11 and lp21 are on different hypervisors
+test_packet 11 f0:00:00:00:00:21 f0:00:00:00:00:11 1121 lp21 lp21
+test_packet 21 f0:00:00:00:00:11 f0:00:00:00:00:21 2111 lp11 lp11
+
+# Both VIFs should be able to reach the localport on their own HV
+test_packet 11 f0:00:00:00:00:01 f0:00:00:00:00:11 1101 lp01 lp01
+test_packet 21 f0:00:00:00:00:01 f0:00:00:00:00:21 2101 lp01 lp01
+
+# Packet sent from localport on same hv should reach the vif
+test_packet 01 f0:00:00:00:00:11 f0:00:00:00:00:01 0111 lp11 lp11 hv1
+test_packet 01 f0:00:00:00:00:21 f0:00:00:00:00:01 0121 lp21 lp21 hv2
+
+# Packet sent from localport on different hv should be dropped
+test_packet 01 f0:00:00:00:00:21 f0:00:00:00:00:01 0121 drop lp21 hv1
+test_packet 01 f0:00:00:00:00:11 f0:00:00:00:00:01 0111 drop lp11 hv2
+
+# Now check the packets actually received against the ones expected.
+for i in 1 2; do
+ for j in 0 1; do
+ OVN_CHECK_PACKETS([hv$i/vif$i$j-tx.pcap], [$i$j.expected])
+ done
+done
+
+OVN_CLEANUP([hv1],[hv2])
+
+AT_CLEANUP
--
1.8.3.1
More information about the dev
mailing list