[ovs-dev] [PATCH v2 21/21] ovn: Change strategy for tunnel keys.

Ben Pfaff blp at nicira.com
Mon Aug 3 23:42:10 UTC 2015


Thank you for such a careful review!

I incorporated all of these comments and applied this to master.  Here's
the incremental (it omits the changes to this patch due to changes in
earlier patches, e.g. the Rule->Logical_Flow renaming):

diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
index 0f3a1fc..9246e61 100644
--- a/ovn/controller/lflow.c
+++ b/ovn/controller/lflow.c
@@ -34,6 +34,15 @@ VLOG_DEFINE_THIS_MODULE(lflow);
 static struct shash symtab;
 
 static void
+add_logical_register(struct shash *symtab, enum mf_field_id id)
+{
+    char name[8];
+
+    snprintf(name, sizeof name, "reg%d", id - MFF_REG0);
+    expr_symtab_add_field(symtab, name, id, NULL, false);
+}
+
+static void
 symtab_init(void)
 {
     shash_init(&symtab);
@@ -44,16 +53,10 @@ symtab_init(void)
     expr_symtab_add_string(&symtab, "inport", MFF_LOG_INPORT, NULL);
     expr_symtab_add_string(&symtab, "outport", MFF_LOG_OUTPORT, NULL);
 
-    /* Registers.  We omit the registers that would otherwise overlap the
-     * reserved fields. */
-    for (enum mf_field_id id = MFF_REG0; id < MFF_REG0 + FLOW_N_REGS; id++) {
-        if (id != MFF_LOG_INPORT && id != MFF_LOG_OUTPORT) {
-            char name[8];
-
-            snprintf(name, sizeof name, "reg%d", id - MFF_REG0);
-            expr_symtab_add_field(&symtab, name, id, NULL, false);
-        }
-    }
+    /* Logical registers. */
+#define MFF_LOG_REG(ID) add_logical_register(&symtab, ID);
+    MFF_LOG_REGS;
+#undef MFF_LOG_REG
 
     /* Data fields. */
     expr_symtab_add_field(&symtab, "eth.src", MFF_ETH_SRC, NULL, false);
@@ -147,8 +150,8 @@ struct logical_datapath {
 /* Contains "struct logical_datapath"s. */
 static struct hmap logical_datapaths = HMAP_INITIALIZER(&logical_datapaths);
 
-/* Finds and returns the logical_datapath with the given 'uuid', or NULL if
- * no such logical_datapath exists. */
+/* Finds and returns the logical_datapath for 'binding', or NULL if no such
+ * logical_datapath exists. */
 static struct logical_datapath *
 ldp_lookup(const struct sbrec_datapath_binding *binding)
 {
@@ -162,7 +165,7 @@ ldp_lookup(const struct sbrec_datapath_binding *binding)
     return NULL;
 }
 
-/* Creates a new logical_datapath with the given 'uuid'. */
+/* Creates a new logical_datapath for the given 'binding'. */
 static struct logical_datapath *
 ldp_create(const struct sbrec_datapath_binding *binding)
 {
@@ -263,10 +266,14 @@ lflow_run(struct controller_ctx *ctx, struct hmap *flow_table)
 
         /* Translate logical table ID to physical table ID. */
         bool ingress = !strcmp(lflow->pipeline, "ingress");
-        uint8_t phys_table = lflow->table_id + (ingress ? 16 : 48);
-        uint8_t next_phys_table = lflow->table_id < 15 ? phys_table + 1 : 0;
-        uint8_t output_phys_table = ingress ? 32 : 64;
-
+        uint8_t phys_table = lflow->table_id + (ingress
+                                                ? OFTABLE_LOG_INGRESS_PIPELINE
+                                                : OFTABLE_LOG_EGRESS_PIPELINE);
+        uint8_t next_phys_table
+            = lflow->table_id + 1 < LOG_PIPELINE_LEN ? phys_table + 1 : 0;
+        uint8_t output_phys_table = (ingress
+                                     ? OFTABLE_REMOTE_OUTPUT
+                                     : OFTABLE_LOG_TO_PHY);
         /* Translate OVN actions into OpenFlow actions.
          *
          * XXX Deny changes to 'outport' in egress pipeline. */
@@ -322,7 +329,7 @@ lflow_run(struct controller_ctx *ctx, struct hmap *flow_table)
                 m->match.flow.conj_id += conj_id_ofs;
             }
             if (!m->n) {
-            ofctrl_add_flow(flow_table, phys_table, lflow->priority,
+                ofctrl_add_flow(flow_table, phys_table, lflow->priority,
                                 &m->match, &ofpacts);
             } else {
                 uint64_t conj_stubs[64 / 8];
diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
index 953dbc0..59fe559 100644
--- a/ovn/controller/lflow.h
+++ b/ovn/controller/lflow.h
@@ -38,11 +38,37 @@ struct controller_ctx;
 struct hmap;
 struct uuid;
 
+/* OpenFlow table numbers.
+ *
+ * These are heavily documented in ovn-architecture(7), please update it if
+ * you make any changes. */
+#define OFTABLE_PHY_TO_LOG            0
+#define OFTABLE_LOG_INGRESS_PIPELINE 16 /* First of LOG_PIPELINE_LEN tables. */
+#define OFTABLE_REMOTE_OUTPUT        32
+#define OFTABLE_LOCAL_OUTPUT         33
+#define OFTABLE_DROP_LOOPBACK        34
+#define OFTABLE_LOG_EGRESS_PIPELINE  48 /* First of LOG_PIPELINE_LEN tables. */
+#define OFTABLE_LOG_TO_PHY           64
+
+/* The number of tables for the ingress and egress pipelines. */
+#define LOG_PIPELINE_LEN 16
+
 /* Logical fields. */
 #define MFF_LOG_DATAPATH MFF_METADATA /* Logical datapath (64 bits). */
 #define MFF_LOG_INPORT   MFF_REG6     /* Logical input port (32 bits). */
 #define MFF_LOG_OUTPORT  MFF_REG7     /* Logical output port (32 bits). */
 
+/* Logical registers.
+ *
+ * Make sure these don't overlap with the logical fields! */
+#define MFF_LOG_REGS \
+    MFF_LOG_REG(MFF_REG0) \
+    MFF_LOG_REG(MFF_REG1) \
+    MFF_LOG_REG(MFF_REG2) \
+    MFF_LOG_REG(MFF_REG3) \
+    MFF_LOG_REG(MFF_REG4) \
+    MFF_LOG_REG(MFF_REG5)
+
 void lflow_init(void);
 void lflow_run(struct controller_ctx *, struct hmap *flow_table);
 void lflow_destroy(void);
diff --git a/ovn/controller/physical.c b/ovn/controller/physical.c
index dc561a2..ff6cf57 100644
--- a/ovn/controller/physical.c
+++ b/ovn/controller/physical.c
@@ -269,8 +269,9 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
             }
 
             /* Resubmit to first logical ingress pipeline table. */
-            put_resubmit(16, &ofpacts);
-            ofctrl_add_flow(flow_table, 0, tag ? 150 : 100, &match, &ofpacts);
+            put_resubmit(OFTABLE_LOG_INGRESS_PIPELINE, &ofpacts);
+            ofctrl_add_flow(flow_table, OFTABLE_PHY_TO_LOG, tag ? 150 : 100,
+                            &match, &ofpacts);
 
             /* Table 33, priority 100.
              * =======================
@@ -289,10 +290,11 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
                           binding->tunnel_key);
 
             /* Resubmit to table 34. */
-            put_resubmit(34, &ofpacts);
-            ofctrl_add_flow(flow_table, 33, 100, &match, &ofpacts);
+            put_resubmit(OFTABLE_DROP_LOOPBACK, &ofpacts);
+            ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100, &match,
+                            &ofpacts);
 
-            /* Table 64, Priority 50.
+            /* Table 64, Priority 100.
              * =======================
              *
              * Deliver the packet to the local vif. */
@@ -325,7 +327,8 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
                 ofpact_put_STRIP_VLAN(&ofpacts);
                 put_stack(MFF_IN_PORT, ofpact_put_STACK_POP(&ofpacts));
             }
-            ofctrl_add_flow(flow_table, 64, 100, &match, &ofpacts);
+            ofctrl_add_flow(flow_table, OFTABLE_LOG_TO_PHY, 100,
+                            &match, &ofpacts);
         } else {
             /* Table 32, priority 100.
              * =======================
@@ -348,7 +351,8 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
 
             /* Output to tunnel. */
             ofpact_put_OUTPUT(&ofpacts)->port = ofport;
-            ofctrl_add_flow(flow_table, 32, 100, &match, &ofpacts);
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                            &match, &ofpacts);
         }
 
         /* Table 34, Priority 100.
@@ -360,9 +364,11 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
         match_set_metadata(&match, htonll(binding->datapath->tunnel_key));
         match_set_reg(&match, MFF_LOG_INPORT - MFF_REG0, binding->tunnel_key);
         match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, binding->tunnel_key);
-        ofctrl_add_flow(flow_table, 34, 100, &match, &ofpacts);
+        ofctrl_add_flow(flow_table, OFTABLE_DROP_LOOPBACK, 100,
+                        &match, &ofpacts);
     }
 
+    /* Handle output to multicast groups, in tables 32 and 33. */
     const struct sbrec_multicast_group *mc;
     SBREC_MULTICAST_GROUP_FOR_EACH (mc, ctx->ovnsb_idl) {
         struct sset remote_chassis = SSET_INITIALIZER(&remote_chassis);
@@ -372,6 +378,12 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
         match_set_metadata(&match, htonll(mc->datapath->tunnel_key));
         match_set_reg(&match, MFF_LOG_OUTPORT - MFF_REG0, mc->tunnel_key);
 
+        /* Go through all of the ports in the multicast group:
+         *
+         *    - For local ports, add actions to 'ofpacts' to set the output
+         *      port and resubmit.
+         *
+         *    - For remote ports, add the chassis to 'remote_chassis'. */
         ofpbuf_clear(&ofpacts);
         for (size_t i = 0; i < mc->n_ports; i++) {
             struct sbrec_port_binding *port = mc->ports[i];
@@ -386,17 +398,28 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
 
             if (simap_contains(&lport_to_ofport, port->logical_port)) {
                 put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
-                put_resubmit(34, &ofpacts);
+                put_resubmit(OFTABLE_DROP_LOOPBACK, &ofpacts);
             } else if (port->chassis) {
                 sset_add(&remote_chassis, port->chassis->name);
             }
         }
 
+        /* Table 33, priority 100.
+         * =======================
+         *
+         * Handle output to the local logical ports in the multicast group, if
+         * any. */
         bool local_ports = ofpacts.size > 0;
         if (local_ports) {
-            ofctrl_add_flow(flow_table, 33, 100, &match, &ofpacts);
+            ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
+                            &match, &ofpacts);
         }
 
+        /* Table 32, priority 100.
+         * =======================
+         *
+         * Handle output to the remote chassis in the multicast group, if
+         * any. */
         if (!sset_is_empty(&remote_chassis)) {
             ofpbuf_clear(&ofpacts);
 
@@ -419,9 +442,10 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
 
             if (ofpacts.size) {
                 if (local_ports) {
-                    put_resubmit(33, &ofpacts);
+                    put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
                 }
-                ofctrl_add_flow(flow_table, 32, 100, &match, &ofpacts);
+                ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                                &match, &ofpacts);
             }
         }
         sset_destroy(&remote_chassis);
@@ -454,21 +478,25 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
         } else {
             OVS_NOT_REACHED();
         }
-        put_resubmit(33, &ofpacts);
+        put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
 
-        ofctrl_add_flow(flow_table, 0, 100, &match, &ofpacts);
+        ofctrl_add_flow(flow_table, OFTABLE_PHY_TO_LOG, 100, &match, &ofpacts);
     }
 
     /* Table 34, Priority 0.
      * =======================
      *
-     * Resubmit packets that don't output to the ingress port to the logical
-     * egress pipeline. */
+     * Resubmit packets that don't output to the ingress port (already checked
+     * in table 33) to the logical egress pipeline, clearing the logical
+     * registers (for consistent behavior with packets that get tunneled). */
     struct match match;
     match_init_catchall(&match);
     ofpbuf_clear(&ofpacts);
-    put_resubmit(48, &ofpacts);
-    ofctrl_add_flow(flow_table, 34, 0, &match, &ofpacts);
+#define MFF_LOG_REG(ID) put_load(0, ID, 0, 32, &ofpacts);
+    MFF_LOG_REGS;
+#undef MFF_LOG_REG
+    put_resubmit(OFTABLE_LOG_EGRESS_PIPELINE, &ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_DROP_LOOPBACK, 0, &match, &ofpacts);
 
     ofpbuf_uninit(&ofpacts);
     simap_destroy(&lport_to_ofport);
diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
index 9fb86db..cf8e222 100644
--- a/ovn/northd/ovn-northd.c
+++ b/ovn/northd/ovn-northd.c
@@ -75,36 +75,36 @@ Options:\n\
     stream_usage("database", true, true, false);
 }
 
-struct key_node {
+struct tnlid_node {
     struct hmap_node hmap_node;
-    uint32_t key;
+    uint32_t tnlid;
 };
 
 static void
-keys_destroy(struct hmap *keys)
+destroy_tnlids(struct hmap *tnlids)
 {
-    struct key_node *node, *next;
-    HMAP_FOR_EACH_SAFE (node, next, hmap_node, keys) {
-        hmap_remove(keys, &node->hmap_node);
+    struct tnlid_node *node, *next;
+    HMAP_FOR_EACH_SAFE (node, next, hmap_node, tnlids) {
+        hmap_remove(tnlids, &node->hmap_node);
         free(node);
     }
-    hmap_destroy(keys);
+    hmap_destroy(tnlids);
 }
 
 static void
-add_key(struct hmap *set, uint32_t key)
+add_tnlid(struct hmap *set, uint32_t tnlid)
 {
-    struct key_node *node = xmalloc(sizeof *node);
-    hmap_insert(set, &node->hmap_node, hash_int(key, 0));
-    node->key = key;
+    struct tnlid_node *node = xmalloc(sizeof *node);
+    hmap_insert(set, &node->hmap_node, hash_int(tnlid, 0));
+    node->tnlid = tnlid;
 }
 
 static bool
-key_in_use(const struct hmap *set, uint32_t key)
+tnlid_in_use(const struct hmap *set, uint32_t tnlid)
 {
-    const struct key_node *node;
-    HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_int(key, 0), set) {
-        if (node->key == key) {
+    const struct tnlid_node *node;
+    HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_int(tnlid, 0), set) {
+        if (node->tnlid == tnlid) {
             return true;
         }
     }
@@ -112,23 +112,24 @@ key_in_use(const struct hmap *set, uint32_t key)
 }
 
 static uint32_t
-allocate_key(struct hmap *set, const char *name, uint32_t max, uint32_t *prev)
+allocate_tnlid(struct hmap *set, const char *name, uint32_t max,
+               uint32_t *hint)
 {
-    for (uint32_t key = *prev + 1; key != *prev;
-         key = key + 1 <= max ? key + 1 : 1) {
-        if (!key_in_use(set, key)) {
-            add_key(set, key);
-            *prev = key;
-            return key;
+    for (uint32_t tnlid = *hint + 1; tnlid != *hint;
+         tnlid = tnlid + 1 <= max ? tnlid + 1 : 1) {
+        if (!tnlid_in_use(set, tnlid)) {
+            add_tnlid(set, tnlid);
+            *hint = tnlid;
+            return tnlid;
         }
     }
 
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-    VLOG_WARN_RL(&rl, "all %s tunnel keys exhausted", name);
+    VLOG_WARN_RL(&rl, "all %s tunnel ids exhausted", name);
     return 0;
 }
 
-/* The 'key' comes from nb->header_.uuid or sb->external_ids's ' */
+/* The 'key' comes from nb->header_.uuid or sb->external_ids:logical-switch. */
 struct ovn_datapath {
     struct hmap_node key_node;  /* Index on 'key'. */
     struct uuid key;            /* nb->header_.uuid. */
@@ -138,14 +139,14 @@ struct ovn_datapath {
 
     struct ovs_list list;       /* In list of similar records. */
 
-    struct hmap port_keys;
-    uint32_t max_port_key;
+    struct hmap port_tnlids;
+    uint32_t port_key_hint;
 
     bool has_unknown;
 };
 
 static struct ovn_datapath *
-ovn_datapath_create(struct hmap *dp_map, const struct uuid *key,
+ovn_datapath_create(struct hmap *datapaths, const struct uuid *key,
                     const struct nbrec_logical_switch *nb,
                     const struct sbrec_datapath_binding *sb)
 {
@@ -153,30 +154,31 @@ ovn_datapath_create(struct hmap *dp_map, const struct uuid *key,
     od->key = *key;
     od->sb = sb;
     od->nb = nb;
-    hmap_init(&od->port_keys);
-    od->max_port_key = 0;
-    hmap_insert(dp_map, &od->key_node, uuid_hash(&od->key));
+    hmap_init(&od->port_tnlids);
+    od->port_key_hint = 0;
+    hmap_insert(datapaths, &od->key_node, uuid_hash(&od->key));
     return od;
 }
 
 static void
-ovn_datapath_destroy(struct hmap *dp_map, struct ovn_datapath *od)
+ovn_datapath_destroy(struct hmap *datapaths, struct ovn_datapath *od)
 {
     if (od) {
-        /* Don't remove od->list, it's only safe and only used within
-         * build_datapaths(). */
-        hmap_remove(dp_map, &od->key_node);
-        keys_destroy(&od->port_keys);
+        /* Don't remove od->list.  It is used within build_datapaths() as a
+         * private list and once we've exited that function it is not safe to
+         * use it. */
+        hmap_remove(datapaths, &od->key_node);
+        destroy_tnlids(&od->port_tnlids);
         free(od);
     }
 }
 
 static struct ovn_datapath *
-ovn_datapath_find(struct hmap *dp_map, const struct uuid *uuid)
+ovn_datapath_find(struct hmap *datapaths, const struct uuid *uuid)
 {
     struct ovn_datapath *od;
 
-    HMAP_FOR_EACH_WITH_HASH (od, key_node, uuid_hash(uuid), dp_map) {
+    HMAP_FOR_EACH_WITH_HASH (od, key_node, uuid_hash(uuid), datapaths) {
         if (uuid_equals(uuid, &od->key)) {
             return od;
         }
@@ -185,7 +187,7 @@ ovn_datapath_find(struct hmap *dp_map, const struct uuid *uuid)
 }
 
 static struct ovn_datapath *
-ovn_datapath_from_sbrec(struct hmap *dp_map,
+ovn_datapath_from_sbrec(struct hmap *datapaths,
                         const struct sbrec_datapath_binding *sb)
 {
     struct uuid key;
@@ -193,15 +195,15 @@ ovn_datapath_from_sbrec(struct hmap *dp_map,
     if (!smap_get_uuid(&sb->external_ids, "logical-switch", &key)) {
         return NULL;
     }
-    return ovn_datapath_find(dp_map, &key);
+    return ovn_datapath_find(datapaths, &key);
 }
 
 static void
-join_datapaths(struct northd_context *ctx, struct hmap *dp_map,
+join_datapaths(struct northd_context *ctx, struct hmap *datapaths,
                struct ovs_list *sb_only, struct ovs_list *nb_only,
                struct ovs_list *both)
 {
-    hmap_init(dp_map);
+    hmap_init(datapaths);
     list_init(sb_only);
     list_init(nb_only);
     list_init(both);
@@ -218,7 +220,7 @@ join_datapaths(struct northd_context *ctx, struct hmap *dp_map,
             continue;
         }
 
-        if (ovn_datapath_find(dp_map, &key)) {
+        if (ovn_datapath_find(datapaths, &key)) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
             VLOG_INFO_RL(&rl, "deleting Datapath_Binding "UUID_FMT" with "
                          "duplicate external-ids:logical-switch "UUID_FMT,
@@ -227,49 +229,51 @@ join_datapaths(struct northd_context *ctx, struct hmap *dp_map,
             continue;
         }
 
-        struct ovn_datapath *od = ovn_datapath_create(dp_map, &key, NULL, sb);
+        struct ovn_datapath *od = ovn_datapath_create(datapaths, &key,
+                                                      NULL, sb);
         list_push_back(sb_only, &od->list);
     }
 
     const struct nbrec_logical_switch *nb;
     NBREC_LOGICAL_SWITCH_FOR_EACH (nb, ctx->ovnnb_idl) {
-        struct ovn_datapath *od = ovn_datapath_find(dp_map, &nb->header_.uuid);
+        struct ovn_datapath *od = ovn_datapath_find(datapaths,
+                                                    &nb->header_.uuid);
         if (od) {
             od->nb = nb;
             list_remove(&od->list);
             list_push_back(both, &od->list);
         } else {
-            od = ovn_datapath_create(dp_map, &nb->header_.uuid, nb, NULL);
+            od = ovn_datapath_create(datapaths, &nb->header_.uuid, nb, NULL);
             list_push_back(nb_only, &od->list);
         }
     }
 }
 
 static uint32_t
-ovn_datapath_allocate_key(struct hmap *dp_keys)
+ovn_datapath_allocate_key(struct hmap *dp_tnlids)
 {
-    static uint32_t prev;
-    return allocate_key(dp_keys, "datapath", (1u << 24) - 1, &prev);
+    static uint32_t hint;
+    return allocate_tnlid(dp_tnlids, "datapath", (1u << 24) - 1, &hint);
 }
 
 static void
-build_datapaths(struct northd_context *ctx, struct hmap *dp_map)
+build_datapaths(struct northd_context *ctx, struct hmap *datapaths)
 {
-    struct ovs_list sb_dps, nb_dps, both_dps;
+    struct ovs_list sb_only, nb_only, both;
 
-    join_datapaths(ctx, dp_map, &sb_dps, &nb_dps, &both_dps);
+    join_datapaths(ctx, datapaths, &sb_only, &nb_only, &both);
 
-    if (!list_is_empty(&nb_dps)) {
-        /* First index the in-use datapath tunnel keys. */
-        struct hmap dp_keys = HMAP_INITIALIZER(&dp_keys);
+    if (!list_is_empty(&nb_only)) {
+        /* First index the in-use datapath tunnel IDs. */
+        struct hmap dp_tnlids = HMAP_INITIALIZER(&dp_tnlids);
         struct ovn_datapath *od;
-        LIST_FOR_EACH (od, list, &both_dps) {
-            add_key(&dp_keys, od->sb->tunnel_key);
+        LIST_FOR_EACH (od, list, &both) {
+            add_tnlid(&dp_tnlids, od->sb->tunnel_key);
         }
 
         /* Add southbound record for each unmatched northbound record. */
-        LIST_FOR_EACH (od, list, &nb_dps) {
-            uint16_t tunnel_key = ovn_datapath_allocate_key(&dp_keys);
+        LIST_FOR_EACH (od, list, &nb_only) {
+            uint32_t tunnel_key = ovn_datapath_allocate_key(&dp_tnlids);
             if (!tunnel_key) {
                 break;
             }
@@ -285,15 +289,15 @@ build_datapaths(struct northd_context *ctx, struct hmap *dp_map)
 
             sbrec_datapath_binding_set_tunnel_key(od->sb, tunnel_key);
         }
-        keys_destroy(&dp_keys);
+        destroy_tnlids(&dp_tnlids);
     }
 
     /* Delete southbound records without northbound matches. */
     struct ovn_datapath *od, *next;
-    LIST_FOR_EACH_SAFE (od, next, list, &sb_dps) {
+    LIST_FOR_EACH_SAFE (od, next, list, &sb_only) {
         list_remove(&od->list);
         sbrec_datapath_binding_delete(od->sb);
-        ovn_datapath_destroy(dp_map, od);
+        ovn_datapath_destroy(datapaths, od);
     }
 }
 
@@ -310,7 +314,7 @@ struct ovn_port {
 };
 
 static struct ovn_port *
-ovn_port_create(struct hmap *port_map, const char *key,
+ovn_port_create(struct hmap *ports, const char *key,
                 const struct nbrec_logical_port *nb,
                 const struct sbrec_port_binding *sb)
 {
@@ -318,27 +322,28 @@ ovn_port_create(struct hmap *port_map, const char *key,
     op->key = key;
     op->sb = sb;
     op->nb = nb;
-    hmap_insert(port_map, &op->key_node, hash_string(op->key, 0));
+    hmap_insert(ports, &op->key_node, hash_string(op->key, 0));
     return op;
 }
 
 static void
-ovn_port_destroy(struct hmap *port_map, struct ovn_port *port)
+ovn_port_destroy(struct hmap *ports, struct ovn_port *port)
 {
     if (port) {
-        /* Don't remove port->list, it's only safe and only used within
-         * build_ports(). */
-        hmap_remove(port_map, &port->key_node);
+        /* Don't remove port->list.  It is used within build_ports() as a
+         * private list and once we've exited that function it is not safe to
+         * use it. */
+        hmap_remove(ports, &port->key_node);
         free(port);
     }
 }
 
 static struct ovn_port *
-ovn_port_find(struct hmap *port_map, const char *name)
+ovn_port_find(struct hmap *ports, const char *name)
 {
     struct ovn_port *op;
 
-    HMAP_FOR_EACH_WITH_HASH (op, key_node, hash_string(name, 0), port_map) {
+    HMAP_FOR_EACH_WITH_HASH (op, key_node, hash_string(name, 0), ports) {
         if (!strcmp(op->key, name)) {
             return op;
         }
@@ -349,39 +354,39 @@ ovn_port_find(struct hmap *port_map, const char *name)
 static uint32_t
 ovn_port_allocate_key(struct ovn_datapath *od)
 {
-    return allocate_key(&od->port_keys, "port",
-                        (1u << 16) - 1, &od->max_port_key);
+    return allocate_tnlid(&od->port_tnlids, "port",
+                          (1u << 15) - 1, &od->port_key_hint);
 }
 
 static void
 join_logical_ports(struct northd_context *ctx,
-                   struct hmap *dp_map, struct hmap *port_map,
+                   struct hmap *datapaths, struct hmap *ports,
                    struct ovs_list *sb_only, struct ovs_list *nb_only,
                    struct ovs_list *both)
 {
-    hmap_init(port_map);
+    hmap_init(ports);
     list_init(sb_only);
     list_init(nb_only);
     list_init(both);
 
     const struct sbrec_port_binding *sb;
     SBREC_PORT_BINDING_FOR_EACH (sb, ctx->ovnsb_idl) {
-        struct ovn_port *op = ovn_port_create(port_map, sb->logical_port,
+        struct ovn_port *op = ovn_port_create(ports, sb->logical_port,
                                               NULL, sb);
         list_push_back(sb_only, &op->list);
     }
 
     struct ovn_datapath *od;
-    HMAP_FOR_EACH (od, key_node, dp_map) {
+    HMAP_FOR_EACH (od, key_node, datapaths) {
         for (size_t i = 0; i < od->nb->n_ports; i++) {
             const struct nbrec_logical_port *nb = od->nb->ports[i];
-            struct ovn_port *op = ovn_port_find(port_map, nb->name);
+            struct ovn_port *op = ovn_port_find(ports, nb->name);
             if (op) {
                 op->nb = nb;
                 list_remove(&op->list);
                 list_push_back(both, &op->list);
             } else {
-                op = ovn_port_create(port_map, nb->name, nb, NULL);
+                op = ovn_port_create(ports, nb->name, nb, NULL);
                 list_push_back(nb_only, &op->list);
             }
             op->od = od;
@@ -400,28 +405,27 @@ ovn_port_update_sbrec(const struct ovn_port *op)
 }
 
 static void
-build_ports(struct northd_context *ctx, struct hmap *dp_map,
-            struct hmap *port_map)
+build_ports(struct northd_context *ctx, struct hmap *datapaths,
+            struct hmap *ports)
 {
-    struct ovs_list sb_ports, nb_ports, both_ports;
+    struct ovs_list sb_only, nb_only, both;
 
-    join_logical_ports(ctx, dp_map, port_map,
-                       &sb_ports, &nb_ports, &both_ports);
+    join_logical_ports(ctx, datapaths, ports, &sb_only, &nb_only, &both);
 
     /* For logical ports that are in both databases, update the southbound
      * record based on northbound data.  Also index the in-use tunnel_keys. */
     struct ovn_port *op, *next;
-    LIST_FOR_EACH_SAFE (op, next, list, &both_ports) {
+    LIST_FOR_EACH_SAFE (op, next, list, &both) {
         ovn_port_update_sbrec(op);
 
-        add_key(&op->od->port_keys, op->sb->tunnel_key);
-        if (op->sb->tunnel_key > op->od->max_port_key) {
-            op->od->max_port_key = op->sb->tunnel_key;
+        add_tnlid(&op->od->port_tnlids, op->sb->tunnel_key);
+        if (op->sb->tunnel_key > op->od->port_key_hint) {
+            op->od->port_key_hint = op->sb->tunnel_key;
         }
     }
 
     /* Add southbound record for each unmatched northbound record. */
-    LIST_FOR_EACH_SAFE (op, next, list, &nb_ports) {
+    LIST_FOR_EACH_SAFE (op, next, list, &nb_only) {
         uint16_t tunnel_key = ovn_port_allocate_key(op->od);
         if (!tunnel_key) {
             continue;
@@ -435,10 +439,10 @@ build_ports(struct northd_context *ctx, struct hmap *dp_map,
     }
 
     /* Delete southbound records without northbound matches. */
-    LIST_FOR_EACH_SAFE(op, next, list, &sb_ports) {
+    LIST_FOR_EACH_SAFE(op, next, list, &sb_only) {
         list_remove(&op->list);
         sbrec_port_binding_delete(op->sb);
-        ovn_port_destroy(port_map, op);
+        ovn_port_destroy(ports, op);
     }
 }
 
@@ -465,7 +469,7 @@ multicast_group_equal(const struct multicast_group *a,
 
 /* Multicast group entry. */
 struct ovn_multicast {
-    struct hmap_node hmap_node; /* Index on 'datapath', 'key', */
+    struct hmap_node hmap_node; /* Index on 'datapath' and 'key'. */
     struct ovn_datapath *datapath;
     const struct multicast_group *group;
 
@@ -592,9 +596,9 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
 
 /* Adds a row with the specified contents to the Logical_Flow table. */
 static void
-lflow_add(struct hmap *lflow_map, struct ovn_datapath *od,
-          enum ovn_pipeline pipeline, uint8_t table_id, uint16_t priority,
-          const char *match, const char *actions)
+ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od,
+              enum ovn_pipeline pipeline, uint8_t table_id, uint16_t priority,
+              const char *match, const char *actions)
 {
     struct ovn_lflow *lflow = xmalloc(sizeof *lflow);
     ovn_lflow_init(lflow, od, pipeline, table_id, priority,
@@ -671,29 +675,29 @@ lport_is_enabled(const struct nbrec_logical_port *lport)
 /* Updates the Logical_Flow and Multicast_Group tables in the OVN_SB database,
  * constructing their contents based on the OVN_NB database. */
 static void
-build_lflow(struct northd_context *ctx, struct hmap *datapaths,
-            struct hmap *ports)
+build_lflows(struct northd_context *ctx, struct hmap *datapaths,
+             struct hmap *ports)
 {
     struct hmap lflows = HMAP_INITIALIZER(&lflows);
     struct hmap mcgroups = HMAP_INITIALIZER(&mcgroups);
 
-    /* Ingress table 0: Admission control framework. */
+    /* Ingress table 0: Admission control framework (priorities 0 and 100). */
     struct ovn_datapath *od;
     HMAP_FOR_EACH (od, key_node, datapaths) {
         /* Logical VLANs not supported. */
-        lflow_add(&lflows, od, P_IN, 0, 100, "vlan.present", "drop;");
+        ovn_lflow_add(&lflows, od, P_IN, 0, 100, "vlan.present", "drop;");
 
         /* Broadcast/multicast source address is invalid. */
-        lflow_add(&lflows, od, P_IN, 0, 100, "eth.src[40]", "drop;");
+        ovn_lflow_add(&lflows, od, P_IN, 0, 100, "eth.src[40]", "drop;");
 
         /* Port security flows have priority 50 (see below) and will continue
          * to the next table if packet source is acceptable. */
 
         /* Otherwise drop the packet. */
-        lflow_add(&lflows, od, P_IN, 0, 0, "1", "drop;");
+        ovn_lflow_add(&lflows, od, P_IN, 0, 0, "1", "drop;");
     }
 
-    /* Ingress table 0: Ingress port security. */
+    /* Ingress table 0: Ingress port security (priority 50). */
     struct ovn_port *op;
     HMAP_FOR_EACH (op, key_node, ports) {
         struct ds match = DS_EMPTY_INITIALIZER;
@@ -702,8 +706,8 @@ build_lflow(struct northd_context *ctx, struct hmap *datapaths,
         build_port_security("eth.src",
                             op->nb->port_security, op->nb->n_port_security,
                             &match);
-        lflow_add(&lflows, op->od, P_IN, 0, 50, ds_cstr(&match),
-                  lport_is_enabled(op->nb) ? "next;" : "drop;");
+        ovn_lflow_add(&lflows, op->od, P_IN, 0, 50, ds_cstr(&match),
+                      lport_is_enabled(op->nb) ? "next;" : "drop;");
         ds_destroy(&match);
     }
 
@@ -715,8 +719,8 @@ build_lflow(struct northd_context *ctx, struct hmap *datapaths,
         }
     }
     HMAP_FOR_EACH (od, key_node, datapaths) {
-        lflow_add(&lflows, od, P_IN, 1, 100, "eth.dst[40]",
-                  "outport = \""MC_FLOOD"\"; output;");
+        ovn_lflow_add(&lflows, od, P_IN, 1, 100, "eth.dst[40]",
+                      "outport = \""MC_FLOOD"\"; output;");
     }
 
     /* Ingress table 1: Destination lookup, unicast handling (priority 50), */
@@ -734,8 +738,8 @@ build_lflow(struct northd_context *ctx, struct hmap *datapaths,
                 ds_put_cstr(&actions, "outport = ");
                 json_string_escape(op->nb->name, &actions);
                 ds_put_cstr(&actions, "; output;");
-                lflow_add(&lflows, op->od, P_IN, 1, 50,
-                          ds_cstr(&match), ds_cstr(&actions));
+                ovn_lflow_add(&lflows, op->od, P_IN, 1, 50,
+                              ds_cstr(&match), ds_cstr(&actions));
                 ds_destroy(&actions);
                 ds_destroy(&match);
             } else if (!strcmp(op->nb->macs[i], "unknown")) {
@@ -753,12 +757,12 @@ build_lflow(struct northd_context *ctx, struct hmap *datapaths,
     /* Ingress table 1: Destination lookup for unknown MACs (priority 0). */
     HMAP_FOR_EACH (od, key_node, datapaths) {
         if (od->has_unknown) {
-            lflow_add(&lflows, od, P_IN, 1, 0, "1",
-                      "outport = \""MC_UNKNOWN"\"; output;");
+            ovn_lflow_add(&lflows, od, P_IN, 1, 0, "1",
+                          "outport = \""MC_UNKNOWN"\"; output;");
         }
     }
 
-    /* Egress table 0: ACLs. */
+    /* Egress table 0: ACLs (any priority). */
     HMAP_FOR_EACH (od, key_node, datapaths) {
         for (size_t i = 0; i < od->nb->n_acls; i++) {
             const struct nbrec_acl *acl = od->nb->acls[i];
@@ -767,18 +771,21 @@ build_lflow(struct northd_context *ctx, struct hmap *datapaths,
             action = (!strcmp(acl->action, "allow") ||
                       !strcmp(acl->action, "allow-related"))
                 ? "next;" : "drop;";
-            lflow_add(&lflows, od, P_OUT, 0, acl->priority, acl->match,
-                      action);
+            ovn_lflow_add(&lflows, od, P_OUT, 0, acl->priority, acl->match,
+                          action);
         }
     }
     HMAP_FOR_EACH (od, key_node, datapaths) {
-        lflow_add(&lflows, od, P_OUT, 0, 0, "1", "next;");
+        ovn_lflow_add(&lflows, od, P_OUT, 0, 0, "1", "next;");
     }
 
-    /* Egress table 1: Egress port security. */
+    /* Egress table 1: Egress port security multicast/broadcast (priority
+     * 100). */
     HMAP_FOR_EACH (od, key_node, datapaths) {
-        lflow_add(&lflows, od, P_OUT, 1, 100, "eth.dst[40]", "output;");
+        ovn_lflow_add(&lflows, od, P_OUT, 1, 100, "eth.dst[40]", "output;");
     }
+
+    /* Egress table 1: Egress port security (priority 50). */
     HMAP_FOR_EACH (op, key_node, ports) {
         struct ds match;
 
@@ -789,8 +796,8 @@ build_lflow(struct northd_context *ctx, struct hmap *datapaths,
                             op->nb->port_security, op->nb->n_port_security,
                             &match);
 
-        lflow_add(&lflows, op->od, P_OUT, 1, 50, ds_cstr(&match),
-                  lport_is_enabled(op->nb) ? "output;" : "drop;");
+        ovn_lflow_add(&lflows, op->od, P_OUT, 1, 50, ds_cstr(&match),
+                      lport_is_enabled(op->nb) ? "output;" : "drop;");
 
         ds_destroy(&match);
     }
@@ -869,7 +876,19 @@ ovnnb_db_changed(struct northd_context *ctx)
     struct hmap datapaths, ports;
     build_datapaths(ctx, &datapaths);
     build_ports(ctx, &datapaths, &ports);
-    build_lflow(ctx, &datapaths, &ports);
+    build_lflows(ctx, &datapaths, &ports);
+
+    struct ovn_datapath *dp, *next_dp;
+    HMAP_FOR_EACH_SAFE (dp, next_dp, key_node, &datapaths) {
+        ovn_datapath_destroy(&datapaths, dp);
+    }
+    hmap_destroy(&datapaths);
+
+    struct ovn_port *port, *next_port;
+    HMAP_FOR_EACH_SAFE (port, next_port, key_node, &ports) {
+        ovn_port_destroy(&ports, port);
+    }
+    hmap_destroy(&ports);
 }
 
 /*
diff --git a/ovn/ovn-architecture.7.xml b/ovn/ovn-architecture.7.xml
index fc62163..1b537f9 100644
--- a/ovn/ovn-architecture.7.xml
+++ b/ovn/ovn-architecture.7.xml
@@ -595,10 +595,10 @@
   <h2>Life Cycle of a Packet</h2>
 
   <p>
-    This section describes how a packet travels from ingress into OVN from one
-    virtual machine or container to another.  This description focuses on the
-    physical treatment of a packet; for a description of the logical life cycle
-    of a packet, please refer to the <code>Logical_Flow</code> table in
+    This section describes how a packet travels from one virtual machine or
+    container to another through OVN.  This description focuses on the physical
+    treatment of a packet; for a description of the logical life cycle of a
+    packet, please refer to the <code>Logical_Flow</code> table in
     <code>ovn-sb</code>(5).
   </p>
 
@@ -668,14 +668,14 @@
       <p>
         Packets that originate from a container nested within a VM are treated
         in a slightly different way.  The originating container can be
-        distinguished based on the VLAN ID, so the physical-to-logical
-        translation flows additionally match on VLAN ID and the actions strip
-        the VLAN header.  Following this step, OVN treats packets from
-        containers just like any other packets.
+        distinguished based on the VIF-specific VLAN ID, so the
+        physical-to-logical translation flows additionally match on VLAN ID and
+        the actions strip the VLAN header.  Following this step, OVN treats
+        packets from containers just like any other packets.
       </p>
 
       <p>
-        Table 0 also processes packets that arrive from other hypervisors.  It
+        Table 0 also processes packets that arrive from other chassis.  It
         distinguishes them from other packets by ingress port, which is a
         tunnel.  As with packets just entering the OVN pipeline, the actions
         annotate these packets with logical datapath and logical ingress port
@@ -716,7 +716,7 @@
         <li>
           If the pipeline can execute more than one <code>output</code> action,
           then each one is separately resubmitted to table 32.  This can be
-          used to send multiple copies to the packet to multiple ports.  (If
+          used to send multiple copies of the packet to multiple ports.  (If
           the packet was not modified between the <code>output</code> actions,
           and some of the copies are destined to the same hypervisor, then
           using a logical multicast output port would save bandwidth between
@@ -728,7 +728,7 @@
     <li>
       <p>
         OpenFlow tables 32 through 47 implement the <code>output</code> action
-        in the the logical ingress pipeline.  Specifically, table 32 handles
+        in the logical ingress pipeline.  Specifically, table 32 handles
         packets to remote hypervisors, table 33 handles packets to the local
         hypervisor, and table 34 discards packets whose logical ingress and
         egress port are the same.
@@ -814,15 +814,18 @@
     </li>
 
     <li>
-      15-bit logical ingress port identifier, from the <code>tunnel_key</code>
-      column in the OVN Southbound <code>Port_Binding</code> table.
+      15-bit logical ingress port identifier.  ID 0 is reserved for internal
+      use within OVN.  IDs 1 through 32767, inclusive, may be assigned to
+      logical ports (see the <code>tunnel_key</code> column in the OVN
+      Southbound <code>Port_Binding</code> table).
     </li>
 
     <li>
-      16-bit logical egress port identifier, from the <code>Port_Binding</code>
+      16-bit logical egress port identifier.  IDs 0 through 32767 have the same
+      meaning as for logical ingress ports.  IDs 32768 through 65535,
+      inclusive, may be assigned to logical multicast groups (see the
       <code>tunnel_key</code> column in the OVN Southbound
-      <code>Port_Binding</code> (as for the logical ingress port) or
-      <code>Multicast_Group</code> table.
+      <code>Multicast_Group</code> table).
     </li>
   </ul>
 
@@ -886,7 +889,7 @@
 
   <p>
     For connecting to gateways, in addition to Geneve and STT, OVN supports
-    VXLAN, because only VXLAN support is common on top-of-rack (ToR) switch.
+    VXLAN, because only VXLAN support is common on top-of-rack (ToR) switches.
     Currently, gateways have a feature set that matches the capabilities as
     defined by the VTEP schema, so fewer bits of metadata are necessary.  In
     the future, gateways that do not support encapsulations with large amounts
diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
index 6ac3656..7d58c50 100644
--- a/ovn/ovn-sb.xml
+++ b/ovn/ovn-sb.xml
@@ -305,8 +305,10 @@
       <code>inport</code> to <code>outport</code>; if they are equal, it treats
       the <code>output</code> as a no-op.  In the common case, where they are
       different, the packet enters the egress pipeline.  This transition to the
-      egress pipeline discards register data (<code>reg0</code>
-      ... <code>reg5</code>).
+      egress pipeline discards register data, e.g. <code>reg0</code>
+      ... <code>reg5</code>, to achieve uniform behavior regardless of whether
+      the egress pipeline is on a different hypervisor (because registers
+      aren't preserved across tunnel encapsulation).
     </p>
 
     <p>
@@ -338,13 +340,7 @@
     </p>
 
     <column name="logical_datapath">
-      The logical datapath to which the logical flow belongs.  A logical
-      datapath implements a logical pipeline among the ports in the <ref
-      table="Port_Binding"/> table associated with it.  In practice, the
-      pipeline in a given logical datapath implements either a logical switch
-      or a logical router, and <code>ovn-northd</code> reuses the UUIDs for
-      those logical entities from the <code>OVN_Northbound</code> for logical
-      datapaths.
+      The logical datapath to which the logical flow belongs.
     </column>
 
     <column name="pipeline">
@@ -645,20 +641,17 @@
       <p>
         Most of the symbols below have integer type.  Only <code>inport</code>
         and <code>outport</code> have string type.  <code>inport</code> names a
-        logical port.  Thus, its value is a <ref column="logical_port"/> names
-        from the <ref table="Port_Binding"/> and <ref table="Gateway"/> tables
-        in a logical flow's <ref column="logical_datapath"/>.
-        <code>outport</code> may name a logical port, as <code>inport</code>.
-        It may also name a logical multicast group defined in the <ref
-        table="Multicast_Group"/> table.
+        logical port.  Thus, its value is a <ref column="logical_port"/> name
+        from the <ref table="Port_Binding"/> or <ref table="Gateway"/> tables.
+        <code>outport</code> may name a logical port, as <code>inport</code>,
+        or a logical multicast group defined in the <ref
+        table="Multicast_Group"/> table.  For both symbols, only names within
+        the flow's logical datapath may be used.
       </p>
 
       <ul>
-        <li>
-          <code>reg0</code>...<code>reg5</code>
-          <code>xreg0</code>...<code>xreg2</code>
-        </li>
-        <li><code>inport</code> <code>outport</code> <code>queue</code></li>
+        <li><code>reg0</code>...<code>reg5</code></li>
+        <li><code>inport</code> <code>outport</code></li>
         <li><code>eth.src</code> <code>eth.dst</code> <code>eth.type</code></li>
         <li><code>vlan.tci</code> <code>vlan.vid</code> <code>vlan.pcp</code> <code>vlan.present</code></li>
         <li><code>ip.proto</code> <code>ip.dscp</code> <code>ip.ecn</code> <code>ip.ttl</code> <code>ip.frag</code></li>
@@ -698,7 +691,7 @@
         <dt><code>output;</code></dt>
         <dd>
           <p>
-	    In an <code>ingress</code> flow, this action executes the
+	    In the ingress pipeline, this action executes the
 	    <code>egress</code> pipeline as a subroutine.  If
 	    <code>outport</code> names a logical port, the egress pipeline
 	    executes once; if it is a multicast group, the egress pipeline runs
@@ -706,7 +699,7 @@
           </p>
 
           <p>
-            In an <code>egress</code> flow, this action performs the actual
+            In the egress pipeline, this action performs the actual
             output to the <code>outport</code> logical port.  (In the egress
             pipeline, <code>outport</code> never names a multicast group.)
           </p>
@@ -747,8 +740,9 @@
             Not all fields are modifiable (e.g. <code>eth.type</code> and
             <code>ip.proto</code> are read-only), and not all modifiable fields
             may be partially modified (e.g. <code>ip.ttl</code> must assigned
-            as a whole).  The <code>outport</code> field is modifiable for an
-            <code>ingress</code> flow but not an <code>egress</code> flow.
+            as a whole).  The <code>outport</code> field is modifiable in the
+            <code>ingress</code> pipeline but not in the <code>egress</code>
+            pipeline.
           </p>
 	</dd>
       </dl>
@@ -794,42 +788,51 @@
     <p>
       Each row in this table defines a logical multicast group numbered <ref
       column="tunnel_key"/> within <ref column="datapath"/>, whose logical
-      ports are listed in the <ref column="ports"/> column.  All of the ports
-      must be in the <ref column="datapath"/> logical datapath (but the
-      database schema cannot enforce this).
+      ports are listed in the <ref column="ports"/> column.
     </p>
 
-    <p>
-      Multicast group numbers and names are scoped within a logical datapath.
-    </p>
+    <column name="datapath">
+      The logical datapath in which the multicast group resides.
+    </column>
 
-    <p>
-      In the <ref table="Logical_Flow"/> table, multicast groups may be used
-      for output just as for individual logical ports, by assigning the group's
-      name to <code>outport</code>,
-    </p>
+    <column name="tunnel_key">
+      The value used to designate this logical egress port in tunnel
+      encapsulations.  An index forces the key to be unique within the <ref
+      column="datapath"/>.  The unusual range ensures that multicast group IDs
+      do not overlap with logical port IDs.
+    </column>
 
-    <p>
-      Multicast group names and logical port names share a single namespace and
-      thus should not overlap (but the database schema cannot enforce this).
-    </p>
+    <column name="name">
+      <p>
+        The logical multicast group's name.  An index forces the name to be
+        unique within the <ref column="datapath"/>.  Logical flows in the
+        ingress pipeline may output to the group just as for individual logical
+        ports, by assigning the group's name to <code>outport</code> and
+        executing an <code>output</code> action.
+      </p>
 
-    <p>
-      An index prevents this table from containing any two rows with the same
-      <ref column="datapath"/> and <ref column="tunnel_key"/> values or the
-      same <ref column="datapath"/> and <ref column="name"/> values.
-    </p>
+      <p>
+        Multicast group names and logical port names share a single namespace
+        and thus should not overlap (but the database schema cannot enforce
+        this).  To try to avoid conflicts, <code>ovn-northd</code> uses names
+        that begin with <code>_MC_</code>.
+      </p>
+    </column>
 
-    <column name="datapath"/>
-    <column name="tunnel_key"/>
-    <column name="name"/>
-    <column name="ports"/>
+    <column name="ports">
+      The logical ports included in the multicast group.  All of these ports
+      must be in the <ref column="datapath"/> logical datapath (but the
+      database schema cannot enforce this).
+    </column>
   </table>
 
   <table name="Datapath_Binding" title="Physical-Logical Datapath Bindings">
     <p>
       Each row in this table identifies physical bindings of a logical
-      datapath.
+      datapath.  A logical datapath implements a logical pipeline among the
+      ports in the <ref table="Port_Binding"/> table associated with it.  In
+      practice, the pipeline in a given logical datapath implements either a
+      logical switch or a logical router.
     </p>
 
     <column name="tunnel_key">
@@ -917,28 +920,12 @@
     <column name="tunnel_key">
       <p>
         A number that represents the logical port in the key (e.g. STT key or
-        Geneve TLV) field carried within tunnel protocol packets.  This avoids
-        wasting space for a whole UUID in tunneled packets.  It also allows OVN
-        to support encapsulations that cannot fit an entire UUID in their
-        tunnel keys (i.e. every encapsulation other than Geneve).
+        Geneve TLV) field carried within tunnel protocol packets.
       </p>
 
       <p>
         The tunnel ID must be unique within the scope of a logical datapath.
       </p>
-
-      <p>
-        Logical port tunnel IDs form a 16-bit space:
-      </p>
-
-      <ul>
-        <li>Tunnel ID 0 is reserved for internal use within OVN.</li>
-        <li>Tunnel IDs 1 through 32767, inclusive, may be assigned to logical
-        ports.</li>
-        <li>Tunnel IDs 32768 through 65535, inclusive, may be assigned to
-        logical multicast groups (see the <ref table="Multicast_Group"/>
-        table).</li>
-      </ul>
     </column>
 
     <column name="parent_port">



More information about the dev mailing list