[ovs-dev] [PATCH ovn v2 3/5] ovn-northd: Remove lflow_add_unique.
Han Zhou
hzhou at ovn.org
Fri Jun 11 19:35:28 UTC 2021
This patch removes the workaround used when adding multicast-group
related lflows, because the multicast group dependency problem was
fixed in ovn-controller in the previous commit.

This patch also removes the UniqueFlow/AnnotatedFlow usage in the
northd DDlog implementation for the same reason.
Signed-off-by: Han Zhou <hzhou at ovn.org>
---
northd/ovn-northd.c | 89 ++++++-----------
northd/ovn_northd.dl | 233 +++++++++++++++++++------------------------
tests/ovn-northd.at | 2 +-
3 files changed, 137 insertions(+), 187 deletions(-)
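Note for reviewers, not part of the patch: with the 'shared' flag removed,
every logical flow, including the multicast-group flows touched below, now
takes the same datapath-group sharing path in do_ovn_lflow_add(). The
minimal, self-contained C sketch below (a hypothetical toy_lflow_add() and
toy_lflow, standing in for the real ovn_lflow/hmap machinery) only
illustrates that behavior: two datapaths adding an identical flow end up
sharing one row.

    /* Toy model, not the real OVN code: identical (match, actions) rows
     * from different datapaths collapse into one row that carries a
     * datapath group. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_FLOWS 16
    #define MAX_DPS   8

    struct toy_lflow {
        const char *match;
        const char *actions;
        bool dp[MAX_DPS];           /* datapaths referencing this row */
    };

    static struct toy_lflow flows[MAX_FLOWS];
    static int n_flows;

    /* Hypothetical stand-in for the consolidated do_ovn_lflow_add():
     * always try to reuse an existing identical row. */
    static void
    toy_lflow_add(int datapath, const char *match, const char *actions)
    {
        for (int i = 0; i < n_flows; i++) {
            if (!strcmp(flows[i].match, match)
                && !strcmp(flows[i].actions, actions)) {
                flows[i].dp[datapath] = true;   /* extend the dp group */
                return;
            }
        }
        flows[n_flows].match = match;
        flows[n_flows].actions = actions;
        flows[n_flows].dp[datapath] = true;
        n_flows++;
    }

    int
    main(void)
    {
        /* Two switches emit the same multicast flood flow; with the
         * "unique" workaround gone it becomes a single shared row. */
        toy_lflow_add(0, "eth.mcast", "outport = \"_MC_flood\"; output;");
        toy_lflow_add(1, "eth.mcast", "outport = \"_MC_flood\"; output;");
        printf("rows: %d\n", n_flows);          /* prints "rows: 1" */
        return 0;
    }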
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 005c1fc86..411b14adf 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -3663,9 +3663,6 @@ build_ports(struct northd_context *ctx,
sset_destroy(&active_ha_chassis_grps);
}
-/* XXX: The 'ovn_lflow_add_unique*()' functions should be used for logical
- * flows using a multicast group.
- * See the comment on 'ovn_lflow_add_unique()' for details. */
struct multicast_group {
const char *name;
uint16_t key; /* OVN_MIN_MULTICAST...OVN_MAX_MULTICAST. */
@@ -4087,14 +4084,14 @@ static struct hashrow_locks lflow_locks;
* Version to use when locking is required.
*/
static void
-do_ovn_lflow_add(struct hmap *lflow_map, bool shared,
+do_ovn_lflow_add(struct hmap *lflow_map,
struct ovn_datapath *od,
uint32_t hash, struct ovn_lflow *lflow)
{
struct ovn_lflow *old_lflow;
- if (shared && use_logical_dp_groups) {
+ if (use_logical_dp_groups) {
old_lflow = ovn_lflow_find_by_lflow(lflow_map, lflow, hash);
if (old_lflow) {
ovn_lflow_destroy(NULL, lflow);
@@ -4111,7 +4108,7 @@ do_ovn_lflow_add(struct hmap *lflow_map, bool shared,
static void
ovn_lflow_add_at(struct hmap *lflow_map, struct ovn_datapath *od,
enum ovn_stage stage, uint16_t priority,
- const char *match, const char *actions, bool shared,
+ const char *match, const char *actions,
const struct ovsdb_idl_row *stage_hint, const char *where)
{
ovs_assert(ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od));
@@ -4131,40 +4128,21 @@ ovn_lflow_add_at(struct hmap *lflow_map, struct ovn_datapath *od,
if (use_logical_dp_groups && use_parallel_build) {
lock_hash_row(&lflow_locks, hash);
- do_ovn_lflow_add(lflow_map, shared, od, hash, lflow);
+ do_ovn_lflow_add(lflow_map, od, hash, lflow);
unlock_hash_row(&lflow_locks, hash);
} else {
- do_ovn_lflow_add(lflow_map, shared, od, hash, lflow);
+ do_ovn_lflow_add(lflow_map, od, hash, lflow);
}
}
/* Adds a row with the specified contents to the Logical_Flow table. */
#define ovn_lflow_add_with_hint(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, \
ACTIONS, STAGE_HINT) \
- ovn_lflow_add_at(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, true, \
+ ovn_lflow_add_at(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, \
STAGE_HINT, OVS_SOURCE_LOCATOR)
#define ovn_lflow_add(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS) \
- ovn_lflow_add_at(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, true, \
- NULL, OVS_SOURCE_LOCATOR)
-
-/* Adds a row with the specified contents to the Logical_Flow table.
- * Combining of this logical flow with already existing ones, e.g., by using
- * Logical Datapath Groups, is forbidden.
- *
- * XXX: ovn-controller assumes that a logical flow using multicast group always
- * comes after or in the same database update with the corresponding
- * multicast group. That will not be the case with datapath groups.
- * For this reason, the 'ovn_lflow_add_unique*()' functions should be used
- * for such logical flows.
- */
-#define ovn_lflow_add_unique_with_hint(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, \
- ACTIONS, STAGE_HINT) \
- ovn_lflow_add_at(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, false, \
- STAGE_HINT, OVS_SOURCE_LOCATOR)
-
-#define ovn_lflow_add_unique(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS) \
- ovn_lflow_add_at(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, false, \
+ ovn_lflow_add_at(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, \
NULL, OVS_SOURCE_LOCATOR)
static struct ovn_lflow *
@@ -6447,9 +6425,8 @@ build_lswitch_rport_arp_req_self_orig_flow(struct ovn_port *op,
ds_put_format(&match, "eth.src == %s && (arp.op == 1 || nd_ns)",
ds_cstr(&eth_src));
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, priority,
- ds_cstr(&match),
- "outport = \""MC_FLOOD_L2"\"; output;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, priority, ds_cstr(&match),
+ "outport = \""MC_FLOOD_L2"\"; output;");
sset_destroy(&all_eth_addrs);
ds_destroy(&eth_src);
@@ -6502,7 +6479,7 @@ build_lswitch_rport_arp_req_flow_for_ip(struct sset *ips,
ds_put_format(&actions, "clone {outport = %s; output; }; "
"outport = \""MC_FLOOD_L2"\"; output;",
patch_op->json_key);
- ovn_lflow_add_unique_with_hint(lflows, od, S_SWITCH_IN_L2_LKUP,
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_L2_LKUP,
priority, ds_cstr(&match),
ds_cstr(&actions), stage_hint);
} else {
@@ -6858,9 +6835,9 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *lflows)
"outport = get_fdb(eth.dst); next;");
if (od->has_unknown) {
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50,
- "outport == \"none\"",
- "outport = \""MC_UNKNOWN "\"; output;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50,
+ "outport == \"none\"",
+ "outport = \""MC_UNKNOWN "\"; output;");
} else {
ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50,
"outport == \"none\"", "drop;");
@@ -7304,26 +7281,26 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od,
}
ds_put_cstr(actions, "igmp;");
/* Punt IGMP traffic to controller. */
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
- "ip4 && ip.proto == 2", ds_cstr(actions));
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
+ "ip4 && ip.proto == 2", ds_cstr(actions));
/* Punt MLD traffic to controller. */
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
- "mldv1 || mldv2", ds_cstr(actions));
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
+ "mldv1 || mldv2", ds_cstr(actions));
/* Flood all IP multicast traffic destined to 224.0.0.X to all
* ports - RFC 4541, section 2.1.2, item 2.
*/
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 85,
- "ip4.mcast && ip4.dst == 224.0.0.0/24",
- "outport = \""MC_FLOOD"\"; output;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 85,
+ "ip4.mcast && ip4.dst == 224.0.0.0/24",
+ "outport = \""MC_FLOOD"\"; output;");
/* Flood all IPv6 multicast traffic destined to reserved
* multicast IPs (RFC 4291, 2.7.1).
*/
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 85,
- "ip6.mcast_flood",
- "outport = \""MC_FLOOD"\"; output;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 85,
+ "ip6.mcast_flood",
+ "outport = \""MC_FLOOD"\"; output;");
/* Forward uregistered IP multicast to routers with relay enabled
* and to any ports configured to flood IP multicast traffic.
@@ -7353,14 +7330,14 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od,
ds_put_cstr(actions, "drop;");
}
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 80,
- "ip4.mcast || ip6.mcast",
- ds_cstr(actions));
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 80,
+ "ip4.mcast || ip6.mcast",
+ ds_cstr(actions));
}
}
- ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 70, "eth.mcast",
- "outport = \""MC_FLOOD"\"; output;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 70, "eth.mcast",
+ "outport = \""MC_FLOOD"\"; output;");
}
}
@@ -7438,8 +7415,8 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
ds_put_format(actions, "outport = \"%s\"; output; ",
igmp_group->mcgroup.name);
- ovn_lflow_add_unique(lflows, igmp_group->datapath, S_SWITCH_IN_L2_LKUP,
- 90, ds_cstr(match), ds_cstr(actions));
+ ovn_lflow_add(lflows, igmp_group->datapath, S_SWITCH_IN_L2_LKUP,
+ 90, ds_cstr(match), ds_cstr(actions));
}
}
@@ -9980,15 +9957,15 @@ build_mcast_lookup_flows_for_lrouter(
}
ds_put_format(actions, "outport = \"%s\"; ip.ttl--; next;",
igmp_group->mcgroup.name);
- ovn_lflow_add_unique(lflows, od, S_ROUTER_IN_IP_ROUTING, 500,
- ds_cstr(match), ds_cstr(actions));
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 500,
+ ds_cstr(match), ds_cstr(actions));
}
/* If needed, flood unregistered multicast on statically configured
* ports. Otherwise drop any multicast traffic.
*/
if (od->mcast_info.rtr.flood_static) {
- ovn_lflow_add_unique(lflows, od, S_ROUTER_IN_IP_ROUTING, 450,
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 450,
"ip4.mcast || ip6.mcast",
"clone { "
"outport = \""MC_STATIC"\"; "
diff --git a/northd/ovn_northd.dl b/northd/ovn_northd.dl
index 3afa80a3b..1323b935e 100644
--- a/northd/ovn_northd.dl
+++ b/northd/ovn_northd.dl
@@ -1604,12 +1604,6 @@ function mFF_N_LOG_REGS() : bit<32> = 10
*
* - There's a setting "use_logical_dp_groups" that globally
* enables or disables this feature.
- *
- * - Some flows can't use this feature even if it's globally
- * enabled, due to ovn-controller bugs (see commit bfed224006750
- * "northd: Add support for Logical Datapath Groups."). Flows
- * that can't be shared must get added into AnnotatedFlow with
- * 'shared' set to 'false', instead of Flow.
*/
relation Flow(
@@ -1631,12 +1625,6 @@ UseLogicalDatapathGroups[false] :-
Unit(),
not nb in nb::NB_Global().
-relation AnnotatedFlow(f: Flow, shared: bool)
-AnnotatedFlow(f, b) :- UseLogicalDatapathGroups[b], Flow[f].
-
-relation UniqueFlow[Flow]
-AnnotatedFlow(f, false) :- UniqueFlow[f].
-
relation AggregatedFlow (
logical_datapaths: Set<uuid>,
stage: Stage,
@@ -1651,15 +1639,8 @@ AggregatedFlow(.logical_datapaths = g.to_set(),
.__match = __match,
.actions = actions,
.external_ids = external_ids) :-
- AnnotatedFlow(Flow{logical_datapath, stage, priority, __match, actions, external_ids}, true),
+ Flow(logical_datapath, stage, priority, __match, actions, external_ids),
var g = logical_datapath.group_by((stage, priority, __match, actions, external_ids)).
-AggregatedFlow(.logical_datapaths = set_singleton(logical_datapath),
- .stage = stage,
- .priority = priority,
- .__match = __match,
- .actions = actions,
- .external_ids = external_ids) :-
- AnnotatedFlow(Flow{logical_datapath, stage, priority, __match, actions, external_ids}, false).
for (f in AggregatedFlow()) {
var pipeline = if (f.stage.pipeline == Ingress) "ingress" else "egress" in
@@ -3813,42 +3794,42 @@ for (sw in &Switch(._uuid = ls_uuid, .mcast_cfg = mcast_cfg)
}
} in {
/* Punt IGMP traffic to controller. */
- UniqueFlow[Flow{.logical_datapath = ls_uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 100,
- .__match = "ip4 && ip.proto == 2",
- .actions = "${igmp_act}",
- .external_ids = map_empty()}];
+ Flow(.logical_datapath = ls_uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 100,
+ .__match = "ip4 && ip.proto == 2",
+ .actions = "${igmp_act}",
+ .external_ids = map_empty());
/* Punt MLD traffic to controller. */
- UniqueFlow[Flow{.logical_datapath = ls_uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 100,
- .__match = "mldv1 || mldv2",
- .actions = "${igmp_act}",
- .external_ids = map_empty()}];
+ Flow(.logical_datapath = ls_uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 100,
+ .__match = "mldv1 || mldv2",
+ .actions = "${igmp_act}",
+ .external_ids = map_empty());
/* Flood all IP multicast traffic destined to 224.0.0.X to
* all ports - RFC 4541, section 2.1.2, item 2.
*/
var flood = json_string_escape(mC_FLOOD().0) in
- UniqueFlow[Flow{.logical_datapath = ls_uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 85,
- .__match = "ip4.mcast && ip4.dst == 224.0.0.0/24",
- .actions = "outport = ${flood}; output;",
- .external_ids = map_empty()}];
+ Flow(.logical_datapath = ls_uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 85,
+ .__match = "ip4.mcast && ip4.dst == 224.0.0.0/24",
+ .actions = "outport = ${flood}; output;",
+ .external_ids = map_empty());
/* Flood all IPv6 multicast traffic destined to reserved
* multicast IPs (RFC 4291, 2.7.1).
*/
var flood = json_string_escape(mC_FLOOD().0) in
- UniqueFlow[Flow{.logical_datapath = ls_uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 85,
- .__match = "ip6.mcast_flood",
- .actions = "outport = ${flood}; output;",
- .external_ids = map_empty()}];
+ Flow(.logical_datapath = ls_uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 85,
+ .__match = "ip6.mcast_flood",
+ .actions = "outport = ${flood}; output;",
+ .external_ids = map_empty());
/* Forward uregistered IP multicast to routers with relay
* enabled and to any ports configured to flood IP
@@ -3882,13 +3863,13 @@ for (sw in &Switch(._uuid = ls_uuid, .mcast_cfg = mcast_cfg)
""
}
} in
- UniqueFlow[Flow{.logical_datapath = ls_uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 80,
- .__match = "ip4.mcast || ip6.mcast",
- .actions =
- "${relay_act}${static_act}${drop_act}",
- .external_ids = map_empty()}]
+ Flow(.logical_datapath = ls_uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 80,
+ .__match = "ip4.mcast || ip6.mcast",
+ .actions =
+ "${relay_act}${static_act}${drop_act}",
+ .external_ids = map_empty())
}
}
}
@@ -3936,14 +3917,14 @@ for (IgmpSwitchMulticastGroup(.address = address, .switch = sw)) {
""
}
} in
- UniqueFlow[Flow{.logical_datapath = sw._uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 90,
- .__match = "eth.mcast && ${ipX} && ${ipX}.dst == ${address}",
- .actions =
- "${relay_act} ${static_act} outport = \"${address}\"; "
- "output;",
- .external_ids = map_empty()}]
+ Flow(.logical_datapath = sw._uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 90,
+ .__match = "eth.mcast && ${ipX} && ${ipX}.dst == ${address}",
+ .actions =
+ "${relay_act} ${static_act} outport = \"${address}\"; "
+ "output;",
+ .external_ids = map_empty())
}
}
}
@@ -4010,12 +3991,12 @@ Flow(.logical_datapath = sp.sw._uuid,
* (priority 100). */
for (ls in nb::Logical_Switch) {
var mc_flood = json_string_escape(mC_FLOOD().0) in
- UniqueFlow[Flow{.logical_datapath = ls._uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 70,
- .__match = "eth.mcast",
- .actions = "outport = ${mc_flood}; output;",
- .external_ids = map_empty()}]
+ Flow(.logical_datapath = ls._uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 70,
+ .__match = "eth.mcast",
+ .actions = "outport = ${mc_flood}; output;",
+ .external_ids = map_empty())
}
/* Ingress table L2_LKUP: Destination lookup, unicast handling (priority 50).
@@ -4064,12 +4045,12 @@ function lrouter_port_ip_reachable(rp: Intern<RouterPort>, addr: v46_ip): bool {
};
false
}
-UniqueFlow[Flow{.logical_datapath = sw._uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 75,
- .__match = __match,
- .actions = actions,
- .external_ids = stage_hint(sp.lsp._uuid)}] :-
+Flow(.logical_datapath = sw._uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 75,
+ .__match = __match,
+ .actions = actions,
+ .external_ids = stage_hint(sp.lsp._uuid)) :-
sp in &SwitchPort(.sw = sw@&Switch{.has_non_router_port = true}, .peer = Some{rp}),
rp.is_enabled(),
var eth_src_set = {
@@ -4152,39 +4133,37 @@ function get_arp_forward_ips(rp: Intern<RouterPort>): (Set<string>, Set<string>)
* delivers to patch ports) but we're bypassing multicast_groups.
* (This is why we match against fLAGBIT_NOT_VXLAN() here.)
*/
-AnnotatedFlow(.f = Flow{.logical_datapath = sw._uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 80,
- .__match = fLAGBIT_NOT_VXLAN() ++
- " && arp.op == 1 && arp.tpa == { " ++
- all_ips_v4.to_vec().join(", ") ++ "}",
- .actions = if (sw.has_non_router_port) {
- "clone {outport = ${sp.json_name}; output; }; "
- "outport = ${mc_flood_l2}; output;"
- } else {
- "outport = ${sp.json_name}; output;"
- },
- .external_ids = stage_hint(sp.lsp._uuid)},
- .shared = not sw.has_non_router_port) :-
+Flow(.logical_datapath = sw._uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 80,
+ .__match = fLAGBIT_NOT_VXLAN() ++
+ " && arp.op == 1 && arp.tpa == { " ++
+ all_ips_v4.to_vec().join(", ") ++ "}",
+ .actions = if (sw.has_non_router_port) {
+ "clone {outport = ${sp.json_name}; output; }; "
+ "outport = ${mc_flood_l2}; output;"
+ } else {
+ "outport = ${sp.json_name}; output;"
+ },
+ .external_ids = stage_hint(sp.lsp._uuid)) :-
sp in &SwitchPort(.sw = sw, .peer = Some{rp}),
rp.is_enabled(),
(var all_ips_v4, _) = get_arp_forward_ips(rp),
not all_ips_v4.is_empty(),
var mc_flood_l2 = json_string_escape(mC_FLOOD_L2().0).
-AnnotatedFlow(.f = Flow{.logical_datapath = sw._uuid,
- .stage = s_SWITCH_IN_L2_LKUP(),
- .priority = 80,
- .__match = fLAGBIT_NOT_VXLAN() ++
- " && nd_ns && nd.target == { " ++
- all_ips_v6.to_vec().join(", ") ++ "}",
- .actions = if (sw.has_non_router_port) {
- "clone {outport = ${sp.json_name}; output; }; "
- "outport = ${mc_flood_l2}; output;"
- } else {
- "outport = ${sp.json_name}; output;"
- },
- .external_ids = stage_hint(sp.lsp._uuid)},
- .shared = not sw.has_non_router_port) :-
+Flow(.logical_datapath = sw._uuid,
+ .stage = s_SWITCH_IN_L2_LKUP(),
+ .priority = 80,
+ .__match = fLAGBIT_NOT_VXLAN() ++
+ " && nd_ns && nd.target == { " ++
+ all_ips_v6.to_vec().join(", ") ++ "}",
+ .actions = if (sw.has_non_router_port) {
+ "clone {outport = ${sp.json_name}; output; }; "
+ "outport = ${mc_flood_l2}; output;"
+ } else {
+ "outport = ${sp.json_name}; output;"
+ },
+ .external_ids = stage_hint(sp.lsp._uuid)) :-
sp in &SwitchPort(.sw = sw, .peer = Some{rp}),
rp.is_enabled(),
(_, var all_ips_v6) = get_arp_forward_ips(rp),
@@ -4280,22 +4259,17 @@ for (sw in &Switch(._uuid = ls_uuid)) {
.actions = "outport = get_fdb(eth.dst); next;",
.external_ids = map_empty());
- if (sw.has_unknown_ports) {
- var mc_unknown = json_string_escape(mC_UNKNOWN().0) in
- UniqueFlow[Flow{.logical_datapath = ls_uuid,
- .stage = s_SWITCH_IN_L2_UNKNOWN(),
- .priority = 50,
- .__match = "outport == \"none\"",
- .actions = "outport = ${mc_unknown}; output;",
- .external_ids = map_empty()}]
- } else {
- Flow(.logical_datapath = ls_uuid,
- .stage = s_SWITCH_IN_L2_UNKNOWN(),
- .priority = 50,
- .__match = "outport == \"none\"",
- .actions = "drop;",
- .external_ids = map_empty())
- };
+ Flow(.logical_datapath = ls_uuid,
+ .stage = s_SWITCH_IN_L2_UNKNOWN(),
+ .priority = 50,
+ .__match = "outport == \"none\"",
+ .actions = if (sw.has_unknown_ports) {
+ var mc_unknown = json_string_escape(mC_UNKNOWN().0);
+ "outport = ${mc_unknown}; output;"
+ } else {
+ "drop;"
+ },
+ .external_ids = map_empty());
Flow(.logical_datapath = ls_uuid,
.stage = s_SWITCH_IN_L2_UNKNOWN(),
@@ -6639,14 +6613,14 @@ for (IgmpRouterMulticastGroup(address, rtr, ports)) {
} in
Some{var ip} = ip46_parse(address) in
var ipX = ip.ipX() in
- UniqueFlow[Flow{.logical_datapath = rtr._uuid,
- .stage = s_ROUTER_IN_IP_ROUTING(),
- .priority = 500,
- .__match = "${ipX} && ${ipX}.dst == ${address} ",
- .actions =
- "${static_act}outport = ${json_string_escape(address)}; "
- "ip.ttl--; next;",
- .external_ids = map_empty()}]
+ Flow(.logical_datapath = rtr._uuid,
+ .stage = s_ROUTER_IN_IP_ROUTING(),
+ .priority = 500,
+ .__match = "${ipX} && ${ipX}.dst == ${address} ",
+ .actions =
+ "${static_act}outport = ${json_string_escape(address)}; "
+ "ip.ttl--; next;",
+ .external_ids = map_empty())
}
}
@@ -6665,13 +6639,12 @@ for (RouterMcastFloodPorts(rtr, flood_ports) if rtr.mcast_cfg.relay) {
} else {
"drop;"
} in
- AnnotatedFlow(.f = Flow{.logical_datapath = rtr._uuid,
- .stage = s_ROUTER_IN_IP_ROUTING(),
- .priority = 450,
- .__match = "ip4.mcast || ip6.mcast",
- .actions = actions,
- .external_ids = map_empty()},
- .shared = not flood_static)
+ Flow(.logical_datapath = rtr._uuid,
+ .stage = s_ROUTER_IN_IP_ROUTING(),
+ .priority = 450,
+ .__match = "ip4.mcast || ip6.mcast",
+ .actions = actions,
+ .external_ids = map_empty())
}
/* Logical router ingress table POLICY: Policy.
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 4692775ad..67531d203 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -2487,7 +2487,7 @@ check_row_count Logical_DP_Group 0
dnl Number of logical flows that depends on logical switch or multicast group.
dnl These will not be combined.
-n_flows_specific=$(ovn-sbctl --bare find Logical_Flow | grep -cE 'swp|_MC_')
+n_flows_specific=$(ovn-sbctl --bare find Logical_Flow | grep -cE 'swp')
echo "Number of specific flows: "${n_flows_specific}
dnl Both logical switches configured identically, so there should be same
--
2.30.2