[ovs-dev] [PATCH v2 ovn 2/2] ovn: Add MLD support.

Dumitru Ceara dceara at redhat.com
Mon Jan 27 13:20:46 UTC 2020


Extend the existing infrastructure used for IPv4 multicast to
IPv6 multicast:
- snoop MLDv1 & MLDv2 reports.
- if multicast querier is configured, generate MLDv2 queries.
- support IPv6 multicast relay.
- support static flood configuration for IPv6 multicast too.
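
For example, with this patch an MLD querier can be enabled on a logical
switch through the Northbound database (the switch name and addresses
below are taken from the new tests and are illustrative):

  ovn-nbctl set Logical_Switch sw2 \
      other_config:mcast_snoop="true" \
      other_config:mcast_querier="true" \
      other_config:mcast_query_interval=1 \
      other_config:mcast_eth_src="00:00:00:00:02:fe" \
      other_config:mcast_ip6_src="2000::fe"

IPv6 multicast relay is enabled on a logical router in the same way as
for IPv4:

  ovn-nbctl set logical_router rtr options:mcast_relay="true"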

Acked-by: Mark Michelson <mmichels at redhat.com>
Signed-off-by: Dumitru Ceara <dceara at redhat.com>
---
 NEWS                    |    1 
 controller/pinctrl.c    |  359 +++++++++++++++++++++++------
 lib/logical-fields.c    |   33 +++
 lib/ovn-l7.h            |   97 ++++++++
 northd/ovn-northd.8.xml |   22 ++
 northd/ovn-northd.c     |  103 ++++++--
 ovn-nb.xml              |    4 
 ovn-sb.ovsschema        |    5 
 ovn-sb.xml              |    5 
 tests/ovn.at            |  579 +++++++++++++++++++++++++++++++++++++++++++++++
 10 files changed, 1099 insertions(+), 109 deletions(-)
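
Note: learned MLD groups reuse the existing Southbound IGMP_Group table,
so they can be inspected and flushed with the same commands used for
IGMP groups, e.g. (the switch name is illustrative):

  ovn-sbctl find IGMP_Group
  ovn-sbctl ip-multicast-flush sw1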

diff --git a/NEWS b/NEWS
index 9e7d601..2b8cd6f 100644
--- a/NEWS
+++ b/NEWS
@@ -5,6 +5,7 @@ Post-OVS-v2.12.0
    - Added IPv6 NAT support for OVN routers.
    - Added Stateless Floating IP support in OVN.
    - Added Forwarding Group support in OVN.
+   - Added support for MLD Snooping and MLD Querier.
 
 v2.12.0 - 03 Sep 2019
 ---------------------
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 452ca8a..f7e31f9 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -263,7 +263,7 @@ static void ip_mcast_sync(
     struct ovsdb_idl_index *sbrec_igmp_groups,
     struct ovsdb_idl_index *sbrec_ip_multicast)
     OVS_REQUIRES(pinctrl_mutex);
-static void pinctrl_ip_mcast_handle_igmp(
+static void pinctrl_ip_mcast_handle(
     struct rconn *swconn,
     const struct flow *ip_flow,
     struct dp_packet *pkt_in,
@@ -1908,8 +1908,8 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
                            &userdata);
         break;
     case ACTION_OPCODE_IGMP:
-        pinctrl_ip_mcast_handle_igmp(swconn, &headers, &packet,
-                                     &pin.flow_metadata, &userdata);
+        pinctrl_ip_mcast_handle(swconn, &headers, &packet, &pin.flow_metadata,
+                                &userdata);
         break;
 
     case ACTION_OPCODE_PUT_ARP:
@@ -3198,32 +3198,55 @@ pinctrl_compose_ipv4(struct dp_packet *packet, struct eth_addr eth_src,
     packet->packet_type = htonl(PT_ETH);
 
     struct eth_header *eh = dp_packet_put_zeros(packet, sizeof *eh);
-    eh->eth_dst = eth_dst;
-    eh->eth_src = eth_src;
-
     struct ip_header *nh = dp_packet_put_zeros(packet, sizeof *nh);
 
+    eh->eth_dst = eth_dst;
+    eh->eth_src = eth_src;
     eh->eth_type = htons(ETH_TYPE_IP);
     dp_packet_set_l3(packet, nh);
     nh->ip_ihl_ver = IP_IHL_VER(5, 4);
-    nh->ip_tot_len = htons(sizeof(struct ip_header) + ip_payload_len);
+    nh->ip_tot_len = htons(sizeof *nh + ip_payload_len);
     nh->ip_tos = IP_DSCP_CS6;
     nh->ip_proto = ip_proto;
     nh->ip_frag_off = htons(IP_DF);
 
-    /* Setting tos and ttl to 0 and 1 respectively. */
     packet_set_ipv4(packet, ipv4_src, ipv4_dst, 0, ttl);
 
     nh->ip_csum = 0;
     nh->ip_csum = csum(nh, sizeof *nh);
 }
 
+static void
+pinctrl_compose_ipv6(struct dp_packet *packet, struct eth_addr eth_src,
+                     struct eth_addr eth_dst, struct in6_addr *ipv6_src,
+                     struct in6_addr *ipv6_dst, uint8_t ip_proto, uint8_t ttl,
+                     uint16_t ip_payload_len)
+{
+    dp_packet_clear(packet);
+    packet->packet_type = htonl(PT_ETH);
+
+    struct eth_header *eh = dp_packet_put_zeros(packet, sizeof *eh);
+    struct ip6_hdr *nh = dp_packet_put_zeros(packet, sizeof *nh);
+
+    eh->eth_dst = eth_dst;
+    eh->eth_src = eth_src;
+    eh->eth_type = htons(ETH_TYPE_IPV6);
+    dp_packet_set_l3(packet, nh);
+
+    nh->ip6_vfc = 0x60;
+    nh->ip6_nxt = ip_proto;
+    nh->ip6_plen = htons(ip_payload_len);
+
+    packet_set_ipv6(packet, ipv6_src, ipv6_dst, 0, 0, ttl);
+}
+
 /*
  * Multicast snooping configuration.
  */
 struct ip_mcast_snoop_cfg {
     bool enabled;
-    bool querier_enabled;
+    bool querier_v4_enabled;
+    bool querier_v6_enabled;
 
     uint32_t table_size;       /* Max number of allowed multicast groups. */
     uint32_t idle_time_s;      /* Idle timeout for multicast groups. */
@@ -3231,10 +3254,19 @@ struct ip_mcast_snoop_cfg {
     uint32_t query_max_resp_s; /* Multicast query max-response field. */
     uint32_t seq_no;           /* Used for flushing learnt groups. */
 
-    struct eth_addr query_eth_src; /* Src ETH address used for queries. */
-    struct eth_addr query_eth_dst; /* Dst ETH address used for queries. */
-    ovs_be32 query_ipv4_src;       /* Src IPv4 address used for queries. */
-    ovs_be32 query_ipv4_dst;       /* Dsc IPv4 address used for queries. */
+    struct eth_addr query_eth_src;    /* Src ETH address used for queries. */
+    struct eth_addr query_eth_v4_dst; /* Dst ETH address used for IGMP
+                                       * queries.
+                                       */
+    struct eth_addr query_eth_v6_dst; /* Dst ETH address used for MLD
+                                       * queries.
+                                       */
+
+    ovs_be32 query_ipv4_src; /* Src IPv4 address used for queries. */
+    ovs_be32 query_ipv4_dst; /* Dst IPv4 address used for queries. */
+
+    struct in6_addr query_ipv6_src; /* Src IPv6 address used for queries. */
+    struct in6_addr query_ipv6_dst; /* Dst IPv6 address used for queries. */
 };
 
 /*
@@ -3264,6 +3296,9 @@ struct ip_mcast_snoop_state {
 /* Only default vlan supported for now. */
 #define IP_MCAST_VLAN 1
 
+/* MLD router-alert IPv6 extension header value. */
+static const uint8_t mld_router_alert[4] = {0x05, 0x02, 0x00, 0x00};
+
 /* Multicast snooping information stored independently by datapath key.
  * Protected by pinctrl_mutex. pinctrl_handler has RW access and pinctrl_main
  * has RO access.
@@ -3277,7 +3312,7 @@ static struct ovs_list mcast_query_list;
 
 /* Multicast config information stored independently by datapath key.
  * Protected by pinctrl_mutex. pinctrl_handler has RO access and pinctrl_main
- * has RW access. Read accesses from pinctrl_ip_mcast_handle_igmp() can be
+ * has RW access. Read accesses from pinctrl_ip_mcast_handle() can be
  * performed without taking the lock as they are executed in the pinctrl_main
  * thread.
  */
@@ -3292,8 +3327,10 @@ ip_mcast_snoop_cfg_load(struct ip_mcast_snoop_cfg *cfg,
     memset(cfg, 0, sizeof *cfg);
     cfg->enabled =
         (ip_mcast->enabled && ip_mcast->enabled[0]);
-    cfg->querier_enabled =
+    bool querier_enabled =
         (cfg->enabled && ip_mcast->querier && ip_mcast->querier[0]);
+    cfg->querier_v4_enabled = querier_enabled;
+    cfg->querier_v6_enabled = querier_enabled;
 
     if (ip_mcast->table_size) {
         cfg->table_size = ip_mcast->table_size[0];
@@ -3324,30 +3361,56 @@ ip_mcast_snoop_cfg_load(struct ip_mcast_snoop_cfg *cfg,
 
     cfg->seq_no = ip_mcast->seq_no;
 
-    if (cfg->querier_enabled) {
+    if (querier_enabled) {
         /* Try to parse the source ETH address. */
         if (!ip_mcast->eth_src ||
                 !eth_addr_from_string(ip_mcast->eth_src,
                                       &cfg->query_eth_src)) {
             VLOG_WARN_RL(&rl,
                          "IGMP Querier enabled with invalid ETH src address");
-            /* Failed to parse the IPv4 source address. Disable the querier. */
-            cfg->querier_enabled = false;
+            /* Failed to parse the ETH source address. Disable the querier. */
+            cfg->querier_v4_enabled = false;
+            cfg->querier_v6_enabled = false;
         }
 
-        /* Try to parse the source IP address. */
-        if (!ip_mcast->ip4_src ||
-                !ip_parse(ip_mcast->ip4_src, &cfg->query_ipv4_src)) {
-            VLOG_WARN_RL(&rl,
-                         "IGMP Querier enabled with invalid IPv4 src address");
-            /* Failed to parse the IPv4 source address. Disable the querier. */
-            cfg->querier_enabled = false;
+        /* Try to parse the source IPv4 address. */
+        if (cfg->querier_v4_enabled) {
+            if (!ip_mcast->ip4_src ||
+                    !ip_parse(ip_mcast->ip4_src, &cfg->query_ipv4_src)) {
+                VLOG_WARN_RL(&rl,
+                            "IGMP Querier enabled with invalid IPv4 "
+                            "src address");
+                /* Failed to parse the IPv4 source address. Disable the
+                 * querier.
+                 */
+                cfg->querier_v4_enabled = false;
+            }
+
+            /* IGMP queries must be sent to 224.0.0.1. */
+            cfg->query_eth_v4_dst =
+                (struct eth_addr)ETH_ADDR_C(01, 00, 5E, 00, 00, 01);
+            cfg->query_ipv4_dst = htonl(0xe0000001);
         }
 
-        /* IGMP queries must be sent to 224.0.0.1. */
-        cfg->query_eth_dst =
-            (struct eth_addr)ETH_ADDR_C(01, 00, 5E, 00, 00, 01);
-        cfg->query_ipv4_dst = htonl(0xe0000001);
+        /* Try to parse the source IPv6 address. */
+        if (cfg->querier_v6_enabled) {
+            if (!ip_mcast->ip6_src ||
+                    !ipv6_parse(ip_mcast->ip6_src, &cfg->query_ipv6_src)) {
+                VLOG_WARN_RL(&rl,
+                            "MLD Querier enabled with invalid IPv6 "
+                            "src address");
+                /* Failed to parse the IPv6 source address. Disable the
+                 * querier.
+                 */
+                cfg->querier_v6_enabled = false;
+            }
+
+            /* MLD queries must be sent to ALL-HOSTS (ff02::1). */
+            cfg->query_eth_v6_dst =
+                (struct eth_addr)ETH_ADDR_C(33, 33, 00, 00, 00, 00);
+            cfg->query_ipv6_dst =
+                (struct in6_addr)IN6ADDR_ALL_HOSTS_INIT;
+        }
     }
 }
 
@@ -3455,9 +3518,15 @@ ip_mcast_snoop_configure(struct ip_mcast_snoop *ip_ms,
             ip_mcast_snoop_flush(ip_ms);
         }
 
-        if (ip_ms->cfg.querier_enabled && !cfg->querier_enabled) {
+        bool old_querier_enabled =
+            (ip_ms->cfg.querier_v4_enabled || ip_ms->cfg.querier_v6_enabled);
+
+        bool querier_enabled =
+            (cfg->querier_v4_enabled || cfg->querier_v6_enabled);
+
+        if (old_querier_enabled && !querier_enabled) {
             ovs_list_remove(&ip_ms->query_node);
-        } else if (!ip_ms->cfg.querier_enabled && cfg->querier_enabled) {
+        } else if (!old_querier_enabled && querier_enabled) {
             ovs_list_push_back(&mcast_query_list, &ip_ms->query_node);
         }
     } else {
@@ -3526,7 +3595,7 @@ ip_mcast_snoop_remove(struct ip_mcast_snoop *ip_ms)
 {
     hmap_remove(&mcast_snoop_map, &ip_ms->hmap_node);
 
-    if (ip_ms->cfg.querier_enabled) {
+    if (ip_ms->cfg.querier_v4_enabled || ip_ms->cfg.querier_v6_enabled) {
         ovs_list_remove(&ip_ms->query_node);
     }
 
@@ -3664,7 +3733,8 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
      * - or the group has expired.
      */
     SBREC_IGMP_GROUP_FOR_EACH_BYINDEX (sbrec_igmp, sbrec_igmp_groups) {
-        ovs_be32 group_addr;
+        ovs_be32 group_v4_addr;
+        struct in6_addr group_addr;
 
         if (!sbrec_igmp->datapath) {
             continue;
@@ -3681,14 +3751,15 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
             continue;
         }
 
-        if (!ip_parse(sbrec_igmp->address, &group_addr)) {
+        if (ip_parse(sbrec_igmp->address, &group_v4_addr)) {
+            group_addr = in6_addr_mapped_ipv4(group_v4_addr);
+        } else if (!ipv6_parse(sbrec_igmp->address, &group_addr)) {
             continue;
         }
 
         ovs_rwlock_rdlock(&ip_ms->ms->rwlock);
         struct mcast_group *mc_group =
-            mcast_snooping_lookup4(ip_ms->ms, group_addr,
-                                   IP_MCAST_VLAN);
+            mcast_snooping_lookup(ip_ms->ms, &group_addr, IP_MCAST_VLAN);
 
         if (!mc_group || ovs_list_is_empty(&mc_group->bundle_lru)) {
             igmp_group_delete(sbrec_igmp);
@@ -3742,54 +3813,29 @@ ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
     }
 }
 
-static void
-pinctrl_ip_mcast_handle_igmp(struct rconn *swconn OVS_UNUSED,
+static bool
+pinctrl_ip_mcast_handle_igmp(struct ip_mcast_snoop *ip_ms,
                              const struct flow *ip_flow,
                              struct dp_packet *pkt_in,
-                             const struct match *md,
-                             struct ofpbuf *userdata OVS_UNUSED)
-    OVS_NO_THREAD_SAFETY_ANALYSIS
+                             void *port_key_data)
 {
-    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-
-    /* This action only works for IP packets, and the switch should only send
-     * us IP packets this way, but check here just to be sure.
-     */
-    if (ip_flow->dl_type != htons(ETH_TYPE_IP)) {
-        VLOG_WARN_RL(&rl,
-                     "IGMP action on non-IP packet (eth_type 0x%"PRIx16")",
-                     ntohs(ip_flow->dl_type));
-        return;
-    }
-
-    int64_t dp_key = ntohll(md->flow.metadata);
-    uint32_t port_key = md->flow.regs[MFF_LOG_INPORT - MFF_REG0];
-
     const struct igmp_header *igmp;
     size_t offset;
 
     offset = (char *) dp_packet_l4(pkt_in) - (char *) dp_packet_data(pkt_in);
     igmp = dp_packet_at(pkt_in, offset, IGMP_HEADER_LEN);
     if (!igmp || csum(igmp, dp_packet_l4_size(pkt_in)) != 0) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
         VLOG_WARN_RL(&rl, "multicast snooping received bad IGMP checksum");
-        return;
+        return false;
     }
 
     ovs_be32 ip4 = ip_flow->igmp_group_ip4;
-
-    struct ip_mcast_snoop *ip_ms = ip_mcast_snoop_find(dp_key);
-    if (!ip_ms || !ip_ms->cfg.enabled) {
-        /* IGMP snooping is not configured or is disabled. */
-        return;
-    }
-
-    void *port_key_data = (void *)(uintptr_t)port_key;
-
     bool group_change = false;
 
+    /* Only default VLAN is supported for now. */
     ovs_rwlock_wrlock(&ip_ms->ms->rwlock);
     switch (ntohs(ip_flow->tp_src)) {
-     /* Only default VLAN is supported for now. */
     case IGMP_HOST_MEMBERSHIP_REPORT:
     case IGMPV2_HOST_MEMBERSHIP_REPORT:
         group_change =
@@ -3816,27 +3862,118 @@ pinctrl_ip_mcast_handle_igmp(struct rconn *swconn OVS_UNUSED,
         break;
     }
     ovs_rwlock_unlock(&ip_ms->ms->rwlock);
+    return group_change;
+}
 
-    if (group_change) {
-        notify_pinctrl_main();
+static bool
+pinctrl_ip_mcast_handle_mld(struct ip_mcast_snoop *ip_ms,
+                            const struct flow *ip_flow,
+                            struct dp_packet *pkt_in,
+                            void *port_key_data)
+{
+    const struct mld_header *mld;
+    size_t offset;
+
+    offset = (char *) dp_packet_l4(pkt_in) - (char *) dp_packet_data(pkt_in);
+    mld = dp_packet_at(pkt_in, offset, MLD_HEADER_LEN);
+
+    if (!mld || packet_csum_upperlayer6(dp_packet_l3(pkt_in),
+                                        mld, IPPROTO_ICMPV6,
+                                        dp_packet_l4_size(pkt_in)) != 0) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl,
+                     "multicast snooping received bad MLD checksum");
+        return false;
     }
+
+    bool group_change = false;
+
+    /* Only default VLAN is supported for now. */
+    ovs_rwlock_wrlock(&ip_ms->ms->rwlock);
+    switch (ntohs(ip_flow->tp_src)) {
+    case MLD_QUERY:
+        /* Shouldn't be receiving any of these since we are the multicast
+         * router. Store them for now.
+         */
+        if (!ipv6_addr_equals(&ip_flow->ipv6_src, &in6addr_any)) {
+            group_change =
+                mcast_snooping_add_mrouter(ip_ms->ms, IP_MCAST_VLAN,
+                                           port_key_data);
+        }
+        break;
+    case MLD_REPORT:
+    case MLD_DONE:
+    case MLD2_REPORT:
+        group_change =
+            mcast_snooping_add_mld(ip_ms->ms, pkt_in, IP_MCAST_VLAN,
+                                   port_key_data);
+        break;
+    }
+    ovs_rwlock_unlock(&ip_ms->ms->rwlock);
+    return group_change;
 }
 
-static long long int
-ip_mcast_querier_send(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
-                      long long int current_time)
+static void
+pinctrl_ip_mcast_handle(struct rconn *swconn OVS_UNUSED,
+                        const struct flow *ip_flow,
+                        struct dp_packet *pkt_in,
+                        const struct match *md,
+                        struct ofpbuf *userdata OVS_UNUSED)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
-    if (current_time < ip_ms->query_time_ms) {
-        return ip_ms->query_time_ms;
+    uint16_t dl_type = ntohs(ip_flow->dl_type);
+
+    /* This action only works for IP packets, and the switch should only send
+     * us IP packets this way, but check here just to be sure.
+     */
+    if (dl_type != ETH_TYPE_IP && dl_type != ETH_TYPE_IPV6) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl,
+                     "IGMP action on non-IP packet (eth_type 0x%"PRIx16")",
+                     dl_type);
+        return;
     }
 
+    int64_t dp_key = ntohll(md->flow.metadata);
+
+    struct ip_mcast_snoop *ip_ms = ip_mcast_snoop_find(dp_key);
+    if (!ip_ms || !ip_ms->cfg.enabled) {
+        /* Multicast snooping is not configured or is disabled. */
+        return;
+    }
+
+    uint32_t port_key = md->flow.regs[MFF_LOG_INPORT - MFF_REG0];
+    void *port_key_data = (void *)(uintptr_t)port_key;
+
+    switch (dl_type) {
+    case ETH_TYPE_IP:
+        if (pinctrl_ip_mcast_handle_igmp(ip_ms, ip_flow, pkt_in,
+                                         port_key_data)) {
+            notify_pinctrl_main();
+        }
+        break;
+    case ETH_TYPE_IPV6:
+        if (pinctrl_ip_mcast_handle_mld(ip_ms, ip_flow, pkt_in,
+                                        port_key_data)) {
+            notify_pinctrl_main();
+        }
+        break;
+    default:
+        OVS_NOT_REACHED();
+        break;
+    }
+}
+
+static void
+ip_mcast_querier_send_igmp(struct rconn *swconn, struct ip_mcast_snoop *ip_ms)
+{
     /* Compose a multicast query. */
     uint64_t packet_stub[128 / 8];
     struct dp_packet packet;
 
     dp_packet_use_stub(&packet, packet_stub, sizeof packet_stub);
     pinctrl_compose_ipv4(&packet, ip_ms->cfg.query_eth_src,
-                         ip_ms->cfg.query_eth_dst,
+                         ip_ms->cfg.query_eth_v4_dst,
                          ip_ms->cfg.query_ipv4_src,
                          ip_ms->cfg.query_ipv4_dst,
                          IPPROTO_IGMP, 1, sizeof(struct igmpv3_query_header));
@@ -3873,6 +4010,78 @@ ip_mcast_querier_send(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
     queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
     dp_packet_uninit(&packet);
     ofpbuf_uninit(&ofpacts);
+}
+
+static void
+ip_mcast_querier_send_mld(struct rconn *swconn, struct ip_mcast_snoop *ip_ms)
+{
+    /* Compose a multicast query. */
+    uint64_t packet_stub[128 / 8];
+    struct dp_packet packet;
+
+    dp_packet_use_stub(&packet, packet_stub, sizeof packet_stub);
+    pinctrl_compose_ipv6(&packet, ip_ms->cfg.query_eth_src,
+                         ip_ms->cfg.query_eth_v6_dst,
+                         &ip_ms->cfg.query_ipv6_src,
+                         &ip_ms->cfg.query_ipv6_dst,
+                         IPPROTO_HOPOPTS, 1,
+                         IPV6_EXT_HEADER_LEN + MLD_QUERY_HEADER_LEN);
+
+    struct ipv6_ext_header *ext_hdr =
+        dp_packet_put_zeros(&packet, IPV6_EXT_HEADER_LEN);
+    packet_set_ipv6_ext_header(ext_hdr, IPPROTO_ICMPV6, 0, mld_router_alert,
+                               ARRAY_SIZE(mld_router_alert));
+
+    struct mld_header *mh =
+        dp_packet_put_zeros(&packet, MLD_QUERY_HEADER_LEN);
+    dp_packet_set_l4(&packet, mh);
+
+    /* MLD query max-response in milliseconds. */
+    uint16_t max_response = ip_ms->cfg.query_max_resp_s * 1000;
+    uint8_t qqic = ip_ms->cfg.query_max_resp_s;
+    struct in6_addr unspecified = { { { 0 } } };
+    packet_set_mld_query(&packet, max_response, &unspecified, false, 0, qqic);
+
+    /* Inject multicast query. */
+    uint64_t ofpacts_stub[4096 / 8];
+    struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(ofpacts_stub);
+    enum ofp_version version = rconn_get_version(swconn);
+    put_load(ip_ms->dp_key, MFF_LOG_DATAPATH, 0, 64, &ofpacts);
+    put_load(OVN_MCAST_FLOOD_TUNNEL_KEY, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
+    put_load(1, MFF_LOG_FLAGS, MLF_LOCAL_ONLY, 1, &ofpacts);
+    struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&ofpacts);
+    resubmit->in_port = OFPP_CONTROLLER;
+    resubmit->table_id = OFTABLE_LOCAL_OUTPUT;
+
+    struct ofputil_packet_out po = {
+        .packet = dp_packet_data(&packet),
+        .packet_len = dp_packet_size(&packet),
+        .buffer_id = UINT32_MAX,
+        .ofpacts = ofpacts.data,
+        .ofpacts_len = ofpacts.size,
+    };
+    match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
+    enum ofputil_protocol proto = ofputil_protocol_from_ofp_version(version);
+    queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+    dp_packet_uninit(&packet);
+    ofpbuf_uninit(&ofpacts);
+}
+
+static long long int
+ip_mcast_querier_send(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
+                      long long int current_time)
+{
+    if (current_time < ip_ms->query_time_ms) {
+        return ip_ms->query_time_ms;
+    }
+
+    if (ip_ms->cfg.querier_v4_enabled) {
+        ip_mcast_querier_send_igmp(swconn, ip_ms);
+    }
+
+    if (ip_ms->cfg.querier_v6_enabled) {
+        ip_mcast_querier_send_mld(swconn, ip_ms);
+    }
 
     /* Set the next query time. */
     ip_ms->query_time_ms = current_time + ip_ms->cfg.query_interval_s * 1000;
diff --git a/lib/logical-fields.c b/lib/logical-fields.c
index 5748b67..25ace58 100644
--- a/lib/logical-fields.c
+++ b/lib/logical-fields.c
@@ -138,6 +138,8 @@ ovn_init_symtab(struct shash *symtab)
     expr_symtab_add_predicate(symtab, "eth.bcast",
                               "eth.dst == ff:ff:ff:ff:ff:ff");
     expr_symtab_add_subfield(symtab, "eth.mcast", NULL, "eth.dst[40]");
+    expr_symtab_add_predicate(symtab, "eth.mcastv6",
+                              "eth.dst[32..47] == 0x3333");
 
     expr_symtab_add_field(symtab, "vlan.tci", MFF_VLAN_TCI, NULL, false);
     expr_symtab_add_predicate(symtab, "vlan.present", "vlan.tci[12]");
@@ -173,6 +175,27 @@ ovn_init_symtab(struct shash *symtab)
     expr_symtab_add_field(symtab, "ip6.dst", MFF_IPV6_DST, "ip6", false);
     expr_symtab_add_field(symtab, "ip6.label", MFF_IPV6_LABEL, "ip6", false);
 
+    /* Predefined IPv6 multicast groups (RFC 4291, 2.7.1). */
+    expr_symtab_add_predicate(symtab, "ip6.mcast_rsvd",
+                              "ip6.dst[116..127] == 0xff0 && "
+                              "ip6.dst[0..111] == 0x0");
+    expr_symtab_add_predicate(symtab, "ip6.mcast_all_nodes",
+                              "ip6.dst == ff01::1 || ip6.dst == ff02::1");
+    expr_symtab_add_predicate(symtab, "ip6.mcast_all_rtrs",
+                              "ip6.dst == ff01::2 || ip6.dst == ff02::2 || "
+                              "ip6.dst == ff05::2");
+    expr_symtab_add_predicate(symtab, "ip6.mcast_sol_node",
+                              "ip6.dst == ff02::1:ff00:0000/104");
+    expr_symtab_add_predicate(symtab, "ip6.mcast_flood",
+                              "eth.mcastv6 && "
+                              "(ip6.mcast_rsvd || "
+                              "ip6.mcast_all_nodes || "
+                              "ip6.mcast_all_rtrs || "
+                              "ip6.mcast_sol_node)");
+
+    expr_symtab_add_predicate(symtab, "ip6.mcast",
+                              "eth.mcastv6 && ip6.dst[120..127] == 0xff");
+
     expr_symtab_add_predicate(symtab, "icmp6", "ip6 && ip.proto == 58");
     expr_symtab_add_field(symtab, "icmp6.type", MFF_ICMPV6_TYPE, "icmp6",
                           true);
@@ -208,6 +231,16 @@ ovn_init_symtab(struct shash *symtab)
     expr_symtab_add_field(symtab, "nd.sll", MFF_ND_SLL, "nd_ns", false);
     expr_symtab_add_field(symtab, "nd.tll", MFF_ND_TLL, "nd_na", false);
 
+    /* MLDv1 packets use link-local source addresses
+     * (RFC 2710 and RFC 3810).
+     */
+    expr_symtab_add_predicate(symtab, "mldv1",
+                              "ip6.src == fe80::/10 && "
+                              "icmp6.type == {130, 131, 132}");
+    /* MLDv2 packets are sent to ff02::16 (RFC 3810, 5.2.14). */
+    expr_symtab_add_predicate(symtab, "mldv2",
+                              "ip6.dst == ff02::16 && icmp6.type == 143");
+
     expr_symtab_add_predicate(symtab, "tcp", "ip.proto == 6");
     expr_symtab_add_field(symtab, "tcp.src", MFF_TCP_SRC, "tcp", false);
     expr_symtab_add_field(symtab, "tcp.dst", MFF_TCP_DST, "tcp", false);
diff --git a/lib/ovn-l7.h b/lib/ovn-l7.h
index 375b770..f20d86c 100644
--- a/lib/ovn-l7.h
+++ b/lib/ovn-l7.h
@@ -14,12 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef OVN_DHCP_H
-#define OVN_DHCP_H 1
+#ifndef OVN_L7_H
+#define OVN_L7_H 1
 
 #include <sys/types.h>
 #include <netinet/in.h>
 #include <netinet/icmp6.h>
+#include "csum.h"
+#include "dp-packet.h"
 #include "openvswitch/hmap.h"
 #include "hash.h"
 #include "ovn/logical-fields.h"
@@ -357,4 +359,93 @@ controller_event_opts_destroy(struct controller_event_options *opts)
     }
 }
 
-#endif /* OVN_DHCP_H */
+static inline bool
+ipv6_addr_is_routable_multicast(const struct in6_addr *ip) {
+    if (!ipv6_addr_is_multicast(ip)) {
+        return false;
+    }
+
+    /* Check multicast group scope, RFC 4291, 2.7. */
+    switch (ip->s6_addr[1] & 0x0F) {
+    case 0x00:
+    case 0x01:
+    case 0x02:
+    case 0x03:
+    case 0x0F:
+        return false;
+    default:
+        return true;
+    }
+}
+
+#define IPV6_EXT_HEADER_LEN 8
+struct ipv6_ext_header {
+    uint8_t ip6_nxt_proto;
+    uint8_t len;
+    uint8_t values[6];
+};
+BUILD_ASSERT_DECL(IPV6_EXT_HEADER_LEN == sizeof(struct ipv6_ext_header));
+
+/* Sets the IPv6 extension header fields (next proto and length) and
+ * copies at most the first 6 values to the header. Returns the number
+ * of values copied to the header.
+ */
+static inline size_t
+packet_set_ipv6_ext_header(struct ipv6_ext_header *ext_hdr, uint8_t ip_proto,
+                           uint8_t ext_len, const uint8_t *values,
+                           size_t n_values)
+{
+    ext_hdr->ip6_nxt_proto = ip_proto;
+    ext_hdr->len = (ext_len >= 8 ? ext_len - 8 : 0);
+    if (OVS_UNLIKELY(n_values > 6)) {
+        n_values = 6;
+    }
+    memcpy(&ext_hdr->values, values, n_values);
+    return n_values;
+}
+
+#define MLD_QUERY_HEADER_LEN 28
+struct mld_query_header {
+    uint8_t type;
+    uint8_t code;
+    ovs_be16 csum;
+    ovs_be16 max_resp;
+    ovs_be16 rsvd;
+    struct in6_addr group;
+    uint8_t srs_qrv;
+    uint8_t qqic;
+    ovs_be16 nsrcs;
+};
+BUILD_ASSERT_DECL(MLD_QUERY_HEADER_LEN == sizeof(struct mld_query_header));
+
+/* Sets the MLD type to MLD_QUERY and populates the MLD query header in
+ * 'packet'. 'packet' must be a valid MLD query packet with its l4
+ * offset properly populated.
+ */
+static inline void
+packet_set_mld_query(struct dp_packet *packet, uint16_t max_resp,
+                     const struct in6_addr *group,
+                     bool srs, uint8_t qrv, uint8_t qqic)
+{
+    struct mld_query_header *mqh = dp_packet_l4(packet);
+    mqh->type = MLD_QUERY;
+    mqh->code = 0;
+    mqh->max_resp = htons(max_resp);
+    mqh->rsvd = 0;
+    memcpy(&mqh->group, group, sizeof mqh->group);
+
+    /* See RFC 3810 5.1.8. */
+    if (qrv > 7) {
+        qrv = 0;
+    }
+
+    mqh->srs_qrv = (srs << 3 | qrv);
+    mqh->qqic = qqic;
+    mqh->nsrcs = 0;
+
+    struct ovs_16aligned_ip6_hdr *nh6 = dp_packet_l3(packet);
+    mqh->csum = 0;
+    mqh->csum = packet_csum_upperlayer6(nh6, mqh, IPPROTO_ICMPV6, sizeof *mqh);
+}
+
+#endif /* OVN_L7_H */
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index bcb320b..96e8262 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -1069,9 +1069,9 @@ output;
       </li>
 
       <li>
-        A priority-100 flow that punts all IGMP packets to
-        <code>ovn-controller</code> if IGMP snooping is enabled on the
-        logical switch. The flow also forwards the IGMP packets to the
+        A priority-100 flow that punts all IGMP/MLD packets to
+        <code>ovn-controller</code> if multicast snooping is enabled on the
+        logical switch. The flow also forwards the IGMP/MLD packets to the
         <code>MC_MROUTER_STATIC</code> multicast group, which
         <code>ovn-northd</code> populates with all the logical ports that
         have <ref column="options" table="Logical_Switch_Port"/>
@@ -1096,6 +1096,14 @@ output;
       </li>
 
       <li>
+        A priority-85 flow that forwards all IP multicast traffic destined to
+        reserved multicast IPv6 addresses (RFC 4291, 2.7.1, e.g.,
+        Solicited-Node multicast) to the <code>MC_FLOOD</code> multicast
+        group, which <code>ovn-northd</code> populates with all enabled
+        logical ports.
+      </li>
+
+      <li>
         A priority-80 flow that forwards all unregistered IP multicast traffic
         to the <code>MC_STATIC</code> multicast group, which
         <code>ovn-northd</code> populates with all the logical ports that
@@ -1560,6 +1568,14 @@ next;
 
       <li>
         <p>
+          A priority-96 flow explicitly allows IPv6 multicast traffic that is
+          supposed to reach the router pipeline (e.g., neighbor solicitations
+          and traffic destined to the All-Routers multicast group).
+        </p>
+      </li>
+
+      <li>
+        <p>
           A priority-95 flow allows IP multicast traffic if
           <ref column="options" table="Logical_Router"/>:mcast_relay='true',
           otherwise drops it.
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index b2bf461..6026730 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -498,13 +498,18 @@ struct mcast_switch_info {
                                  * flushed.
                                  */
     int64_t query_interval;     /* Interval between multicast queries. */
-    char *eth_src;              /* ETH src address of the multicast queries. */
-    char *ipv4_src;             /* IP src address of the multicast queries. */
+    char *eth_src;              /* ETH src address of the queries. */
+    char *ipv4_src;             /* IPv4 src address of the queries. */
+    char *ipv6_src;             /* IPv6 src address of the queries. */
+
     int64_t query_max_response; /* Expected time after which reports should
                                  * be received for queries that were sent out.
                                  */
 
-    uint32_t active_flows;      /* Current number of active IP multicast
+    uint32_t active_v4_flows;   /* Current number of active IPv4 multicast
+                                 * flows.
+                                 */
+    uint32_t active_v6_flows;   /* Current number of active IPv6 multicast
                                  * flows.
                                  */
 };
@@ -854,12 +859,15 @@ init_mcast_info_for_switch_datapath(struct ovn_datapath *od)
         nullable_xstrdup(smap_get(&od->nbs->other_config, "mcast_eth_src"));
     mcast_sw_info->ipv4_src =
         nullable_xstrdup(smap_get(&od->nbs->other_config, "mcast_ip4_src"));
+    mcast_sw_info->ipv6_src =
+        nullable_xstrdup(smap_get(&od->nbs->other_config, "mcast_ip6_src"));
 
     mcast_sw_info->query_max_response =
         smap_get_ullong(&od->nbs->other_config, "mcast_query_max_response",
                         OVN_MCAST_DEFAULT_QUERY_MAX_RESPONSE_S);
 
-    mcast_sw_info->active_flows = 0;
+    mcast_sw_info->active_v4_flows = 0;
+    mcast_sw_info->active_v6_flows = 0;
 }
 
 static void
@@ -887,6 +895,7 @@ destroy_mcast_info_for_switch_datapath(struct ovn_datapath *od)
 
     free(mcast_sw_info->eth_src);
     free(mcast_sw_info->ipv4_src);
+    free(mcast_sw_info->ipv6_src);
 }
 
 static void
@@ -927,6 +936,10 @@ store_mcast_info_for_switch_datapath(const struct sbrec_ip_multicast *sb,
     if (mcast_sw_info->ipv4_src) {
         sbrec_ip_multicast_set_ip4_src(sb, mcast_sw_info->ipv4_src);
     }
+
+    if (mcast_sw_info->ipv6_src) {
+        sbrec_ip_multicast_set_ip6_src(sb, mcast_sw_info->ipv6_src);
+    }
 }
 
 static void
@@ -6301,6 +6314,10 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
             ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
                           "ip4 && ip.proto == 2", ds_cstr(&actions));
 
+            /* Punt MLD traffic to controller. */
+            ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
+                          "mldv1 || mldv2", ds_cstr(&actions));
+
             /* Flood all IP multicast traffic destined to 224.0.0.X to all
              * ports - RFC 4541, section 2.1.2, item 2.
              */
@@ -6308,6 +6325,13 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                           "ip4.mcast && ip4.dst == 224.0.0.0/24",
                           "outport = \""MC_FLOOD"\"; output;");
 
+            /* Flood all IPv6 multicast traffic destined to reserved
+             * multicast IPs (RFC 4291, 2.7.1).
+             */
+            ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 85,
+                          "ip6.mcast_flood",
+                          "outport = \""MC_FLOOD"\"; output;");
+
             /* Forward uregistered IP multicast to routers with relay enabled
              * and to any ports configured to flood IP multicast traffic.
              * If configured to flood unregistered traffic this will be
@@ -6337,7 +6361,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                 }
 
                 ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 80,
-                              "ip4 && ip4.mcast", ds_cstr(&actions));
+                              "ip4.mcast || ip6.mcast", ds_cstr(&actions));
             }
         }
 
@@ -6346,7 +6370,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
     }
     free(svc_check_match);
 
-    /* Ingress table 17: Add IP multicast flows learnt from IGMP
+    /* Ingress table 17: Add IP multicast flows learnt from IGMP/MLD
      * (priority 90). */
     struct ovn_igmp_group *igmp_group;
 
@@ -6355,19 +6379,27 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
             continue;
         }
 
+        ds_clear(&match);
+        ds_clear(&actions);
+
         struct mcast_switch_info *mcast_sw_info =
             &igmp_group->datapath->mcast_info.sw;
 
-        if (mcast_sw_info->active_flows >= mcast_sw_info->table_size) {
-            continue;
+        if (IN6_IS_ADDR_V4MAPPED(&igmp_group->address)) {
+            if (mcast_sw_info->active_v4_flows >= mcast_sw_info->table_size) {
+                continue;
+            }
+            mcast_sw_info->active_v4_flows++;
+            ds_put_format(&match, "eth.mcast && ip4 && ip4.dst == %s ",
+                          igmp_group->mcgroup.name);
+        } else {
+            if (mcast_sw_info->active_v6_flows >= mcast_sw_info->table_size) {
+                continue;
+            }
+            mcast_sw_info->active_v6_flows++;
+            ds_put_format(&match, "eth.mcast && ip6 && ip6.dst == %s ",
+                          igmp_group->mcgroup.name);
         }
-        mcast_sw_info->active_flows++;
-
-        ds_clear(&match);
-        ds_clear(&actions);
-
-        ds_put_format(&match, "eth.mcast && ip4 && ip4.dst == %s ",
-                      igmp_group->mcgroup.name);
 
         /* Also flood traffic to all multicast routers with relay enabled. */
         if (mcast_sw_info->flood_relay) {
@@ -7714,8 +7746,15 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
                       "ip4.dst == 0.0.0.0/8",
                       "drop;");
 
+        /* Allow IPv6 multicast traffic that's supposed to reach the
+         * router pipeline (e.g., neighbor solicitations).
+         */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 96, "ip6.mcast_flood",
+                      "next;");
+
         /* Allow multicast if relay enabled (priority 95). */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 95, "ip4.mcast",
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 95,
+                      "ip4.mcast || ip6.mcast",
                       od->mcast_info.rtr.relay ? "next;" : "drop;");
 
         /* Drop ARP packets (priority 85). ARP request packets for router's own
@@ -9207,8 +9246,13 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
         LIST_FOR_EACH (igmp_group, list_node, &od->mcast_info.groups) {
             ds_clear(&match);
             ds_clear(&actions);
-            ds_put_format(&match, "ip4 && ip4.dst == %s ",
-                          igmp_group->mcgroup.name);
+            if (IN6_IS_ADDR_V4MAPPED(&igmp_group->address)) {
+                ds_put_format(&match, "ip4 && ip4.dst == %s ",
+                            igmp_group->mcgroup.name);
+            } else {
+                ds_put_format(&match, "ip6 && ip6.dst == %s ",
+                            igmp_group->mcgroup.name);
+            }
             if (od->mcast_info.rtr.flood_static) {
                 ds_put_cstr(&actions,
                             "clone { "
@@ -9227,11 +9271,9 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
          * ports.
          */
         if (od->mcast_info.rtr.flood_static) {
-            ds_clear(&match);
             ds_clear(&actions);
-            ds_put_format(&match, "ip4.mcast");
             ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 450,
-                          "ip4.mcast",
+                          "ip4.mcast || ip6.mcast",
                           "clone { "
                                 "outport = \""MC_STATIC"\"; "
                                 "ip.ttl--; "
@@ -9278,7 +9320,7 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
         }
 
         ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 500,
-                      "ip4.mcast", "next;");
+                      "ip4.mcast || ip6.mcast", "next;");
     }
 
     /* Local router ingress table ARP_RESOLVE: ARP Resolution.
@@ -9823,7 +9865,7 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
         if (op->od->mcast_info.rtr.relay) {
             ds_clear(&match);
             ds_clear(&actions);
-            ds_put_format(&match, "ip4.mcast && outport == %s",
+            ds_put_format(&match, "(ip4.mcast || ip6.mcast) && outport == %s",
                           op->json_key);
             ds_put_format(&actions, "eth.src = %s; output;",
                           op->lrp_networks.ea_s);
@@ -10515,10 +10557,19 @@ build_mcast_groups(struct northd_context *ctx,
 
             struct ovn_igmp_group *igmp_group;
             LIST_FOR_EACH (igmp_group, list_node, &od->mcast_info.groups) {
+                struct in6_addr *address = &igmp_group->address;
+
+                /* For IPv6, only relay routable multicast groups
+                 * (RFC 4291 2.7).
+                 */
+                if (!IN6_IS_ADDR_V4MAPPED(address) &&
+                        !ipv6_addr_is_routable_multicast(address)) {
+                    continue;
+                }
+
                 struct ovn_igmp_group *igmp_group_rtr =
                     ovn_igmp_group_add(ctx, igmp_groups, router_port->od,
-                                       &igmp_group->address,
-                                       igmp_group->mcgroup.name);
+                                       address, igmp_group->mcgroup.name);
                 struct ovn_port **router_igmp_ports =
                     xmalloc(sizeof *router_igmp_ports);
                 router_igmp_ports[0] = router_port;
@@ -11481,6 +11532,8 @@ main(int argc, char *argv[])
     add_column_noalert(ovnsb_idl_loop.idl,
                        &sbrec_ip_multicast_col_ip4_src);
     add_column_noalert(ovnsb_idl_loop.idl,
+                       &sbrec_ip_multicast_col_ip6_src);
+    add_column_noalert(ovnsb_idl_loop.idl,
                        &sbrec_ip_multicast_col_table_size);
     add_column_noalert(ovnsb_idl_loop.idl,
                        &sbrec_ip_multicast_col_idle_timeout);
diff --git a/ovn-nb.xml b/ovn-nb.xml
index 8c9ef90..7ff0bde 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -350,6 +350,10 @@
         Configures the source IPv4 address for queries originated by the
         logical switch.
       </column>
+      <column name="other_config" key="mcast_ip6_src">
+        Configures the source IPv6 address for queries originated by the
+        logical switch.
+      </column>
     </group>
 
     <group title="Common Columns">
diff --git a/ovn-sb.ovsschema b/ovn-sb.ovsschema
index 56af0ed..d89f8db 100644
--- a/ovn-sb.ovsschema
+++ b/ovn-sb.ovsschema
@@ -1,7 +1,7 @@
 {
     "name": "OVN_Southbound",
-    "version": "2.6.0",
-    "cksum": "4271405686 21646",
+    "version": "2.7.0",
+    "cksum": "4286723485 21693",
     "tables": {
         "SB_Global": {
             "columns": {
@@ -374,6 +374,7 @@
                 "querier": {"type": {"key": "boolean", "min": 0, "max": 1}},
                 "eth_src": {"type": "string"},
                 "ip4_src": {"type": "string"},
+                "ip6_src": {"type": "string"},
                 "table_size": {"type": {"key": "integer",
                                         "min": 0, "max": 1}},
                 "idle_timeout": {"type": {"key": "integer",
diff --git a/ovn-sb.xml b/ovn-sb.xml
index 93bbb86..cf29430 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -3778,7 +3778,7 @@ tcp.flags = RST;
 
     <group title="Querier configuration options">
       The <code>ovn-controller</code> process that runs on OVN hypervisor
-      nodes uses the following columns to determine field values in IGMP
+      nodes uses the following columns to determine field values in IGMP/MLD
       queries that it originates:
       <column name="eth_src">
         Source Ethernet address.
@@ -3786,6 +3786,9 @@ tcp.flags = RST;
       <column name="ip4_src">
         Source IPv4 address.
       </column>
+      <column name="ip6_src">
+        Source IPv6 address.
+      </column>
       <column name="query_max_resp">
         Value (in seconds) to be used as "max-response" field in multicast
         queries. Default: 1 second.
diff --git a/tests/ovn.at b/tests/ovn.at
index 89e2b83..89c4a82 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -15761,6 +15761,585 @@ OVN_CHECK_PACKETS([hv2/vif4-tx.pcap], [expected_empty])
 OVN_CLEANUP([hv1], [hv2])
 AT_CLEANUP
 
+AT_SETUP([ovn -- MLD snoop/querier/relay])
+ovn_start
+
+# Logical network:
+# Three logical switches (sw1-sw3) connected to a logical router (rtr).
+# sw1:
+#   - subnet 10::/64
+#   - 2 ports bound on hv1 (sw1-p11, sw1-p12)
+#   - 2 ports bound on hv2 (sw1-p21, sw1-p22)
+# sw2:
+#   - subnet 20::/64
+#   - 1 port bound on hv1 (sw2-p1)
+#   - 1 port bound on hv2 (sw2-p2)
+#   - MLD Querier from 2000::fe
+# sw3:
+#   - subnet 30::/64
+#   - 1 port bound on hv1 (sw3-p1)
+#   - 1 port bound on hv2 (sw3-p2)
+
+reset_pcap_file() {
+    local iface=$1
+    local pcap_file=$2
+    ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+options:rxq_pcap=dummy-rx.pcap
+    rm -f ${pcap_file}*.pcap
+    ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+options:rxq_pcap=${pcap_file}-rx.pcap
+}
+
+#
+# send_mld_v2_report INPORT HV ETH_SRC IP_SRC GROUP REC_TYPE
+#                    MLD_CSUM OUTFILE
+#
+# This shell function causes an MLDv2 report to be received on INPORT of HV.
+# The packet's content has Ethernet destination 33:33:00:00:00:16 and source
+# ETH_SRC (exactly 12 hex digits). Ethernet type is set to IPv6.
+# GROUP is the IPv6 multicast group to join or leave (based on REC_TYPE).
+# REC_TYPE == 04: join GROUP
+# REC_TYPE == 03: leave GROUP
+# The packet hexdump is also stored in OUTFILE.
+#
+send_mld_v2_report() {
+    local inport=$1 hv=$2 eth_src=$3 ip_src=$4 group=$5
+    local rec_type=$6 mld_chksum=$7 outfile=$8
+
+    local eth_dst=333300000016
+    local ip_dst=ff020000000000000000000000000016
+    local ip_ttl=01
+    local ip_ra_opt=3a00050200000100
+
+    local mld_type=8f
+    local mld_code=00
+    local num_rec=0001
+    local aux_dlen=00
+    local num_src=0000
+
+    local eth=${eth_dst}${eth_src}86dd
+    local ip=60000000002400${ip_ttl}${ip_src}${ip_dst}${ip_ra_opt}
+    local mld=${mld_type}${mld_code}${mld_chksum}0000${num_rec}${rec_type}${aux_dlen}${num_src}${group}
+    local packet=${eth}${ip}${mld}
+
+    echo ${packet} >> ${outfile}
+    as $hv ovs-appctl netdev-dummy/receive ${inport} ${packet}
+}
+
+#
+# store_mld_query ETH_SRC IP_SRC OUTFILE
+#
+# This shell function builds an MLD general query from ETH_SRC and IP_SRC
+# and stores the hexdump of the packet in OUTFILE.
+#
+store_mld_query() {
+    local eth_src=$1 ip_src=$2 outfile=$3
+
+    local eth_dst=333300000000
+    local ip_dst=ff020000000000000000000000000001
+    local ip_ttl=01
+    local ip_ra_opt=3a00050200000000
+
+    local mld_type=82
+    local mld_code=00
+    local max_resp=03e8
+    local mld_chksum=59be
+    local addr=00000000000000000000000000000000
+
+    local eth=${eth_dst}${eth_src}86dd
+    local ip=60000000002400${ip_ttl}${ip_src}${ip_dst}${ip_ra_opt}
+    local mld=${mld_type}${mld_code}${mld_chksum}${max_resp}0000${addr}00010000
+    local packet=${eth}${ip}${mld}
+
+    echo ${packet} >> ${outfile}
+}
+
+#
+# send_ip_multicast_pkt INPORT HV ETH_SRC ETH_DST IP_SRC IP_DST IP_LEN TTL
+#    IP_PROTO DATA
+#
+# This shell function causes an IP multicast packet to be received on INPORT
+# of HV.
+#
+send_ip_multicast_pkt() {
+    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4
+    local ip_src=$5 ip_dst=$6 ip_len=$7 ip_ttl=$8 proto=$9
+    local data=${10}
+
+    local eth=${eth_dst}${eth_src}86dd
+    local ip=60000000${ip_len}${proto}${ip_ttl}${ip_src}${ip_dst}
+    local packet=${eth}${ip}${data}
+
+    as $hv ovs-appctl netdev-dummy/receive ${inport} ${packet}
+}
+
+#
+# store_ip_multicast_pkt ETH_SRC ETH_DST IP_SRC IP_DST IP_LEN TTL
+#    IP_PROTO DATA OUTFILE
+#
+# This shell function builds an IP multicast packet and stores the hexdump of
+# the packet in OUTFILE.
+#
+store_ip_multicast_pkt() {
+    local eth_src=$1 eth_dst=$2
+    local ip_src=$3 ip_dst=$4 ip_len=$5 ip_ttl=$6 proto=$7
+    local data=$8 outfile=$9
+
+    local eth=${eth_dst}${eth_src}86dd
+    local ip=60000000${ip_len}${proto}${ip_ttl}${ip_src}${ip_dst}
+    local packet=${eth}${ip}${data}
+
+    echo ${packet} >> ${outfile}
+}
+
+ovn-nbctl ls-add sw1
+ovn-nbctl ls-add sw2
+ovn-nbctl ls-add sw3
+
+ovn-nbctl lsp-add sw1 sw1-p11
+ovn-nbctl lsp-add sw1 sw1-p12
+ovn-nbctl lsp-add sw1 sw1-p21
+ovn-nbctl lsp-add sw1 sw1-p22
+ovn-nbctl lsp-add sw2 sw2-p1
+ovn-nbctl lsp-add sw2 sw2-p2
+ovn-nbctl lsp-add sw3 sw3-p1
+ovn-nbctl lsp-add sw3 sw3-p2
+
+ovn-nbctl lr-add rtr
+ovn-nbctl lrp-add rtr rtr-sw1 00:00:00:00:01:00 10::fe/64
+ovn-nbctl lrp-add rtr rtr-sw2 00:00:00:00:02:00 20::fe/64
+ovn-nbctl lrp-add rtr rtr-sw3 00:00:00:00:03:00 30::fe/64
+
+ovn-nbctl lsp-add sw1 sw1-rtr                      \
+    -- lsp-set-type sw1-rtr router                 \
+    -- lsp-set-addresses sw1-rtr 00:00:00:00:01:00 \
+    -- lsp-set-options sw1-rtr router-port=rtr-sw1
+ovn-nbctl lsp-add sw2 sw2-rtr                      \
+    -- lsp-set-type sw2-rtr router                 \
+    -- lsp-set-addresses sw2-rtr 00:00:00:00:02:00 \
+    -- lsp-set-options sw2-rtr router-port=rtr-sw2
+ovn-nbctl lsp-add sw3 sw3-rtr                      \
+    -- lsp-set-type sw3-rtr router                 \
+    -- lsp-set-addresses sw3-rtr 00:00:00:00:03:00 \
+    -- lsp-set-options sw3-rtr router-port=rtr-sw3
+
+net_add n1
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+ovs-vsctl -- add-port br-int hv1-vif1 -- \
+    set interface hv1-vif1 external-ids:iface-id=sw1-p11 \
+    options:tx_pcap=hv1/vif1-tx.pcap \
+    options:rxq_pcap=hv1/vif1-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv1-vif2 -- \
+    set interface hv1-vif2 external-ids:iface-id=sw1-p12 \
+    options:tx_pcap=hv1/vif2-tx.pcap \
+    options:rxq_pcap=hv1/vif2-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv1-vif3 -- \
+    set interface hv1-vif3 external-ids:iface-id=sw2-p1 \
+    options:tx_pcap=hv1/vif3-tx.pcap \
+    options:rxq_pcap=hv1/vif3-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv1-vif4 -- \
+    set interface hv1-vif4 external-ids:iface-id=sw3-p1 \
+    options:tx_pcap=hv1/vif4-tx.pcap \
+    options:rxq_pcap=hv1/vif4-rx.pcap \
+    ofport-request=1
+
+sim_add hv2
+as hv2
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.2
+ovs-vsctl -- add-port br-int hv2-vif1 -- \
+    set interface hv2-vif1 external-ids:iface-id=sw1-p21 \
+    options:tx_pcap=hv2/vif1-tx.pcap \
+    options:rxq_pcap=hv2/vif1-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv2-vif2 -- \
+    set interface hv2-vif2 external-ids:iface-id=sw1-p22 \
+    options:tx_pcap=hv2/vif2-tx.pcap \
+    options:rxq_pcap=hv2/vif2-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv2-vif3 -- \
+    set interface hv2-vif3 external-ids:iface-id=sw2-p2 \
+    options:tx_pcap=hv2/vif3-tx.pcap \
+    options:rxq_pcap=hv2/vif3-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv2-vif4 -- \
+    set interface hv2-vif4 external-ids:iface-id=sw3-p2 \
+    options:tx_pcap=hv2/vif4-tx.pcap \
+    options:rxq_pcap=hv2/vif4-rx.pcap \
+    ofport-request=1
+
+OVN_POPULATE_ARP
+
+# Enable multicast snooping on sw1.
+ovn-nbctl set Logical_Switch sw1       \
+    other_config:mcast_querier="false" \
+    other_config:mcast_snoop="true"
+
+# No IGMP/MLD query should be generated by sw1 (mcast_querier="false").
+> expected
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [expected])
+
+ovn-nbctl --wait=hv sync
+
+# Inject MLD Join for ff0a:dead:beef::1 on sw1-p11.
+send_mld_v2_report hv1-vif1 hv1 \
+    000000000001 10000000000000000000000000000001 \
+    ff0adeadbeef00000000000000000001 04 c0e4 \
+    /dev/null
+# Inject MLD Join for ff0a:dead:beef::1 on sw1-p21.
+send_mld_v2_report hv2-vif1 hv2 \
+    000000000002 10000000000000000000000000000002 \
+    ff0adeadbeef00000000000000000001 04 c0e3 \
+    /dev/null
+
+# Check that the IP multicast group is learned on both hv.
+OVS_WAIT_UNTIL([
+    total_entries=`ovn-sbctl find IGMP_Group | grep "ff0a:dead:beef::1" -c`
+    test "${total_entries}" = "2"
+])
+
+# Send traffic and make sure it gets forwarded only on the two ports that
+# joined.
+> expected
+> expected_empty
+send_ip_multicast_pkt hv1-vif2 hv1 \
+    000000000001 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a
+
+store_ip_multicast_pkt \
+    000000000001 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a \
+    expected
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif3-tx.pcap], [expected_empty])
+
+# Inject MLD Leave for ff0a:dead:beef::1 on sw1-p11.
+send_mld_v2_report hv1-vif1 hv1 \
+    000000000001 10000000000000000000000000000001 \
+    ff0adeadbeef00000000000000000001 03 c1e4 \
+    /dev/null
+
+# Check IGMP_Group table on both HV.
+OVS_WAIT_UNTIL([
+    total_entries=`ovn-sbctl find IGMP_Group | grep "ff0a:dead:beef::1" -c`
+    test "${total_entries}" = "1"
+])
+
+# Send traffic and make sure it gets forwarded only on the port that joined.
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+> expected
+> expected_empty
+send_ip_multicast_pkt hv1-vif2 hv1 \
+    000000000001 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a
+
+store_ip_multicast_pkt \
+    000000000001 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a \
+    expected
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif3-tx.pcap], [expected_empty])
+
+# Flush IP multicast groups.
+ovn-sbctl ip-multicast-flush sw1
+OVS_WAIT_UNTIL([
+    total_entries=`ovn-sbctl find IGMP_Group | grep "ff0a:dead:beef::1" -c`
+    test "${total_entries}" = "0"
+])
+
+# Enable multicast snooping and querier on sw2 and set query interval to
+# minimum.
+ovn-nbctl set Logical_Switch sw2 \
+    other_config:mcast_snoop="true" \
+    other_config:mcast_querier="true" \
+    other_config:mcast_query_interval=1 \
+    other_config:mcast_eth_src="00:00:00:00:02:fe" \
+    other_config:mcast_ip6_src="2000::fe"
+
+# Wait for 1 query interval (1 sec) and check that two queries are generated.
+> expected
+store_mld_query 0000000002fe 200000000000000000000000000000fe expected
+store_mld_query 0000000002fe 200000000000000000000000000000fe expected
+sleep 1
+
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv2/vif3-tx.pcap], [expected])
+
+# Disable multicast querier on sw2.
+ovn-nbctl set Logical_Switch sw2 \
+    other_config:mcast_querier="false"
+
+# Enable multicast snooping on sw3.
+ovn-nbctl set Logical_Switch sw3       \
+    other_config:mcast_querier="false" \
+    other_config:mcast_snoop="true"
+
+# Send traffic from sw3 and make sure rtr doesn't relay it.
+> expected_empty
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif2 hv1/vif2
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+as hv1 reset_pcap_file hv1-vif4 hv1/vif4
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+as hv2 reset_pcap_file hv2-vif2 hv2/vif2
+as hv2 reset_pcap_file hv2-vif3 hv2/vif3
+as hv2 reset_pcap_file hv2-vif4 hv2/vif4
+
+send_ip_multicast_pkt hv2-vif4 hv2 \
+    000000000001 333300000001 \
+    00100000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 02 11 \
+    93407a69000e2b4e61736461640a
+
+# Sleep a bit to make sure no traffic is received and then check.
+sleep 1
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif4-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif3-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif4-tx.pcap], [expected_empty])
+
+# Enable multicast relay on rtr
+ovn-nbctl set logical_router rtr \
+    options:mcast_relay="true"
+
+# Inject MLD Join for ff0a:dead:beef::1 on sw1-p11.
+send_mld_v2_report hv1-vif1 hv1 \
+    000000000001 10000000000000000000000000000001 \
+    ff0adeadbeef00000000000000000001 04 c0e4 \
+    /dev/null
+
+# Inject MLD Join for ff0a:dead:beef::1 on sw2-p2.
+send_mld_v2_report hv2-vif3 hv2 \
+    000000000001 10000000000000000000000000000001 \
+    ff0adeadbeef00000000000000000001 04 c0e4 \
+    /dev/null
+
+# Check that the IP multicast group is learned by sw1 and sw2.
+OVS_WAIT_UNTIL([
+    total_entries=`ovn-sbctl find IGMP_Group | grep "ff0a:dead:beef::1" -c`
+    test "${total_entries}" = "2"
+])
+
+# Send traffic from sw3 and make sure it is relayed by rtr
+# to ports that joined.
+> expected_routed_sw1
+> expected_routed_sw2
+> expected_empty
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif2 hv1/vif2
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+as hv1 reset_pcap_file hv1-vif4 hv1/vif4
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+as hv2 reset_pcap_file hv2-vif2 hv2/vif2
+as hv2 reset_pcap_file hv2-vif3 hv2/vif3
+as hv2 reset_pcap_file hv2-vif4 hv2/vif4
+
+send_ip_multicast_pkt hv2-vif4 hv2 \
+    000000000001 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 02 11 \
+    93407a69000e1b5e61736461640a
+store_ip_multicast_pkt \
+    000000000100 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a \
+    expected_routed_sw1
+store_ip_multicast_pkt \
+    000000000200 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a \
+    expected_routed_sw2
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected_routed_sw1])
+OVN_CHECK_PACKETS([hv2/vif3-tx.pcap], [expected_routed_sw2])
+OVN_CHECK_PACKETS([hv1/vif4-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif4-tx.pcap], [expected_empty])
+
+# Inject MLD Join for ff0a:dead:beef::1 on sw3-p1.
+send_mld_v2_report hv1-vif4 hv1 \
+    000000000001 10000000000000000000000000000001 \
+    ff0adeadbeef00000000000000000001 04 c0e4 \
+    /dev/null
+
+# Check that the Multicast Group is learned by all switches.
+OVS_WAIT_UNTIL([
+    total_entries=`ovn-sbctl find IGMP_Group | grep "ff0a:dead:beef::1" -c`
+    test "${total_entries}" = "3"
+])
+
+# Send traffic from sw3 and make sure it is relayed by rtr
+# to ports that joined.
+> expected_routed_sw1
+> expected_routed_sw2
+> expected_switched
+> expected_empty
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif2 hv1/vif2
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+as hv1 reset_pcap_file hv1-vif4 hv1/vif4
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+as hv2 reset_pcap_file hv2-vif2 hv2/vif2
+as hv2 reset_pcap_file hv2-vif3 hv2/vif3
+as hv2 reset_pcap_file hv2-vif4 hv2/vif4
+
+send_ip_multicast_pkt hv2-vif4 hv2 \
+    000000000001 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 02 11 \
+    93407a69000e1b5e61736461640a
+store_ip_multicast_pkt \
+    000000000100 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a \
+    expected_routed_sw1
+store_ip_multicast_pkt \
+    000000000200 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e1b5e61736461640a \
+    expected_routed_sw2
+store_ip_multicast_pkt \
+    000000000001 333300000001 \
+    10000000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 02 11 \
+    93407a69000e1b5e61736461640a \
+    expected_switched
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected_routed_sw1])
+OVN_CHECK_PACKETS([hv2/vif3-tx.pcap], [expected_routed_sw2])
+OVN_CHECK_PACKETS([hv1/vif4-tx.pcap], [expected_switched])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif4-tx.pcap], [expected_empty])
+
+# Flush multicast groups.
+ovn-sbctl ip-multicast-flush sw1
+ovn-sbctl ip-multicast-flush sw2
+ovn-sbctl ip-multicast-flush sw3
+OVS_WAIT_UNTIL([
+    total_entries=`ovn-sbctl find IGMP_Group | grep "ff0a:dead:beef::1" -c`
+    test "${total_entries}" = "0"
+])
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif2 hv1/vif2
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+as hv1 reset_pcap_file hv1-vif4 hv1/vif4
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+as hv2 reset_pcap_file hv2-vif2 hv2/vif2
+as hv2 reset_pcap_file hv2-vif3 hv2/vif3
+as hv2 reset_pcap_file hv2-vif4 hv2/vif4
+
+> expected_empty
+> expected_switched
+> expected_routed
+> expected_reports
+
+# Enable mcast_flood on sw1-p11
+ovn-nbctl set Logical_Switch_Port sw1-p11 options:mcast_flood='true'
+
+# Enable mcast_flood_reports on sw1-p21
+ovn-nbctl set Logical_Switch_Port sw1-p21 options:mcast_flood_reports='true'
+# Enable mcast_flood on rtr-sw2
+ovn-nbctl set Logical_Router_Port rtr-sw2 options:mcast_flood='true'
+# Enable mcast_flood on sw2-p1
+ovn-nbctl set Logical_Switch_Port sw2-p1 options:mcast_flood='true'
+
+ovn-nbctl --wait=hv sync
+
+# Inject MLD Join for ff0a:dead:beef::1 on sw1-p12.
+send_mld_v2_report hv1-vif2 hv1 \
+    000000000001 10000000000000000000000000000001 \
+    ff0adeadbeef00000000000000000001 04 c0e4 \
+    expected_reports
+
+# Check that the IP multicast group is learned.
+OVS_WAIT_UNTIL([
+    total_entries=`ovn-sbctl find IGMP_Group | grep "ff0a:dead:beef::1" -c`
+    test "${total_entries}" = "1"
+])
+
+# Send traffic from sw1-p21
+send_ip_multicast_pkt hv2-vif1 hv2 \
+    000000000001 333300000001 \
+    00100000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 02 11 \
+    93407a69000e2b4e61736461640a
+store_ip_multicast_pkt \
+    000000000001 333300000001 \
+    00100000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 02 11 \
+    93407a69000e2b4e61736461640a \
+    expected_switched
+store_ip_multicast_pkt \
+    000000000200 333300000001 \
+    00100000000000000000000000000042 ff0adeadbeef00000000000000000001 \
+    000e 01 11 \
+    93407a69000e2b4e61736461640a \
+    expected_routed
+
+# Sleep a bit to make sure no duplicate traffic is received
+sleep 1
+
+# Check that traffic is switched to sw1-p11 and sw1-p12
+# Check that MLD join is flooded on sw1-p21
+# Check that traffic is routed by rtr to rtr-sw2 and then switched to sw2-p1
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected_switched])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [expected_switched])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected_routed])
+OVN_CHECK_PACKETS([hv1/vif4-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected_reports])
+OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif3-tx.pcap], [expected_empty])
+OVN_CHECK_PACKETS([hv2/vif4-tx.pcap], [expected_empty])
+
+OVN_CLEANUP([hv1], [hv2])
+AT_CLEANUP
+
 AT_SETUP([ovn -- unixctl socket])
 ovn_start
 


