[ovs-dev] [PATCH] Windows: Fix broken kernel userspace communication

Ben Pfaff blp at ovn.org
Thu Nov 15 17:11:36 UTC 2018


On Thu, Nov 15, 2018 at 06:42:38PM +0200, Alin Gabriel Serdean wrote:
> Patch: https://github.com/openvswitch/ovs/commit/69c51582ff786a68fc325c1c50624715482bc460
> broke Windows userspace-kernel communication.
> 
> On Windows we create the netlink sockets when the handlers are initialized
> and reuse them afterwards.
> This patch reworks the code to draw from that netlink socket pool instead
> of creating new sockets.
> 
> Fixes:
> https://github.com/openvswitch/ovs-issues/issues/164
> 
> Signed-off-by: Alin Gabriel Serdean <aserdean at ovn.org>
> Acked-by: Shashank Ram <rams at vmware.com>
> Tested-by: Shashank Ram <rams at vmware.com>

Sorry we broke Windows.  I apologize.

Usually I prefer to put all the #ifdefs in a single place when I can.
In this case, I think that means putting them inside
create_socksp_windows() and del_socksp_windows().  It's easy enough.

But del_socksp_windows() is a really weird function.  It does nothing:
it sets one of its parameters to NULL, which is not visible outside the
function, and it ignores its other parameter.  That would make sense if
there were multiple implementations, or if it were referred to via
function pointer, etc., but I don't understand it here.  Why call it at
all?
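
To spell out why that assignment is a no-op: the pointer is passed by
value, so setting it to NULL inside the callee only changes the callee's
local copy.  A stand-alone sketch (names made up; only the shape matches
what del_socksp_windows() is described as doing above):

    #include <stdio.h>

    /* Sets its own copy of 'sock' to NULL and ignores 'dpif'; neither
     * has any effect the caller can observe. */
    static void
    del_sock_noop(void *dpif, void *sock)
    {
        (void) dpif;            /* ignored */
        sock = NULL;            /* only the local copy changes */
    }

    int
    main(void)
    {
        int dummy;
        void *sock = &dummy;

        del_sock_noop(NULL, sock);
        printf("sock is still %p\n", sock);    /* unchanged by the call */
        return 0;
    }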

Anyhow, here is one variation that I suggest.  I built it on Linux but
not on Windows, so maybe I screwed some things up.

Actually, looking at it again, I think that the first change in
dpif_netlink_port_add__() fixes a bug: it looks like, previously, an
error return from nl_sock_create() was passed up to the caller as a
success.  I guess that should be fixed in a separate patch, first:
        https://mail.openvswitch.org/pipermail/ovs-dev/2018-November/353952.html
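
For reference, the bug reduces to this shape (stand-alone sketch, made-up
names): 'error' starts out 0 and the nonzero return value from the create
call is tested but never stored, so the failure comes back to the caller
as success:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for nl_sock_create(): always fails in this sketch. */
    static int
    sock_create(int *sock_out)
    {
        *sock_out = -1;
        return ENOMEM;
    }

    /* Shape of the old dpif_netlink_port_add__() path: the failure from
     * sock_create() is detected, but 'error' is still 0 when it is
     * returned. */
    static int
    port_add_old(void)
    {
        int sock;
        int error = 0;

        if (sock_create(&sock)) {
            return error;       /* bug: returns 0 even though we failed */
        }
        return error;
    }

    int
    main(void)
    {
        printf("port_add_old() returned %d\n", port_add_old());  /* 0 */
        return 0;
    }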

--8<--------------------------cut here-------------------------->8--


From: Alin Gabriel Serdean <aserdean at ovn.org>
Date: Thu, 15 Nov 2018 18:42:38 +0200
Subject: [PATCH] Windows: Fix broken kernel userspace communication

Patch: https://github.com/openvswitch/ovs/commit/69c51582ff786a68fc325c1c50624715482bc460
broke Windows userspace-kernel communication.

On Windows we create the netlink sockets when the handlers are initialized
and reuse them afterwards.
This patch reworks the code to draw from that netlink socket pool instead
of creating new sockets.

Fixes:
https://github.com/openvswitch/ovs-issues/issues/164

Signed-off-by: Alin Gabriel Serdean <aserdean at ovn.org>
Acked-by: Shashank Ram <rams at vmware.com>
Tested-by: Shashank Ram <rams at vmware.com>
Signed-off-by: Ben Pfaff <blp at ovn.org>
---
 lib/dpif-netlink.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 48 insertions(+), 4 deletions(-)

diff --git a/lib/dpif-netlink.c b/lib/dpif-netlink.c
index f2011f22e548..3fed5e97b5ec 100644
--- a/lib/dpif-netlink.c
+++ b/lib/dpif-netlink.c
@@ -246,6 +246,42 @@ static int dpif_netlink_port_query__(const struct dpif_netlink *dpif,
                                      odp_port_t port_no, const char *port_name,
                                      struct dpif_port *dpif_port);
 
+static int
+create_nl_sock(struct dpif_netlink *dpif OVS_UNUSED, struct nl_sock **socksp)
+    OVS_REQ_WRLOCK(dpif->upcall_lock)
+{
+#ifndef _WIN32
+    return nl_sock_create(NETLINK_GENERIC, socksp);
+#else
+    /* Pick netlink sockets to use in a round-robin fashion from each
+     * handler's pool of sockets. */
+    struct dpif_handler *handler = &dpif->handlers[0];
+    struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
+    size_t index = handler->last_used_pool_idx;
+
+    /* A pool of sockets is allocated when the handler is initialized. */
+    if (sock_pool == NULL) {
+        *socksp = NULL;
+        return EINVAL;
+    }
+
+    ovs_assert(index < VPORT_SOCK_POOL_SIZE);
+    *socksp = sock_pool[index].nl_sock;
+    ovs_assert(*socksp);
+    index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
+    handler->last_used_pool_idx = index;
+    return 0;
+#endif
+}
+
+static void
+close_nl_sock(struct nl_sock *socksp)
+{
+#ifndef _WIN32
+    nl_sock_destroy(socksp);
+#endif
+}
+
 static struct dpif_netlink *
 dpif_netlink_cast(const struct dpif *dpif)
 {
@@ -716,7 +752,8 @@ dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
     int error = 0;
 
     if (dpif->handlers) {
-        if (nl_sock_create(NETLINK_GENERIC, &socksp)) {
+        error = create_nl_sock(dpif, &socksp);
+        if (error) {
             return error;
         }
     }
@@ -748,7 +785,7 @@ dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
                       dpif_name(&dpif->dpif), *port_nop);
         }
 
-        nl_sock_destroy(socksp);
+        close_nl_sock(socksp);
         goto exit;
     }
 
@@ -763,7 +800,7 @@ dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
         request.dp_ifindex = dpif->dp_ifindex;
         request.port_no = *port_nop;
         dpif_netlink_vport_transact(&request, NULL, NULL);
-        nl_sock_destroy(socksp);
+        close_nl_sock(socksp);
         goto exit;
     }
 
@@ -2280,15 +2317,22 @@ dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
             || !vport_get_pid(dpif, port_no, &upcall_pid)) {
             struct nl_sock *socksp;
 
+#ifdef _WIN32
+            socksp = create_socksp_windows(dpif, &error);
+            if (!socksp) {
+                goto error;
+            }
+#else
             if (nl_sock_create(NETLINK_GENERIC, &socksp)) {
                 goto error;
             }
+#endif
 
             error = vport_add_channel(dpif, vport.port_no, socksp);
             if (error) {
                 VLOG_INFO("%s: could not add channels for port %s",
                           dpif_name(&dpif->dpif), vport.name);
-                nl_sock_destroy(socksp);
+                close_nl_sock(socksp);
                 retval = error;
                 goto error;
             }
-- 
2.16.1


