[ovs-dev] [PATCH] RFC: datapath: Generic Netlink compatible locking.

Ben Pfaff blp at nicira.com
Thu Dec 16 23:35:03 UTC 2010


Open vSwitch is transitioning from a character device ioctl interface to
one based on Generic Netlink.  When Generic Netlink comes into the picture,
the datapath module has to change its locking strategy.

When the Generic Netlink message receive path calls into Open vSwitch, it
always holds the genl_lock mutex.  This means that there is no longer any
point in having per-datapath mutexes, because no more than one of them
would ever be held at a time, so this commit removes them.

Given that genl_lock is always held, it would be nice to drop dp_mutex
also, but we cannot because of the dp_notify path into OVS.  On this path,
rtnl_lock is held but genl_lock is not.  We cannot simply take genl_lock
on this path because it would cause a deadlock against the message receive
path, which has to take rtnl_lock inside genl_lock.  So this patch retains
dp_mutex as an innermost lock that both message receive and dp_notify take.
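
To make the nesting concrete, here is a rough sketch (not part of the
patch) of the two entry paths.  The Generic Netlink handler is
hypothetical, since this patch does not register any genl ops yet;
dp_mutex_lock()/dp_mutex_unlock() are the wrappers the patch adds, and
the notifier is a simplified version of dp_device_event() in
dp_notify.c:

    /* Userspace command path: Generic Netlink message receive already
     * holds genl_lock before calling the handler, so the order ends up
     * genl_lock -> rtnl_lock -> dp_mutex. */
    static int odp_genl_do_cmd(struct sk_buff *skb, struct genl_info *info)
    {
            rtnl_lock();
            dp_mutex_lock();
            /* ...operate on datapaths and vports... */
            dp_mutex_unlock();
            rtnl_unlock();
            return 0;
    }

    /* Notifier path: the netdev notifier chain already holds rtnl_lock
     * but not genl_lock, so dp_mutex is the only lock we can take here
     * without inverting the order above. */
    static int dp_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
    {
            dp_mutex_lock();
            /* ...detach or rename the affected vport... */
            dp_mutex_unlock();
            return NOTIFY_DONE;
    }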

There are probably many code paths here where dp_mutex is not really needed
for mutual exclusion against the dp_notify path, but dp_mutex should be
uncontended, so it does not seem worthwhile to try to limit its use.

This patch does not actually change OVS to use Generic Netlink; it just
illustrates how the locking would have to work.  I'm presenting it to
solicit comments, and maybe even a better solution.
---
 datapath/datapath.c  |  210 +++++++++++++++++++++++++++-----------------------
 datapath/datapath.h  |    8 ++-
 datapath/dp_notify.c |    8 +-
 datapath/vport.c     |  107 +++++--------------------
 datapath/vport.h     |    3 -
 5 files changed, 146 insertions(+), 190 deletions(-)

diff --git a/datapath/datapath.c b/datapath/datapath.c
index fded95c..ac33539 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/time.h>
 #include <linux/etherdevice.h>
+#include <linux/genetlink.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
@@ -58,20 +59,48 @@
 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
 EXPORT_SYMBOL(dp_ioctl_hook);
 
-/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
- * by dp_mutex.
+/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
+ * by dp_mutex.
+ *
+ * Locks have the following hierarchy:
+ *
+ * genl_lock
+ *         rtnl_lock
+ *                 dp_mutex
+ *
+ * This is undesirable, but we're stuck with it:
  *
- * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
- * lock first.
+ *   - Generic Netlink message receive holds genl_lock while calling into us,
+ *     so genl_lock is always held while executing userspace commands.  Some
+ *     commands need rtnl_lock, so rtnl_lock has to nest inside genl_lock.
  *
- * It is safe to access the datapath and vport structures with just
- * dp_mutex.
+ *   - Network device notifiers hold the rtnl_lock when they call into us.
+ *     We can't take genl_lock there to get mutual exclusion versus userspace
+ *     command execution because that would invert the order.
+ *
+ * Therefore, dp_mutex exists as an innermost lock that can be taken in both
+ * these situations.  There should be no contention on it, because it is almost
+ * completely redundant with genl_lock, since genl_lock is taken before
+ * dp_mutex in every situation except when the Open vSwitch network device
+ * notifier is running.
  */
 static struct datapath __rcu *dps[ODP_MAX];
 static DEFINE_MUTEX(dp_mutex);
 
 static int new_vport(struct datapath *, struct odp_port *, int port_no);
 
+void dp_mutex_lock(void)
+{
+	mutex_lock(&dp_mutex);
+}
+
+void dp_mutex_unlock(void)
+{
+	mutex_unlock(&dp_mutex);
+}
+
 /* Must be called with rcu_read_lock or dp_mutex. */
 struct datapath *get_dp(int dp_idx)
 {
@@ -82,21 +111,9 @@ struct datapath *get_dp(int dp_idx)
 }
 EXPORT_SYMBOL_GPL(get_dp);
 
-static struct datapath *get_dp_locked(int dp_idx)
-{
-	struct datapath *dp;
-
-	mutex_lock(&dp_mutex);
-	dp = get_dp(dp_idx);
-	if (dp)
-		mutex_lock(&dp->mutex);
-	mutex_unlock(&dp_mutex);
-	return dp;
-}
-
 static struct tbl *get_table_protected(struct datapath *dp)
 {
-	return rcu_dereference_protected(dp->table, lockdep_is_held(&dp->mutex));
+	return rcu_dereference_protected(dp->table, lockdep_is_held(&dp_mutex));
 }
 
 /* Must be called with rcu_read_lock or RTNL lock. */
@@ -222,7 +239,7 @@ static int create_dp(int dp_idx, const char __user *devnamep)
 	}
 
 	rtnl_lock();
-	mutex_lock(&dp_mutex);
+	dp_mutex_lock();
 	err = -ENODEV;
 	if (!try_module_get(THIS_MODULE))
 		goto err_unlock;
@@ -239,7 +256,6 @@ static int create_dp(int dp_idx, const char __user *devnamep)
 	if (dp == NULL)
 		goto err_put_module;
 	INIT_LIST_HEAD(&dp->port_list);
-	mutex_init(&dp->mutex);
 	dp->dp_idx = dp_idx;
 	for (i = 0; i < DP_N_QUEUES; i++)
 		skb_queue_head_init(&dp->queues[i]);
@@ -278,7 +294,7 @@ static int create_dp(int dp_idx, const char __user *devnamep)
 	rcu_assign_pointer(dps[dp_idx], dp);
 	dp_sysfs_add_dp(dp);
 
-	mutex_unlock(&dp_mutex);
+	dp_mutex_unlock();
 	rtnl_unlock();
 
 	return 0;
@@ -292,7 +308,7 @@ err_free_dp:
 err_put_module:
 	module_put(THIS_MODULE);
 err_unlock:
-	mutex_unlock(&dp_mutex);
+	dp_mutex_unlock();
 	rtnl_unlock();
 err:
 	return err;
@@ -328,7 +344,7 @@ static int destroy_dp(int dp_idx)
 	int err;
 
 	rtnl_lock();
-	mutex_lock(&dp_mutex);
+	dp_mutex_lock();
 	dp = get_dp(dp_idx);
 	err = -ENODEV;
 	if (!dp)
@@ -338,7 +354,7 @@ static int destroy_dp(int dp_idx)
 	err = 0;
 
 err_unlock:
-	mutex_unlock(&dp_mutex);
+	dp_mutex_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -355,9 +371,7 @@ static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no
 	parms.dp = dp;
 	parms.port_no = port_no;
 
-	vport_lock();
 	vport = vport_add(&parms);
-	vport_unlock();
 
 	if (IS_ERR(vport))
 		return PTR_ERR(vport);
@@ -385,30 +399,30 @@ static int attach_port(int dp_idx, struct odp_port __user *portp)
 	port.type[VPORT_TYPE_SIZE - 1] = '\0';
 
 	rtnl_lock();
-	dp = get_dp_locked(dp_idx);
+	dp_mutex_lock();
+	dp = get_dp(dp_idx);
 	err = -ENODEV;
 	if (!dp)
-		goto out_unlock_rtnl;
+		goto out_unlock;
 
 	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
 		if (!dp->ports[port_no])
 			goto got_port_no;
 	err = -EFBIG;
-	goto out_unlock_dp;
+	goto out_unlock;
 
 got_port_no:
 	err = new_vport(dp, &port, port_no);
 	if (err)
-		goto out_unlock_dp;
+		goto out_unlock;
 
 	set_internal_devs_mtu(dp);
 	dp_sysfs_add_if(dp->ports[port_no]);
 
 	err = put_user(port_no, &portp->port);
 
-out_unlock_dp:
-	mutex_unlock(&dp->mutex);
-out_unlock_rtnl:
+out_unlock:
+	dp_mutex_unlock();
 	rtnl_unlock();
 out:
 	return err;
@@ -430,9 +444,7 @@ int dp_detach_port(struct vport *p)
 	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
 
 	/* Then destroy it. */
-	vport_lock();
 	err = vport_del(p);
-	vport_unlock();
 
 	return err;
 }
@@ -448,21 +460,21 @@ static int detach_port(int dp_idx, int port_no)
 		goto out;
 
 	rtnl_lock();
-	dp = get_dp_locked(dp_idx);
+	dp_mutex_lock();
+	dp = get_dp(dp_idx);
 	err = -ENODEV;
 	if (!dp)
-		goto out_unlock_rtnl;
+		goto out_unlock;
 
 	p = dp->ports[port_no];
 	err = -ENOENT;
 	if (!p)
-		goto out_unlock_dp;
+		goto out_unlock;
 
 	err = dp_detach_port(p);
 
-out_unlock_dp:
-	mutex_unlock(&dp->mutex);
-out_unlock_rtnl:
+out_unlock:
+	dp_mutex_unlock();
 	rtnl_unlock();
 out:
 	return err;
@@ -876,7 +888,7 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
 			goto error;
 
 		old_acts = rcu_dereference_protected(flow->sf_acts,
-						     lockdep_is_held(&dp->mutex));
+						     lockdep_is_held(&dp_mutex));
 		if (old_acts->actions_len != new_acts->actions_len ||
 		    memcmp(old_acts->actions, new_acts->actions,
 			   old_acts->actions_len)) {
@@ -950,7 +962,7 @@ static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
 		return 0;
 
 	sf_acts = rcu_dereference_protected(flow->sf_acts,
-					    lockdep_is_held(&dp->mutex));
+					    lockdep_is_held(&dp_mutex));
 	if (put_user(sf_acts->actions_len, actions_lenp) ||
 	    (actions && copy_to_user(actions, sf_acts->actions,
 				     min(sf_acts->actions_len, actions_len))))
@@ -1281,7 +1293,6 @@ static int query_port(struct datapath *dp, struct odp_port __user *uport)
 
 		port.devname[IFNAMSIZ - 1] = '\0';
 
-		vport_lock();
 		rcu_read_lock();
 
 		vport = vport_locate(port.devname);
@@ -1298,7 +1309,6 @@ static int query_port(struct datapath *dp, struct odp_port __user *uport)
 
 error_unlock:
 		rcu_read_unlock();
-		vport_unlock();
 
 		if (err)
 			return err;
@@ -1364,6 +1374,8 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd,
 	unsigned int sflow_probability;
 	int err;
 
+	genl_lock();
+
 	/* Handle commands with special locking requirements up front. */
 	switch (cmd) {
 	case ODP_DP_CREATE:
@@ -1413,10 +1425,11 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd,
 		goto exit;
 	}
 
-	dp = get_dp_locked(dp_idx);
+	dp_mutex_lock();
+	dp = get_dp(dp_idx);
 	err = -ENODEV;
 	if (!dp)
-		goto exit;
+		goto exit_unlock;
 
 	switch (cmd) {
 	case ODP_DP_STATS:
@@ -1499,8 +1512,10 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd,
 		err = -ENOIOCTLCMD;
 		break;
 	}
-	mutex_unlock(&dp->mutex);
+exit_unlock:
+	dp_mutex_unlock();
 exit:
+	genl_unlock();
 	return err;
 }
 
@@ -1751,7 +1766,8 @@ static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned
 		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
 	}
 
-	dp = get_dp_locked(dp_idx);
+	dp_mutex_lock();
+	dp = get_dp(dp_idx);
 	err = -ENODEV;
 	if (!dp)
 		goto exit;
@@ -1785,8 +1801,8 @@ static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned
 		err = -ENOIOCTLCMD;
 		break;
 	}
-	mutex_unlock(&dp->mutex);
 exit:
+	dp_mutex_unlock();
 	return err;
 }
 #endif
@@ -1885,50 +1901,12 @@ fault:
 	return -EFAULT;
 }
 
-static ssize_t openvswitch_read(struct file *f, char __user *buf,
-				size_t nbytes, loff_t *ppos)
+static ssize_t openvswitch_read_skb(struct sk_buff *skb, char __user *buf,
+				    size_t nbytes)
 {
-	int listeners = get_listen_mask(f);
-	int dp_idx = iminor(f->f_dentry->d_inode);
-	struct datapath *dp = get_dp_locked(dp_idx);
-	struct sk_buff *skb;
 	size_t copy_bytes, tot_copy_bytes;
 	int retval;
 
-	if (!dp)
-		return -ENODEV;
-
-	if (nbytes == 0 || !listeners)
-		return 0;
-
-	for (;;) {
-		int i;
-
-		for (i = 0; i < DP_N_QUEUES; i++) {
-			if (listeners & (1 << i)) {
-				skb = skb_dequeue(&dp->queues[i]);
-				if (skb)
-					goto success;
-			}
-		}
-
-		if (f->f_flags & O_NONBLOCK) {
-			retval = -EAGAIN;
-			goto error;
-		}
-
-		wait_event_interruptible(dp->waitqueue,
-					 dp_has_packet_of_interest(dp,
-								   listeners));
-
-		if (signal_pending(current)) {
-			retval = -ERESTARTSYS;
-			goto error;
-		}
-	}
-success:
-	mutex_unlock(&dp->mutex);
-
 	copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
 
 	retval = 0;
@@ -1948,7 +1926,7 @@ success:
 				csump = (__sum16 __user *)(buf + csum_start + csum_offset);
 
 				BUG_ON((char __user *)csump + sizeof(__sum16) >
-					buf + nbytes);
+				       buf + nbytes);
 				put_user(csum_fold(csum), csump);
 			}
 		} else
@@ -1968,27 +1946,67 @@ success:
 
 	kfree_skb(skb);
 	return retval;
+}
 
-error:
-	mutex_unlock(&dp->mutex);
+static ssize_t openvswitch_read(struct file *f, char __user *buf,
+				size_t nbytes, loff_t *ppos)
+{
+	int listeners = get_listen_mask(f);
+	int dp_idx = iminor(f->f_dentry->d_inode);
+	struct datapath *dp;
+	int retval;
+	int i;
+
+	dp_mutex_lock();
+
+	dp = get_dp(dp_idx);
+	if (!dp) {
+		retval = -ENODEV;
+		goto exit_unlock;
+	}
+
+	if (nbytes == 0) {
+		retval = 0;
+		goto exit_unlock;
+	}
+
+	for (i = 0; i < DP_N_QUEUES; i++) {
+		if (listeners & (1 << i)) {
+			struct sk_buff *skb = skb_dequeue(&dp->queues[i]);
+			if (!skb)
+				continue;
+
+			dp_mutex_unlock();
+			retval = openvswitch_read_skb(skb, buf, nbytes);
+			goto exit;
+		}
+	}
+	retval = -EAGAIN;
+
+exit_unlock:
+	dp_mutex_unlock();
+exit:
 	return retval;
 }
 
 static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
 {
 	int dp_idx = iminor(file->f_dentry->d_inode);
-	struct datapath *dp = get_dp_locked(dp_idx);
+	struct datapath *dp;
 	unsigned int mask;
 
+	dp_mutex_lock();
+	dp = get_dp(dp_idx);
 	if (dp) {
 		mask = 0;
 		poll_wait(file, &dp->waitqueue, wait);
 		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
 			mask |= POLLIN | POLLRDNORM;
-		mutex_unlock(&dp->mutex);
 	} else {
 		mask = POLLIN | POLLRDNORM | POLLHUP;
 	}
+	dp_mutex_unlock();
+
 	return mask;
 }
 
diff --git a/datapath/datapath.h b/datapath/datapath.h
index e4c6534..d41c41a 100644
--- a/datapath/datapath.h
+++ b/datapath/datapath.h
@@ -59,7 +59,6 @@ struct dp_stats_percpu {
 
 /**
  * struct datapath - datapath for flow-based packet switching
- * @mutex: Mutual exclusion for ioctls.
  * @dp_idx: Datapath number (index into the dps[] array in datapath.c).
  * @ifobj: Represents /sys/class/net/<devname>/brif.
  * @drop_frags: Drop all IP fragments if nonzero.
@@ -75,9 +74,11 @@ struct dp_stats_percpu {
  * @sflow_probability: Number of packets out of UINT_MAX to sample to the
  * %ODPL_SFLOW queue, e.g. (@sflow_probability/UINT_MAX) is the probability of
  * sampling a given packet.
+ *
+ * Protected on the read side by rcu_read_lock, on the write side by dp_mutex.
+ * See comment on dp_mutex for locking hierarchy.
  */
 struct datapath {
-	struct mutex mutex;
 	int dp_idx;
 	struct kobject ifobj;
 
@@ -124,6 +125,9 @@ struct ovs_skb_cb {
 extern struct notifier_block dp_device_notifier;
 extern int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
 
+void dp_mutex_lock(void);
+void dp_mutex_unlock(void);
+
 void dp_process_received_packet(struct vport *, struct sk_buff *);
 int dp_detach_port(struct vport *);
 int dp_output_control(struct datapath *, struct sk_buff *, int, u64 arg);
diff --git a/datapath/dp_notify.c b/datapath/dp_notify.c
index 1415833..de63989 100644
--- a/datapath/dp_notify.c
+++ b/datapath/dp_notify.c
@@ -34,18 +34,18 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
 	switch (event) {
 	case NETDEV_UNREGISTER:
 		if (!is_internal_dev(dev)) {
-			mutex_lock(&dp->mutex);
+			dp_mutex_lock();
 			dp_detach_port(vport);
-			mutex_unlock(&dp->mutex);
+			dp_mutex_unlock();
 		}
 		break;
 
 	case NETDEV_CHANGENAME:
 		if (vport->port_no != ODPP_LOCAL) {
-			mutex_lock(&dp->mutex);
+			dp_mutex_lock();
 			dp_sysfs_del_if(vport);
 			dp_sysfs_add_if(vport);
-			mutex_unlock(&dp->mutex);
+			dp_mutex_unlock();
 		}
 		break;
 
diff --git a/datapath/vport.c b/datapath/vport.c
index e699f8c..ddc4b1f 100644
--- a/datapath/vport.c
+++ b/datapath/vport.c
@@ -41,52 +41,6 @@ static int n_vport_types;
 static struct hlist_head *dev_table;
 #define VPORT_HASH_BUCKETS 1024
 
-/* Both RTNL lock and vport_mutex need to be held when updating dev_table.
- *
- * If you use vport_locate and then perform some operations, you need to hold
- * one of these locks if you don't want the vport to be deleted out from under
- * you.
- *
- * If you get a reference to a vport through a datapath, it is protected
- * by RCU and you need to hold rcu_read_lock instead when reading.
- *
- * If multiple locks are taken, the hierarchy is:
- * 1. RTNL
- * 2. DP
- * 3. vport
- */
-static DEFINE_MUTEX(vport_mutex);
-
-/**
- *	vport_lock - acquire vport lock
- *
- * Acquire global vport lock.  See above comment about locking requirements
- * and specific function definitions.  May sleep.
- */
-void vport_lock(void)
-{
-	mutex_lock(&vport_mutex);
-}
-
-/**
- *	vport_unlock - release vport lock
- *
- * Release lock acquired with vport_lock.
- */
-void vport_unlock(void)
-{
-	mutex_unlock(&vport_mutex);
-}
-
-#define ASSERT_VPORT()						\
-do {								\
-	if (unlikely(!mutex_is_locked(&vport_mutex))) {		\
-		pr_err("vport lock not held at %s (%d)\n",	\
-		       __FILE__, __LINE__);			\
-		dump_stack();					\
-	}							\
-} while (0)
-
 /**
  *	vport_init - initialize vport subsystem
  *
@@ -141,7 +95,7 @@ static void vport_del_all(void)
 	int i;
 
 	rtnl_lock();
-	vport_lock();
+	dp_mutex_lock();
 
 	for (i = 0; i < VPORT_HASH_BUCKETS; i++) {
 		struct hlist_head *bucket = &dev_table[i];
@@ -152,7 +106,7 @@ static void vport_del_all(void)
 			vport_del(vport);
 	}
 
-	vport_unlock();
+	dp_mutex_unlock();
 	rtnl_unlock();
 }
 
@@ -198,6 +152,7 @@ int vport_user_mod(const struct odp_port __user *uport)
 	port.devname[IFNAMSIZ - 1] = '\0';
 
 	rtnl_lock();
+	dp_mutex_lock();
 
 	vport = vport_locate(port.devname);
 	if (!vport) {
@@ -205,11 +160,10 @@ int vport_user_mod(const struct odp_port __user *uport)
 		goto out;
 	}
 
-	vport_lock();
 	err = vport_mod(vport, &port);
-	vport_unlock();
-
+
 out:
+	dp_mutex_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -233,8 +187,7 @@ int vport_user_stats_get(struct odp_vport_stats_req __user *ustats_req)
 
 	stats_req.devname[IFNAMSIZ - 1] = '\0';
 
-	vport_lock();
-
+	dp_mutex_lock();
 	vport = vport_locate(stats_req.devname);
 	if (!vport) {
 		err = -ENODEV;
@@ -244,7 +197,7 @@ int vport_user_stats_get(struct odp_vport_stats_req __user *ustats_req)
 	err = vport_get_stats(vport, &stats_req.stats);
 
 out:
-	vport_unlock();
+	dp_mutex_unlock();
 
 	if (!err)
 		if (copy_to_user(ustats_req, &stats_req, sizeof(struct odp_vport_stats_req)))
@@ -276,7 +230,7 @@ int vport_user_stats_set(struct odp_vport_stats_req __user *ustats_req)
 	stats_req.devname[IFNAMSIZ - 1] = '\0';
 
 	rtnl_lock();
-	vport_lock();
+	dp_mutex_lock();
 
 	vport = vport_locate(stats_req.devname);
 	if (!vport) {
@@ -287,7 +241,7 @@ int vport_user_stats_set(struct odp_vport_stats_req __user *ustats_req)
 	err = vport_set_stats(vport, &stats_req.stats);
 
 out:
-	vport_unlock();
+	dp_mutex_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -312,8 +266,7 @@ int vport_user_ether_get(struct odp_vport_ether __user *uvport_ether)
 
 	vport_ether.devname[IFNAMSIZ - 1] = '\0';
 
-	vport_lock();
-
+	dp_mutex_lock();
 	vport = vport_locate(vport_ether.devname);
 	if (!vport) {
 		err = -ENODEV;
@@ -325,8 +278,7 @@ int vport_user_ether_get(struct odp_vport_ether __user *uvport_ether)
 	rcu_read_unlock();
 
 out:
-	vport_unlock();
-
+	dp_mutex_unlock();
 	if (!err)
 		if (copy_to_user(uvport_ether, &vport_ether, sizeof(struct odp_vport_ether)))
 			err = -EFAULT;
@@ -356,7 +308,7 @@ int vport_user_ether_set(struct odp_vport_ether __user *uvport_ether)
 	vport_ether.devname[IFNAMSIZ - 1] = '\0';
 
 	rtnl_lock();
-	vport_lock();
+	dp_mutex_lock();
 
 	vport = vport_locate(vport_ether.devname);
 	if (!vport) {
@@ -367,7 +319,7 @@ int vport_user_ether_set(struct odp_vport_ether __user *uvport_ether)
 	err = vport_set_addr(vport, vport_ether.ether_addr);
 
 out:
-	vport_unlock();
+	dp_mutex_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -391,8 +343,7 @@ int vport_user_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
 
 	vport_mtu.devname[IFNAMSIZ - 1] = '\0';
 
-	vport_lock();
-
+	dp_mutex_lock();
 	vport = vport_locate(vport_mtu.devname);
 	if (!vport) {
 		err = -ENODEV;
@@ -402,8 +353,7 @@ int vport_user_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
 	vport_mtu.mtu = vport_get_mtu(vport);
 
 out:
-	vport_unlock();
-
+	dp_mutex_unlock();
 	if (!err)
 		if (copy_to_user(uvport_mtu, &vport_mtu, sizeof(struct odp_vport_mtu)))
 			err = -EFAULT;
@@ -432,7 +382,7 @@ int vport_user_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
 	vport_mtu.devname[IFNAMSIZ - 1] = '\0';
 
 	rtnl_lock();
-	vport_lock();
+	dp_mutex_lock();
 
 	vport = vport_locate(vport_mtu.devname);
 	if (!vport) {
@@ -443,7 +393,7 @@ int vport_user_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
 	err = vport_set_mtu(vport, vport_mtu.mtu);
 
 out:
-	vport_unlock();
+	dp_mutex_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -458,10 +408,6 @@ static struct hlist_head *hash_bucket(const char *name)
  *	vport_locate - find a port that has already been created
  *
  * @name: name of port to find
- *
- * Either RTNL or vport lock must be acquired before calling this function
- * and held while using the found port.  See the locking comments at the
- * top of the file.
  */
 struct vport *vport_locate(const char *name)
 {
@@ -469,11 +415,6 @@ struct vport *vport_locate(const char *name)
 	struct vport *vport;
 	struct hlist_node *node;
 
-	if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
-		pr_err("neither RTNL nor vport lock held in vport_locate\n");
-		dump_stack();
-	}
-
 	rcu_read_lock();
 
 	hlist_for_each_entry(vport, node, bucket, hash_node)
@@ -578,8 +519,7 @@ void vport_free(struct vport *vport)
  * @parms: Information about new vport.
  *
  * Creates a new vport with the specified configuration (which is dependent on
- * device type) and attaches it to a datapath.  Both RTNL and vport locks must
- * be held.
+ * device type) and attaches it to a datapath.  RTNL lock must be held.
  */
 struct vport *vport_add(const struct vport_parms *parms)
 {
@@ -588,7 +528,6 @@ struct vport *vport_add(const struct vport_parms *parms)
 	int i;
 
 	ASSERT_RTNL();
-	ASSERT_VPORT();
 
 	for (i = 0; i < n_vport_types; i++) {
 		if (!strcmp(vport_ops_list[i]->type, parms->type)) {
@@ -616,12 +555,11 @@ out:
  * @port: New configuration.
  *
  * Modifies an existing device with the specified configuration (which is
- * dependent on device type).  Both RTNL and vport locks must be held.
+ * dependent on device type).  RTNL lock must be held.
  */
 int vport_mod(struct vport *vport, struct odp_port *port)
 {
 	ASSERT_RTNL();
-	ASSERT_VPORT();
 
 	if (vport->ops->modify)
 		return vport->ops->modify(vport, port);
@@ -635,12 +573,11 @@ int vport_mod(struct vport *vport, struct odp_port *port)
  * @vport: vport to delete.
  *
  * Detaches @vport from its datapath and destroys it.  It is possible to fail
- * for reasons such as lack of memory.  Both RTNL and vport locks must be held.
+ * for reasons such as lack of memory.  RTNL lock must be held.
  */
 int vport_del(struct vport *vport)
 {
 	ASSERT_RTNL();
-	ASSERT_VPORT();
 
 	unregister_vport(vport);
 
diff --git a/datapath/vport.h b/datapath/vport.h
index fb3a3e3..2ba85f7 100644
--- a/datapath/vport.h
+++ b/datapath/vport.h
@@ -32,9 +32,6 @@ int vport_user_ether_set(struct odp_vport_ether __user *);
 int vport_user_mtu_get(struct odp_vport_mtu __user *);
 int vport_user_mtu_set(struct odp_vport_mtu __user *);
 
-void vport_lock(void);
-void vport_unlock(void);
-
 int vport_init(void);
 void vport_exit(void);
 
-- 
1.7.1




