[ovs-dev] [PATCH] clang: Add annotations for thread safety check.

Alex Wang alexw at nicira.com
Fri Jul 26 21:29:38 UTC 2013


Note:
1. Currently, the patch (to enable the "-Wthread-safety" flag for C) has not
been applied to 'clang' master. So, I'm not sure whether we should apply this
patch even if it looks good.

The progress can be checked at this link:
http://clang-developers.42468.n3.nabble.com/PATCH-2-2-Thread-safety-Add-test-for-Wthread-safety-check-for-C-td4033444.html

2. I compiled ovs on my system using: gcc, gcc + sparse, clang, and clang with
-Wthread-safety. All configurations compiled correctly, and "make check" passed
all tests.

3. As Ethan suggested, the next step may be to hack sparse so that we can
run "clang + sparse + -Wthread-safety" all together (a usage sketch of the
new annotations follows these notes).

Hope to hear more comments,

Thanks,
Alex Wang,


On Fri, Jul 26, 2013 at 2:41 PM, Alex Wang <alexw at nicira.com> wrote:

> From: Ethan Jackson <ethan at nicira.com>
>
> This commit adds annotations for the thread safety check.  The check
> can be conducted by using the -Wthread-safety flag in clang.
>
> Co-authored-by: Alex Wang <alexw at nicira.com>
> Signed-off-by: Alex Wang <alexw at nicira.com>
> ---
>  include/sparse/pthread.h  |   14 +++---
>  lib/command-line.c        |   28 +++++++----
>  lib/compiler.h            |  109 +++++++++++++++++++++++++++++++----------
>  lib/dpif-linux.c          |   34 ++++++-------
>  lib/dpif-netdev.c         |   98 ++++++++++++++++++-------------------
>  lib/dpif.c                |   30 ++++++------
>  lib/fatal-signal.c        |   24 ++++-----
>  lib/jsonrpc.c             |    8 +--
>  lib/lockfile.c            |   32 ++++++------
>  lib/netdev-dummy.c        |    9 +---
>  lib/netlink-socket.c      |   12 ++---
>  lib/ofp-msgs.c            |    8 +--
>  lib/ofp-parse.c           |    8 +--
>  lib/ovs-atomic-gcc4+.c    |   14 +++---
>  lib/ovs-atomic-pthreads.c |    8 +--
>  lib/ovs-atomic-pthreads.h |    2 +-
>  lib/ovs-thread.c          |  119 +++++++++++++++++++++++++++++++++------------
>  lib/ovs-thread.h          |  103 ++++++++++++++++++++++++++++-----------
>  lib/timeval.c             |   22 ++++-----
>  lib/uuid.c                |    6 +--
>  lib/vlog.c                |   84 +++++++++++++++++++-------------
>  lib/vlog.h                |    4 +-
>  utilities/ovs-dev.py      |   13 ++++-
>  vswitchd/system-stats.c   |   28 +++++------
>  24 files changed, 500 insertions(+), 317 deletions(-)
>
> diff --git a/include/sparse/pthread.h b/include/sparse/pthread.h
> index 6cdf5c8..7ff43f7 100644
> --- a/include/sparse/pthread.h
> +++ b/include/sparse/pthread.h
> @@ -23,15 +23,15 @@
>
>  #include "compiler.h"
>
> -int pthread_mutex_lock(pthread_mutex_t *mutex) OVS_ACQUIRES(mutex);
> +int pthread_mutex_lock(pthread_mutex_t *mutex) OVS_ACQ_WRLOCK(mutex);
>  int pthread_mutex_unlock(pthread_mutex_t *mutex) OVS_RELEASES(mutex);
>
> -int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
> -int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
> +int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQ_RDLOCK(rwlock);
> +int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQ_WRLOCK(rwlock);
>  int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) OVS_RELEASES(rwlock);
>
>  int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
> -    OVS_MUST_HOLD(mutex);
> +    OVS_REQ_WRLOCK(mutex);
>
>  /* Sparse complains about the proper PTHREAD_*_INITIALIZER definitions.
>   * Luckily, it's not a real compiler so we can overwrite it with something
> @@ -52,7 +52,7 @@ int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t
> *mutex)
>      ({                                                  \
>          int retval = pthread_mutex_trylock(mutex);      \
>          if (!retval) {                                  \
> -            OVS_ACQUIRE(MUTEX);                         \
> +            OVS_MACRO_LOCK(MUTEX);                      \
>          }                                               \
>          retval;                                         \
>      })
> @@ -61,7 +61,7 @@ int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t
> *mutex)
>      ({                                                  \
>          int retval = pthread_rwlock_tryrdlock(rwlock);  \
>          if (!retval) {                                  \
> -            OVS_ACQUIRE(RWLOCK);                        \
> +            OVS_MACRO_LOCK(RWLOCK);                     \
>          }                                               \
>          retval;                                         \
>      })
> @@ -69,7 +69,7 @@ int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t
> *mutex)
>      ({                                                  \
>          int retval = pthread_rwlock_trywrlock(rwlock);  \
>          if (!retval) {                                  \
> -            OVS_ACQUIRE(RWLOCK);                        \
> +            OVS_MACRO_LOCK(RWLOCK);                     \
>          }                                               \
>          retval;                                         \
>      })
> diff --git a/lib/command-line.c b/lib/command-line.c
> index 39b26da..805e51b 100644
> --- a/lib/command-line.c
> +++ b/lib/command-line.c
> @@ -94,10 +94,16 @@ run_command(int argc, char *argv[], const struct
> command commands[])
>  /* Process title. */
>
>  #ifdef LINUX_DATAPATH
> -static char *argv_start;       /* Start of command-line arguments in memory. */
> -static size_t argv_size;       /* Number of bytes of command-line arguments. */
> -static char *saved_proctitle;  /* Saved command-line arguments. */
> -static pthread_mutex_t proctitle_mutex = PTHREAD_MUTEX_INITIALIZER;
> +static struct ovs_mutex proctitle_mutex = OVS_MUTEX_INITIALIZER;
> +
> +/* Start of command-line arguments in memory. */
> +static char *argv_start OVS_GUARDED_BY(proctitle_mutex);
> +
> +/* Number of bytes of command-line arguments. */
> +static size_t argv_size OVS_GUARDED_BY(proctitle_mutex);
> +
> +/* Saved command-line arguments. */
> +static char *saved_proctitle OVS_GUARDED_BY(proctitle_mutex);
>
>  /* Prepares the process so that proctitle_set() can later succeed.
>   *
> @@ -118,6 +124,7 @@ proctitle_init(int argc, char **argv)
>          return;
>      }
>
> +    ovs_mutex_lock(&proctitle_mutex);
>      /* Specialized version of first loop iteration below. */
>      argv_start = argv[0];
>      argv_size = strlen(argv[0]) + 1;
> @@ -141,6 +148,7 @@ proctitle_init(int argc, char **argv)
>          /* Copy out the old argument so we can reuse the space. */
>          argv[i] = xstrdup(argv[i]);
>      }
> +    ovs_mutex_unlock(&proctitle_mutex);
>  }
>
>  /* Changes the name of the process, as shown by "ps", to the program name
> @@ -151,11 +159,11 @@ proctitle_set(const char *format, ...)
>      va_list args;
>      int n;
>
> +    ovs_mutex_lock(&proctitle_mutex);
>      if (!argv_start || argv_size < 8) {
> -        return;
> +        goto out;
>      }
>
> -    xpthread_mutex_lock(&proctitle_mutex);
>      if (!saved_proctitle) {
>          saved_proctitle = xmemdup(argv_start, argv_size);
>      }
> @@ -174,20 +182,22 @@ proctitle_set(const char *format, ...)
>          memset(&argv_start[n], '\0', argv_size - n);
>      }
>      va_end(args);
> -    xpthread_mutex_unlock(&proctitle_mutex);
> +
> +out:
> +    ovs_mutex_unlock(&proctitle_mutex);
>  }
>
>  /* Restores the process's original command line, as seen by "ps". */
>  void
>  proctitle_restore(void)
>  {
> -    xpthread_mutex_lock(&proctitle_mutex);
> +    ovs_mutex_lock(&proctitle_mutex);
>      if (saved_proctitle) {
>          memcpy(argv_start, saved_proctitle, argv_size);
>          free(saved_proctitle);
>          saved_proctitle = NULL;
>      }
> -    xpthread_mutex_unlock(&proctitle_mutex);
> +    ovs_mutex_unlock(&proctitle_mutex);
>  }
>  #else  /* !LINUX_DATAPATH*/
>  /* Stubs that don't do anything on non-Linux systems. */
> diff --git a/lib/compiler.h b/lib/compiler.h
> index 4b1834f..ca5af0b 100644
> --- a/lib/compiler.h
> +++ b/lib/compiler.h
> @@ -41,38 +41,95 @@
>  #define OVS_UNLIKELY(CONDITION) (!!(CONDITION))
>  #endif
>
> -#ifdef __CHECKER__
> -/* "sparse" annotations for mutexes and mutex-like constructs.
> +/* TODO: the patch to clang also includes the feature test
> + * __has_feature(c_thread_safety_attributes). */
> +#if __clang_major__ >= 3 && __clang_minor__ >= 4
> +/* "clang" annotations for thread safety check.
> + *
> + * OVS_LOCKABLE indicates that the struct contains a mutex element
> + * which can be locked by ovs_mutex_lock().
> + *
> + * MUTEX below may stand for one or more OVS_LOCKABLE structs.
> + *
> + *
> + * In a global variable declaration:
> + *
> + *
> + * OVS_GUARDED_VAR indicates that the variable must be accessed while
> + * some MUTEX is acquired.
> + *
> + * OVS_GUARDED_BY(...) indicates that the variable must be accessed while
> + * MUTEX is acquired.
>   *
> - * In a function prototype, OVS_ACQUIRES(MUTEX) indicates that the function
> - * must be called without MUTEX acquired and that it returns with MUTEX
> - * acquired.  OVS_RELEASES(MUTEX) indicates the reverse.  OVS_MUST_HOLD
> - * indicates that the function must be called with MUTEX acquired by the
> - * caller and that the function does not release MUTEX.
>   *
> - * In practice, sparse ignores the MUTEX argument.  It need not even be a
> - * valid expression.  It is meant to indicate to human readers what mutex is
> - * being acquired.
> + * In a function prototype:
> + *
> + *
> + * OVS_ACQ_RDLOCK(...) and OVS_ACQ_WRLOCK(...) indicate that the
> + * function must be called without MUTEX acquired and that it returns
> + * with MUTEX acquired. OVS_RELEASES(...) indicates the reverse.
> + *
> + * OVS_TRY_RDLOCK(...) and OVS_TRY_WRLOCK(...) indicate that the
> + * function will try to acquire MUTEX. The macro takes one or more
> + * arguments. The first argument is an integer or boolean value
> + * specifying the return value of a successful lock acquisition.
> + * The remaining arguments are MUTEX.
> + *
> + * OVS_REQ_RDLOCK(...) and OVS_REQ_WRLOCK(...) indicate that the
> + * function must be called with MUTEX acquired by the caller and that
> + * the function does not release MUTEX.
> + *
> + * OVS_LOCKS_EXCLUDED(MUTEX) indicates that the function must be called
> + * without MUTEX acquired.
> + *
> + * */
> +#define OVS_LOCKABLE __attribute__((lockable))
> +#define OVS_REQ_RDLOCK(...) \
> +    __attribute__((shared_locks_required(__VA_ARGS__)))
> +#define OVS_ACQ_RDLOCK(...) \
> +    __attribute__((shared_lock_function(__VA_ARGS__)))
> +#define OVS_REQ_WRLOCK(...) \
> +    __attribute__((exclusive_locks_required(__VA_ARGS__)))
> +#define OVS_ACQ_WRLOCK(...) \
> +    __attribute__((exclusive_lock_function(__VA_ARGS__)))
> +#define OVS_TRY_WRLOCK(...) \
> +    __attribute__((exclusive_trylock_function(__VA_ARGS__)))
> +#define OVS_TRY_RDLOCK(...) \
> +    __attribute__((shared_trylock_function(__VA_ARGS__)))
> +#define OVS_GUARDED_VAR __attribute__((guarded_var))
> +#define OVS_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__)))
> +#define OVS_RELEASES(...) __attribute__((unlock_function(__VA_ARGS__)))
> +#define OVS_LOCKS_EXCLUDED(...) \
> +    __attribute__((locks_excluded(__VA_ARGS__)))
> +#elif __CHECKER__
> +/* "sparse" annotations for mutexes and mutex-like constructs.
>   *
> - * Since sparse ignores MUTEX, it need not be an actual mutex.  It can be
> - * any construct on which paired acquire and release semantics make sense:
> - * read/write locks, temporary memory allocations, whatever.
> + * Change the thread-safety check annotations to use "context" attribute.
>   *
> - * OVS_ACQUIRE, OVS_RELEASE, and OVS_HOLDS are suitable for use within
> macros,
> + * OVS_MACRO_LOCK and OVS_MACRO_RELEASE are suitable for use within
> macros,
>   * where there is no function prototype to annotate. */
> -#define OVS_ACQUIRES(MUTEX) __attribute__((context(MUTEX, 0, 1)))
> -#define OVS_RELEASES(MUTEX) __attribute__((context(MUTEX, 1, 0)))
> -#define OVS_MUST_HOLD(MUTEX) __attribute__((context(MUTEX, 1, 1)))
> -#define OVS_ACQUIRE(MUTEX) __context__(MUTEX, 0, 1)
> -#define OVS_RELEASE(MUTEX) __context__(MUTEX, 1, 0)
> -#define OVS_HOLDS(MUTEX) __context__(MUTEX, 1, 1)
> +#define OVS_LOCKABLE
> +#define OVS_REQ_RDLOCK(...) __attribute__((context(MUTEX, 1, 1)))
> +#define OVS_ACQ_RDLOCK(...) __attribute__((context(MUTEX, 0, 1)))
> +#define OVS_REQ_WRLOCK(...) __attribute__((context(MUTEX, 1, 1)))
> +#define OVS_ACQ_WRLOCK(...) __attribute__((context(MUTEX, 0, 1)))
> +#define OVS_TRY_WRLOCK(...)
> +#define OVS_TRY_RDLOCK(...)
> +#define OVS_GUARDED_VAR
> +#define OVS_GUARDED_BY(...)
> +#define OVS_RELEASES(...) __attribute__((context(MUTEX, 1, 0)))
> +#define OVS_MACRO_LOCK(...) __context__(MUTEX, 0, 1)
> +#define OVS_MACRO_RELEASE(...) __context__(MUTEX, 1, 0)
>  #else
> -#define OVS_ACQUIRES(MUTEX)
> -#define OVS_RELEASES(MUTEX)
> -#define OVS_MUST_HOLD(MUTEX)
> -#define OVS_ACQUIRE(MUTEX)
> -#define OVS_RELEASE(MUTEX)
> -#define OVS_HOLDS(MUTEX)
> +#define OVS_LOCKABLE
> +#define OVS_REQ_RDLOCK(...)
> +#define OVS_ACQ_RDLOCK(...)
> +#define OVS_REQ_WRLOCK(...)
> +#define OVS_ACQ_WRLOCK(...)
> +#define OVS_TRY_WRLOCK(...)
> +#define OVS_TRY_RDLOCK(...)
> +#define OVS_GUARDED_VAR
> +#define OVS_GUARDED_BY(...)
> +#define OVS_RELEASES(...)
> +#define OVS_MACRO_LOCK(...)
> +#define OVS_MACRO_RELEASE(...)
>  #endif
>
>  /* ISO C says that a C implementation may choose any integer type for an
> enum
> diff --git a/lib/dpif-linux.c b/lib/dpif-linux.c
> index a6df339..8c716d9 100644
> --- a/lib/dpif-linux.c
> +++ b/lib/dpif-linux.c
> @@ -140,7 +140,7 @@ struct dpif_linux {
>      int dp_ifindex;
>
>      /* Upcall messages. */
> -    pthread_mutex_t upcall_lock;
> +    struct ovs_mutex upcall_lock;
>      int uc_array_size;          /* Size of 'channels' and 'epoll_events'.
> */
>      struct dpif_channel *channels;
>      struct epoll_event *epoll_events;
> @@ -248,7 +248,7 @@ open_dpif(const struct dpif_linux_dp *dp, struct dpif
> **dpifp)
>
>      dpif = xzalloc(sizeof *dpif);
>      dpif->port_notifier = NULL;
> -    xpthread_mutex_init(&dpif->upcall_lock, NULL);
> +    ovs_mutex_init(&dpif->upcall_lock, NULL);
>      dpif->epoll_fd = -1;
>
>      dpif_init(&dpif->dpif, &dpif_linux_class, dp->name,
> @@ -383,7 +383,7 @@ dpif_linux_close(struct dpif *dpif_)
>      if (dpif->epoll_fd >= 0) {
>          close(dpif->epoll_fd);
>      }
> -    xpthread_mutex_destroy(&dpif->upcall_lock);
> +    ovs_mutex_destroy(&dpif->upcall_lock);
>      free(dpif);
>  }
>
> @@ -572,9 +572,9 @@ dpif_linux_port_add(struct dpif *dpif_, struct netdev
> *netdev,
>      struct dpif_linux *dpif = dpif_linux_cast(dpif_);
>      int error;
>
> -    xpthread_mutex_lock(&dpif->upcall_lock);
> +    ovs_mutex_lock(&dpif->upcall_lock);
>      error = dpif_linux_port_add__(dpif_, netdev, port_nop);
> -    xpthread_mutex_unlock(&dpif->upcall_lock);
> +    ovs_mutex_unlock(&dpif->upcall_lock);
>
>      return error;
>  }
> @@ -603,9 +603,9 @@ dpif_linux_port_del(struct dpif *dpif_, odp_port_t
> port_no)
>      struct dpif_linux *dpif = dpif_linux_cast(dpif_);
>      int error;
>
> -    xpthread_mutex_lock(&dpif->upcall_lock);
> +    ovs_mutex_lock(&dpif->upcall_lock);
>      error = dpif_linux_port_del__(dpif_, port_no);
> -    xpthread_mutex_unlock(&dpif->upcall_lock);
> +    ovs_mutex_unlock(&dpif->upcall_lock);
>
>      return error;
>  }
> @@ -668,14 +668,14 @@ dpif_linux_port_get_pid(const struct dpif *dpif_,
> odp_port_t port_no)
>      uint32_t port_idx = odp_to_u32(port_no);
>      uint32_t pid = 0;
>
> -    xpthread_mutex_lock(&dpif->upcall_lock);
> +    ovs_mutex_lock(&dpif->upcall_lock);
>      if (dpif->epoll_fd >= 0) {
>          /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
>           * channel, since it is not heavily loaded. */
>          uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
>          pid = nl_sock_pid(dpif->channels[idx].sock);
>      }
> -    xpthread_mutex_unlock(&dpif->upcall_lock);
> +    ovs_mutex_unlock(&dpif->upcall_lock);
>
>      return pid;
>  }
> @@ -1310,9 +1310,9 @@ dpif_linux_recv_set(struct dpif *dpif_, bool enable)
>      struct dpif_linux *dpif = dpif_linux_cast(dpif_);
>      int error;
>
> -    xpthread_mutex_lock(&dpif->upcall_lock);
> +    ovs_mutex_lock(&dpif->upcall_lock);
>      error = dpif_linux_recv_set__(dpif_, enable);
> -    xpthread_mutex_unlock(&dpif->upcall_lock);
> +    ovs_mutex_unlock(&dpif->upcall_lock);
>
>      return error;
>  }
> @@ -1463,9 +1463,9 @@ dpif_linux_recv(struct dpif *dpif_, struct
> dpif_upcall *upcall,
>      struct dpif_linux *dpif = dpif_linux_cast(dpif_);
>      int error;
>
> -    xpthread_mutex_lock(&dpif->upcall_lock);
> +    ovs_mutex_lock(&dpif->upcall_lock);
>      error = dpif_linux_recv__(dpif_, upcall, buf);
> -    xpthread_mutex_unlock(&dpif->upcall_lock);
> +    ovs_mutex_unlock(&dpif->upcall_lock);
>
>      return error;
>  }
> @@ -1475,11 +1475,11 @@ dpif_linux_recv_wait(struct dpif *dpif_)
>  {
>      struct dpif_linux *dpif = dpif_linux_cast(dpif_);
>
> -    xpthread_mutex_lock(&dpif->upcall_lock);
> +    ovs_mutex_lock(&dpif->upcall_lock);
>      if (dpif->epoll_fd >= 0) {
>          poll_fd_wait(dpif->epoll_fd, POLLIN);
>      }
> -    xpthread_mutex_unlock(&dpif->upcall_lock);
> +    ovs_mutex_unlock(&dpif->upcall_lock);
>  }
>
>  static void
> @@ -1487,7 +1487,7 @@ dpif_linux_recv_purge(struct dpif *dpif_)
>  {
>      struct dpif_linux *dpif = dpif_linux_cast(dpif_);
>
> -    xpthread_mutex_lock(&dpif->upcall_lock);
> +    ovs_mutex_lock(&dpif->upcall_lock);
>      if (dpif->epoll_fd >= 0) {
>          struct dpif_channel *ch;
>
> @@ -1498,7 +1498,7 @@ dpif_linux_recv_purge(struct dpif *dpif_)
>              }
>          }
>      }
> -    xpthread_mutex_unlock(&dpif->upcall_lock);
> +    ovs_mutex_unlock(&dpif->upcall_lock);
>  }
>
>  const struct dpif_class dpif_linux_class = {
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 8763e5c..ca12aee 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -140,7 +140,7 @@ struct dpif_netdev {
>  static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
>
>  /* Global lock for all data. */
> -static pthread_mutex_t dp_netdev_mutex = PTHREAD_MUTEX_INITIALIZER;
> +static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
>
>  static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
>                                struct dp_netdev_port **portp);
> @@ -183,11 +183,11 @@ dpif_netdev_enumerate(struct sset *all_dps)
>  {
>      struct shash_node *node;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      SHASH_FOR_EACH(node, &dp_netdevs) {
>          sset_add(all_dps, node->name);
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return 0;
>  }
> @@ -301,7 +301,7 @@ dpif_netdev_open(const struct dpif_class *class, const
> char *name,
>      struct dp_netdev *dp;
>      int error;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      dp = shash_find_data(&dp_netdevs, name);
>      if (!dp) {
>          error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
> @@ -313,7 +313,7 @@ dpif_netdev_open(const struct dpif_class *class, const
> char *name,
>      if (!error) {
>          *dpifp = create_dpif_netdev(dp);
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -353,7 +353,7 @@ dpif_netdev_close(struct dpif *dpif)
>  {
>      struct dp_netdev *dp = get_dp_netdev(dpif);
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>
>      ovs_assert(dp->open_cnt > 0);
>      if (--dp->open_cnt == 0 && dp->destroyed) {
> @@ -362,7 +362,7 @@ dpif_netdev_close(struct dpif *dpif)
>      }
>      free(dpif);
>
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>  }
>
>  static int
> @@ -370,9 +370,9 @@ dpif_netdev_destroy(struct dpif *dpif)
>  {
>      struct dp_netdev *dp = get_dp_netdev(dpif);
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      dp->destroyed = true;
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return 0;
>  }
> @@ -382,12 +382,12 @@ dpif_netdev_get_stats(const struct dpif *dpif,
> struct dpif_dp_stats *stats)
>  {
>      struct dp_netdev *dp = get_dp_netdev(dpif);
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      stats->n_flows = hmap_count(&dp->flow_table);
>      stats->n_hit = dp->n_hit;
>      stats->n_missed = dp->n_missed;
>      stats->n_lost = dp->n_lost;
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return 0;
>  }
> @@ -460,7 +460,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev
> *netdev,
>      odp_port_t port_no;
>      int error;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof
> namebuf);
>      if (*port_nop != ODPP_NONE) {
>          uint32_t port_idx = odp_to_u32(*port_nop);
> @@ -480,7 +480,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev
> *netdev,
>          *port_nop = port_no;
>          error = do_add_port(dp, dpif_port, netdev_get_type(netdev),
> port_no);
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -491,9 +491,9 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t
> port_no)
>      struct dp_netdev *dp = get_dp_netdev(dpif);
>      int error;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      error = port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -573,12 +573,12 @@ dpif_netdev_port_query_by_number(const struct dpif
> *dpif, odp_port_t port_no,
>      struct dp_netdev_port *port;
>      int error;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      error = get_port_by_number(dp, port_no, &port);
>      if (!error && dpif_port) {
>          answer_port_query(port, dpif_port);
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -591,12 +591,12 @@ dpif_netdev_port_query_by_name(const struct dpif
> *dpif, const char *devname,
>      struct dp_netdev_port *port;
>      int error;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      error = get_port_by_name(dp, devname, &port);
>      if (!error && dpif_port) {
>          answer_port_query(port, dpif_port);
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -630,9 +630,9 @@ dpif_netdev_flow_flush(struct dpif *dpif)
>  {
>      struct dp_netdev *dp = get_dp_netdev(dpif);
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      dp_netdev_flow_flush(dp);
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return 0;
>  }
> @@ -657,7 +657,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif,
> void *state_,
>      struct dp_netdev *dp = get_dp_netdev(dpif);
>      uint32_t port_idx;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      for (port_idx = odp_to_u32(state->port_no);
>           port_idx < MAX_PORTS; port_idx++) {
>          struct dp_netdev_port *port = dp->ports[port_idx];
> @@ -668,12 +668,12 @@ dpif_netdev_port_dump_next(const struct dpif *dpif,
> void *state_,
>              dpif_port->type = port->type;
>              dpif_port->port_no = port->port_no;
>              state->port_no = u32_to_odp(port_idx + 1);
> -            xpthread_mutex_unlock(&dp_netdev_mutex);
> +            ovs_mutex_unlock(&dp_netdev_mutex);
>
>              return 0;
>          }
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return EOF;
>  }
> @@ -693,14 +693,14 @@ dpif_netdev_port_poll(const struct dpif *dpif_, char
> **devnamep OVS_UNUSED)
>      struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
>      int error;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      if (dpif->dp_serial != dpif->dp->serial) {
>          dpif->dp_serial = dpif->dp->serial;
>          error = ENOBUFS;
>      } else {
>          error = EAGAIN;
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -714,11 +714,11 @@ dpif_netdev_port_poll_wait(const struct dpif *dpif_)
>       * function and the poll_block() in one thread and a change in
>       * dpif->dp->serial in another thread. */
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      if (dpif->dp_serial != dpif->dp->serial) {
>          poll_immediate_wake();
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>  }
>
>  static struct dp_netdev_flow *
> @@ -791,7 +791,7 @@ dpif_netdev_flow_get(const struct dpif *dpif,
>          return error;
>      }
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      flow = dp_netdev_lookup_flow(dp, &key);
>      if (flow) {
>          if (stats) {
> @@ -803,7 +803,7 @@ dpif_netdev_flow_get(const struct dpif *dpif,
>      } else {
>          error = ENOENT;
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -860,7 +860,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct
> dpif_flow_put *put)
>          return error;
>      }
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      flow = dp_netdev_lookup_flow(dp, &key);
>      if (!flow) {
>          if (put->flags & DPIF_FP_CREATE) {
> @@ -891,7 +891,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct
> dpif_flow_put *put)
>              error = EEXIST;
>          }
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -909,7 +909,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct
> dpif_flow_del *del)
>          return error;
>      }
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      flow = dp_netdev_lookup_flow(dp, &key);
>      if (flow) {
>          if (del->stats) {
> @@ -919,7 +919,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct
> dpif_flow_del *del)
>      } else {
>          error = ENOENT;
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -956,10 +956,10 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif,
> void *state_,
>      struct dp_netdev_flow *flow;
>      struct hmap_node *node;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      node = hmap_at_position(&dp->flow_table, &state->bucket,
> &state->offset);
>      if (!node) {
> -        xpthread_mutex_unlock(&dp_netdev_mutex);
> +        ovs_mutex_unlock(&dp_netdev_mutex);
>          return EOF;
>      }
>
> @@ -993,7 +993,7 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif,
> void *state_,
>          *stats = &state->stats;
>      }
>
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>      return 0;
>  }
>
> @@ -1029,10 +1029,10 @@ dpif_netdev_execute(struct dpif *dpif, const
> struct dpif_execute *execute)
>      error = dpif_netdev_flow_from_nlattrs(execute->key, execute->key_len,
>                                            &key);
>      if (!error) {
> -        xpthread_mutex_lock(&dp_netdev_mutex);
> +        ovs_mutex_lock(&dp_netdev_mutex);
>          dp_netdev_execute_actions(dp, &copy, &key,
>                                    execute->actions, execute->actions_len);
> -        xpthread_mutex_unlock(&dp_netdev_mutex);
> +        ovs_mutex_unlock(&dp_netdev_mutex);
>      }
>
>      ofpbuf_uninit(&copy);
> @@ -1075,7 +1075,7 @@ dpif_netdev_recv(struct dpif *dpif, struct
> dpif_upcall *upcall,
>      struct dp_netdev_queue *q;
>      int error;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      q = find_nonempty_queue(dpif);
>      if (q) {
>          struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
> @@ -1090,7 +1090,7 @@ dpif_netdev_recv(struct dpif *dpif, struct
> dpif_upcall *upcall,
>      } else {
>          error = EAGAIN;
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>
>      return error;
>  }
> @@ -1102,20 +1102,20 @@ dpif_netdev_recv_wait(struct dpif *dpif)
>       * function and the poll_block() in one thread and a packet being
> queued in
>       * another thread. */
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      if (find_nonempty_queue(dpif)) {
>          poll_immediate_wake();
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>  }
>
>  static void
>  dpif_netdev_recv_purge(struct dpif *dpif)
>  {
>      struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      dp_netdev_purge_queues(dpif_netdev->dp);
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>  }
>
>  static void
> @@ -1160,7 +1160,7 @@ dpif_netdev_run(struct dpif *dpif)
>      struct dp_netdev *dp;
>      struct ofpbuf packet;
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      dp = get_dp_netdev(dpif);
>      ofpbuf_init(&packet,
>                  DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu);
> @@ -1183,7 +1183,7 @@ dpif_netdev_run(struct dpif *dpif)
>          }
>      }
>      ofpbuf_uninit(&packet);
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>  }
>
>  static void
> @@ -1204,13 +1204,13 @@ dpif_netdev_wait(struct dpif *dpif)
>       *     - In the dpif_port_remove() case, A might wake up spuriously,
> but
>       *       that is harmless. */
>
> -    xpthread_mutex_lock(&dp_netdev_mutex);
> +    ovs_mutex_lock(&dp_netdev_mutex);
>      LIST_FOR_EACH (port, node, &get_dp_netdev(dpif)->port_list) {
>          if (port->rx) {
>              netdev_rx_wait(port->rx);
>          }
>      }
> -    xpthread_mutex_unlock(&dp_netdev_mutex);
> +    ovs_mutex_unlock(&dp_netdev_mutex);
>  }
>
>  static void
> diff --git a/lib/dpif.c b/lib/dpif.c
> index 0d8dd9d..3d6aed7 100644
> --- a/lib/dpif.c
> +++ b/lib/dpif.c
> @@ -71,7 +71,7 @@ static struct shash dpif_classes =
> SHASH_INITIALIZER(&dpif_classes);
>  static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
>
>  /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'.
> */
> -static pthread_mutex_t dpif_mutex = PTHREAD_MUTEX_INITIALIZER;
> +static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
>
>  /* Rate limit for individual messages going to or from the datapath,
> output at
>   * DBG level.  This is very high because, if these are enabled, it is
> because
> @@ -145,9 +145,9 @@ dp_register_provider(const struct dpif_class
> *new_class)
>  {
>      int error;
>
> -    xpthread_mutex_lock(&dpif_mutex);
> +    ovs_mutex_lock(&dpif_mutex);
>      error = dp_register_provider__(new_class);
> -    xpthread_mutex_unlock(&dpif_mutex);
> +    ovs_mutex_unlock(&dpif_mutex);
>
>      return error;
>  }
> @@ -190,9 +190,9 @@ dp_unregister_provider(const char *type)
>
>      dp_initialize();
>
> -    xpthread_mutex_lock(&dpif_mutex);
> +    ovs_mutex_lock(&dpif_mutex);
>      error = dp_unregister_provider__(type);
> -    xpthread_mutex_unlock(&dpif_mutex);
> +    ovs_mutex_unlock(&dpif_mutex);
>
>      return error;
>  }
> @@ -202,9 +202,9 @@ dp_unregister_provider(const char *type)
>  void
>  dp_blacklist_provider(const char *type)
>  {
> -    xpthread_mutex_lock(&dpif_mutex);
> +    ovs_mutex_lock(&dpif_mutex);
>      sset_add(&dpif_blacklist, type);
> -    xpthread_mutex_unlock(&dpif_mutex);
> +    ovs_mutex_unlock(&dpif_mutex);
>  }
>
>  /* Clears 'types' and enumerates the types of all currently registered
> datapath
> @@ -217,21 +217,21 @@ dp_enumerate_types(struct sset *types)
>      dp_initialize();
>      sset_clear(types);
>
> -    xpthread_mutex_lock(&dpif_mutex);
> +    ovs_mutex_lock(&dpif_mutex);
>      SHASH_FOR_EACH(node, &dpif_classes) {
>          const struct registered_dpif_class *registered_class = node->data;
>          sset_add(types, registered_class->dpif_class->type);
>      }
> -    xpthread_mutex_unlock(&dpif_mutex);
> +    ovs_mutex_unlock(&dpif_mutex);
>  }
>
>  static void
>  dp_class_unref(struct registered_dpif_class *rc)
>  {
> -    xpthread_mutex_lock(&dpif_mutex);
> +    ovs_mutex_lock(&dpif_mutex);
>      ovs_assert(rc->refcount);
>      rc->refcount--;
> -    xpthread_mutex_unlock(&dpif_mutex);
> +    ovs_mutex_unlock(&dpif_mutex);
>  }
>
>  static struct registered_dpif_class *
> @@ -239,12 +239,12 @@ dp_class_lookup(const char *type)
>  {
>      struct registered_dpif_class *rc;
>
> -    xpthread_mutex_lock(&dpif_mutex);
> +    ovs_mutex_lock(&dpif_mutex);
>      rc = shash_find_data(&dpif_classes, type);
>      if (rc) {
>          rc->refcount++;
>      }
> -    xpthread_mutex_unlock(&dpif_mutex);
> +    ovs_mutex_unlock(&dpif_mutex);
>
>      return rc;
>  }
> @@ -481,12 +481,12 @@ dpif_port_open_type(const char *datapath_type, const
> char *port_type)
>
>      datapath_type = dpif_normalize_type(datapath_type);
>
> -    xpthread_mutex_lock(&dpif_mutex);
> +    ovs_mutex_lock(&dpif_mutex);
>      rc = shash_find_data(&dpif_classes, datapath_type);
>      if (rc && rc->dpif_class->port_open_type) {
>          port_type = rc->dpif_class->port_open_type(rc->dpif_class,
> port_type);
>      }
> -    xpthread_mutex_unlock(&dpif_mutex);
> +    ovs_mutex_unlock(&dpif_mutex);
>
>      return port_type;
>  }
> diff --git a/lib/fatal-signal.c b/lib/fatal-signal.c
> index 40daf5a..e655683 100644
> --- a/lib/fatal-signal.c
> +++ b/lib/fatal-signal.c
> @@ -57,7 +57,7 @@ static size_t n_hooks;
>  static int signal_fds[2];
>  static volatile sig_atomic_t stored_sig_nr = SIG_ATOMIC_MAX;
>
> -static pthread_mutex_t mutex;
> +static struct ovs_mutex mutex;
>
>  static void atexit_handler(void);
>  static void call_hooks(int sig_nr);
> @@ -80,7 +80,7 @@ fatal_signal_init(void)
>
>          xpthread_mutexattr_init(&attr);
>          xpthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
> -        xpthread_mutex_init(&mutex, &attr);
> +        ovs_mutex_init(&mutex, &attr);
>          xpthread_mutexattr_destroy(&attr);
>
>          xpipe_nonblocking(signal_fds);
> @@ -119,14 +119,14 @@ fatal_signal_add_hook(void (*hook_cb)(void *aux),
> void (*cancel_cb)(void *aux),
>  {
>      fatal_signal_init();
>
> -    xpthread_mutex_lock(&mutex);
> +    ovs_mutex_lock(&mutex);
>      ovs_assert(n_hooks < MAX_HOOKS);
>      hooks[n_hooks].hook_cb = hook_cb;
>      hooks[n_hooks].cancel_cb = cancel_cb;
>      hooks[n_hooks].aux = aux;
>      hooks[n_hooks].run_at_exit = run_at_exit;
>      n_hooks++;
> -    xpthread_mutex_unlock(&mutex);
> +    ovs_mutex_unlock(&mutex);
>  }
>
>  /* Handles fatal signal number 'sig_nr'.
> @@ -167,7 +167,7 @@ fatal_signal_run(void)
>      if (sig_nr != SIG_ATOMIC_MAX) {
>          char namebuf[SIGNAL_NAME_BUFSIZE];
>
> -        xpthread_mutex_lock(&mutex);
> +        ovs_mutex_lock(&mutex);
>
>          VLOG_WARN("terminating with signal %d (%s)",
>                    (int)sig_nr, signal_name(sig_nr, namebuf, sizeof
> namebuf));
> @@ -178,7 +178,7 @@ fatal_signal_run(void)
>          signal(sig_nr, SIG_DFL);
>          raise(sig_nr);
>
> -        xpthread_mutex_unlock(&mutex);
> +        ovs_mutex_unlock(&mutex);
>          NOT_REACHED();
>      }
>  }
> @@ -232,14 +232,14 @@ fatal_signal_add_file_to_unlink(const char *file)
>  {
>      fatal_signal_init();
>
> -    xpthread_mutex_lock(&mutex);
> +    ovs_mutex_lock(&mutex);
>      if (!added_hook) {
>          added_hook = true;
>          fatal_signal_add_hook(unlink_files, cancel_files, NULL, true);
>      }
>
>      sset_add(&files, file);
> -    xpthread_mutex_unlock(&mutex);
> +    ovs_mutex_unlock(&mutex);
>  }
>
>  /* Unregisters 'file' from being unlinked when the program terminates via
> @@ -249,9 +249,9 @@ fatal_signal_remove_file_to_unlink(const char *file)
>  {
>      fatal_signal_init();
>
> -    xpthread_mutex_lock(&mutex);
> +    ovs_mutex_lock(&mutex);
>      sset_find_and_delete(&files, file);
> -    xpthread_mutex_unlock(&mutex);
> +    ovs_mutex_unlock(&mutex);
>  }
>
>  /* Like fatal_signal_remove_file_to_unlink(), but also unlinks 'file'.
> @@ -263,7 +263,7 @@ fatal_signal_unlink_file_now(const char *file)
>
>      fatal_signal_init();
>
> -    xpthread_mutex_lock(&mutex);
> +    ovs_mutex_lock(&mutex);
>
>      error = unlink(file) ? errno : 0;
>      if (error) {
> @@ -272,7 +272,7 @@ fatal_signal_unlink_file_now(const char *file)
>
>      fatal_signal_remove_file_to_unlink(file);
>
> -    xpthread_mutex_unlock(&mutex);
> +    ovs_mutex_unlock(&mutex);
>
>      return error;
>  }
> diff --git a/lib/jsonrpc.c b/lib/jsonrpc.c
> index 6c482c2..e6e0133 100644
> --- a/lib/jsonrpc.c
> +++ b/lib/jsonrpc.c
> @@ -515,14 +515,10 @@ jsonrpc_create(enum jsonrpc_msg_type type, const
> char *method,
>  static struct json *
>  jsonrpc_create_id(void)
>  {
> -    static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> -    static unsigned int next_id;
> +    static atomic_uint next_id = ATOMIC_VAR_INIT(0);
>      unsigned int id;
>
> -    xpthread_mutex_lock(&mutex);
> -    id = next_id++;
> -    xpthread_mutex_unlock(&mutex);
> -
> +    atomic_add(&next_id, 1, &id);
>      return json_integer_create(id);
>  }
>
> diff --git a/lib/lockfile.c b/lib/lockfile.c
> index 50a4e0c..aec019c 100644
> --- a/lib/lockfile.c
> +++ b/lib/lockfile.c
> @@ -53,10 +53,10 @@ struct lockfile {
>   * descriptor for a file on which a process holds a lock drops *all*
> locks on
>   * that file.  That means that we can't afford to open a lockfile more
> than
>   * once. */
> -static struct hmap lock_table = HMAP_INITIALIZER(&lock_table);
> -
> -/* Protects 'lock_table'. */
> -static pthread_mutex_t lock_table_mutex = PTHREAD_MUTEX_INITIALIZER;
> +static struct ovs_mutex lock_table_mutex = OVS_MUTEX_INITIALIZER;
> +static struct hmap lock_table__ = HMAP_INITIALIZER(&lock_table__);
> +static struct hmap *lock_table OVS_GUARDED_BY(lock_table_mutex)
> +    = &lock_table__;
>
>  static void lockfile_unhash(struct lockfile *);
>  static int lockfile_try_lock(const char *name, pid_t *pidp,
> @@ -110,9 +110,9 @@ lockfile_lock(const char *file, struct lockfile
> **lockfilep)
>
>      lock_name = lockfile_name(file);
>
> -    xpthread_mutex_lock(&lock_table_mutex);
> +    ovs_mutex_lock(&lock_table_mutex);
>      error = lockfile_try_lock(lock_name, &pid, lockfilep);
> -    xpthread_mutex_unlock(&lock_table_mutex);
> +    ovs_mutex_unlock(&lock_table_mutex);
>
>      if (error) {
>          COVERAGE_INC(lockfile_error);
> @@ -138,9 +138,9 @@ void
>  lockfile_unlock(struct lockfile *lockfile)
>  {
>      if (lockfile) {
> -        xpthread_mutex_lock(&lock_table_mutex);
> +        ovs_mutex_lock(&lock_table_mutex);
>          lockfile_unhash(lockfile);
> -        xpthread_mutex_unlock(&lock_table_mutex);
> +        ovs_mutex_unlock(&lock_table_mutex);
>
>          COVERAGE_INC(lockfile_unlock);
>          free(lockfile->name);
> @@ -156,12 +156,14 @@ lockfile_postfork(void)
>  {
>      struct lockfile *lockfile;
>
> -    HMAP_FOR_EACH (lockfile, hmap_node, &lock_table) {
> +    ovs_mutex_lock(&lock_table_mutex);
> +    HMAP_FOR_EACH (lockfile, hmap_node, lock_table) {
>          if (lockfile->fd >= 0) {
>              VLOG_WARN("%s: child does not inherit lock", lockfile->name);
>              lockfile_unhash(lockfile);
>          }
>      }
> +    ovs_mutex_unlock(&lock_table_mutex);
>  }
>
>  static uint32_t
> @@ -172,12 +174,12 @@ lockfile_hash(dev_t device, ino_t inode)
>  }
>
>  static struct lockfile *
> -lockfile_find(dev_t device, ino_t inode)
> +lockfile_find(dev_t device, ino_t inode) OVS_REQ_WRLOCK(lock_table_mutex)
>  {
>      struct lockfile *lockfile;
>
>      HMAP_FOR_EACH_WITH_HASH (lockfile, hmap_node,
> -                             lockfile_hash(device, inode), &lock_table) {
> +                             lockfile_hash(device, inode), lock_table) {
>          if (lockfile->device == device && lockfile->inode == inode) {
>              return lockfile;
>          }
> @@ -186,17 +188,18 @@ lockfile_find(dev_t device, ino_t inode)
>  }
>
>  static void
> -lockfile_unhash(struct lockfile *lockfile)
> +lockfile_unhash(struct lockfile *lockfile)
> OVS_REQ_WRLOCK(&lock_table_mutex)
>  {
>      if (lockfile->fd >= 0) {
>          close(lockfile->fd);
>          lockfile->fd = -1;
> -        hmap_remove(&lock_table, &lockfile->hmap_node);
> +        hmap_remove(lock_table, &lockfile->hmap_node);
>      }
>  }
>
>  static struct lockfile *
>  lockfile_register(const char *name, dev_t device, ino_t inode, int fd)
> +    OVS_REQ_WRLOCK(&lock_table_mutex)
>  {
>      struct lockfile *lockfile;
>
> @@ -211,13 +214,14 @@ lockfile_register(const char *name, dev_t device,
> ino_t inode, int fd)
>      lockfile->device = device;
>      lockfile->inode = inode;
>      lockfile->fd = fd;
> -    hmap_insert(&lock_table, &lockfile->hmap_node,
> +    hmap_insert(lock_table, &lockfile->hmap_node,
>                  lockfile_hash(device, inode));
>      return lockfile;
>  }
>
>  static int
>  lockfile_try_lock(const char *name, pid_t *pidp, struct lockfile
> **lockfilep)
> +    OVS_REQ_WRLOCK(&lock_table_mutex)
>  {
>      struct flock l;
>      struct stat s;
> diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
> index 82473a1..c4f58b7 100644
> --- a/lib/netdev-dummy.c
> +++ b/lib/netdev-dummy.c
> @@ -241,16 +241,11 @@ static int
>  netdev_dummy_create(const struct netdev_class *class, const char *name,
>                      struct netdev **netdevp)
>  {
> -    static unsigned int next_n = 0xaa550000;
> -    static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> -
> +    static atomic_uint next_n = ATOMIC_VAR_INIT(0xaa550000);
>      struct netdev_dummy *netdev;
>      unsigned int n;
>
> -    xpthread_mutex_lock(&mutex);
> -    n = next_n++;
> -    xpthread_mutex_unlock(&mutex);
> -
> +    atomic_add(&next_n, 1, &n);
>      netdev = xzalloc(sizeof *netdev);
>      netdev_init(&netdev->up, name, class);
>      netdev->hwaddr[0] = 0xaa;
> diff --git a/lib/netlink-socket.c b/lib/netlink-socket.c
> index da32284..99bd4cc 100644
> --- a/lib/netlink-socket.c
> +++ b/lib/netlink-socket.c
> @@ -1012,8 +1012,8 @@ struct nl_pool {
>      int n;
>  };
>
> -static struct nl_pool pools[MAX_LINKS];
> -static pthread_mutex_t pool_mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> +static struct ovs_mutex pool_mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
> +static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);
>
>  static int
>  nl_pool_alloc(int protocol, struct nl_sock **sockp)
> @@ -1023,12 +1023,12 @@ nl_pool_alloc(int protocol, struct nl_sock **sockp)
>
>      ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));
>
> -    xpthread_mutex_lock(&pool_mutex);
> +    ovs_mutex_lock(&pool_mutex);
>      pool = &pools[protocol];
>      if (pool->n > 0) {
>          sock = pool->socks[--pool->n];
>      }
> -    xpthread_mutex_unlock(&pool_mutex);
> +    ovs_mutex_unlock(&pool_mutex);
>
>      if (sock) {
>          *sockp = sock;
> @@ -1044,12 +1044,12 @@ nl_pool_release(struct nl_sock *sock)
>      if (sock) {
>          struct nl_pool *pool = &pools[sock->protocol];
>
> -        xpthread_mutex_lock(&pool_mutex);
> +        ovs_mutex_lock(&pool_mutex);
>          if (pool->n < ARRAY_SIZE(pool->socks)) {
>              pool->socks[pool->n++] = sock;
>              sock = NULL;
>          }
> -        xpthread_mutex_unlock(&pool_mutex);
> +        ovs_mutex_unlock(&pool_mutex);
>
>          nl_sock_destroy(sock);
>      }
> diff --git a/lib/ofp-msgs.c b/lib/ofp-msgs.c
> index 8bb1754..d136f73 100644
> --- a/lib/ofp-msgs.c
> +++ b/lib/ofp-msgs.c
> @@ -110,14 +110,10 @@ static enum ofperr ofpraw_from_ofphdrs(enum ofpraw
> *, const struct ofphdrs *);
>  static ovs_be32
>  alloc_xid(void)
>  {
> -    static uint32_t next_xid = 1;
> -    static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> +    static atomic_uint32_t next_xid = ATOMIC_VAR_INIT(1);
>      uint32_t xid;
>
> -    xpthread_mutex_lock(&mutex);
> -    xid = next_xid++;
> -    xpthread_mutex_unlock(&mutex);
> -
> +    atomic_add(&next_xid, 1, &xid);
>      return htonl(xid);
>  }
>
> diff --git a/lib/ofp-parse.c b/lib/ofp-parse.c
> index 4b8036d..5cb39f5 100644
> --- a/lib/ofp-parse.c
> +++ b/lib/ofp-parse.c
> @@ -1520,15 +1520,11 @@ static char * WARN_UNUSED_RESULT
>  parse_flow_monitor_request__(struct ofputil_flow_monitor_request *fmr,
>                               const char *str_, char *string)
>  {
> -    static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> -    static uint32_t id;
> -
> +    static atomic_uint32_t id = ATOMIC_VAR_INIT(0);
>      char *save_ptr = NULL;
>      char *name;
>
> -    xpthread_mutex_lock(&mutex);
> -    fmr->id = id++;
> -    xpthread_mutex_unlock(&mutex);
> +    atomic_add(&id, 1, &fmr->id);
>
>      fmr->flags = (NXFMF_INITIAL | NXFMF_ADD | NXFMF_DELETE | NXFMF_MODIFY
>                    | NXFMF_OWN | NXFMF_ACTIONS);
> diff --git a/lib/ovs-atomic-gcc4+.c b/lib/ovs-atomic-gcc4+.c
> index aeff845..1693848 100644
> --- a/lib/ovs-atomic-gcc4+.c
> +++ b/lib/ovs-atomic-gcc4+.c
> @@ -20,7 +20,7 @@
>  #include "ovs-thread.h"
>
>  #if OVS_ATOMIC_GCC4P_IMPL
> -static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> +static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
>
>  #define DEFINE_LOCKED_OP(TYPE, NAME, OPERATOR)                          \
>      TYPE##_t                                                            \
> @@ -28,10 +28,10 @@ static pthread_mutex_t mutex =
> PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
>      {                                                                   \
>          TYPE##_t old_value;                                             \
>                                                                          \
> -        xpthread_mutex_lock(&mutex);                                    \
> +        ovs_mutex_lock(&mutex);                                         \
>          old_value = u->value;                                           \
>          u->value OPERATOR arg;                                          \
> -        xpthread_mutex_unlock(&mutex);                                  \
> +        ovs_mutex_unlock(&mutex);                                       \
>                                                                          \
>          return old_value;                                               \
>      }
> @@ -42,9 +42,9 @@ static pthread_mutex_t mutex =
> PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
>      {                                                                   \
>          TYPE##_t value;                                                 \
>                                                                          \
> -        xpthread_mutex_lock(&mutex);                                    \
> +        ovs_mutex_lock(&mutex);                                         \
>          value = u->value;                                               \
> -        xpthread_mutex_unlock(&mutex);                                  \
> +        ovs_mutex_unlock(&mutex);                                       \
>                                                                          \
>          return value;                                                   \
>      }                                                                   \
> @@ -52,9 +52,9 @@ static pthread_mutex_t mutex =
> PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
>      void                                                                \
>      locked_##TYPE##_store(struct locked_##TYPE *u, TYPE##_t value)      \
>      {                                                                   \
> -        xpthread_mutex_lock(&mutex);                                    \
> +        ovs_mutex_lock(&mutex);                                         \
>          u->value = value;                                               \
> -        xpthread_mutex_unlock(&mutex);                                  \
> +        ovs_mutex_unlock(&mutex);                                       \
>      }                                                                   \
>      DEFINE_LOCKED_OP(TYPE, add, +=);                                    \
>      DEFINE_LOCKED_OP(TYPE, sub, -=);                                    \
> diff --git a/lib/ovs-atomic-pthreads.c b/lib/ovs-atomic-pthreads.c
> index a501b82..7e7ef05 100644
> --- a/lib/ovs-atomic-pthreads.c
> +++ b/lib/ovs-atomic-pthreads.c
> @@ -26,10 +26,10 @@ atomic_flag_test_and_set(volatile atomic_flag *flag_)
>      atomic_flag *flag = CONST_CAST(atomic_flag *, flag_);
>      bool old_value;
>
> -    xpthread_mutex_lock(&flag->mutex);
> +    ovs_mutex_lock(&flag->mutex);
>      old_value = flag->b;
>      flag->b = true;
> -    xpthread_mutex_unlock(&flag->mutex);
> +    ovs_mutex_unlock(&flag->mutex);
>
>      return old_value;
>  }
> @@ -46,9 +46,9 @@ atomic_flag_clear(volatile atomic_flag *flag_)
>  {
>      atomic_flag *flag = CONST_CAST(atomic_flag *, flag_);
>
> -    xpthread_mutex_lock(&flag->mutex);
> +    ovs_mutex_lock(&flag->mutex);
>      flag->b = false;
> -    xpthread_mutex_unlock(&flag->mutex);
> +    ovs_mutex_unlock(&flag->mutex);
>  }
>
>  void
> diff --git a/lib/ovs-atomic-pthreads.h b/lib/ovs-atomic-pthreads.h
> index 2f47a9c..61a9771 100644
> --- a/lib/ovs-atomic-pthreads.h
> +++ b/lib/ovs-atomic-pthreads.h
> @@ -144,7 +144,7 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
>
>  typedef struct {
>      bool b;
> -    pthread_mutex_t mutex;
> +    struct ovs_mutex mutex;
>  } atomic_flag;
>  #define ATOMIC_FLAG_INIT { false, PTHREAD_MUTEX_INITIALIZER }
>
> diff --git a/lib/ovs-thread.c b/lib/ovs-thread.c
> index 91bb3d9..e0c4a03 100644
> --- a/lib/ovs-thread.c
> +++ b/lib/ovs-thread.c
> @@ -33,6 +33,13 @@
>  #else
>  #include "vlog.h"
>
> +#undef ovs_mutex_lock
> +#undef ovs_mutex_trylock
> +#undef ovs_rwlock_rdlock
> +#undef ovs_rwlock_tryrdlock
> +#undef ovs_rwlock_wrlock
> +#undef ovs_rwlock_trywrlock
> +
>  VLOG_DEFINE_THIS_MODULE(ovs_thread);
>
>  /* If there is a reason that we cannot fork anymore (unless the fork will
> be
> @@ -43,6 +50,74 @@ static const char *must_not_fork;
>  /* True if we created any threads beyond the main initial thread. */
>  static bool multithreaded;
>
> +#define INIT_FUNCTION(LOCK_TYPE) \
> +    void \
> +    ovs_##LOCK_TYPE##_init(const struct ovs_##LOCK_TYPE *l_, \
> +                           pthread_##LOCK_TYPE##attr_t *attr) \
> +    { \
> +        struct ovs_##LOCK_TYPE *l = (struct ovs_##LOCK_TYPE *) l_; \
> +        int error; \
> +        l->where = NULL; \
> +        error = pthread_##LOCK_TYPE##_init(&l->lock, attr); \
> +        if (OVS_UNLIKELY(error)) { \
> +            ovs_abort(error, "pthread_%s_init failed", #LOCK_TYPE); \
> +        } \
> +    }
> +INIT_FUNCTION(mutex)
> +INIT_FUNCTION(rwlock)
> +
> +#define LOCK_FUNCTION(LOCK_TYPE, LOCK_FUN) \
> +    void \
> +    ovs_##LOCK_TYPE##_##LOCK_FUN(const struct ovs_##LOCK_TYPE *l_, \
> +                                 const char *where) \
> +    { \
> +        struct ovs_##LOCK_TYPE *l = (struct ovs_##LOCK_TYPE *) l_; \
> +        int error = pthread_##LOCK_TYPE##_##LOCK_FUN(&l->lock); \
> +        if (OVS_UNLIKELY(error)) { \
> +            ovs_abort(error, "pthread_%s_%s failed", #LOCK_TYPE,
> #LOCK_FUN); \
> +        } \
> +        l->where = where; \
> +    }
> +LOCK_FUNCTION(mutex, lock);
> +LOCK_FUNCTION(rwlock, rdlock);
> +LOCK_FUNCTION(rwlock, wrlock);
> +
> +#define TRY_LOCK_FUNCTION(LOCK_TYPE, LOCK_FUN) \
> +    int \
> +    ovs_##LOCK_TYPE##_##LOCK_FUN(const struct ovs_##LOCK_TYPE *l_, \
> +                                 const char *where) \
> +    { \
> +        struct ovs_##LOCK_TYPE *l = (struct ovs_##LOCK_TYPE *) l_; \
> +        int error = pthread_##LOCK_TYPE##_##LOCK_FUN(&l->lock); \
> +        if (OVS_UNLIKELY(error) && error != EBUSY) { \
> +            ovs_abort(error, "pthread_%s_%s failed", #LOCK_TYPE,
> #LOCK_FUN); \
> +        } \
> +        if (!error) { \
> +            l->where = where; \
> +        } \
> +        return error; \
> +    }
> +TRY_LOCK_FUNCTION(mutex, trylock);
> +TRY_LOCK_FUNCTION(rwlock, tryrdlock);
> +TRY_LOCK_FUNCTION(rwlock, trywrlock);
> +
> +#define UNLOCK_FUNCTION(LOCK_TYPE, LOCK_FUN) \
> +    void \
> +    ovs_##LOCK_TYPE##_##LOCK_FUN(const struct ovs_##LOCK_TYPE *l_) \
> +    { \
> +        struct ovs_##LOCK_TYPE *l = (struct ovs_##LOCK_TYPE *) l_; \
> +        int error; \
> +        l->where = NULL; \
> +        error = pthread_##LOCK_TYPE##_##LOCK_FUN(&l->lock); \
> +        if (OVS_UNLIKELY(error)) { \
> +            ovs_abort(error, "pthread_%s_%sfailed", #LOCK_TYPE,
> #LOCK_FUN); \
> +        } \
> +    }
> +UNLOCK_FUNCTION(mutex, unlock)
> +UNLOCK_FUNCTION(mutex, destroy)
> +UNLOCK_FUNCTION(rwlock, unlock)
> +UNLOCK_FUNCTION(rwlock, destroy)
> +
>  #define XPTHREAD_FUNC1(FUNCTION, PARAM1)                \
>      void                                                \
>      x##FUNCTION(PARAM1 arg1)                            \
> @@ -52,16 +127,6 @@ static bool multithreaded;
>              ovs_abort(error, "%s failed", #FUNCTION);   \
>          }                                               \
>      }
> -#define XPTHREAD_TRY_FUNC1(FUNCTION, PARAM1)            \
> -    int                                                 \
> -    x##FUNCTION(PARAM1 arg1)                            \
> -    {                                                   \
> -        int error = FUNCTION(arg1);                     \
> -        if (OVS_UNLIKELY(error && error != EBUSY)) {    \
> -            ovs_abort(error, "%s failed", #FUNCTION);   \
> -        }                                               \
> -        return error;                                   \
> -    }
>  #define XPTHREAD_FUNC2(FUNCTION, PARAM1, PARAM2)        \
>      void                                                \
>      x##FUNCTION(PARAM1 arg1, PARAM2 arg2)               \
> @@ -72,36 +137,30 @@ static bool multithreaded;
>          }                                               \
>      }
>
> -XPTHREAD_FUNC2(pthread_mutex_init, pthread_mutex_t *, pthread_mutexattr_t
> *);
> -XPTHREAD_FUNC1(pthread_mutex_destroy, pthread_mutex_t *);
> -XPTHREAD_FUNC1(pthread_mutex_lock, pthread_mutex_t *);
> -XPTHREAD_FUNC1(pthread_mutex_unlock, pthread_mutex_t *);
> -XPTHREAD_TRY_FUNC1(pthread_mutex_trylock, pthread_mutex_t *);
> -
>  XPTHREAD_FUNC1(pthread_mutexattr_init, pthread_mutexattr_t *);
>  XPTHREAD_FUNC1(pthread_mutexattr_destroy, pthread_mutexattr_t *);
>  XPTHREAD_FUNC2(pthread_mutexattr_settype, pthread_mutexattr_t *, int);
>  XPTHREAD_FUNC2(pthread_mutexattr_gettype, pthread_mutexattr_t *, int *);
>
> -XPTHREAD_FUNC2(pthread_rwlock_init,
> -               pthread_rwlock_t *, pthread_rwlockattr_t *);
> -XPTHREAD_FUNC1(pthread_rwlock_destroy, pthread_rwlock_t *);
> -XPTHREAD_FUNC1(pthread_rwlock_rdlock, pthread_rwlock_t *);
> -XPTHREAD_FUNC1(pthread_rwlock_wrlock, pthread_rwlock_t *);
> -XPTHREAD_FUNC1(pthread_rwlock_unlock, pthread_rwlock_t *);
> -XPTHREAD_TRY_FUNC1(pthread_rwlock_tryrdlock, pthread_rwlock_t *);
> -XPTHREAD_TRY_FUNC1(pthread_rwlock_trywrlock, pthread_rwlock_t *);
> -
>  XPTHREAD_FUNC2(pthread_cond_init, pthread_cond_t *, pthread_condattr_t *);
>  XPTHREAD_FUNC1(pthread_cond_destroy, pthread_cond_t *);
>  XPTHREAD_FUNC1(pthread_cond_signal, pthread_cond_t *);
>  XPTHREAD_FUNC1(pthread_cond_broadcast, pthread_cond_t *);
> -XPTHREAD_FUNC2(pthread_cond_wait, pthread_cond_t *, pthread_mutex_t *);
>
>  typedef void destructor_func(void *);
>  XPTHREAD_FUNC2(pthread_key_create, pthread_key_t *, destructor_func *);
>
>  void
> +ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_)
> +{
> +    struct ovs_mutex *mutex = (struct ovs_mutex *) mutex_;
> +    int error = pthread_cond_wait(cond, &mutex->lock);
> +    if (OVS_UNLIKELY(error)) {
> +        ovs_abort(error, "pthread_cond_wait failed");
> +    }
> +}
> +
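ovs_mutex_cond_wait() is intended as the annotated replacement for
xpthread_cond_wait() in the usual predicate loop.  A minimal caller sketch
(illustrative only; 'ready' is a made-up flag guarded by 'mutex'):

    static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool ready OVS_GUARDED_BY(mutex);

    static void
    wait_until_ready(void)
    {
        ovs_mutex_lock(&mutex);
        while (!ready) {
            ovs_mutex_cond_wait(&cond, &mutex);
        }
        ovs_mutex_unlock(&mutex);
    }

This matches the system-stats.c changes later in the patch.
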
> +void
>  xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
>                  void *(*start)(void *), void *arg)
>  {
> @@ -120,19 +179,19 @@ xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
>  bool
>  ovsthread_once_start__(struct ovsthread_once *once)
>  {
> -    xpthread_mutex_lock(&once->mutex);
> +    ovs_mutex_lock(&once->mutex, SOURCE_LOCATOR);
>      if (!ovsthread_once_is_done__(once)) {
>          return false;
>      }
> -    xpthread_mutex_unlock(&once->mutex);
> +    ovs_mutex_unlock(&once->mutex);
>      return true;
>  }
>
> -void OVS_RELEASES(once)
> +void
>  ovsthread_once_done(struct ovsthread_once *once)
>  {
>      atomic_store(&once->done, true);
> -    xpthread_mutex_unlock(&once->mutex);
> +    ovs_mutex_unlock(&once->mutex);
>  }
>
>  /* Asserts that the process has not yet created any threads (beyond the initial
> diff --git a/lib/ovs-thread.h b/lib/ovs-thread.h
> index 29a8637..c8aa88a 100644
> --- a/lib/ovs-thread.h
> +++ b/lib/ovs-thread.h
> @@ -23,6 +23,21 @@
>  #include "ovs-atomic.h"
>  #include "util.h"
>
> +struct OVS_LOCKABLE ovs_mutex {
> +    pthread_mutex_t lock;
> +    const char *where;
> +};
> +
> +struct OVS_LOCKABLE ovs_rwlock {
> +    pthread_rwlock_t lock;
> +    const char *where;
> +};
> +
> +#define OVS_RWLOCK_INITIALIZER { PTHREAD_RWLOCK_INITIALIZER, NULL }
> +#define OVS_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, NULL }
> +#define OVS_RECURSIVE_MUTEX_INITIALIZER \
> +    { PTHREAD_RECURSIVE_MUTEX_INITIALIZER, NULL }
> +
>  /* glibc has some non-portable mutex types and initializers:
>   *
>  *    - PTHREAD_MUTEX_ADAPTIVE_NP is a mutex type that works as a spinlock that
> @@ -37,19 +52,20 @@
>   * (glibc has other non-portable initializers, but we can't reasonably
>   * substitute for them here.) */
>  #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
> -#define PTHREAD_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP
> -#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER \
> -    PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
> +#define OVS_MUTEX_ADAPTIVE { PTHREAD_MUTEX_ADAPTIVE_NP, NULL }
> +#define OVS_ADAPTIVE_MUTEX_INITIALIZER \
> +    { PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, NULL }
>  #else
> -#define PTHREAD_MUTEX_ADAPTIVE PTHREAD_MUTEX_NORMAL
> -#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
> +#define OVS_MUTEX_ADAPTIVE { PTHREAD_MUTEX_NORMAL }
> +#define OVS_ADAPTIVE_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, NULL }
>  #endif
>
>  #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
> -#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER \
> -    PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
> +#define OVS_ERRORCHECK_MUTEX_INITIALIZER \
> +    { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, NULL }
>  #else
> -#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
> +#define OVS_ERRORCHECK_MUTEX_INITIALIZER \
> +    OVS_MUTEX_INITIALIZER
>  #endif
>
>  /* Simple wrappers for pthreads functions.  Most of these functions abort the
> @@ -57,31 +73,57 @@
>   * exceptions: they pass through a 0 or EBUSY return value to the caller and
>   * abort on any other error. */
>
> -void xpthread_mutex_init(pthread_mutex_t *, pthread_mutexattr_t *);
> -void xpthread_mutex_destroy(pthread_mutex_t *);
> -void xpthread_mutex_lock(pthread_mutex_t *mutex) OVS_ACQUIRES(mutex);
> -void xpthread_mutex_unlock(pthread_mutex_t *mutex) OVS_RELEASES(mutex);
> -int xpthread_mutex_trylock(pthread_mutex_t *);
> +void ovs_mutex_init(const struct ovs_mutex *, pthread_mutexattr_t *);
> +void ovs_mutex_destroy(const struct ovs_mutex *);
> +void ovs_mutex_unlock(const struct ovs_mutex *mutex) OVS_RELEASES(mutex);
> +
> +void ovs_mutex_lock(const struct ovs_mutex *mutex, const char *where)
> +    OVS_ACQ_WRLOCK(mutex);
> +#define ovs_mutex_lock(mutex) \
> +        ovs_mutex_lock(mutex, SOURCE_LOCATOR)
> +
> +int ovs_mutex_trylock(const struct ovs_mutex *mutex, const char *where)
> +    OVS_TRY_WRLOCK(0, mutex);
> +#define ovs_mutex_trylock(mutex) \
> +        ovs_mutex_trylock(mutex, SOURCE_LOCATOR)
> +
> +void ovs_mutex_cond_wait(pthread_cond_t *, const struct ovs_mutex *);
> +
> +void ovs_rwlock_init(const struct ovs_rwlock *, pthread_rwlockattr_t *);
> +void ovs_rwlock_destroy(const struct ovs_rwlock *);
> +void ovs_rwlock_unlock(const struct ovs_rwlock *rwlock) OVS_RELEASES(rwlock);
> +
> +void ovs_rwlock_wrlock(const struct ovs_rwlock *rwlock, const char *where)
> +    OVS_ACQ_WRLOCK(rwlock);
> +#define ovs_rwlock_wrlock(rwlock) \
> +        ovs_rwlock_wrlock(rwlock, SOURCE_LOCATOR)
> +
> +int ovs_rwlock_trywrlock(const struct ovs_rwlock *rwlock, const char *where)
> +    OVS_TRY_WRLOCK(0, rwlock);
> +#define ovs_rwlock_trywrlock(rwlock) \
> +        ovs_rwlock_trywrlock(rwlock, SOURCE_LOCATOR)
> +
> +void ovs_rwlock_rdlock(const struct ovs_rwlock *rwlock, const char *where)
> +    OVS_ACQ_RDLOCK(rwlock);
> +#define ovs_rwlock_rdlock(rwlock) \
> +        ovs_rwlock_rdlock(rwlock, SOURCE_LOCATOR)
> +
> +int ovs_rwlock_tryrdlock(const struct ovs_rwlock *rwlock, const char *where)
> +    OVS_TRY_RDLOCK(0, rwlock);
> +#define ovs_rwlock_tryrdlock(rwlock) \
> +        ovs_rwlock_tryrdlock(rwlock, SOURCE_LOCATOR)
>
>  void xpthread_mutexattr_init(pthread_mutexattr_t *);
>  void xpthread_mutexattr_destroy(pthread_mutexattr_t *);
>  void xpthread_mutexattr_settype(pthread_mutexattr_t *, int type);
>  void xpthread_mutexattr_gettype(pthread_mutexattr_t *, int *typep);
>
> -void xpthread_rwlock_init(pthread_rwlock_t *, pthread_rwlockattr_t *);
> -void xpthread_rwlock_destroy(pthread_rwlock_t *);
> -void xpthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
> -void xpthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
> -void xpthread_rwlock_unlock(pthread_rwlock_t *rwlock) OVS_RELEASES(rwlock);
> -int xpthread_rwlock_tryrdlock(pthread_rwlock_t *);
> -int xpthread_rwlock_trywrlock(pthread_rwlock_t *);
> -
>  void xpthread_cond_init(pthread_cond_t *, pthread_condattr_t *);
>  void xpthread_cond_destroy(pthread_cond_t *);
>  void xpthread_cond_signal(pthread_cond_t *);
>  void xpthread_cond_broadcast(pthread_cond_t *);
> -void xpthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
> -    OVS_MUST_HOLD(mutex);
> +void xpthread_cond_wait(pthread_cond_t *, const struct ovs_mutex *mutex)
> +    OVS_REQ_WRLOCK(mutex);
>
>  #ifdef __CHECKER__
>  /* Replace these functions by the macros already defined in the <pthread.h>
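One note on the lock and trylock declarations above: each real function takes
an extra 'where' argument, and a function-like macro of the same name appends
SOURCE_LOCATOR at every call site, so for example

    ovs_mutex_lock(&dp->mutex);

preprocesses into something like

    ovs_mutex_lock(&dp->mutex, "lib/dpif-netdev.c:123");

(the file and line here are made up, and this assumes SOURCE_LOCATOR is the
usual __FILE__:__LINE__ string from util.h).  That string is presumably what
the lock wrappers store in the 'where' member of struct ovs_mutex, so a stuck
lock can report where it was most recently taken.
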
> @@ -335,19 +377,22 @@ void xpthread_create(pthread_t *, pthread_attr_t *, void *(*)(void *), void *);
>
>  struct ovsthread_once {
>      atomic_bool done;
> -    pthread_mutex_t mutex;
> +    struct ovs_mutex mutex;
>  };
>
>  #define OVSTHREAD_ONCE_INITIALIZER              \
>      {                                           \
>          ATOMIC_VAR_INIT(false),                 \
> -        PTHREAD_ADAPTIVE_MUTEX_INITIALIZER,     \
> +        OVS_ADAPTIVE_MUTEX_INITIALIZER,         \
>      }
>
> -static inline bool ovsthread_once_start(struct ovsthread_once *);
> -void ovsthread_once_done(struct ovsthread_once *once) OVS_RELEASES(once);
> +static inline bool ovsthread_once_start(struct ovsthread_once *once)
> +    OVS_TRY_WRLOCK(true, once->mutex);
> +void ovsthread_once_done(struct ovsthread_once *once)
> +    OVS_RELEASES(once->mutex);
>
> -bool ovsthread_once_start__(struct ovsthread_once *);
> +bool ovsthread_once_start__(struct ovsthread_once *once)
> +    OVS_TRY_WRLOCK(false, once->mutex);
>
>  static inline bool
>  ovsthread_once_is_done__(const struct ovsthread_once *once)
> @@ -374,7 +419,7 @@ ovsthread_once_start(struct ovsthread_once *once)
>
>  #ifdef __CHECKER__
>  #define ovsthread_once_start(ONCE) \
> -    ((ONCE)->done ? false : ({ OVS_ACQUIRE(ONCE); true; }))
> +    ((ONCE)->done ? false : ({ OVS_MACRO_LOCK((ONCE->mutex)); true; }))
>  #endif
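For context, the ovsthread_once API keeps its familiar shape; only the
embedded mutex type changes.  A minimal usage sketch (illustrative only;
do_expensive_setup() is a made-up helper):

    static void
    init_module(void)
    {
        static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

        if (ovsthread_once_start(&once)) {
            do_expensive_setup();
            ovsthread_once_done(&once);
        }
    }

The OVS_TRY_WRLOCK(true, once->mutex) and OVS_RELEASES(once->mutex)
annotations tell clang that a true return from ovsthread_once_start() leaves
once->mutex held until ovsthread_once_done() releases it, which is exactly
the invariant the pattern relies on.
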
>
>  void assert_single_threaded(const char *where);
> diff --git a/lib/timeval.c b/lib/timeval.c
> index 4ed3ebf..f7b9ffa 100644
> --- a/lib/timeval.c
> +++ b/lib/timeval.c
> @@ -41,7 +41,7 @@ VLOG_DEFINE_THIS_MODULE(timeval);
>
>  struct clock {
>      clockid_t id;               /* CLOCK_MONOTONIC or CLOCK_REALTIME. */
> -    pthread_rwlock_t rwlock;    /* Mutual exclusion for 'cache'. */
> +    struct ovs_rwlock rwlock;   /* Mutual exclusion for 'cache'. */
>
>      /* Features for use by unit tests.  Protected by 'rwlock'. */
>      struct timespec warp;       /* Offset added for unit tests. */
> @@ -83,7 +83,7 @@ init_clock(struct clock *c, clockid_t id)
>  {
>      memset(c, 0, sizeof *c);
>      c->id = id;
> -    xpthread_rwlock_init(&c->rwlock, NULL);
> +    ovs_rwlock_init(&c->rwlock, NULL);
>      xclock_gettime(c->id, &c->cache);
>  }
>
> @@ -178,21 +178,21 @@ time_timespec__(struct clock *c, struct timespec *ts)
>      for (;;) {
>          /* Use the cached time by preference, but fall through if there's been
>           * a clock tick.  */
> -        xpthread_rwlock_rdlock(&c->rwlock);
> +        ovs_rwlock_rdlock(&c->rwlock);
>          if (c->stopped || !c->tick) {
>              timespec_add(ts, &c->cache, &c->warp);
> -            xpthread_rwlock_unlock(&c->rwlock);
> +            ovs_rwlock_unlock(&c->rwlock);
>              return;
>          }
> -        xpthread_rwlock_unlock(&c->rwlock);
> +        ovs_rwlock_unlock(&c->rwlock);
>
>          /* Refresh the cache. */
> -        xpthread_rwlock_wrlock(&c->rwlock);
> +        ovs_rwlock_wrlock(&c->rwlock);
>          if (c->tick) {
>              c->tick = false;
>              xclock_gettime(c->id, &c->cache);
>          }
> -        xpthread_rwlock_unlock(&c->rwlock);
> +        ovs_rwlock_unlock(&c->rwlock);
>      }
>  }
>
> @@ -568,9 +568,9 @@ timeval_stop_cb(struct unixctl_conn *conn,
>                   int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
>                   void *aux OVS_UNUSED)
>  {
> -    xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
> +    ovs_rwlock_wrlock(&monotonic_clock.rwlock);
>      monotonic_clock.stopped = true;
> -    xpthread_rwlock_unlock(&monotonic_clock.rwlock);
> +    ovs_rwlock_unlock(&monotonic_clock.rwlock);
>
>      unixctl_command_reply(conn, NULL);
>  }
> @@ -596,9 +596,9 @@ timeval_warp_cb(struct unixctl_conn *conn,
>      ts.tv_sec = msecs / 1000;
>      ts.tv_nsec = (msecs % 1000) * 1000 * 1000;
>
> -    xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
> +    ovs_rwlock_wrlock(&monotonic_clock.rwlock);
>      timespec_add(&monotonic_clock.warp, &monotonic_clock.warp, &ts);
> -    xpthread_rwlock_unlock(&monotonic_clock.rwlock);
> +    ovs_rwlock_unlock(&monotonic_clock.rwlock);
>
>      unixctl_command_reply(conn, "warped");
>  }
> diff --git a/lib/uuid.c b/lib/uuid.c
> index 7b03fe0..18748ea 100644
> --- a/lib/uuid.c
> +++ b/lib/uuid.c
> @@ -81,19 +81,19 @@ uuid_init(void)
>  void
>  uuid_generate(struct uuid *uuid)
>  {
> -    static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> +    static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
>      uint64_t copy[2];
>
>      uuid_init();
>
>      /* Copy out the counter's current value, then increment it. */
> -    xpthread_mutex_lock(&mutex);
> +    ovs_mutex_lock(&mutex);
>      copy[0] = counter[0];
>      copy[1] = counter[1];
>      if (++counter[1] == 0) {
>          counter[0]++;
>      }
> -    xpthread_mutex_unlock(&mutex);
> +    ovs_mutex_unlock(&mutex);
>
>      /* AES output is exactly 16 bytes, so we encrypt directly into 'uuid'. */
>      aes128_encrypt(&key, copy, uuid);
> diff --git a/lib/vlog.c b/lib/vlog.c
> index c2fc8e0..c65efbc 100644
> --- a/lib/vlog.c
> +++ b/lib/vlog.c
> @@ -82,10 +82,15 @@ struct vlog_module *vlog_modules[] = {
>  #define n_vlog_modules ARRAY_SIZE(vlog_modules)
>  #endif
>
> +/* Protects the 'pattern' in all "struct facility"s, so that a race between
> + * changing and reading the pattern does not cause an access to freed
> + * memory. */
> +static struct ovs_rwlock pattern_rwlock = OVS_RWLOCK_INITIALIZER;
> +
>  /* Information about each facility. */
>  struct facility {
>      const char *name;           /* Name. */
> -    char *pattern;              /* Current pattern (see 'pattern_rwlock'). */
> +    char *pattern OVS_GUARDED_BY(pattern_rwlock); /* Current pattern. */
>      bool default_pattern;       /* Whether current pattern is the default. */
>  };
>  static struct facility facilities[VLF_N_FACILITIES] = {
> @@ -94,27 +99,22 @@ static struct facility facilities[VLF_N_FACILITIES] = {
>  #undef VLOG_FACILITY
>  };
>
> -/* Protects the 'pattern' in all "struct facility"s, so that a race between
> - * changing and reading the pattern does not cause an access to freed
> - * memory. */
> -static pthread_rwlock_t pattern_rwlock = PTHREAD_RWLOCK_INITIALIZER;
> -
>  /* Sequence number for the message currently being composed. */
>  DEFINE_PER_THREAD_DATA(unsigned int, msg_num, 0);
>
>  /* VLF_FILE configuration.
>   *
>   * All of the following is protected by 'log_file_mutex', which nests inside
> - * pattern_rwlock. */
> -static pthread_mutex_t log_file_mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> -static char *log_file_name;
> -static int log_fd = -1;
> -static struct async_append *log_writer;
> + * pattern_rwlock. */ /* TODO try out acquired_before and after here. */
> +static struct ovs_mutex log_file_mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
> +static char *log_file_name OVS_GUARDED_BY(log_file_mutex);
> +static int log_fd OVS_GUARDED_BY(log_file_mutex) = -1;
> +static struct async_append *log_writer OVS_GUARDED_BY(log_file_mutex);
>
>  static void format_log_message(const struct vlog_module *, enum vlog_level,
>                                 enum vlog_facility,
>                                 const char *message, va_list, struct ds *)
> -    PRINTF_FORMAT(4, 0);
> +    PRINTF_FORMAT(4, 0) OVS_REQ_RDLOCK(pattern_rwlock);
>
>  /* Searches the 'n_names' in 'names'.  Returns the index of a match for
>   * 'target', or 'n_names' if no name matches. */
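The OVS_GUARDED_BY() annotations on 'pattern', 'log_file_name', 'log_fd', and
'log_writer' are what give -Wthread-safety something to check.  A minimal
sketch of the kind of mistake it should now flag (illustrative only; assumes
OVS_GUARDED_BY() maps onto clang's guarded_by attribute via the lib/compiler.h
changes):

    static struct ovs_mutex counter_mutex = OVS_MUTEX_INITIALIZER;
    static int counter OVS_GUARDED_BY(counter_mutex);

    static void
    bump_counter(void)
    {
        counter++;                  /* clang should warn: access requires
                                     * holding 'counter_mutex'. */
        ovs_mutex_lock(&counter_mutex);
        counter++;                  /* OK: mutex held. */
        ovs_mutex_unlock(&counter_mutex);
    }
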
> @@ -201,8 +201,8 @@ vlog_get_level(const struct vlog_module *module, enum vlog_facility facility)
>      return module->levels[facility];
>  }
>
> -static void OVS_MUST_HOLD(log_file_mutex)
> -update_min_level(struct vlog_module *module)
> +static void
> +update_min_level(struct vlog_module *module) OVS_REQ_WRLOCK(log_file_mutex)
>  {
>      enum vlog_facility facility;
>
> @@ -224,7 +224,7 @@ set_facility_level(enum vlog_facility facility, struct vlog_module *module,
>      assert(facility >= 0 && facility < VLF_N_FACILITIES);
>      assert(level < VLL_N_LEVELS);
>
> -    xpthread_mutex_lock(&log_file_mutex);
> +    ovs_mutex_lock(&log_file_mutex);
>      if (!module) {
>          struct vlog_module **mp;
>
> @@ -236,7 +236,7 @@ set_facility_level(enum vlog_facility facility, struct vlog_module *module,
>          module->levels[facility] = level;
>          update_min_level(module);
>      }
> -    xpthread_mutex_unlock(&log_file_mutex);
> +    ovs_mutex_unlock(&log_file_mutex);
>  }
>
>  /* Sets the logging level for the given 'module' and 'facility' to 'level'.  A
> @@ -261,14 +261,14 @@ do_set_pattern(enum vlog_facility facility, const char *pattern)
>  {
>      struct facility *f = &facilities[facility];
>
> -    xpthread_rwlock_wrlock(&pattern_rwlock);
> +    ovs_rwlock_wrlock(&pattern_rwlock);
>      if (!f->default_pattern) {
>          free(f->pattern);
>      } else {
>          f->default_pattern = false;
>      }
>      f->pattern = xstrdup(pattern);
> -    xpthread_rwlock_unlock(&pattern_rwlock);
> +    ovs_rwlock_unlock(&pattern_rwlock);
>  }
>
>  /* Sets the pattern for the given 'facility' to 'pattern'. */
> @@ -297,6 +297,7 @@ vlog_set_log_file(const char *file_name)
>      struct stat new_stat;
>      int new_log_fd;
>      bool same_file;
> +    bool log_close;
>
>      /* Open new log file. */
>      new_log_file_name = (file_name
> @@ -311,14 +312,14 @@ vlog_set_log_file(const char *file_name)
>      }
>
>      /* If the new log file is the same one we already have open, bail out. */
> -    xpthread_mutex_lock(&log_file_mutex);
> +    ovs_mutex_lock(&log_file_mutex);
>      same_file = (log_fd >= 0
>                   && new_log_fd >= 0
>                   && !fstat(log_fd, &old_stat)
>                   && !fstat(new_log_fd, &new_stat)
>                   && old_stat.st_dev == new_stat.st_dev
>                   && old_stat.st_ino == new_stat.st_ino);
> -    xpthread_mutex_unlock(&log_file_mutex);
> +    ovs_mutex_unlock(&log_file_mutex);
>      if (same_file) {
>          close(new_log_fd);
>          free(new_log_file_name);
> @@ -326,12 +327,15 @@ vlog_set_log_file(const char *file_name)
>      }
>
>      /* Log closing old log file (we can't log while holding log_file_mutex). */
> -    if (log_fd >= 0) {
> +    ovs_mutex_lock(&log_file_mutex);
> +    log_close = log_fd >= 0;
> +    ovs_mutex_unlock(&log_file_mutex);
> +    if (log_close) {
>          VLOG_INFO("closing log file");
>      }
>
>      /* Close old log file, if any, and install new one. */
> -    xpthread_mutex_lock(&log_file_mutex);
> +    ovs_mutex_lock(&log_file_mutex);
>      if (log_fd >= 0) {
>          free(log_file_name);
>          close(log_fd);
> @@ -345,7 +349,7 @@ vlog_set_log_file(const char *file_name)
>      for (mp = vlog_modules; mp < &vlog_modules[n_vlog_modules]; mp++) {
>          update_min_level(*mp);
>      }
> -    xpthread_mutex_unlock(&log_file_mutex);
> +    ovs_mutex_unlock(&log_file_mutex);
>
>      /* Log opening new log file (we can't log while holding log_file_mutex). */
>      VLOG_INFO("opened log file %s", new_log_file_name);
> @@ -362,9 +366,9 @@ vlog_reopen_log_file(void)
>  {
>      char *fn;
>
> -    xpthread_mutex_lock(&log_file_mutex);
> +    ovs_mutex_lock(&log_file_mutex);
>      fn = log_file_name ? xstrdup(log_file_name) : NULL;
> -    xpthread_mutex_unlock(&log_file_mutex);
> +    ovs_mutex_unlock(&log_file_mutex);
>
>      if (fn) {
>          int error = vlog_set_log_file(fn);
> @@ -504,7 +508,13 @@ static void
>  vlog_unixctl_reopen(struct unixctl_conn *conn, int argc OVS_UNUSED,
>                      const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
>  {
> -    if (log_file_name) {
> +    bool has_log_file;
> +
> +    ovs_mutex_lock(&log_file_mutex);
> +    has_log_file = log_file_name != NULL;
> +    ovs_mutex_unlock(&log_file_mutex);
> +
> +    if (has_log_file) {
>          int error = vlog_reopen_log_file();
>          if (error) {
>              unixctl_command_reply_error(conn, ovs_strerror(errno));
> @@ -786,7 +796,11 @@ vlog_valist(const struct vlog_module *module, enum vlog_level level,
>  {
>      bool log_to_console = module->levels[VLF_CONSOLE] >= level;
>      bool log_to_syslog = module->levels[VLF_SYSLOG] >= level;
> -    bool log_to_file = module->levels[VLF_FILE] >= level && log_fd >= 0;
> +    bool log_to_file;
> +
> +    ovs_mutex_lock(&log_file_mutex);
> +    log_to_file = module->levels[VLF_FILE] >= level && log_fd >= 0;
> +    ovs_mutex_unlock(&log_file_mutex);
>      if (log_to_console || log_to_syslog || log_to_file) {
>          int save_errno = errno;
>          struct ds s;
> @@ -797,7 +811,7 @@ vlog_valist(const struct vlog_module *module, enum vlog_level level,
>          ds_reserve(&s, 1024);
>          ++*msg_num_get();
>
> -        xpthread_rwlock_rdlock(&pattern_rwlock);
> +        ovs_rwlock_rdlock(&pattern_rwlock);
>          if (log_to_console) {
>              format_log_message(module, level, VLF_CONSOLE, message, args, &s);
>              ds_put_char(&s, '\n');
> @@ -820,16 +834,16 @@ vlog_valist(const struct vlog_module *module, enum vlog_level level,
>              format_log_message(module, level, VLF_FILE, message, args, &s);
>              ds_put_char(&s, '\n');
>
> -            xpthread_mutex_lock(&log_file_mutex);
> +            ovs_mutex_lock(&log_file_mutex);
>              if (log_fd >= 0) {
>                  async_append_write(log_writer, s.string, s.length);
>                  if (level == VLL_EMER) {
>                      async_append_flush(log_writer);
>                  }
>              }
> -            xpthread_mutex_unlock(&log_file_mutex);
> +            ovs_mutex_unlock(&log_file_mutex);
>          }
> -        xpthread_rwlock_unlock(&pattern_rwlock);
> +        ovs_rwlock_unlock(&pattern_rwlock);
>
>          ds_destroy(&s);
>          errno = save_errno;
> @@ -929,7 +943,7 @@ vlog_should_drop(const struct vlog_module *module, enum vlog_level level,
>          return true;
>      }
>
> -    xpthread_mutex_lock(&rl->mutex);
> +    ovs_mutex_lock(&rl->mutex);
>      if (!token_bucket_withdraw(&rl->token_bucket, VLOG_MSG_TOKENS)) {
>          time_t now = time_now();
>          if (!rl->n_dropped) {
> @@ -937,19 +951,19 @@ vlog_should_drop(const struct vlog_module *module, enum vlog_level level,
>          }
>          rl->last_dropped = now;
>          rl->n_dropped++;
> -        xpthread_mutex_unlock(&rl->mutex);
> +        ovs_mutex_unlock(&rl->mutex);
>          return true;
>      }
>
>      if (!rl->n_dropped) {
> -        xpthread_mutex_unlock(&rl->mutex);
> +        ovs_mutex_unlock(&rl->mutex);
>      } else {
>          time_t now = time_now();
>          unsigned int n_dropped = rl->n_dropped;
>          unsigned int first_dropped_elapsed = now - rl->first_dropped;
>          unsigned int last_dropped_elapsed = now - rl->last_dropped;
>          rl->n_dropped = 0;
> -        xpthread_mutex_unlock(&rl->mutex);
> +        ovs_mutex_unlock(&rl->mutex);
>
>          vlog(module, level,
>               "Dropped %u log messages in last %u seconds (most recently, "
> diff --git a/lib/vlog.h b/lib/vlog.h
> index c111ff6..901b3d3 100644
> --- a/lib/vlog.h
> +++ b/lib/vlog.h
> @@ -104,7 +104,7 @@ struct vlog_rate_limit {
>      time_t first_dropped;       /* Time first message was dropped. */
>      time_t last_dropped;        /* Time of most recent message drop. */
>      unsigned int n_dropped;     /* Number of messages dropped. */
> -    pthread_mutex_t mutex;      /* Mutual exclusion for rate limit. */
> +    struct ovs_mutex mutex;     /* Mutual exclusion for rate limit. */
>  };
>
>  /* Number of tokens to emit a message.  We add 'rate' tokens per millisecond,
> @@ -119,7 +119,7 @@ struct vlog_rate_limit {
>              0,                              /* first_dropped */         \
>              0,                              /* last_dropped */          \
>              0,                              /* n_dropped */             \
> -            PTHREAD_ADAPTIVE_MUTEX_INITIALIZER /* mutex */              \
> +            OVS_ADAPTIVE_MUTEX_INITIALIZER  /* mutex */                 \
>          }
>
>  /* Configuring how each module logs messages. */
> diff --git a/utilities/ovs-dev.py b/utilities/ovs-dev.py
> index 8c8f5bd..5485e96 100755
> --- a/utilities/ovs-dev.py
> +++ b/utilities/ovs-dev.py
> @@ -52,6 +52,16 @@ def uname():
>
>  def conf():
>      tag()
> +    if options.clang:
> +        ENV["CC"] = "clang"
> +        ENV["CFLAGS"] = ENV["CFLAGS"] \
> +                + " -Wno-cast-align -Wno-sign-compare" \
> +                + " -Wno-initializer-overrides" \
> +                + " -Wthread-safety"
> +
> +        # TODO only needed for sflow.  come up with something better?
> +        ENV["CFLAGS"] = ENV["CFLAGS"] + " -Wno-unused
> -Wno-unused-parameter"
> +
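With the new --clang option added below, trying the checker should just be a
matter of passing it along with the script's usual commands, presumably
something like "./utilities/ovs-dev.py --clang conf make" (the exact
invocation depends on how ovs-dev.py dispatches its commands).
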
>      configure = ["./configure", "--prefix=" + ROOT, "--localstatedir=" +
> ROOT,
>                   "--with-logdir=%s/log" % ROOT, "--with-rundir=%s/run" %
> ROOT,
>                   "--with-linux=/lib/modules/%s/build" % uname(),
> @@ -75,7 +85,6 @@ def make(args=""):
>      make = "make -s -j 8 " + args
>      try:
>          _sh("cgcc", "--version", capture=True)
> -        make += " C=1"
>      except OSError:
>          pass
>      _sh(make)
> @@ -275,6 +284,8 @@ def main():
>                       help="run ovs-vswitchd under gdb")
>      group.add_option("--valgrind", dest="valgrind", action="store_true",
>                       help="run ovs-vswitchd under valgrind")
> +    group.add_option("--clang", dest="clang", action="store_true",
> +                     help="build ovs-vswitchd with clang")
>      parser.add_option_group(group)
>
>      options, args = parser.parse_args()
> diff --git a/vswitchd/system-stats.c b/vswitchd/system-stats.c
> index b164228..e178a74 100644
> --- a/vswitchd/system-stats.c
> +++ b/vswitchd/system-stats.c
> @@ -506,12 +506,12 @@ get_filesys_stats(struct smap *stats OVS_UNUSED)
>
>  #define SYSTEM_STATS_INTERVAL (5 * 1000) /* In milliseconds. */
>
> -static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
> +static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
>  static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
> -static struct latch latch;
> +static struct latch latch OVS_GUARDED_BY(mutex);
>  static bool enabled;
> -static bool started;
> -static struct smap *system_stats;
> +static bool started OVS_GUARDED_BY(mutex);
> +static struct smap *system_stats OVS_GUARDED_BY(mutex);
>
>  static void *system_stats_thread_func(void *);
>  static void discard_stats(void);
> @@ -521,7 +521,7 @@ void
>  system_stats_enable(bool enable)
>  {
>      if (enabled != enable) {
> -        xpthread_mutex_lock(&mutex);
> +        ovs_mutex_lock(&mutex);
>          if (enable) {
>              if (!started) {
>                  xpthread_create(NULL, NULL, system_stats_thread_func, NULL);
> @@ -532,7 +532,7 @@ system_stats_enable(bool enable)
>              xpthread_cond_signal(&cond);
>          }
>          enabled = enable;
> -        xpthread_mutex_unlock(&mutex);
> +        ovs_mutex_unlock(&mutex);
>      }
>  }
>
> @@ -549,7 +549,7 @@ system_stats_run(void)
>  {
>      struct smap *stats = NULL;
>
> -    xpthread_mutex_lock(&mutex);
> +    ovs_mutex_lock(&mutex);
>      if (system_stats) {
>          latch_poll(&latch);
>
> @@ -560,7 +560,7 @@ system_stats_run(void)
>              discard_stats();
>          }
>      }
> -    xpthread_mutex_unlock(&mutex);
> +    ovs_mutex_unlock(&mutex);
>
>      return stats;
>  }
> @@ -576,7 +576,7 @@ system_stats_wait(void)
>  }
>
>  static void
> -discard_stats(void)
> +discard_stats(void) OVS_REQ_WRLOCK(mutex)
>  {
>      if (system_stats) {
>          smap_destroy(system_stats);
> @@ -594,11 +594,11 @@ system_stats_thread_func(void *arg OVS_UNUSED)
>          long long int next_refresh;
>          struct smap *stats;
>
> -        xpthread_mutex_lock(&mutex);
> +        ovs_mutex_lock(&mutex);
>          while (!enabled) {
> -            xpthread_cond_wait(&cond, &mutex);
> +            ovs_mutex_cond_wait(&cond, &mutex);
>          }
> -        xpthread_mutex_unlock(&mutex);
> +        ovs_mutex_unlock(&mutex);
>
>          stats = xmalloc(sizeof *stats);
>          smap_init(stats);
> @@ -608,11 +608,11 @@ system_stats_thread_func(void *arg OVS_UNUSED)
>          get_process_stats(stats);
>          get_filesys_stats(stats);
>
> -        xpthread_mutex_lock(&mutex);
> +        ovs_mutex_lock(&mutex);
>          discard_stats();
>          system_stats = stats;
>          latch_set(&latch);
> -        xpthread_mutex_unlock(&mutex);
> +        ovs_mutex_unlock(&mutex);
>
>          next_refresh = time_msec() + SYSTEM_STATS_INTERVAL;
>          do {
> --
> 1.7.9.5
>
>