[ovs-dev] [urcu v2 06/15] ovs-atomic: Use raw types, not structs, when locks are required.
Andy Zhou
azhou at nicira.com
Thu Mar 13 08:40:02 UTC 2014
Nice interface and abstraction of different compilers.
Acked-by: Andy Zhou <azhou at nicira.com>
On Tue, Mar 11, 2014 at 1:56 PM, Ben Pfaff <blp at nicira.com> wrote:
> Until now, the GCC 4+ and pthreads implementations of atomics have used
> struct wrappers for their atomic types. This had the advantage of allowing
> a mutex to be wrapped in, in some cases, and of better type-checking by
> preventing stray uses of atomic variables other than through one of the
> atomic_*() functions or macros. However, the mutex meant that an
> atomic_destroy() function-like macro needed to be used. The struct wrapper
> also made it impossible to define new atomic types that were compatible
> with each other without using a typedef. For example, one could not simply
> define a macro like
> #define ATOMIC(TYPE) struct { TYPE value; }
> and then have two declarations like:
> ATOMIC(void *) x;
> ATOMIC(void *) y;
> and do anything with these objects that require type-compatibility, even
> "&x == &y", because the two structs are not compatible. One can do it
> through a typedef:
> typedef ATOMIC(void *) atomic_voidp;
> atomic_voidp x, y;
> but that is inconvenient, especially because of the need to invent a name
> for the type.
>
> This commit aims to ease the problem by getting rid of the wrapper structs
> in the cases where the atomic library used them. It gets rid of the
> mutexes, in the cases where they are still needed, by using a global
> array of mutexes instead.
>
> This commit also defines the ATOMIC macro described above and documents
> its use in ovs-atomic.h.
>
> Signed-off-by: Ben Pfaff <blp at nicira.com>
> ---
> lib/automake.mk | 3 +-
> lib/ovs-atomic-gcc4+.c | 68 ----------------
> lib/ovs-atomic-gcc4+.h | 198 ++++++++++++---------------------------------
> lib/ovs-atomic-locked.c | 58 +++++++++++++
> lib/ovs-atomic-locked.h | 32 ++++++++
> lib/ovs-atomic-pthreads.h | 66 ++++++---------
> lib/ovs-atomic.h | 8 +-
> 7 files changed, 175 insertions(+), 258 deletions(-)
> delete mode 100644 lib/ovs-atomic-gcc4+.c
> create mode 100644 lib/ovs-atomic-locked.c
> create mode 100644 lib/ovs-atomic-locked.h
>
> diff --git a/lib/automake.mk b/lib/automake.mk
> index 3b2b75f..1310dcd 100644
> --- a/lib/automake.mk
> +++ b/lib/automake.mk
> @@ -134,9 +134,10 @@ lib_libopenvswitch_la_SOURCES = \
> lib/ovs-atomic-c11.h \
> lib/ovs-atomic-clang.h \
> lib/ovs-atomic-flag-gcc4.7+.h \
> - lib/ovs-atomic-gcc4+.c \
> lib/ovs-atomic-gcc4+.h \
> lib/ovs-atomic-gcc4.7+.h \
> + lib/ovs-atomic-locked.c \
> + lib/ovs-atomic-locked.h \
> lib/ovs-atomic-pthreads.c \
> lib/ovs-atomic-pthreads.h \
> lib/ovs-atomic-types.h \
> diff --git a/lib/ovs-atomic-gcc4+.c b/lib/ovs-atomic-gcc4+.c
> deleted file mode 100644
> index d6a68ae..0000000
> --- a/lib/ovs-atomic-gcc4+.c
> +++ /dev/null
> @@ -1,68 +0,0 @@
> -/*
> - * Copyright (c) 2013 Nicira, Inc.
> - *
> - * Licensed under the Apache License, Version 2.0 (the "License");
> - * you may not use this file except in compliance with the License.
> - * You may obtain a copy of the License at:
> - *
> - * http://www.apache.org/licenses/LICENSE-2.0
> - *
> - * Unless required by applicable law or agreed to in writing, software
> - * distributed under the License is distributed on an "AS IS" BASIS,
> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
> - * See the License for the specific language governing permissions and
> - * limitations under the License.
> - */
> -
> -#include <config.h>
> -
> -#include "ovs-atomic.h"
> -#include "ovs-thread.h"
> -
> -#if OVS_ATOMIC_GCC4P_IMPL
> -static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
> -
> -#define DEFINE_LOCKED_OP(TYPE, NAME, OPERATOR) \
> - TYPE##_t \
> - locked_##TYPE##_##NAME(struct locked_##TYPE *u, TYPE##_t arg) \
> - { \
> - TYPE##_t old_value; \
> - \
> - ovs_mutex_lock(&mutex); \
> - old_value = u->value; \
> - u->value OPERATOR arg; \
> - ovs_mutex_unlock(&mutex); \
> - \
> - return old_value; \
> - }
> -
> -#define DEFINE_LOCKED_TYPE(TYPE) \
> - TYPE##_t \
> - locked_##TYPE##_load(const struct locked_##TYPE *u) \
> - { \
> - TYPE##_t value; \
> - \
> - ovs_mutex_lock(&mutex); \
> - value = u->value; \
> - ovs_mutex_unlock(&mutex); \
> - \
> - return value; \
> - } \
> - \
> - void \
> - locked_##TYPE##_store(struct locked_##TYPE *u, TYPE##_t value) \
> - { \
> - ovs_mutex_lock(&mutex); \
> - u->value = value; \
> - ovs_mutex_unlock(&mutex); \
> - } \
> - DEFINE_LOCKED_OP(TYPE, add, +=); \
> - DEFINE_LOCKED_OP(TYPE, sub, -=); \
> - DEFINE_LOCKED_OP(TYPE, or, |=); \
> - DEFINE_LOCKED_OP(TYPE, xor, ^=); \
> - DEFINE_LOCKED_OP(TYPE, and, &=)
> -
> -DEFINE_LOCKED_TYPE(uint64);
> -DEFINE_LOCKED_TYPE(int64);
> -
> -#endif /* OVS_ATOMIC_GCC4P_IMPL */
> diff --git a/lib/ovs-atomic-gcc4+.h b/lib/ovs-atomic-gcc4+.h
> index ddfd03c..923e624 100644
> --- a/lib/ovs-atomic-gcc4+.h
> +++ b/lib/ovs-atomic-gcc4+.h
> @@ -1,5 +1,5 @@
> /*
> - * Copyright (c) 2013 Nicira, Inc.
> + * Copyright (c) 2013, 2014 Nicira, Inc.
> *
> * Licensed under the Apache License, Version 2.0 (the "License");
> * you may not use this file except in compliance with the License.
> @@ -19,88 +19,19 @@
> #error "This header should only be included indirectly via ovs-atomic.h."
> #endif
>
> +#include "ovs-atomic-locked.h"
> #define OVS_ATOMIC_GCC4P_IMPL 1
>
> -#define DEFINE_LOCKLESS_ATOMIC(TYPE, NAME) typedef struct { TYPE value; }
> NAME
> +#define ATOMIC(TYPE) TYPE
> +#include "ovs-atomic-types.h"
>
> #define ATOMIC_BOOL_LOCK_FREE 2
> -DEFINE_LOCKLESS_ATOMIC(bool, atomic_bool);
> -
> #define ATOMIC_CHAR_LOCK_FREE 2
> -DEFINE_LOCKLESS_ATOMIC(char, atomic_char);
> -DEFINE_LOCKLESS_ATOMIC(signed char, atomic_schar);
> -DEFINE_LOCKLESS_ATOMIC(unsigned char, atomic_uchar);
> -
> #define ATOMIC_SHORT_LOCK_FREE 2
> -DEFINE_LOCKLESS_ATOMIC(short, atomic_short);
> -DEFINE_LOCKLESS_ATOMIC(unsigned short, atomic_ushort);
> -
> #define ATOMIC_INT_LOCK_FREE 2
> -DEFINE_LOCKLESS_ATOMIC(int, atomic_int);
> -DEFINE_LOCKLESS_ATOMIC(unsigned int, atomic_uint);
> -
> -#if ULONG_MAX <= UINTPTR_MAX
> - #define ATOMIC_LONG_LOCK_FREE 2
> - DEFINE_LOCKLESS_ATOMIC(long, atomic_long);
> - DEFINE_LOCKLESS_ATOMIC(unsigned long, atomic_ulong);
> -#elif ULONG_MAX == UINT64_MAX
> - #define ATOMIC_LONG_LOCK_FREE 0
> - typedef struct locked_int64 atomic_long;
> - typedef struct locked_uint64 atomic_ulong;
> -#else
> - #error "not implemented"
> -#endif
> -
> -#if ULLONG_MAX <= UINTPTR_MAX
> - #define ATOMIC_LLONG_LOCK_FREE 2
> - DEFINE_LOCKLESS_ATOMIC(long long, atomic_llong);
> - DEFINE_LOCKLESS_ATOMIC(unsigned long long, atomic_ullong);
> -#elif ULLONG_MAX == UINT64_MAX
> - #define ATOMIC_LLONG_LOCK_FREE 0
> - typedef struct locked_int64 atomic_llong;
> - typedef struct locked_uint64 atomic_ullong;
> -#else
> - #error "not implemented"
> -#endif
> -
> -#if SIZE_MAX <= UINTPTR_MAX
> - DEFINE_LOCKLESS_ATOMIC(size_t, atomic_size_t);
> - DEFINE_LOCKLESS_ATOMIC(ptrdiff_t, atomic_ptrdiff_t);
> -#elif SIZE_MAX == UINT64_MAX
> - typedef struct locked_uint64 atomic_size_t;
> - typedef struct locked_int64 atomic_ptrdiff_t;
> -#else
> - #error "not implemented"
> -#endif
> -
> -#if UINTMAX_MAX <= UINTPTR_MAX
> - DEFINE_LOCKLESS_ATOMIC(intmax_t, atomic_intmax_t);
> - DEFINE_LOCKLESS_ATOMIC(uintmax_t, atomic_uintmax_t);
> -#elif UINTMAX_MAX == UINT64_MAX
> - typedef struct locked_int64 atomic_intmax_t;
> - typedef struct locked_uint64 atomic_uintmax_t;
> -#else
> - #error "not implemented"
> -#endif
> -
> +#define ATOMIC_LONG_LOCK_FREE (ULONG_MAX <= UINTPTR_MAX ? 2 : 0)
> +#define ATOMIC_LLONG_LOCK_FREE (ULLONG_MAX <= UINTPTR_MAX ? 2 : 0)
> #define ATOMIC_POINTER_LOCK_FREE 2
> -DEFINE_LOCKLESS_ATOMIC(intptr_t, atomic_intptr_t);
> -DEFINE_LOCKLESS_ATOMIC(uintptr_t, atomic_uintptr_t);
> -
> -/* Nonstandard atomic types. */
> -DEFINE_LOCKLESS_ATOMIC(uint8_t, atomic_uint8_t);
> -DEFINE_LOCKLESS_ATOMIC(uint16_t, atomic_uint16_t);
> -DEFINE_LOCKLESS_ATOMIC(uint32_t, atomic_uint32_t);
> -DEFINE_LOCKLESS_ATOMIC(int8_t, atomic_int8_t);
> -DEFINE_LOCKLESS_ATOMIC(int16_t, atomic_int16_t);
> -DEFINE_LOCKLESS_ATOMIC(int32_t, atomic_int32_t);
> -#if UINT64_MAX <= UINTPTR_MAX
> - DEFINE_LOCKLESS_ATOMIC(uint64_t, atomic_uint64_t);
> - DEFINE_LOCKLESS_ATOMIC(int64_t, atomic_int64_t);
> -#else
> - typedef struct locked_uint64 atomic_uint64_t;
> - typedef struct locked_int64 atomic_int64_t;
> -#endif
>
> typedef enum {
> memory_order_relaxed,
> @@ -111,45 +42,10 @@ typedef enum {
> memory_order_seq_cst
> } memory_order;
>
> -/* locked_uint64. */
> -
> -#define IF_LOCKED_UINT64(OBJECT, THEN, ELSE) \
> - __builtin_choose_expr( \
> - __builtin_types_compatible_p(typeof(OBJECT), struct
> locked_uint64), \
> - (THEN), (ELSE))
> -#define AS_LOCKED_UINT64(OBJECT) ((struct locked_uint64 *) (void *)
> (OBJECT))
> -#define AS_UINT64(OBJECT) ((uint64_t *) (OBJECT))
> -struct locked_uint64 {
> - uint64_t value;
> -};
> -
> -uint64_t locked_uint64_load(const struct locked_uint64 *);
> -void locked_uint64_store(struct locked_uint64 *, uint64_t);
> -uint64_t locked_uint64_add(struct locked_uint64 *, uint64_t arg);
> -uint64_t locked_uint64_sub(struct locked_uint64 *, uint64_t arg);
> -uint64_t locked_uint64_or(struct locked_uint64 *, uint64_t arg);
> -uint64_t locked_uint64_xor(struct locked_uint64 *, uint64_t arg);
> -uint64_t locked_uint64_and(struct locked_uint64 *, uint64_t arg);
> -
> -#define IF_LOCKED_INT64(OBJECT, THEN, ELSE) \
> - __builtin_choose_expr( \
> - __builtin_types_compatible_p(typeof(OBJECT), struct
> locked_int64), \
> - (THEN), (ELSE))
> -#define AS_LOCKED_INT64(OBJECT) ((struct locked_int64 *) (void *)
> (OBJECT))
> -#define AS_INT64(OBJECT) ((int64_t *) (OBJECT))
> -struct locked_int64 {
> - int64_t value;
> -};
> -int64_t locked_int64_load(const struct locked_int64 *);
> -void locked_int64_store(struct locked_int64 *, int64_t);
> -int64_t locked_int64_add(struct locked_int64 *, int64_t arg);
> -int64_t locked_int64_sub(struct locked_int64 *, int64_t arg);
> -int64_t locked_int64_or(struct locked_int64 *, int64_t arg);
> -int64_t locked_int64_xor(struct locked_int64 *, int64_t arg);
> -int64_t locked_int64_and(struct locked_int64 *, int64_t arg);
> +#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))
>
> -#define ATOMIC_VAR_INIT(VALUE) { .value = (VALUE) }
> -#define atomic_init(OBJECT, VALUE) ((OBJECT)->value = (VALUE), (void) 0)
> +#define ATOMIC_VAR_INIT(VALUE) VALUE
> +#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
> #define atomic_destroy(OBJECT) ((void) (OBJECT))
>
> static inline void
> @@ -176,44 +72,56 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
> }
> }
>
> -#define ATOMIC_SWITCH(OBJECT, LOCKLESS_CASE, \
> - LOCKED_UINT64_CASE, LOCKED_INT64_CASE) \
> - IF_LOCKED_UINT64(OBJECT, LOCKED_UINT64_CASE, \
> - IF_LOCKED_INT64(OBJECT, LOCKED_INT64_CASE, \
> - LOCKLESS_CASE))
> -
> #define atomic_is_lock_free(OBJ) \
> - ((void) (OBJ)->value, \
> - ATOMIC_SWITCH(OBJ, true, false, false))
> + ((void) *(OBJ), \
> + IF_LOCKLESS_ATOMIC(OBJ, true, false))
>
> #define atomic_store(DST, SRC) \
> atomic_store_explicit(DST, SRC, memory_order_seq_cst)
> -#define atomic_store_explicit(DST, SRC, ORDER) \
> - (ATOMIC_SWITCH(DST, \
> - (atomic_thread_fence(ORDER), \
> - (DST)->value = (SRC), \
> - atomic_thread_fence_if_seq_cst(ORDER)), \
> - locked_uint64_store(AS_LOCKED_UINT64(DST), SRC), \
> - locked_int64_store(AS_LOCKED_INT64(DST), SRC)), \
> - (void) 0)
> -
> +#define atomic_store_explicit(DST, SRC, ORDER) \
> + ({ \
> + typeof(DST) dst__ = (DST); \
> + typeof(SRC) src__ = (SRC); \
> + memory_order order__ = (ORDER); \
> + \
> + if (IS_LOCKLESS_ATOMIC(*dst__)) { \
> + atomic_thread_fence(order__); \
> + *dst__ = src__; \
> + atomic_thread_fence_if_seq_cst(order__); \
> + } else { \
> + atomic_store_locked(DST, SRC); \
> + } \
> + (void) 0; \
> + })
> #define atomic_read(SRC, DST) \
> atomic_read_explicit(SRC, DST, memory_order_seq_cst)
> -#define atomic_read_explicit(SRC, DST, ORDER) \
> - (ATOMIC_SWITCH(SRC, \
> - (atomic_thread_fence_if_seq_cst(ORDER), \
> - *(DST) = (SRC)->value, \
> - atomic_thread_fence(ORDER)), \
> - *(DST) = locked_uint64_load(AS_LOCKED_UINT64(SRC)), \
> - *(DST) = locked_int64_load(AS_LOCKED_INT64(SRC))), \
> - (void) 0)
> -
> -#define atomic_op__(RMW, OP, ARG, ORIG) \
> - (ATOMIC_SWITCH(RMW, \
> - *(ORIG) = __sync_fetch_and_##OP(&(RMW)->value, ARG), \
> - *(ORIG) = locked_uint64_##OP(AS_LOCKED_UINT64(RMW),
> ARG), \
> - *(ORIG) = locked_int64_##OP(AS_LOCKED_INT64(RMW),
> ARG)), \
> - (void) 0)
> +#define atomic_read_explicit(SRC, DST, ORDER) \
> + ({ \
> + typeof(DST) dst__ = (DST); \
> + typeof(SRC) src__ = (SRC); \
> + memory_order order__ = (ORDER); \
> + \
> + if (IS_LOCKLESS_ATOMIC(*src__)) { \
> + atomic_thread_fence_if_seq_cst(order__); \
> + *dst__ = *src__; \
> + } else { \
> + atomic_read_locked(SRC, DST); \
> + } \
> + (void) 0; \
> + })
> +
> +#define atomic_op__(RMW, OP, ARG, ORIG) \
> + ({ \
> + typeof(RMW) rmw__ = (RMW); \
> + typeof(ARG) arg__ = (ARG); \
> + typeof(ORIG) orig__ = (ORIG); \
> + \
> + if (IS_LOCKLESS_ATOMIC(*rmw__)) { \
> + *orig__ = __sync_fetch_and_##OP(rmw__, arg__); \
> + } else { \
> + atomic_op_locked(RMW, OP, ARG, ORIG); \
> + } \
> + })
>
> #define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
> #define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
> diff --git a/lib/ovs-atomic-locked.c b/lib/ovs-atomic-locked.c
> new file mode 100644
> index 0000000..dc2a435
> --- /dev/null
> +++ b/lib/ovs-atomic-locked.c
> @@ -0,0 +1,58 @@
> +/*
> + * Copyright (c) 2013, 2014 Nicira, Inc.
> + *
> + * Licensed under the Apache License, Version 2.0 (the "License");
> + * you may not use this file except in compliance with the License.
> + * You may obtain a copy of the License at:
> + *
> + * http://www.apache.org/licenses/LICENSE-2.0
> + *
> + * Unless required by applicable law or agreed to in writing, software
> + * distributed under the License is distributed on an "AS IS" BASIS,
> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
> + * See the License for the specific language governing permissions and
> + * limitations under the License.
> + */
> +
> +#include <config.h>
> +
> +#include "ovs-atomic.h"
> +#include "hash.h"
> +#include "ovs-thread.h"
> +
> +#ifdef OVS_ATOMIC_LOCKED_IMPL
> +static struct ovs_mutex *
> +mutex_for_pointer(void *p)
> +{
> + OVS_ALIGNED_STRUCT(CACHE_LINE_SIZE, aligned_mutex) {
> + struct ovs_mutex mutex;
> + char pad[PAD_SIZE(sizeof(struct ovs_mutex), CACHE_LINE_SIZE)];
> + };
> +
> + static struct aligned_mutex atomic_mutexes[] = {
> +#define MUTEX_INIT { .mutex = OVS_MUTEX_INITIALIZER }
> +#define MUTEX_INIT4 MUTEX_INIT, MUTEX_INIT, MUTEX_INIT, MUTEX_INIT
> +#define MUTEX_INIT16 MUTEX_INIT4, MUTEX_INIT4, MUTEX_INIT4, MUTEX_INIT4
> + MUTEX_INIT16, MUTEX_INIT16,
> + };
> + BUILD_ASSERT_DECL(IS_POW2(ARRAY_SIZE(atomic_mutexes)));
> +
> + uint32_t hash = hash_pointer(p, 0);
> + uint32_t indx = hash & (ARRAY_SIZE(atomic_mutexes) - 1);
> + return &atomic_mutexes[indx].mutex;
> +}
> +
> +void
> +atomic_lock__(void *p)
> + OVS_ACQUIRES(mutex_for_pointer(p))
> +{
> + ovs_mutex_lock(mutex_for_pointer(p));
> +}
> +
> +void
> +atomic_unlock__(void *p)
> + OVS_RELEASES(mutex_for_pointer(p))
> +{
> + ovs_mutex_unlock(mutex_for_pointer(p));
> +}
> +#endif /* OVS_ATOMIC_LOCKED_IMPL */
> diff --git a/lib/ovs-atomic-locked.h b/lib/ovs-atomic-locked.h
> new file mode 100644
> index 0000000..438e78c
> --- /dev/null
> +++ b/lib/ovs-atomic-locked.h
> @@ -0,0 +1,32 @@
> +/* This header implements atomic operation locking helpers. */
> +#ifndef IN_OVS_ATOMIC_H
> +#error "This header should only be included indirectly via ovs-atomic.h."
> +#endif
> +
> +#define OVS_ATOMIC_LOCKED_IMPL 1
> +
> +void atomic_lock__(void *);
> +void atomic_unlock__(void *);
> +
> +#define atomic_store_locked(DST, SRC) \
> + (atomic_lock__(DST), \
> + *(DST) = (SRC), \
> + atomic_unlock__(DST), \
> + (void) 0)
> +
> +#define atomic_read_locked(SRC, DST) \
> + (atomic_lock__(SRC), \
> + *(DST) = *(SRC), \
> + atomic_unlock__(SRC), \
> + (void) 0)
> +
> +#define atomic_op_locked_add +=
> +#define atomic_op_locked_sub -=
> +#define atomic_op_locked_or |=
> +#define atomic_op_locked_xor ^=
> +#define atomic_op_locked_and &=
> +#define atomic_op_locked(RMW, OP, OPERAND, ORIG) \
> + (atomic_lock__(RMW), \
> + *(ORIG) = *(RMW), \
> + *(RMW) atomic_op_locked_##OP (OPERAND), \
> + atomic_unlock__(RMW))
> diff --git a/lib/ovs-atomic-pthreads.h b/lib/ovs-atomic-pthreads.h
> index ff39925..7b742cd 100644
> --- a/lib/ovs-atomic-pthreads.h
> +++ b/lib/ovs-atomic-pthreads.h
> @@ -19,9 +19,11 @@
> #error "This header should only be included indirectly via ovs-atomic.h."
> #endif
>
> +#include "ovs-atomic-locked.h"
> +
> #define OVS_ATOMIC_PTHREADS_IMPL 1
>
> -#define ATOMIC(TYPE) struct { TYPE value; pthread_mutex_t mutex; }
> +#define ATOMIC(TYPE) TYPE
> #include "ovs-atomic-types.h"
>
> #define ATOMIC_BOOL_LOCK_FREE 0
> @@ -41,14 +43,9 @@ typedef enum {
> memory_order_seq_cst
> } memory_order;
>
> -#define ATOMIC_VAR_INIT(VALUE) { VALUE, PTHREAD_MUTEX_INITIALIZER }
> -#define atomic_init(OBJECT, VALUE) \
> - ((OBJECT)->value = (VALUE), \
> - pthread_mutex_init(&(OBJECT)->mutex, NULL), \
> - (void) 0)
> -#define atomic_destroy(OBJECT) \
> - (pthread_mutex_destroy(&(OBJECT)->mutex), \
> - (void) 0)
> +#define ATOMIC_VAR_INIT(VALUE) (VALUE)
> +#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
> +#define atomic_destroy(OBJECT) ((void) (OBJECT))
>
> static inline void
> atomic_thread_fence(memory_order order OVS_UNUSED)
> @@ -64,45 +61,30 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
>
> #define atomic_is_lock_free(OBJ) false
>
> -#define atomic_store(DST, SRC) \
> - (pthread_mutex_lock(&(DST)->mutex), \
> - (DST)->value = (SRC), \
> - pthread_mutex_unlock(&(DST)->mutex), \
> - (void) 0)
> +#define atomic_store(DST, SRC) atomic_store_locked(DST, SRC)
> #define atomic_store_explicit(DST, SRC, ORDER) \
> ((void) (ORDER), atomic_store(DST, SRC))
>
> -#define atomic_read(SRC, DST) \
> - (pthread_mutex_lock(CONST_CAST(pthread_mutex_t *, &(SRC)->mutex)), \
> - *(DST) = (SRC)->value, \
> - pthread_mutex_unlock(CONST_CAST(pthread_mutex_t *, &(SRC)->mutex)), \
> - (void) 0)
> +#define atomic_read(SRC, DST) atomic_read_locked(SRC, DST)
> #define atomic_read_explicit(SRC, DST, ORDER) \
> ((void) (ORDER), atomic_read(SRC, DST))
>
> -#define atomic_op__(RMW, OPERATOR, OPERAND, ORIG) \
> - (pthread_mutex_lock(&(RMW)->mutex), \
> - *(ORIG) = (RMW)->value, \
> - (RMW)->value OPERATOR (OPERAND), \
> - pthread_mutex_unlock(&(RMW)->mutex), \
> - (void) 0)
> -
> -#define atomic_add(RMW, OPERAND, ORIG) atomic_op__(RMW, +=, OPERAND, ORIG)
> -#define atomic_sub(RMW, OPERAND, ORIG) atomic_op__(RMW, -=, OPERAND, ORIG)
> -#define atomic_or( RMW, OPERAND, ORIG) atomic_op__(RMW, |=, OPERAND, ORIG)
> -#define atomic_xor(RMW, OPERAND, ORIG) atomic_op__(RMW, ^=, OPERAND, ORIG)
> -#define atomic_and(RMW, OPERAND, ORIG) atomic_op__(RMW, &=, OPERAND, ORIG)
> -
> -#define atomic_add_explicit(RMW, OPERAND, ORIG, ORDER) \
> - ((void) (ORDER), atomic_add(RMW, OPERAND, ORIG))
> -#define atomic_sub_explicit(RMW, OPERAND, ORIG, ORDER) \
> - ((void) (ORDER), atomic_sub(RMW, OPERAND, ORIG))
> -#define atomic_or_explicit(RMW, OPERAND, ORIG, ORDER) \
> - ((void) (ORDER), atomic_or(RMW, OPERAND, ORIG))
> -#define atomic_xor_explicit(RMW, OPERAND, ORIG, ORDER) \
> - ((void) (ORDER), atomic_xor(RMW, OPERAND, ORIG))
> -#define atomic_and_explicit(RMW, OPERAND, ORIG, ORDER) \
> - ((void) (ORDER), atomic_and(RMW, OPERAND, ORIG))
> +#define atomic_add(RMW, ARG, ORIG) atomic_op_locked(RMW, add, ARG, ORIG)
> +#define atomic_sub(RMW, ARG, ORIG) atomic_op_locked(RMW, sub, ARG, ORIG)
> +#define atomic_or( RMW, ARG, ORIG) atomic_op_locked(RMW, or, ARG, ORIG)
> +#define atomic_xor(RMW, ARG, ORIG) atomic_op_locked(RMW, xor, ARG, ORIG)
> +#define atomic_and(RMW, ARG, ORIG) atomic_op_locked(RMW, and, ARG, ORIG)
> +
> +#define atomic_add_explicit(RMW, ARG, ORIG, ORDER) \
> + ((void) (ORDER), atomic_add(RMW, ARG, ORIG))
> +#define atomic_sub_explicit(RMW, ARG, ORIG, ORDER) \
> + ((void) (ORDER), atomic_sub(RMW, ARG, ORIG))
> +#define atomic_or_explicit(RMW, ARG, ORIG, ORDER) \
> + ((void) (ORDER), atomic_or(RMW, ARG, ORIG))
> +#define atomic_xor_explicit(RMW, ARG, ORIG, ORDER) \
> + ((void) (ORDER), atomic_xor(RMW, ARG, ORIG))
> +#define atomic_and_explicit(RMW, ARG, ORIG, ORDER) \
> + ((void) (ORDER), atomic_and(RMW, ARG, ORIG))
>
> /* atomic_flag */
>
> diff --git a/lib/ovs-atomic.h b/lib/ovs-atomic.h
> index 4d43a42..a9d257a 100644
> --- a/lib/ovs-atomic.h
> +++ b/lib/ovs-atomic.h
> @@ -74,10 +74,14 @@
> *
> * (*) Not specified by C11.
> *
> + * Atomic types may also be obtained via ATOMIC(TYPE), e.g. ATOMIC(void *).
> + * Only basic integer types and pointer types can be made atomic this way,
> + * e.g. atomic structs are not supported.
> + *
> * The atomic version of a type doesn't necessarily have the same size or
> * representation as the ordinary version; for example, atomic_int might be a
> - * typedef for a struct that also includes a mutex. The range of an atomic
> - * type does match the range of the corresponding ordinary type.
> + * typedef for a struct. The range of an atomic type does match the range of
> + * the corresponding ordinary type.
> *
> * C11 says that one may use the _Atomic keyword in place of the typedef name,
> * e.g. "_Atomic int" instead of "atomic_int". This library doesn't support
> --
> 1.7.10.4
>
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> http://openvswitch.org/mailman/listinfo/dev
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mail.openvswitch.org/pipermail/ovs-dev/attachments/20140313/bdf7a109/attachment-0005.html>
More information about the dev
mailing list