[ovs-dev] [PATCH 01/18] lib/ovs-atomic: Add missing macro argument parentheses.
Jarno Rajahalme
jrajahalme at nicira.com
Fri Aug 22 20:58:12 UTC 2014
Without the parentheses, an operator applied to the macro argument (e.g.,
the dereference in "*DST" or "sizeof(*DST)") could bind to only part of
the argument expression — a ternary expression, for example.
Also minor style fixes.
Signed-off-by: Jarno Rajahalme <jrajahalme at nicira.com>
---
lib/ovs-atomic-gcc4+.h | 8 ++++----
lib/ovs-atomic-i586.h | 32 ++++++++++++++++----------------
lib/ovs-atomic-x86_64.h | 14 +++++++-------
3 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/lib/ovs-atomic-gcc4+.h b/lib/ovs-atomic-gcc4+.h
index bb08ff9..25bcf20 100644
--- a/lib/ovs-atomic-gcc4+.h
+++ b/lib/ovs-atomic-gcc4+.h
@@ -83,7 +83,7 @@ atomic_signal_fence(memory_order order)
\
if (IS_LOCKLESS_ATOMIC(*dst__)) { \
atomic_thread_fence(ORDER); \
- *(typeof(*DST) volatile *)dst__ = src__; \
+ *(typeof(*(DST)) volatile *)dst__ = src__; \
atomic_thread_fence_if_seq_cst(ORDER); \
} else { \
atomic_store_locked(dst__, src__); \
@@ -99,7 +99,7 @@ atomic_signal_fence(memory_order order)
\
if (IS_LOCKLESS_ATOMIC(*src__)) { \
atomic_thread_fence_if_seq_cst(ORDER); \
- *dst__ = *(typeof(*SRC) volatile *)src__; \
+ *dst__ = *(typeof(*(SRC)) volatile *)src__; \
} else { \
atomic_read_locked(src__, dst__); \
} \
@@ -128,7 +128,6 @@ atomic_signal_fence(memory_order order)
#define atomic_compare_exchange_weak_explicit \
atomic_compare_exchange_strong_explicit
-
#define atomic_op__(RMW, OP, ARG, ORIG) \
({ \
typeof(RMW) rmw__ = (RMW); \
@@ -140,11 +139,12 @@ atomic_signal_fence(memory_order order)
} else { \
atomic_op_locked(rmw__, OP, arg__, orig__); \
} \
+ (void) 0; \
})
#define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
-#define atomic_or( RMW, ARG, ORIG) atomic_op__(RMW, or, ARG, ORIG)
+#define atomic_or(RMW, ARG, ORIG) atomic_op__(RMW, or, ARG, ORIG)
#define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
#define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)
diff --git a/lib/ovs-atomic-i586.h b/lib/ovs-atomic-i586.h
index f2419ba..8d954b1 100644
--- a/lib/ovs-atomic-i586.h
+++ b/lib/ovs-atomic-i586.h
@@ -209,10 +209,10 @@ atomic_signal_fence(memory_order order)
#define atomic_exchange__(DST, SRC, ORDER) \
({ \
typeof(DST) dst___ = (DST); \
- typeof(*DST) src___ = (SRC); \
+ typeof(*(DST)) src___ = (SRC); \
\
if ((ORDER) > memory_order_consume) { \
- if (sizeof(*DST) == 8) { \
+ if (sizeof(*(DST)) == 8) { \
atomic_exchange_8__(dst___, src___, "memory"); \
} else { \
asm volatile("xchg %1,%0 ; " \
@@ -222,7 +222,7 @@ atomic_signal_fence(memory_order order)
:: "memory"); \
} \
} else { \
- if (sizeof(*DST) == 8) { \
+ if (sizeof(*(DST)) == 8) { \
atomic_exchange_8__(dst___, src___, "cc"); \
} else { \
asm volatile("xchg %1,%0 ; " \
@@ -237,10 +237,10 @@ atomic_signal_fence(memory_order order)
#define atomic_store_explicit(DST, SRC, ORDER) \
({ \
typeof(DST) dst__ = (DST); \
- typeof(*DST) src__ = (SRC); \
+ typeof(*(DST)) src__ = (SRC); \
\
if ((ORDER) != memory_order_seq_cst \
- && sizeof(*DST) <= 4) { \
+ && sizeof(*(DST)) <= 4) { \
atomic_compiler_barrier(ORDER); \
*dst__ = src__; \
} else { \
@@ -259,10 +259,10 @@ atomic_signal_fence(memory_order order)
typeof(DST) dst__ = (DST); \
typeof(SRC) src__ = (SRC); \
\
- if (sizeof(*DST) <= 4) { \
+ if (sizeof(*(DST)) <= 4) { \
*dst__ = *src__; \
} else { \
- typeof(*DST) res__; \
+ typeof(*(DST)) res__; \
\
asm volatile(" movl %%ebx,%%eax ; " \
" movl %%ecx,%%edx ; " \
@@ -325,13 +325,13 @@ atomic_signal_fence(memory_order order)
({ \
typeof(DST) dst__ = (DST); \
typeof(DST) expp__ = (EXP); \
- typeof(*DST) src__ = (SRC); \
- typeof(*DST) exp__ = *expp__; \
+ typeof(*(DST)) src__ = (SRC); \
+ typeof(*(DST)) exp__ = *expp__; \
uint8_t res__; \
(void)ORD_FAIL; \
\
if ((ORDER) > memory_order_consume) { \
- if (sizeof(*DST) <= 4) { \
+ if (sizeof(*(DST)) <= 4) { \
atomic_compare_exchange__(dst__, exp__, src__, res__, \
"memory"); \
} else { \
@@ -339,7 +339,7 @@ atomic_signal_fence(memory_order order)
"memory"); \
} \
} else { \
- if (sizeof(*DST) <= 4) { \
+ if (sizeof(*(DST)) <= 4) { \
atomic_compare_exchange__(dst__, exp__, src__, res__, \
"cc"); \
} else { \
@@ -371,7 +371,7 @@ atomic_signal_fence(memory_order order)
#define atomic_add_32__(RMW, ARG, ORIG, ORDER) \
({ \
typeof(RMW) rmw__ = (RMW); \
- typeof(*RMW) arg__ = (ARG); \
+ typeof(*(RMW)) arg__ = (ARG); \
\
if ((ORDER) > memory_order_consume) { \
atomic_add__(rmw__, arg__, "memory"); \
@@ -388,7 +388,7 @@ atomic_signal_fence(memory_order order)
typeof(RMW) rmw__ = (RMW); \
typeof(ARG) arg__ = (ARG); \
\
- typeof(*RMW) val__; \
+ typeof(*(RMW)) val__; \
\
atomic_read_explicit(rmw__, &val__, memory_order_relaxed); \
do { \
@@ -400,14 +400,14 @@ atomic_signal_fence(memory_order order)
})
#define atomic_add_explicit(RMW, ARG, ORIG, ORDER) \
- (sizeof(*RMW) <= 4 \
+ (sizeof(*(RMW)) <= 4 \
? atomic_add_32__(RMW, ARG, ORIG, ORDER) \
: atomic_op__(RMW, +, ARG, ORIG, ORDER))
#define atomic_add(RMW, ARG, ORIG) \
atomic_add_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
#define atomic_sub_explicit(RMW, ARG, ORIG, ORDER) \
- (sizeof(*RMW) <= 4 \
+ (sizeof(*(RMW)) <= 4 \
? atomic_add_32__(RMW, -(ARG), ORIG, ORDER) \
: atomic_op__(RMW, -, ARG, ORIG, ORDER))
#define atomic_sub(RMW, ARG, ORIG) \
@@ -415,7 +415,7 @@ atomic_signal_fence(memory_order order)
#define atomic_or_explicit(RMW, ARG, ORIG, ORDER) \
atomic_op__(RMW, |, ARG, ORIG, ORDER)
-#define atomic_or( RMW, ARG, ORIG) \
+#define atomic_or(RMW, ARG, ORIG) \
atomic_or_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
#define atomic_xor_explicit(RMW, ARG, ORIG, ORDER) \
diff --git a/lib/ovs-atomic-x86_64.h b/lib/ovs-atomic-x86_64.h
index f7908f7..1e7d427 100644
--- a/lib/ovs-atomic-x86_64.h
+++ b/lib/ovs-atomic-x86_64.h
@@ -174,7 +174,7 @@ atomic_signal_fence(memory_order order)
#define atomic_exchange__(DST, SRC, ORDER) \
({ \
typeof(DST) dst___ = (DST); \
- typeof(*DST) src___ = (SRC); \
+ typeof(*(DST)) src___ = (SRC); \
\
if ((ORDER) > memory_order_consume) { \
asm volatile("xchg %1,%0 ; " \
@@ -198,7 +198,7 @@ atomic_signal_fence(memory_order order)
#define atomic_store_explicit(DST, SRC, ORDER) \
({ \
typeof(DST) dst__ = (DST); \
- typeof(*DST) src__ = (SRC); \
+ typeof(*(DST)) src__ = (SRC); \
\
if ((ORDER) != memory_order_seq_cst) { \
atomic_compiler_barrier(ORDER); \
@@ -248,8 +248,8 @@ atomic_signal_fence(memory_order order)
({ \
typeof(DST) dst__ = (DST); \
typeof(DST) expp__ = (EXP); \
- typeof(*DST) src__ = (SRC); \
- typeof(*DST) exp__ = *expp__; \
+ typeof(*(DST)) src__ = (SRC); \
+ typeof(*(DST)) exp__ = *expp__; \
uint8_t res__; \
(void)ORD_FAIL; \
\
@@ -284,7 +284,7 @@ atomic_signal_fence(memory_order order)
#define atomic_add_explicit(RMW, ARG, ORIG, ORDER) \
({ \
typeof(RMW) rmw__ = (RMW); \
- typeof(*RMW) arg__ = (ARG); \
+ typeof(*(RMW)) arg__ = (ARG); \
\
if ((ORDER) > memory_order_consume) { \
atomic_add__(rmw__, arg__, "memory"); \
@@ -308,7 +308,7 @@ atomic_signal_fence(memory_order order)
typeof(RMW) rmw__ = (RMW); \
typeof(ARG) arg__ = (ARG); \
\
- typeof(*RMW) val__; \
+ typeof(*(RMW)) val__; \
\
atomic_read_explicit(rmw__, &val__, memory_order_relaxed); \
do { \
@@ -321,7 +321,7 @@ atomic_signal_fence(memory_order order)
#define atomic_or_explicit(RMW, ARG, ORIG, ORDER) \
atomic_op__(RMW, |, ARG, ORIG, ORDER)
-#define atomic_or( RMW, ARG, ORIG) \
+#define atomic_or(RMW, ARG, ORIG) \
atomic_or_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
#define atomic_xor_explicit(RMW, ARG, ORIG, ORDER) \
--
1.7.10.4
More information about the dev
mailing list