[ovs-dev] [PATCH v2 3/7] lib/ovs-atomic: Require memory_order be constant.

Jarno Rajahalme jrajahalme at nicira.com
Thu Jul 31 16:17:22 UTC 2014


Compiler implementations may provide sub-optimal support for
a memory_order passed in as a run-time value
(ref. https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html).

Document that OVS atomics require the memory order to be passed in as
a compile-time constant, and modify ovs-atomic, ovs-rcu, and cmap to
comply with this.

Signed-off-by: Jarno Rajahalme <jrajahalme at nicira.com>
---
v2: Also update ovs-rcu accordingly.
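
Illustration (not part of the patch): the GCC manual referenced above notes
that when the memory order argument to an __atomic builtin is not a
compile-time constant, the compiler cannot specialize the access and may
fall back to the strongest ordering or a run-time dispatch.  A minimal
sketch of the difference, with hypothetical helper names:

    /* Run-time order: the compiler cannot tell which barrier is needed,
     * so it may emit seq_cst code or dispatch on 'memorder' at run time. */
    static inline unsigned int
    example_load(unsigned int *var, int memorder)
    {
        return __atomic_load_n(var, memorder);
    }

    /* Compile-time constant order: the compiler emits exactly the code
     * needed for a relaxed load. */
    static inline unsigned int
    example_load_relaxed(unsigned int *var)
    {
        return __atomic_load_n(var, __ATOMIC_RELAXED);
    }

This is why the macros changed below pass ORDER through directly instead of
first assigning it to a local memory_order variable.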

 lib/cmap.c             |   18 +++++++++------
 lib/ovs-atomic-gcc4+.h |   10 ++++-----
 lib/ovs-atomic.h       |    3 +++
 lib/ovs-rcu.h          |   57 +++++++++++++++++++++++++++++++++---------------
 4 files changed, 58 insertions(+), 30 deletions(-)

diff --git a/lib/cmap.c b/lib/cmap.c
index ba744cc..60629b1 100644
--- a/lib/cmap.c
+++ b/lib/cmap.c
@@ -163,19 +163,23 @@ struct cmap_impl {
 };
 BUILD_ASSERT_DECL(sizeof(struct cmap_impl) == CACHE_LINE_SIZE);
 
-static uint32_t cmap_get_hash__(const atomic_uint32_t *hash,
-                                memory_order order)
+static inline uint32_t cmap_get_hash(const atomic_uint32_t *hash)
 {
     uint32_t hash__;
 
-    atomic_read_explicit(CONST_CAST(ATOMIC(uint32_t) *, hash), &hash__, order);
+    atomic_read_explicit(CONST_CAST(ATOMIC(uint32_t) *, hash), &hash__,
+                         memory_order_acquire);
     return hash__;
 }
 
-#define cmap_get_hash(HASH) \
-    cmap_get_hash__(HASH, memory_order_acquire)
-#define cmap_get_hash_protected(HASH) \
-    cmap_get_hash__(HASH, memory_order_relaxed)
+static inline uint32_t cmap_get_hash_protected(const atomic_uint32_t *hash)
+{
+    uint32_t hash__;
+
+    atomic_read_explicit(CONST_CAST(ATOMIC(uint32_t) *, hash), &hash__,
+                         memory_order_relaxed);
+    return hash__;
+}
 
 static struct cmap_impl *cmap_rehash(struct cmap *, uint32_t mask);
 
diff --git a/lib/ovs-atomic-gcc4+.h b/lib/ovs-atomic-gcc4+.h
index 9a79f7e..bb889c6 100644
--- a/lib/ovs-atomic-gcc4+.h
+++ b/lib/ovs-atomic-gcc4+.h
@@ -63,7 +63,7 @@ atomic_thread_fence_if_seq_cst(memory_order order)
 }
 
 static inline void
-atomic_signal_fence(memory_order order OVS_UNUSED)
+atomic_signal_fence(memory_order order)
 {
     if (order != memory_order_relaxed) {
         asm volatile("" : : : "memory");
@@ -80,12 +80,11 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
     ({                                                  \
         typeof(DST) dst__ = (DST);                      \
         typeof(SRC) src__ = (SRC);                      \
-        memory_order order__ = (ORDER);                 \
                                                         \
         if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
-            atomic_thread_fence(order__);               \
+            atomic_thread_fence(ORDER);                 \
             *dst__ = src__;                             \
-            atomic_thread_fence_if_seq_cst(order__);    \
+            atomic_thread_fence_if_seq_cst(ORDER);      \
         } else {                                        \
             atomic_store_locked(dst__, src__);          \
         }                                               \
@@ -97,10 +96,9 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
     ({                                                  \
         typeof(DST) dst__ = (DST);                      \
         typeof(SRC) src__ = (SRC);                      \
-        memory_order order__ = (ORDER);                 \
                                                         \
         if (IS_LOCKLESS_ATOMIC(*src__)) {               \
-            atomic_thread_fence_if_seq_cst(order__);    \
+            atomic_thread_fence_if_seq_cst(ORDER);      \
             *dst__ = *src__;                            \
         } else {                                        \
             atomic_read_locked(src__, dst__);           \
diff --git a/lib/ovs-atomic.h b/lib/ovs-atomic.h
index fcd7131..78229a7 100644
--- a/lib/ovs-atomic.h
+++ b/lib/ovs-atomic.h
@@ -173,6 +173,9 @@
 *        whole system, providing a total order for stores on all atomic
  *        variables.
  *
+ * OVS atomics require the memory_order to be passed as a compile-time constant
+ * value.
+ *
  * The following functions insert explicit barriers.  Most of the other atomic
  * functions also include barriers.
  *
diff --git a/lib/ovs-rcu.h b/lib/ovs-rcu.h
index 96b3233..47781d8 100644
--- a/lib/ovs-rcu.h
+++ b/lib/ovs-rcu.h
@@ -143,48 +143,71 @@
     ({                                                                  \
         typeof(VAR) ovsrcu_var = (VAR);                                 \
         typeof(VALUE) ovsrcu_value = (VALUE);                           \
-        memory_order ovsrcu_order = (ORDER);                            \
                                                                         \
-        atomic_store_explicit(&ovsrcu_var->p, ovsrcu_value, ovsrcu_order); \
+        atomic_store_explicit(&ovsrcu_var->p, ovsrcu_value, ORDER);     \
         (void *) 0;                                                     \
     })
+
+/* Writes VALUE to the RCU-protected pointer whose address is VAR.
+ *
+ * Users require external synchronization (e.g. a mutex).  See "Usage" above
+ * for an example. */
+#define ovsrcu_set(VAR, VALUE) \
+    ovsrcu_set__(VAR, VALUE, memory_order_release)
+
+/* This can be used for initializing RCU pointers before any readers can
+ * see them.  A later ovsrcu_set() needs to make the bigger structure this
+ * is part of visible to the readers. */
+#define ovsrcu_set_hidden(VAR, VALUE) \
+    ovsrcu_set__(VAR, VALUE, memory_order_relaxed)
+
 #else  /* not GNU C */
+
 struct ovsrcu_pointer { ATOMIC(void *) p; };
 #define OVSRCU_TYPE(TYPE) struct ovsrcu_pointer
 #define OVSRCU_TYPE_INITIALIZER { NULL }
 static inline void *
-ovsrcu_get__(const struct ovsrcu_pointer *pointer, memory_order order)
+ovsrcu_get__(const struct ovsrcu_pointer *pointer)
 {
     void *value;
     atomic_read_explicit(&CONST_CAST(struct ovsrcu_pointer *, pointer)->p,
-                         &value, order);
+                         &value, memory_order_consume);
     return value;
 }
-#define ovsrcu_get(TYPE, VAR) \
-    CONST_CAST(TYPE, ovsrcu_get__(VAR, memory_order_consume))
-#define ovsrcu_get_protected(TYPE, VAR) \
-    CONST_CAST(TYPE, ovsrcu_get__(VAR, memory_order_relaxed))
+#define ovsrcu_get(TYPE, VAR) CONST_CAST(TYPE, ovsrcu_get__(VAR))
 
-static inline void ovsrcu_set__(struct ovsrcu_pointer *pointer,
-                                const void *value,
-                                memory_order order)
+static inline void *
+ovsrcu_get_protected__(const struct ovsrcu_pointer *pointer)
 {
-    atomic_store_explicit(&pointer->p, CONST_CAST(void *, value), order);
+    void *value;
+    atomic_read_explicit(&CONST_CAST(struct ovsrcu_pointer *, pointer)->p,
+                         &value, memory_order_relaxed);
+    return value;
 }
-#endif
+#define ovsrcu_get_protected(TYPE, VAR)                 \
+    CONST_CAST(TYPE, ovsrcu_get_protected__(VAR))
 
 /* Writes VALUE to the RCU-protected pointer whose address is VAR.
  *
  * Users require external synchronization (e.g. a mutex).  See "Usage" above
  * for an example. */
-#define ovsrcu_set(VAR, VALUE) \
-    ovsrcu_set__(VAR, VALUE, memory_order_release)
+static inline void ovsrcu_set(struct ovsrcu_pointer *pointer,
+                              const void *value)
+{
+    atomic_store_explicit(&pointer->p, CONST_CAST(void *, value),
+                          memory_order_release);
+}
 
 /* This can be used for initializing RCU pointers before any readers can
  * see them.  A later ovsrcu_set() needs to make the bigger structure this
  * is part of visible to the readers. */
-#define ovsrcu_set_hidden(VAR, VALUE) \
-    ovsrcu_set__(VAR, VALUE, memory_order_relaxed)
+static inline void ovsrcu_set_hidden(struct ovsrcu_pointer *pointer,
+                                     const void *value)
+{
+    atomic_store_explicit(&pointer->p, CONST_CAST(void *, value),
+                          memory_order_relaxed);
+}
+#endif
 
 /* This can be used for initializing RCU pointers before any readers are
  * executing. */
-- 
1.7.10.4



