INSTALL.md: Fix shell command line formatting.
[cascardo/ovs.git] / lib / ovs-atomic-gcc4+.h
index 9a79f7e..25bcf20 100644 (file)
@@ -63,7 +63,7 @@ atomic_thread_fence_if_seq_cst(memory_order order)
 }
 
 static inline void
-atomic_signal_fence(memory_order order OVS_UNUSED)
+atomic_signal_fence(memory_order order)
 {
     if (order != memory_order_relaxed) {
         asm volatile("" : : : "memory");
@@ -80,12 +80,11 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
     ({                                                  \
         typeof(DST) dst__ = (DST);                      \
         typeof(SRC) src__ = (SRC);                      \
-        memory_order order__ = (ORDER);                 \
                                                         \
         if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
-            atomic_thread_fence(order__);               \
-            *dst__ = src__;                             \
-            atomic_thread_fence_if_seq_cst(order__);    \
+            atomic_thread_fence(ORDER);                 \
+            *(typeof(*(DST)) volatile *)dst__ = src__;  \
+            atomic_thread_fence_if_seq_cst(ORDER);      \
         } else {                                        \
             atomic_store_locked(dst__, src__);          \
         }                                               \
@@ -97,11 +96,10 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
     ({                                                  \
         typeof(DST) dst__ = (DST);                      \
         typeof(SRC) src__ = (SRC);                      \
-        memory_order order__ = (ORDER);                 \
                                                         \
         if (IS_LOCKLESS_ATOMIC(*src__)) {               \
-            atomic_thread_fence_if_seq_cst(order__);    \
-            *dst__ = *src__;                            \
+            atomic_thread_fence_if_seq_cst(ORDER);      \
+            *dst__ = *(typeof(*(SRC)) volatile *)src__; \
         } else {                                        \
             atomic_read_locked(src__, dst__);           \
         }                                               \
@@ -130,7 +128,6 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
 #define atomic_compare_exchange_weak_explicit   \
     atomic_compare_exchange_strong_explicit
 
-
 #define atomic_op__(RMW, OP, ARG, ORIG)                     \
     ({                                                      \
         typeof(RMW) rmw__ = (RMW);                          \
@@ -142,11 +139,12 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
         } else {                                            \
             atomic_op_locked(rmw__, OP, arg__, orig__);     \
         }                                                   \
+        (void) 0;                                           \
     })
 
 #define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
 #define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
-#define atomic_or( RMW, ARG, ORIG) atomic_op__(RMW, or,  ARG, ORIG)
+#define atomic_or(RMW, ARG, ORIG) atomic_op__(RMW, or,  ARG, ORIG)
 #define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
 #define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)
 
@@ -168,28 +166,38 @@ typedef struct {
 } atomic_flag;
 #define ATOMIC_FLAG_INIT { false }
 
-static inline bool
-atomic_flag_test_and_set(volatile atomic_flag *object)
-{
-    return __sync_lock_test_and_set(&object->b, 1);
-}
-
 static inline bool
 atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
-                                  memory_order order OVS_UNUSED)
+                                  memory_order order)
 {
-    return atomic_flag_test_and_set(object);
-}
+    bool old;
 
-static inline void
-atomic_flag_clear(volatile atomic_flag *object)
-{
-    __sync_lock_release(&object->b);
+    /* __sync_lock_test_and_set() by itself is an acquire barrier.
+     * For anything higher additional barriers are needed. */
+    if (order > memory_order_acquire) {
+        atomic_thread_fence(order);
+    }
+    old = __sync_lock_test_and_set(&object->b, 1);
+    atomic_thread_fence_if_seq_cst(order);
+
+    return old;
 }
 
+#define atomic_flag_test_and_set(FLAG)                                  \
+    atomic_flag_test_and_set_explicit(FLAG, memory_order_seq_cst)
+
 static inline void
 atomic_flag_clear_explicit(volatile atomic_flag *object,
-                           memory_order order OVS_UNUSED)
+                           memory_order order)
 {
-    atomic_flag_clear(object);
+    /* __sync_lock_release() by itself is a release barrier.  For
+     * anything else an additional barrier may be needed. */
+    if (order != memory_order_release) {
+        atomic_thread_fence(order);
+    }
+    __sync_lock_release(&object->b);
+    atomic_thread_fence_if_seq_cst(order);
 }
+
+#define atomic_flag_clear(FLAG)                                 \
+    atomic_flag_clear_explicit(FLAG, memory_order_seq_cst)