locking/x86: Drop a comment left over from X86_OOSTORE
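An illustrative aside, not part of the original patch: the 32-bit mb()/rmb()/wmb() below pick between a locked read-modify-write on the stack (works on every CPU) and an SSE2 fence instruction, with ALTERNATIVE() patching the fence in at boot when the CPU advertises X86_FEATURE_XMM2. A minimal user-space sketch of the two sequences, assuming a 32-bit (-m32) GCC/Clang build; the function names are invented for the example:

	/*
	 * Sketch only: the two encodings the ALTERNATIVE() chooses between.
	 * Not the kernel macro itself.
	 */
	static inline void full_barrier_lock_addl(void)
	{
		/* Locked RMW on the stack: full barrier, runs on pre-SSE2 CPUs. */
		asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc");
	}

	static inline void full_barrier_mfence(void)
	{
		/* CPUs with SSE2 (X86_FEATURE_XMM2) get this patched in at boot. */
		asm volatile("mfence" ::: "memory");
	}
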
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0681d25..a291745 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
  */
 
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+                                     X86_FEATURE_XMM2) ::: "memory", "cc")
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+                                      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+                                      X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
 #define mb()   asm volatile("mfence":::"memory")
 #define rmb()  asm volatile("lfence":::"memory")
 #endif
 #define dma_wmb()      barrier()
 
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      dma_rmb()
-#define smp_wmb()      barrier()
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else /* !SMP */
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-#endif /* SMP */
-
-#define read_barrier_depends()         do { } while (0)
-#define smp_read_barrier_depends()     do { } while (0)
+#define __smp_mb()     mb()
+#define __smp_rmb()    dma_rmb()
+#define __smp_wmb()    barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
  * model and we should fall back to full barriers.
  */
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                      \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       __smp_mb();                                                     \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       __smp_mb();                                                     \
        ___p1;                                                          \
 })
 
 #else /* regular x86 TSO memory ordering */
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                      \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
@@ -88,7 +77,9 @@ do {                                                                  \
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic()        barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
+
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_X86_BARRIER_H */
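
A usage note, again not part of the patch: with the arch now providing the double-underscore primitives, the included asm-generic/barrier.h builds the public smp_store_release()/smp_load_acquire() wrappers on top of them, and the way callers pair them is unchanged. A minimal sketch of that pairing; the variable and function names are invented for the example:

	/* Sketch of the release/acquire pairing in kernel code. */
	static int payload;
	static int ready;

	void producer(void)
	{
		payload = 42;
		smp_store_release(&ready, 1);	/* orders the payload write before ready */
	}

	int consumer(void)
	{
		if (smp_load_acquire(&ready))	/* pairs with the store_release above */
			return payload;		/* guaranteed to observe payload == 42 */
		return 0;
	}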