#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
 * no-op for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
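
/*
 * Example (an illustrative sketch, not part of this header): a driver
 * making descriptor stores in RAM visible to a device before ringing a
 * doorbell.  The barrier matters even on UP because the device reads
 * memory independently of the CPU.  "desc", "DOORBELL" and the mmio
 * pointer are hypothetical names.
 *
 *	desc->addr = buf_dma;
 *	desc->len  = len;
 *	wmb();				// stores reach RAM before doorbell
 *	writel(1, dev->mmio + DOORBELL);
 */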

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
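
/*
 * Example (sketch with hypothetical descriptor fields): dma_rmb()
 * orders the read of a device-written status flag before the reads of
 * the data it covers; dma_wmb() orders the writes that fill a
 * descriptor before the write that hands it back to the device.
 *
 *	if (desc->status & DESC_OWNED_BY_CPU) {
 *		dma_rmb();		// see status before the payload
 *		process(desc->data);
 *		desc->data = 0;
 *		dma_wmb();		// fill payload before releasing
 *		desc->status = DESC_OWNED_BY_DEVICE;
 *	}
 */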

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
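
/*
 * Example (sketch): on SMP, smp_store_mb() stores the value and then
 * acts as a full barrier by using xchg, a locked instruction.  The
 * canonical use is the sleep/wake-up pattern, where the state store
 * must not be reordered past the condition check:
 *
 *	smp_store_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)			// "condition" is hypothetical
 *		schedule();
 */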

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option, x86 is not assumed to have a strong TSO memory
 * model, so fall back to full barriers.
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
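
/*
 * Example (sketch; "msg" and its fields are hypothetical): the classic
 * message-passing pattern.  Under TSO the release/acquire pair costs
 * only compiler barriers, but still prevents the compiler from
 * reordering the payload access across the flag access.
 *
 *	// CPU 0 (producer)
 *	msg->payload = 42;
 *	smp_store_release(&msg->ready, 1);	// publish after payload
 *
 *	// CPU 1 (consumer)
 *	if (smp_load_acquire(&msg->ready))	// read flag before payload
 *		consume(msg->payload);
 */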

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
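
/*
 * Example (sketch): atomic RMW ops such as atomic_dec() use a
 * LOCK-prefixed instruction on x86, which is already a full memory
 * barrier, so only a compiler barrier is needed around them:
 *
 *	obj->dead = 1;			// "obj" is hypothetical
 *	smp_mb__before_atomic();	// order the store with the RMW
 *	atomic_dec(&obj->ref_count);
 */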

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */