ovs-atomic-types: Move into ovs-atomic.h.
cascardo/ovs.git: lib/ovs-atomic-gcc4+.h
/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This header implements atomic operation primitives on GCC 4.x. */
#ifndef IN_OVS_ATOMIC_H
#error "This header should only be included indirectly via ovs-atomic.h."
#endif

#include "ovs-atomic-locked.h"
#define OVS_ATOMIC_GCC4P_IMPL 1

#define ATOMIC(TYPE) TYPE

#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
#define ATOMIC_SHORT_LOCK_FREE 2
#define ATOMIC_INT_LOCK_FREE 2
#define ATOMIC_LONG_LOCK_FREE (ULONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_LLONG_LOCK_FREE (ULLONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_POINTER_LOCK_FREE 2
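
/* As in C11, a *_LOCK_FREE value of 2 means "always lock-free" and 0 means
 * "never lock-free".  "long" and "long long" are lock-free here only when
 * they are no wider than a pointer; otherwise operations on them fall back
 * to the mutex-based implementation in ovs-atomic-locked.h. */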

typedef enum {
    memory_order_relaxed,
    memory_order_consume,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst
} memory_order;
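
/* These mirror the C11 memory_order values.  The GCC __sync builtins used
 * below provide only a full barrier, so every ordering stronger than
 * memory_order_relaxed (consume included) is realized with
 * __sync_synchronize().  That is stronger than C11 requires, which is safe,
 * merely conservative. */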
\f
#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))
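
/* Objects no wider than a pointer are assumed to admit lock-free access via
 * the __sync builtins; anything wider is routed to the lock-based
 * atomic_*_locked fallbacks from ovs-atomic-locked.h. */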
\f
#define ATOMIC_VAR_INIT(VALUE) VALUE
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
#define atomic_destroy(OBJECT) ((void) (OBJECT))

static inline void
atomic_thread_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        __sync_synchronize();
    }
}

static inline void
atomic_thread_fence_if_seq_cst(memory_order order)
{
    if (order == memory_order_seq_cst) {
        __sync_synchronize();
    }
}

static inline void
atomic_signal_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        asm volatile("" : : : "memory");
    }
}
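
/* atomic_thread_fence() orders memory against other threads with a full
 * hardware barrier.  atomic_signal_fence() only prevents the compiler from
 * reordering across it, which suffices for synchronizing with a signal
 * handler running in the same thread. */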

#define atomic_is_lock_free(OBJ)                \
    ((void) *(OBJ),                             \
     IS_LOCKLESS_ATOMIC(*(OBJ)))

#define atomic_store(DST, SRC) \
    atomic_store_explicit(DST, SRC, memory_order_seq_cst)
#define atomic_store_explicit(DST, SRC, ORDER)          \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
        memory_order order__ = (ORDER);                 \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
            atomic_thread_fence(order__);               \
            *dst__ = src__;                             \
            atomic_thread_fence_if_seq_cst(order__);    \
        } else {                                        \
            atomic_store_locked(dst__, src__);          \
        }                                               \
        (void) 0;                                       \
    })
#define atomic_read(SRC, DST) \
    atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER)           \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
        memory_order order__ = (ORDER);                 \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*src__)) {               \
            atomic_thread_fence_if_seq_cst(order__);    \
            *dst__ = *src__;                            \
            atomic_thread_fence(order__);               \
        } else {                                        \
            atomic_read_locked(src__, dst__);           \
        }                                               \
        (void) 0;                                       \
    })
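
/* Illustrative use of the store/read macros above ("counter" and "value"
 * are example names, not part of this header):
 *
 *     static ATOMIC(unsigned int) counter = ATOMIC_VAR_INIT(0);
 *     unsigned int value;
 *
 *     atomic_store(&counter, 42);        (seq_cst store)
 *     atomic_read(&counter, &value);     (seq_cst load; value is now 42)
 */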

#define atomic_op__(RMW, OP, ARG, ORIG)                     \
    ({                                                      \
        typeof(RMW) rmw__ = (RMW);                          \
        typeof(ARG) arg__ = (ARG);                          \
        typeof(ORIG) orig__ = (ORIG);                       \
                                                            \
        if (IS_LOCKLESS_ATOMIC(*rmw__)) {                   \
            *orig__ = __sync_fetch_and_##OP(rmw__, arg__);  \
        } else {                                            \
            atomic_op_locked(rmw__, OP, arg__, orig__);     \
        }                                                   \
    })
124
125 #define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
126 #define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
127 #define atomic_or( RMW, ARG, ORIG) atomic_op__(RMW, or,  ARG, ORIG)
128 #define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
129 #define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)
130
131 #define atomic_add_explicit(RMW, OPERAND, ORIG, ORDER)  \
132     ((void) (ORDER), atomic_add(RMW, OPERAND, ORIG))
133 #define atomic_sub_explicit(RMW, OPERAND, ORIG, ORDER)  \
134     ((void) (ORDER), atomic_sub(RMW, OPERAND, ORIG))
135 #define atomic_or_explicit(RMW, OPERAND, ORIG, ORDER)   \
136     ((void) (ORDER), atomic_or(RMW, OPERAND, ORIG))
137 #define atomic_xor_explicit(RMW, OPERAND, ORIG, ORDER)  \
138     ((void) (ORDER), atomic_xor(RMW, OPERAND, ORIG))
139 #define atomic_and_explicit(RMW, OPERAND, ORIG, ORDER)  \
140     ((void) (ORDER), atomic_and(RMW, OPERAND, ORIG))
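
/* In the lock-free case each atomic_<op>() expands to the corresponding
 * __sync_fetch_and_<op>() builtin, which acts as a full barrier, so the
 * ORDER argument of the _explicit variants is evaluated but otherwise
 * ignored.  Illustrative use, continuing the example above:
 *
 *     unsigned int orig;
 *
 *     atomic_add(&counter, 5, &orig);    (counter += 5; orig receives the
 *                                         value counter held beforehand)
 */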
\f
/* atomic_flag */

typedef struct {
    int b;
} atomic_flag;
#define ATOMIC_FLAG_INIT { false }

static inline void
atomic_flag_init(volatile atomic_flag *object OVS_UNUSED)
{
    /* Nothing to do. */
}

static inline void
atomic_flag_destroy(volatile atomic_flag *object OVS_UNUSED)
{
    /* Nothing to do. */
}

static inline bool
atomic_flag_test_and_set(volatile atomic_flag *object)
{
    return __sync_lock_test_and_set(&object->b, 1);
}

static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
                                  memory_order order OVS_UNUSED)
{
    return atomic_flag_test_and_set(object);
}

static inline void
atomic_flag_clear(volatile atomic_flag *object)
{
    __sync_lock_release(&object->b);
}

static inline void
atomic_flag_clear_explicit(volatile atomic_flag *object,
                           memory_order order OVS_UNUSED)
{
    atomic_flag_clear(object);
}
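
/* Illustrative use of atomic_flag as a simple spin lock (a sketch with
 * example names, not part of this header):
 *
 *     static volatile atomic_flag flag = ATOMIC_FLAG_INIT;
 *
 *     while (atomic_flag_test_and_set(&flag)) {
 *         continue;                      (spin until the flag was clear)
 *     }
 *     ...critical section...
 *     atomic_flag_clear(&flag);
 *
 * GCC documents __sync_lock_test_and_set() as an acquire barrier and
 * __sync_lock_release() as a release barrier, which is exactly what
 * acquiring and releasing a lock requires. */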