/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This header implements atomic operation primitives on GCC 4.x. */
#ifndef IN_OVS_ATOMIC_H
#error "This header should only be included indirectly via ovs-atomic.h."
#endif

#include "ovs-atomic-locked.h"
#define OVS_ATOMIC_GCC4P_IMPL 1

#define ATOMIC(TYPE) TYPE

#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
#define ATOMIC_SHORT_LOCK_FREE 2
#define ATOMIC_INT_LOCK_FREE 2
#define ATOMIC_LONG_LOCK_FREE (ULONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_LLONG_LOCK_FREE (ULLONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_POINTER_LOCK_FREE 2

typedef enum {
    memory_order_relaxed,
    memory_order_consume,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst
} memory_order;

#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))

#define ATOMIC_VAR_INIT(VALUE) VALUE
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)

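/* Illustrative sketch (an editor's example, not part of the original header;
 * the name "counter" is hypothetical): declaring and initializing an atomic
 * with this implementation.  atomic_init() is a plain assignment and is only
 * safe before the object is shared between threads.
 *
 *     static ATOMIC(int) counter = ATOMIC_VAR_INIT(0);
 *
 *     // Or, during single-threaded setup:
 *     atomic_init(&counter, 0);
 */
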
static inline void
atomic_thread_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        __sync_synchronize();
    }
}

static inline void
atomic_thread_fence_if_seq_cst(memory_order order)
{
    if (order == memory_order_seq_cst) {
        __sync_synchronize();
    }
}

static inline void
atomic_signal_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        asm volatile("" : : : "memory");
    }
}

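/* Illustrative sketch (an editor's example; "ready" is a hypothetical
 * variable): a standalone release fence paired with a relaxed store, the
 * conventional use of a fence.  On GCC 4.x every non-relaxed order maps to
 * the full barrier __sync_synchronize().
 *
 *     static ATOMIC(int) ready;
 *
 *     atomic_thread_fence(memory_order_release);
 *     atomic_store_explicit(&ready, 1, memory_order_relaxed);
 */
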
#define atomic_is_lock_free(OBJ)                \
    ((void) *(OBJ),                             \
     IS_LOCKLESS_ATOMIC(*(OBJ)) ? 2 : 0)

#define atomic_store(DST, SRC) \
    atomic_store_explicit(DST, SRC, memory_order_seq_cst)
#define atomic_store_explicit(DST, SRC, ORDER)          \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
            atomic_thread_fence(ORDER);                 \
            *(typeof(*(DST)) volatile *)dst__ = src__;  \
            atomic_thread_fence_if_seq_cst(ORDER);      \
        } else {                                        \
            atomic_store_locked(dst__, src__);          \
        }                                               \
        (void) 0;                                       \
    })
#define atomic_read(SRC, DST) \
    atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER)           \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*src__)) {               \
            atomic_thread_fence_if_seq_cst(ORDER);      \
            *dst__ = *(typeof(*(SRC)) volatile *)src__; \
            atomic_thread_fence(ORDER);                 \
        } else {                                        \
            atomic_read_locked(src__, dst__);           \
        }                                               \
        (void) 0;                                       \
    })

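/* Illustrative sketch (an editor's example; "flag_var" is hypothetical): a
 * seq_cst store in one thread paired with a seq_cst read in another.  Note
 * that atomic_read() writes the result through its second argument rather
 * than returning it.
 *
 *     static ATOMIC(int) flag_var;
 *
 *     // Thread A:
 *     atomic_store(&flag_var, 1);
 *
 *     // Thread B:
 *     int value;
 *     atomic_read(&flag_var, &value);
 */
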
#define atomic_compare_exchange_strong(DST, EXP, SRC)   \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(EXP) expp__ = (EXP);                     \
        typeof(SRC) src__ = (SRC);                      \
        typeof(SRC) exp__ = *expp__;                    \
        typeof(SRC) ret__;                              \
                                                        \
        ret__ = __sync_val_compare_and_swap(dst__, exp__, src__); \
        if (ret__ != exp__) {                                     \
            *expp__ = ret__;                                      \
        }                                                         \
        ret__ == exp__;                                           \
    })
#define atomic_compare_exchange_strong_explicit(DST, EXP, SRC, ORD1, ORD2) \
    ((void) (ORD1), (void) (ORD2),                                     \
     atomic_compare_exchange_strong(DST, EXP, SRC))
#define atomic_compare_exchange_weak            \
    atomic_compare_exchange_strong
#define atomic_compare_exchange_weak_explicit   \
    atomic_compare_exchange_strong_explicit

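/* Illustrative sketch (an editor's example; "x" and atomic_scale() are
 * hypothetical): the usual compare-exchange retry loop.  On failure the
 * expected value is refreshed from memory through EXP, so the loop body
 * needs no explicit re-read.
 *
 *     static ATOMIC(int) x;
 *
 *     void
 *     atomic_scale(int factor)
 *     {
 *         int old;
 *
 *         atomic_read(&x, &old);
 *         while (!atomic_compare_exchange_weak(&x, &old, old * factor)) {
 *             // "old" now holds the current value; just retry.
 *         }
 *     }
 */
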
#define atomic_op__(RMW, OP, ARG, ORIG)                     \
    ({                                                      \
        typeof(RMW) rmw__ = (RMW);                          \
        typeof(ARG) arg__ = (ARG);                          \
        typeof(ORIG) orig__ = (ORIG);                       \
                                                            \
        if (IS_LOCKLESS_ATOMIC(*rmw__)) {                   \
            *orig__ = __sync_fetch_and_##OP(rmw__, arg__);  \
        } else {                                            \
            atomic_op_locked(rmw__, OP, arg__, orig__);     \
        }                                                   \
        (void) 0;                                           \
    })

#define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
#define atomic_or(RMW, ARG, ORIG) atomic_op__(RMW, or, ARG, ORIG)
#define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
#define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)

#define atomic_add_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_add(RMW, OPERAND, ORIG))
#define atomic_sub_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_sub(RMW, OPERAND, ORIG))
#define atomic_or_explicit(RMW, OPERAND, ORIG, ORDER)   \
    ((void) (ORDER), atomic_or(RMW, OPERAND, ORIG))
#define atomic_xor_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_xor(RMW, OPERAND, ORIG))
#define atomic_and_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_and(RMW, OPERAND, ORIG))

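/* Illustrative sketch (an editor's example; "refcount" is hypothetical):
 * every read-modify-write macro returns the pre-operation value through
 * ORIG.  The _explicit variants evaluate and discard ORDER, since the
 * __sync builtins always act as full barriers anyway.
 *
 *     static ATOMIC(unsigned int) refcount;
 *     unsigned int orig;
 *
 *     atomic_add(&refcount, 1, &orig);    // orig = value before the add
 *     atomic_sub_explicit(&refcount, 1, &orig, memory_order_acq_rel);
 */
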
/* atomic_flag */

typedef struct {
    int b;
} atomic_flag;
#define ATOMIC_FLAG_INIT { false }

static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
                                  memory_order order)
{
    bool old;

    /* __sync_lock_test_and_set() by itself is an acquire barrier.
     * For anything higher, additional barriers are needed. */
    if (order > memory_order_acquire) {
        atomic_thread_fence(order);
    }
    old = __sync_lock_test_and_set(&object->b, 1);
    atomic_thread_fence_if_seq_cst(order);

    return old;
}

#define atomic_flag_test_and_set(FLAG)                          \
    atomic_flag_test_and_set_explicit(FLAG, memory_order_seq_cst)

static inline void
atomic_flag_clear_explicit(volatile atomic_flag *object,
                           memory_order order)
{
    /* __sync_lock_release() by itself is a release barrier.  For any
     * other order, additional barriers may be needed. */
    if (order != memory_order_release) {
        atomic_thread_fence(order);
    }
    __sync_lock_release(&object->b);
    atomic_thread_fence_if_seq_cst(order);
}

#define atomic_flag_clear(FLAG)                                 \
    atomic_flag_clear_explicit(FLAG, memory_order_seq_cst)

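/* Illustrative sketch (an editor's example; spin_lock()/spin_unlock() are
 * hypothetical helpers, not part of this API): atomic_flag as a minimal
 * test-and-set spinlock.
 *
 *     static volatile atomic_flag lock_flag = ATOMIC_FLAG_INIT;
 *
 *     static void
 *     spin_lock(void)
 *     {
 *         while (atomic_flag_test_and_set(&lock_flag)) {
 *             // Busy-wait until the holder calls atomic_flag_clear().
 *         }
 *     }
 *
 *     static void
 *     spin_unlock(void)
 *     {
 *         atomic_flag_clear(&lock_flag);
 *     }
 */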