/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#ifndef _NXT_ATOMIC_H_INCLUDED_
#define _NXT_ATOMIC_H_INCLUDED_


/*
 * nxt_atomic_try_lock() must set an acquire barrier on lock.
 * nxt_atomic_xchg() must set an acquire barrier.
 * nxt_atomic_release() must set a release barrier.
 */
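
/*
 * A minimal usage sketch (not part of the original header): a test-and-set
 * spinlock built on these primitives.  The nxt_example_* names below are
 * hypothetical and used only for illustration.
 *
 *     static nxt_atomic_t  nxt_example_lock;   // 0 - unlocked, 1 - locked
 *
 *     void
 *     nxt_example_spin_lock(void)
 *     {
 *         while (!nxt_atomic_try_lock(&nxt_example_lock)) {
 *             nxt_cpu_pause();     // relax the CPU between failed attempts
 *         }
 *         // the acquire barrier set by nxt_atomic_try_lock() keeps the
 *         // critical section from being reordered before the lock
 *     }
 *
 *     void
 *     nxt_example_spin_unlock(void)
 *     {
 *         nxt_atomic_release(&nxt_example_lock);  // release barrier, store 0
 *     }
 */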

#if (NXT_HAVE_GCC_ATOMIC) /* GCC 4.1 builtin atomic operations */

typedef intptr_t                    nxt_atomic_int_t;
typedef uintptr_t                   nxt_atomic_uint_t;
typedef volatile nxt_atomic_uint_t  nxt_atomic_t;

/*
 * __sync_bool_compare_and_swap() is a full barrier.
 * __sync_lock_test_and_set() is an acquire barrier.
 * __sync_lock_release() is a release barrier.
 */

#define nxt_atomic_cmp_set(lock, cmp, set)                                    \
    __sync_bool_compare_and_swap(lock, cmp, set)


#define nxt_atomic_xchg(lock, set)                                            \
    __sync_lock_test_and_set(lock, set)


#define nxt_atomic_fetch_add(value, add)                                      \
    __sync_fetch_and_add(value, add)


#define nxt_atomic_try_lock(lock)                                             \
    nxt_atomic_cmp_set(lock, 0, 1)


#define nxt_atomic_release(lock)                                              \
    __sync_lock_release(lock)


#define nxt_atomic_or_fetch(ptr, val)                                         \
    __sync_or_and_fetch(ptr, val)


#define nxt_atomic_and_fetch(ptr, val)                                        \
    __sync_and_and_fetch(ptr, val)


#if (__i386__ || __i386 || __amd64__ || __amd64)
#define nxt_cpu_pause()                                                       \
    __asm__ ("pause")

#else
#define nxt_cpu_pause()
#endif
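
/*
 * Another illustrative sketch (an assumption, not project code): using
 * nxt_atomic_fetch_add() as a shared counter.  nxt_atomic_fetch_add()
 * returns the value the counter held before the addition.
 *
 *     static nxt_atomic_t  nxt_example_counter;
 *
 *     nxt_atomic_uint_t
 *     nxt_example_next_id(void)
 *     {
 *         // atomically increment and return the pre-increment value
 *         return nxt_atomic_fetch_add(&nxt_example_counter, 1);
 *     }
 */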


#elif (NXT_HAVE_SOLARIS_ATOMIC) /* Solaris 10 */

#include <atomic.h>

typedef long                        nxt_atomic_int_t;
typedef ulong_t                     nxt_atomic_uint_t;
typedef volatile nxt_atomic_uint_t  nxt_atomic_t;


#define nxt_atomic_cmp_set(lock, cmp, set)                                    \
    (atomic_cas_ulong(lock, cmp, set) == (ulong_t) cmp)


#define nxt_atomic_xchg(lock, set)                                            \
    atomic_add_swap(lock, set)


#define nxt_atomic_fetch_add(value, add)                                      \
    (atomic_add_long_nv(value, add) - add)


#define nxt_atomic_or_fetch(ptr, val)                                         \
    atomic_or_ulong_nv(ptr, val)


#define nxt_atomic_and_fetch(ptr, val)                                        \
    atomic_and_ulong_nv(ptr, val)


/*
 * Solaris uses the SPARC Total Store Order model.  In this model:
 * 1) Each atomic load-store instruction behaves as if it were followed by
 *    #LoadLoad, #LoadStore, and #StoreStore barriers.
 * 2) Each load instruction behaves as if it were followed by
 *    #LoadLoad and #LoadStore barriers.
 * 3) Each store instruction behaves as if it were followed by
 *    a #StoreStore barrier.
 *
 * On x86_64, atomic instructions set a full barrier and ordinary
 * instructions set implicit #LoadLoad, #LoadStore, and #StoreStore barriers.
 *
 * An acquire barrier requires at least #LoadLoad and #LoadStore barriers,
 * and these are provided by the atomic load-store instruction.
 *
 * A release barrier requires at least #LoadStore and #StoreStore barriers,
 * so a lock release does not require an explicit barrier: every load
 * instruction in the critical section is followed by an implicit #LoadStore
 * barrier and every store instruction is followed by an implicit #StoreStore
 * barrier.
 */

#define nxt_atomic_try_lock(lock)                                             \
    nxt_atomic_cmp_set(lock, 0, 1)


#define nxt_atomic_release(lock)                                              \
    *lock = 0;
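
/*
 * An illustrative trace (an assumption for clarity, not project code) of why
 * the plain store above is a sufficient release under TSO; "shared_data" and
 * "lock" are hypothetical variables:
 *
 *     shared_data = 42;              // store, followed by implicit #StoreStore
 *     nxt_atomic_release(&lock);     // the plain store of 0 cannot become
 *                                    // visible before the store to shared_data
 *
 * A reader that later acquires the lock with nxt_atomic_try_lock() therefore
 * sees shared_data already updated.
 */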


/*
 * The "rep; nop" is used instead of "pause" to avoid the "[ PAUSE ]" hardware
 * capability added by the linker, since the Solaris ld.so.1 does not know
 * about it:
 *
 *   ld.so.1: ...: fatal: hardware capability unsupported: 0x2000  [ PAUSE ]
 */

#if (__i386__ || __i386 || __amd64__ || __amd64)
#define nxt_cpu_pause()                                                       \
    __asm__ ("rep; nop")

#else
#define nxt_cpu_pause()
#endif


/* elif (NXT_HAVE_MACOSX_ATOMIC) */

/*
 * The atomic(3) interface was introduced in Mac OS X 10.4 (Tiger) and
 * extended in 10.5 (Leopard).  However, its support is omitted here because:
 *
 * 1) the interface is still incomplete:
 *    *) there are OSAtomicAdd32Barrier() and OSAtomicAdd64Barrier()
 *       but no OSAtomicAddLongBarrier();
 *    *) there is no interface for the XCHG operation.
 *
 * 2) the interface is tuned for non-SMP systems: it omits the LOCK prefix
 *    on single-CPU systems, but nowadays Mac OS X systems are at least
 *    dual-core.  Thus these indirect calls only add overhead compared with
 *    the inlined atomic operations supported by GCC and Clang on modern
 *    Mac OS X systems.
 */


#elif (NXT_HAVE_XLC_ATOMIC) /* XL C/C++ V8.0 for AIX */

#if (NXT_64BIT)

typedef long                        nxt_atomic_int_t;
typedef unsigned long               nxt_atomic_uint_t;
typedef volatile nxt_atomic_int_t   nxt_atomic_t;


nxt_inline nxt_bool_t
nxt_atomic_cmp_set(nxt_atomic_t *lock, nxt_atomic_int_t cmp,
    nxt_atomic_int_t set)
{
    nxt_atomic_int_t  old;

    old = cmp;

    return __compare_and_swaplp(lock, &old, set);
}


#define nxt_atomic_xchg(lock, set)                                            \
    __fetch_and_swaplp(lock, set)


#define nxt_atomic_fetch_add(value, add)                                      \
    __fetch_and_addlp(value, add)


#else /* NXT_32BIT */

typedef int                         nxt_atomic_int_t;
typedef unsigned int                nxt_atomic_uint_t;
typedef volatile nxt_atomic_int_t   nxt_atomic_t;


nxt_inline nxt_bool_t
nxt_atomic_cmp_set(nxt_atomic_t *lock, nxt_atomic_int_t cmp,
    nxt_atomic_int_t set)
{
    nxt_atomic_int_t  old;

    old = cmp;

    return __compare_and_swap(lock, &old, set);
}


#define nxt_atomic_xchg(lock, set)                                            \
    __fetch_and_swap(lock, set)


#define nxt_atomic_fetch_add(value, add)                                      \
    __fetch_and_add(value, add)


#endif /* NXT_32BIT */


/*
 * __lwsync() is a "lwsync" instruction that sets #LoadLoad, #LoadStore,
 * and #StoreStore barriers.
 *
 * __compare_and_swap() is a pair of "ldarx" and "stdcx" instructions.
 * A "lwsync" does not set a #StoreLoad barrier, so it cannot be used after
 * this pair, since a subsequent load inside the critical section could be
 * performed after the "ldarx" instruction but before the "stdcx" instruction.
 * However, that load would still return correct data, because otherwise the
 * "ldarx/stdcx" pair would fail and the loaded data would be discarded.
 * Nevertheless, the "isync" instruction is used to be on the safe side.
 *
 * A full barrier can be set with __sync(), a "sync" instruction, but there
 * is also the faster __isync(), an "isync" instruction.  This instruction is
 * not a memory barrier but an instruction barrier.  An "isync" instruction
 * causes the processor to complete execution of all previous instructions
 * and then to discard instructions (which may have begun execution) following
 * the "isync".  After the "isync" is executed, the following instructions
 * then begin execution.  The "isync" is used to ensure that the loads
 * following entry into a critical section are not performed (because of
 * aggressive out-of-order or speculative execution in the processor) until
 * the lock is granted.
 */

nxt_inline nxt_bool_t
nxt_atomic_try_lock(nxt_atomic_t *lock)
{
    if (nxt_atomic_cmp_set(lock, 0, 1)) {
        __isync();
        return 1;
    }

    return 0;
}


#define nxt_atomic_release(lock)                                              \
    do { __lwsync(); *lock = 0; } while (0)
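
/*
 * An illustrative sketch (an assumption for clarity, not project code) of how
 * the acquire and release halves above pair up around a critical section;
 * "lock" and "shared_data" are hypothetical variables:
 *
 *     while (!nxt_atomic_try_lock(&lock)) {   // "isync" after a successful CAS
 *         nxt_cpu_pause();                    // keeps the loads below inside
 *     }                                       // the critical section
 *
 *     shared_data++;                          // protected by the lock
 *
 *     nxt_atomic_release(&lock);              // "lwsync" orders the update of
 *                                             // shared_data before the unlock
 */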


#define nxt_cpu_pause()


#endif /* NXT_HAVE_XLC_ATOMIC */


#endif /* _NXT_ATOMIC_H_INCLUDED_ */