#ifndef __ATOMIC_OPS_UNIX_H__
#define __ATOMIC_OPS_UNIX_H__

#include "atomic_ops_utils.h" /* For ATOMIC_INLINE. */

#if defined(__arm__)
/* Attempt to fix compilation error on Debian armel kernel.
 * The arm7 architecture does have both 32- and 64-bit atomics, however
 * its gcc does not define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n, so force
 * the __sync code paths on. */
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_1
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_2
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_4
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_8
#endif
/******************************************************************************/
/* 64-bit operations. */
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
  return __sync_fetch_and_sub(p, x);
}

ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}

/* Signed */
ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
  return __sync_fetch_and_sub(p, x);
}

ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}
#elif (defined(__amd64__) || defined(__x86_64__))
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
  /* XADD exchanges the register with the memory operand and stores the sum,
   * so `x` holds the previous value of `*p` on return. */
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
  /* Subtraction is an XADD of the negated operand. */
  x = (uint64_t)(-(int64_t)x);
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return atomic_fetch_and_add_uint64(p, x) + x;
}

ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return atomic_fetch_and_sub_uint64(p, x) - x;
}

ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
  uint64_t ret;
  asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

/* Signed */
ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
  x = -x;
  asm volatile("lock; xaddq %0, %1;"
               : "+r"(x), "=m"(*p) /* Outputs. */
               : "m"(*p)           /* Inputs. */
  );
  return x;
}

ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
  return atomic_fetch_and_add_int64(p, x) + x;
}

ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
  return atomic_fetch_and_sub_int64(p, x) - x;
}

ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
  int64_t ret;
  asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

#else
#  error "Missing implementation for 64-bit atomic operations"
#endif
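/* Usage sketch, not part of this header's API: the CAS primitives return the
 * value that was in *v before the operation, so success means "the return
 * value equals the expected old value". A typical retry loop built on
 * atomic_cas_uint64(), here an atomic maximum. The helper name
 * atomic_max_uint64_example is an illustrative assumption, not existing API.
 *
 *   ATOMIC_INLINE uint64_t atomic_max_uint64_example(uint64_t *p, uint64_t x)
 *   {
 *     uint64_t prev = *p;
 *     while (prev < x) {
 *       const uint64_t cur = atomic_cas_uint64(p, prev, x);
 *       if (cur == prev) {
 *         break;
 *       }
 *       prev = cur;
 *     }
 *     return prev;
 *   }
 *
 * On failure the loop retries with the freshly observed value rather than
 * re-reading *p, which saves one load per iteration. */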
/******************************************************************************/
/* 32-bit operations. */
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}

/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
  return __sync_add_and_fetch(p, x);
}

ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
  return __sync_sub_and_fetch(p, x);
}

ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
  return __sync_val_compare_and_swap(v, old, _new);
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  /* XADD leaves the previous value of `*p` in `ret`,
   * so the post-addition value is `ret + x`. */
  uint32_t ret = x;
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret + x;
}

ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  uint32_t ret = (uint32_t)(-(int32_t)x);
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret - x;
}

ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
  uint32_t ret;
  asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
  int32_t ret = x;
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret + x;
}

ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
  int32_t ret = -x;
  asm volatile("lock; xaddl %0, %1;"
               : "+r"(ret), "=m"(*p) /* Outputs. */
               : "m"(*p)             /* Inputs. */
  );
  return ret - x;
}

ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
  int32_t ret;
  asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return ret;
}

#else
#  error "Missing implementation for 32-bit atomic operations"
#endif
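/* Usage sketch, not part of this header's API: note the naming convention.
 * atomic_add_and_fetch_*() returns the value after the addition, while
 * atomic_fetch_and_add_*() (next block) returns the value before it. A shared
 * counter handing out unique IDs therefore reads naturally with the former
 * (`id_counter` and `my_id` are illustrative names):
 *
 *   static uint32_t id_counter = 0;
 *
 *   const uint32_t my_id = atomic_add_and_fetch_uint32(&id_counter, 1);
 */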
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
{
  return __sync_fetch_and_or(p, x);
}

ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
{
  return __sync_fetch_and_and(p, x);
}

/* Signed */
ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
{
  return __sync_fetch_and_add(p, x);
}

ATOMIC_INLINE int32_t atomic_fetch_and_or_int32(int32_t *p, int32_t x)
{
  return __sync_fetch_and_or(p, x);
}

ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
{
  return __sync_fetch_and_and(p, x);
}

#else
#  error "Missing implementation for 32-bit atomic operations"
#endif
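/* Usage sketch, not part of this header's API: because the fetch_and_or and
 * fetch_and_and variants return the previous value, "set a flag and learn
 * whether it was already set" is a single atomic step. FLAG_DIRTY and `flags`
 * are illustrative names:
 *
 *   const uint32_t prev = atomic_fetch_and_or_uint32(&flags, FLAG_DIRTY);
 *   const int was_already_dirty = (prev & FLAG_DIRTY) != 0;
 *
 * Clearing uses fetch_and_and with the complemented mask:
 *
 *   atomic_fetch_and_and_uint32(&flags, ~FLAG_DIRTY);
 */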
/******************************************************************************/
/* 16-bit operations. */
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_2))
/* Signed */
ATOMIC_INLINE int16_t atomic_fetch_and_and_int16(int16_t *p, int16_t b)
{
  return __sync_fetch_and_and(p, b);
}

ATOMIC_INLINE int16_t atomic_fetch_and_or_int16(int16_t *p, int16_t b)
{
  return __sync_fetch_and_or(p, b);
}

#else
#  error "Missing implementation for 16-bit atomic operations"
#endif
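/* Usage sketch, not part of this header's API: fetch_and_and with a zero mask
 * reads and clears a value in one atomic step, e.g. draining a set of
 * pending-event bits (`pending` is an illustrative variable):
 *
 *   const int16_t taken = atomic_fetch_and_and_int16(&pending, 0);
 */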
/******************************************************************************/
/* 8-bit operations. */
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_1))
/* Unsigned */
ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b)
{
  return __sync_fetch_and_and(p, b);
}

ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
{
  return __sync_fetch_and_or(p, b);
}

/* Signed */
ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
{
  return __sync_fetch_and_and(p, b);
}

ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
{
  return __sync_fetch_and_or(p, b);
}

#else
#  error "Missing implementation for 8-bit atomic operations"
#endif
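/* Usage sketch, not part of this header's API: the 8-bit variants suit large
 * arrays of per-element flags written from multiple threads. Setting bit 0 as
 * a "visited" mark and testing the previous value dedupes work without locks
 * (`visited` and process_element() are illustrative names):
 *
 *   const uint8_t prev = atomic_fetch_and_or_uint8(&visited[i], 1);
 *   if (prev == 0) {
 *     process_element(i);
 *   }
 */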
#endif /* __ATOMIC_OPS_UNIX_H__ */