From c8ae8f2fb187fc690764e576d5dcc31637b1109c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Verschelde?=
Date: Sun, 1 Sep 2019 12:19:44 +0200
Subject: [PATCH] Revert "Add __atomic_* operators support for atomic operations"

---
 core/safe_refcount.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/core/safe_refcount.h b/core/safe_refcount.h
index 0b65ffb9ca3..54f540b0c72 100644
--- a/core/safe_refcount.h
+++ b/core/safe_refcount.h
@@ -97,8 +97,8 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V v
 
 /* Implementation for GCC & Clang */
 
-#include
-#include
+// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
+// Clang states it supports GCC atomic builtins.
 
 template <class T>
 static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
@@ -107,7 +107,7 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
 		T tmp = static_cast<T>(*pw);
 		if (tmp == 0)
 			return 0; // if zero, can't add to it anymore
-		if (__atomic_compare_exchange_n(pw, &tmp, tmp + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
+		if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
 			return tmp + 1;
 	}
 }
@@ -115,25 +115,25 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
 template <class T>
 static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
 
-	return __atomic_sub_fetch(pw, 1, __ATOMIC_SEQ_CST);
+	return __sync_sub_and_fetch(pw, 1);
 }
 
 template <class T>
 static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
 
-	return __atomic_add_fetch(pw, 1, __ATOMIC_SEQ_CST);
+	return __sync_add_and_fetch(pw, 1);
 }
 
 template <class T, class V>
 static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
 
-	return __atomic_sub_fetch(pw, val, __ATOMIC_SEQ_CST);
+	return __sync_sub_and_fetch(pw, val);
 }
 
 template <class T, class V>
 static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
 
-	return __atomic_add_fetch(pw, val, __ATOMIC_SEQ_CST);
+	return __sync_add_and_fetch(pw, val);
 }
 
 template <class T, class V>
@@ -143,7 +143,7 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V v
 		T tmp = static_cast<T>(*pw);
 		if (tmp >= val)
 			return tmp; // already greater, or equal
-		if (__atomic_compare_exchange_n(pw, &tmp, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
+		if (__sync_val_compare_and_swap(pw, tmp, val) == tmp)
 			return val;
 	}
 }
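
Note (not part of the patch): a minimal standalone sketch of the two GCC/Clang
builtin families this diff switches between, using a conditional-increment loop
shaped like the one in safe_refcount.h. The function names and the uint32_t
type are illustrative, not taken from the Godot source.

// Builds with GCC or Clang; both builtin families are compiler intrinsics,
// so no headers beyond <cstdint> are needed here.
#include <cstdint>

// __sync_* form (what this revert restores): the builtin returns the value
// that was observed at *pw, so success is detected by comparing it against
// the expected value.
static inline uint32_t conditional_increment_sync(volatile uint32_t *pw) {
	while (true) {
		uint32_t tmp = *pw;
		if (tmp == 0)
			return 0; // count already hit zero; don't revive it
		if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
			return tmp + 1;
	}
}

// __atomic_* form (what the reverted commit had introduced): the builtin
// returns a bool and, on failure, writes the value it observed back into tmp.
static inline uint32_t conditional_increment_atomic(volatile uint32_t *pw) {
	while (true) {
		uint32_t tmp = *pw;
		if (tmp == 0)
			return 0; // count already hit zero; don't revive it
		if (__atomic_compare_exchange_n(pw, &tmp, tmp + 1, false,
				__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return tmp + 1;
	}
}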