mirror of https://github.com/vlang/v.git synced 2023-08-10 21:13:21 +03:00

gc: fix msvc not using libatomic_ops (#15418)

Authored by Emily Hudson on 2022-08-14 11:16:52 +01:00; committed by GitHub
parent 8f98f1db9e
commit 90d9b200f9
78 changed files with 18859 additions and 2 deletions


@@ -0,0 +1,282 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
* Copyright (c) 2013-2017 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* As of clang-5.0 (and gcc-5.4), __atomic_thread_fence is always */
/* translated to DMB (which is inefficient for AO_nop_write). */
/* TODO: Update it for newer Clang and GCC releases. */
#if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(AO_THREAD_SANITIZER) \
&& !defined(AO_UNIPROCESSOR)
AO_INLINE void
AO_nop_write(void)
{
__asm__ __volatile__("dmb ishst" : : : "memory");
}
# define AO_HAVE_nop_write
#endif
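AO_nop_write only orders stores against later stores. As a hedged, client-side sketch (hypothetical names, assuming the public <atomic_ops.h> API; not part of the upstream header or of this commit), a producer can use it to publish a payload before raising a flag; the consumer still needs an acquire or read barrier of its own:
#include <atomic_ops.h>
static volatile AO_t payload;   /* hypothetical shared data */
static volatile AO_t ready;     /* hypothetical publication flag */
void publish(AO_t v)
{
  AO_store(&payload, v);        /* write the data */
  AO_nop_write();               /* "dmb ishst": order the two stores */
  AO_store(&ready, 1);          /* readers pair this with AO_load_acquire(&ready) */
}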
/* There were some bugs in older clang releases (related to the */
/* optimization of functions dealing with __int128 values, supposedly), */
/* so even the asm-based implementation did not work correctly. */
#if !defined(__clang__) || AO_CLANG_PREREQ(3, 9)
# include "../standard_ao_double_t.h"
/* As of gcc-5.4, all built-in load/store and CAS atomics for double */
/* word require -latomic, are not lock-free and cause test_stack */
/* failure, so the asm-based implementation is used for now. */
/* TODO: Update it for newer GCC releases. */
#if (!defined(__ILP32__) && !defined(__clang__)) \
|| defined(AO_AARCH64_ASM_LOAD_STORE_CAS)
# ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
int status;
/* Note that STXP cannot be discarded because LD[A]XP is not */
/* single-copy atomic (unlike LDREXD for 32-bit ARM). */
do {
__asm__ __volatile__("//AO_double_load\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %3\n"
" stxp %w2, %w0, %w1, %3"
# else
" ldxp %0, %1, %3\n"
" stxp %w2, %0, %1, %3"
# endif
: "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
: "Q" (*addr));
} while (AO_EXPECT_FALSE(status));
return result;
}
# define AO_HAVE_double_load
AO_INLINE AO_double_t
AO_double_load_acquire(const volatile AO_double_t *addr)
{
AO_double_t result;
int status;
do {
__asm__ __volatile__("//AO_double_load_acquire\n"
# ifdef __ILP32__
" ldaxp %w0, %w1, %3\n"
" stxp %w2, %w0, %w1, %3"
# else
" ldaxp %0, %1, %3\n"
" stxp %w2, %0, %1, %3"
# endif
: "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
: "Q" (*addr));
} while (AO_EXPECT_FALSE(status));
return result;
}
# define AO_HAVE_double_load_acquire
AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t value)
{
AO_double_t old_val;
int status;
do {
__asm__ __volatile__("//AO_double_store\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %3\n"
" stxp %w2, %w4, %w5, %3"
# else
" ldxp %0, %1, %3\n"
" stxp %w2, %4, %5, %3"
# endif
: "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
"=Q" (*addr)
: "r" (value.AO_val1), "r" (value.AO_val2));
/* Compared to the arm.h implementation, the 'cc' (flags) are */
/* not clobbered because A64 has no concept of conditional */
/* execution. */
} while (AO_EXPECT_FALSE(status));
}
# define AO_HAVE_double_store
AO_INLINE void
AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
{
AO_double_t old_val;
int status;
do {
__asm__ __volatile__("//AO_double_store_release\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %3\n"
" stlxp %w2, %w4, %w5, %3"
# else
" ldxp %0, %1, %3\n"
" stlxp %w2, %4, %5, %3"
# endif
: "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
"=Q" (*addr)
: "r" (value.AO_val1), "r" (value.AO_val2));
} while (AO_EXPECT_FALSE(status));
}
# define AO_HAVE_double_store_release
# endif /* !AO_PREFER_GENERALIZED */
AO_INLINE int
AO_double_compare_and_swap(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %2\n"
# else
" ldxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stxp %w0, %w2, %w3, %1\n"
# else
" stxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap
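To make the use of this double-word primitive concrete, here is a hedged sketch of the usual pointer-plus-tag update that sidesteps ABA problems (hypothetical helper, not part of the upstream file); AO_val1/AO_val2 are the halves of AO_double_t used above, and AO_double_load is assumed to be available here or via the generalization layer:
void set_tagged_ptr(volatile AO_double_t *cell, AO_t new_ptr)
{
  AO_double_t old_v, new_v;
  do {
    old_v = AO_double_load(cell);        /* LDXP/STXP loop above */
    new_v.AO_val1 = new_ptr;             /* pointer half */
    new_v.AO_val2 = old_v.AO_val2 + 1;   /* monotonically growing tag */
  } while (!AO_double_compare_and_swap(cell, old_v, new_v));
}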
AO_INLINE int
AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap_acquire\n"
# ifdef __ILP32__
" ldaxp %w0, %w1, %2\n"
# else
" ldaxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stxp %w0, %w2, %w3, %1\n"
# else
" stxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap_acquire
AO_INLINE int
AO_double_compare_and_swap_release(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap_release\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %2\n"
# else
" ldxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stlxp %w0, %w2, %w3, %1\n"
# else
" stlxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap_release
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap_full\n"
# ifdef __ILP32__
" ldaxp %w0, %w1, %2\n"
# else
" ldaxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stlxp %w0, %w2, %w3, %1\n"
# else
" stlxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap_full
#endif /* !__ILP32__ && !__clang__ || AO_AARCH64_ASM_LOAD_STORE_CAS */
/* As of clang-5.0 and gcc-8.1, __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 */
/* macro is still missing (while the double-word CAS is available). */
# ifndef __ILP32__
# define AO_GCC_HAVE_double_SYNC_CAS
# endif
#endif /* !__clang__ || AO_CLANG_PREREQ(3, 9) */
#if (defined(__clang__) && !AO_CLANG_PREREQ(3, 8)) || defined(__APPLE_CC__)
/* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros are missing. */
# define AO_GCC_FORCE_HAVE_CAS
#endif
#include "generic.h"
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_GCC_HAVE_double_SYNC_CAS


@@ -0,0 +1,67 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#include "../loadstore/atomic_load.h"
#include "../loadstore/atomic_store.h"
#include "../test_and_set_t_is_ao_t.h"
#define AO_NO_DD_ORDERING
/* Data dependence does not imply read ordering. */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mb" : : : "memory");
}
#define AO_HAVE_nop_full
AO_INLINE void
AO_nop_write(void)
{
__asm__ __volatile__("wmb" : : : "memory");
}
#define AO_HAVE_nop_write
/* mb should be used for AO_nop_read(). That's the default. */
/* TODO: implement AO_fetch_and_add explicitly. */
/* We believe that ldq_l ... stq_c does not imply any memory barrier. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
unsigned long was_equal;
unsigned long temp;
__asm__ __volatile__(
"1: ldq_l %0,%1\n"
" cmpeq %0,%4,%2\n"
" mov %3,%0\n"
" beq %2,2f\n"
" stq_c %0,%1\n"
" beq %0,1b\n"
"2:\n"
: "=&r" (temp), "+m" (*addr), "=&r" (was_equal)
: "r" (new_val), "Ir" (old)
:"memory");
return (int)was_equal;
}
#define AO_HAVE_compare_and_swap
/* TODO: implement AO_fetch_compare_and_swap */
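Because the LL/SC pair above carries no fences, callers get only an unordered compare-and-swap. A hedged sketch of the typical retry loop built on it, here a relaxed counter increment (hypothetical helper, not from the upstream file); AO_nop_full supplies the "mb" fences where ordering is required:
AO_t counter_inc(volatile AO_t *ctr)
{
  AO_t old_v;
  do {
    old_v = AO_load(ctr);                /* plain atomic read */
  } while (!AO_compare_and_swap(ctr, old_v, old_v + 1));
  return old_v + 1;                      /* the value we installed */
}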


@@ -0,0 +1,742 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
* Copyright (c) 2008-2017 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 5)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled even for earlier gcc/clang versions. */
# define AO_GCC_ATOMIC_TEST_AND_SET
#endif
#ifdef __native_client__
/* Mask instruction should immediately precede access instruction. */
# define AO_MASK_PTR(reg) " bical " reg ", " reg ", #0xc0000000\n"
# define AO_BR_ALIGN " .align 4\n"
#else
# define AO_MASK_PTR(reg) /* empty */
# define AO_BR_ALIGN /* empty */
#endif
#if defined(__thumb__) && !defined(__thumb2__)
/* Thumb One mode does not have ARM "mcr", "swp" and some load/store */
/* instructions, so we temporarily switch to ARM mode and go back */
/* afterwards (clobbering "r3" register). */
# define AO_THUMB_GO_ARM \
" adr r3, 4f\n" \
" bx r3\n" \
" .align\n" \
" .arm\n" \
AO_BR_ALIGN \
"4:\n"
# define AO_THUMB_RESTORE_MODE \
" adr r3, 5f + 1\n" \
" bx r3\n" \
" .thumb\n" \
AO_BR_ALIGN \
"5:\n"
# define AO_THUMB_SWITCH_CLOBBERS "r3",
#else
# define AO_THUMB_GO_ARM /* empty */
# define AO_THUMB_RESTORE_MODE /* empty */
# define AO_THUMB_SWITCH_CLOBBERS /* empty */
#endif /* !__thumb__ */
/* NEC LE-IT: gcc has no way to easily check the arm architecture */
/* but it defines only one (or several) of __ARM_ARCH_x__ to be true. */
#if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_3__) \
&& !defined(__ARM_ARCH_3M__) && !defined(__ARM_ARCH_4__) \
&& !defined(__ARM_ARCH_4T__) \
&& ((!defined(__ARM_ARCH_5__) && !defined(__ARM_ARCH_5E__) \
&& !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__) \
&& !defined(__ARM_ARCH_5TEJ__) && !defined(__ARM_ARCH_6M__)) \
|| defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|| defined(__ARM_ARCH_8A__))
# define AO_ARM_HAVE_LDREX
# if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \
&& !defined(__ARM_ARCH_6T2__)
/* LDREXB/STREXB and LDREXH/STREXH are present in ARMv6K/Z+. */
# define AO_ARM_HAVE_LDREXBH
# endif
# if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \
&& !defined(__ARM_ARCH_6T2__) && !defined(__ARM_ARCH_6Z__) \
&& !defined(__ARM_ARCH_6ZT2__)
# if !defined(__ARM_ARCH_6K__) && !defined(__ARM_ARCH_6KZ__) \
&& !defined(__ARM_ARCH_6ZK__)
/* DMB is present in ARMv6M and ARMv7+. */
# define AO_ARM_HAVE_DMB
# endif
# if (!defined(__thumb__) \
|| (defined(__thumb2__) && !defined(__ARM_ARCH_7__) \
&& !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__))) \
&& (!defined(__clang__) || AO_CLANG_PREREQ(3, 3))
/* LDREXD/STREXD present in ARMv6K/M+ (see gas/config/tc-arm.c). */
/* In the Thumb mode, this works only starting from ARMv7 (except */
/* for the base and 'M' models). Clang3.2 (and earlier) does not */
/* allocate register pairs for LDREXD/STREXD properly (besides, */
/* Clang3.1 does not support "%H<r>" operand specification). */
# define AO_ARM_HAVE_LDREXD
# endif /* !thumb || ARMv7A || ARMv7R+ */
# endif /* ARMv7+ */
#endif /* ARMv6+ */
#if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_6M__) \
&& !defined(__ARM_ARCH_8A__) && !defined(__thumb2__)
# define AO_ARM_HAVE_SWP
/* Note: ARMv6M is excluded due to no ARM mode support. */
/* Also, SWP is obsoleted for ARMv8+. */
#endif /* !__thumb2__ */
#if !defined(AO_UNIPROCESSOR) && defined(AO_ARM_HAVE_DMB) \
&& !defined(AO_PREFER_BUILTIN_ATOMICS)
AO_INLINE void
AO_nop_write(void)
{
/* AO_THUMB_GO_ARM is empty. */
/* This will target the system domain and thus be overly */
/* conservative as the CPUs (even in case of big.LITTLE SoC) will */
/* occupy the inner shareable domain. */
/* The plain variant (dmb st) is theoretically slower, and should */
/* not be needed. That said, with limited experimentation, a CPU */
/* implementation for which it actually matters has not been found */
/* yet, though they should already exist. */
/* Anyway, note that the "st" and "ishst" barriers are actually */
/* quite weak and, as the libatomic_ops documentation states, */
/* usually not what you really want. */
__asm__ __volatile__("dmb ishst" : : : "memory");
}
# define AO_HAVE_nop_write
#endif /* AO_ARM_HAVE_DMB */
#ifndef AO_GCC_ATOMIC_TEST_AND_SET
#ifdef AO_UNIPROCESSOR
/* If only a single processor (core) is used, AO_UNIPROCESSOR could */
/* be defined by the client to avoid unnecessary memory barrier. */
AO_INLINE void
AO_nop_full(void)
{
AO_compiler_barrier();
}
# define AO_HAVE_nop_full
#elif defined(AO_ARM_HAVE_DMB)
/* ARMv7 is compatible with ARMv6 but has a simpler command for issuing */
/* a memory barrier (DMB). Raising it via CP15 should still work, */
/* though slightly less efficiently, because it requires the use of */
/* a general-purpose register. */
AO_INLINE void
AO_nop_full(void)
{
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("dmb" : : : "memory");
}
# define AO_HAVE_nop_full
#elif defined(AO_ARM_HAVE_LDREX)
/* ARMv6 is the first architecture providing support for a simple */
/* LL/SC. A data memory barrier must be raised via CP15 command. */
AO_INLINE void
AO_nop_full(void)
{
unsigned dest = 0;
/* Issue a data memory barrier (keeps ordering of memory */
/* transactions before and after this operation). */
__asm__ __volatile__("@AO_nop_full\n"
AO_THUMB_GO_ARM
" mcr p15,0,%0,c7,c10,5\n"
AO_THUMB_RESTORE_MODE
: "=&r"(dest)
: /* empty */
: AO_THUMB_SWITCH_CLOBBERS "memory");
}
# define AO_HAVE_nop_full
#else
/* AO_nop_full() is emulated using AO_test_and_set_full(). */
#endif /* !AO_UNIPROCESSOR && !AO_ARM_HAVE_LDREX */
#endif /* !AO_GCC_ATOMIC_TEST_AND_SET */
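Whichever of the definitions above gets selected, AO_nop_full is the barrier needed for the classic store-then-load handshake. A hedged sketch with hypothetical names (not part of the upstream file):
static volatile AO_t my_flag, peer_flag; /* hypothetical Dekker-style flags */
int try_enter(void)
{
  AO_store(&my_flag, 1);
  AO_nop_full();                         /* make the store visible before the load */
  return AO_load(&peer_flag) == 0;       /* enter only if the peer has not raised */
}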
#ifdef AO_ARM_HAVE_LDREX
/* "ARM Architecture Reference Manual" (chapter A3.5.3) says that the */
/* single-copy atomic processor accesses are all byte accesses, all */
/* halfword accesses to halfword-aligned locations, all word accesses */
/* to word-aligned locations. */
/* There is only a single concern related to AO store operations: */
/* a direct write (by STR[B/H] instruction) will not be recognized */
/* by the LL/SC construct on the same CPU (i.e., according to ARM */
/* documentation, e.g., see CortexA8 TRM reference, point 8.5, */
/* atomic "store" (using LDREX/STREX[B/H]) is the only safe way to */
/* set variables also used in LL/SC environment). */
/* This is only a problem if interrupt handlers do not clear the */
/* reservation (by CLREX instruction or a dummy STREX one), as they */
/* almost certainly should (e.g., see restore_user_regs defined in */
/* arch/arm/kernel/entry-header.S of Linux). Nonetheless, there is */
/* a doubt this was properly implemented in some ancient OS releases. */
# ifdef AO_BROKEN_TASKSWITCH_CLREX
# define AO_SKIPATOMIC_store
# define AO_SKIPATOMIC_store_release
# define AO_SKIPATOMIC_char_store
# define AO_SKIPATOMIC_char_store_release
# define AO_SKIPATOMIC_short_store
# define AO_SKIPATOMIC_short_store_release
# define AO_SKIPATOMIC_int_store
# define AO_SKIPATOMIC_int_store_release
# ifndef AO_PREFER_BUILTIN_ATOMICS
AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
{
int flag;
__asm__ __volatile__("@AO_store\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%2")
" ldrex %0, [%2]\n"
AO_MASK_PTR("%2")
" strex %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (flag), "+m" (*addr)
: "r" (addr), "r" (value)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
# define AO_HAVE_store
# ifdef AO_ARM_HAVE_LDREXBH
AO_INLINE void AO_char_store(volatile unsigned char *addr,
unsigned char value)
{
int flag;
__asm__ __volatile__("@AO_char_store\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%2")
" ldrexb %0, [%2]\n"
AO_MASK_PTR("%2")
" strexb %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (flag), "+m" (*addr)
: "r" (addr), "r" (value)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
# define AO_HAVE_char_store
AO_INLINE void AO_short_store(volatile unsigned short *addr,
unsigned short value)
{
int flag;
__asm__ __volatile__("@AO_short_store\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%2")
" ldrexh %0, [%2]\n"
AO_MASK_PTR("%2")
" strexh %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (flag), "+m" (*addr)
: "r" (addr), "r" (value)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
# define AO_HAVE_short_store
# endif /* AO_ARM_HAVE_LDREXBH */
# endif /* !AO_PREFER_BUILTIN_ATOMICS */
# elif !defined(AO_GCC_ATOMIC_TEST_AND_SET)
# include "../loadstore/atomic_store.h"
/* AO_int_store is defined in ao_t_is_int.h. */
# endif /* !AO_BROKEN_TASKSWITCH_CLREX */
#endif /* AO_ARM_HAVE_LDREX */
#ifndef AO_GCC_ATOMIC_TEST_AND_SET
# include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
#ifdef AO_ARM_HAVE_LDREX
/* AO_t/char/short/int load is simple reading. */
/* Unaligned accesses are not guaranteed to be atomic. */
# define AO_ACCESS_CHECK_ALIGNED
# define AO_ACCESS_short_CHECK_ALIGNED
# define AO_ACCESS_int_CHECK_ALIGNED
# include "../all_atomic_only_load.h"
# ifndef AO_HAVE_char_store
# include "../loadstore/char_atomic_store.h"
# include "../loadstore/short_atomic_store.h"
# endif
/* NEC LE-IT: replace the SWAP as recommended by ARM:
"Applies to: ARM11 Cores
Though the SWP instruction will still work with ARM V6 cores, it is
recommended to use the new V6 synchronization instructions. The SWP
instruction produces 'locked' read and write accesses which are atomic,
i.e. another operation cannot be done between these locked accesses which
ties up external bus (AHB, AXI) bandwidth and can increase worst case
interrupt latencies. LDREX, STREX are more flexible, other instructions
can be done between the LDREX and STREX accesses."
*/
#ifndef AO_PREFER_GENERALIZED
#if !defined(AO_FORCE_USE_SWP) || !defined(AO_ARM_HAVE_SWP)
/* But, on the other hand, there could be considerable performance */
/* degradation in case of a race. E.g., test_atomic.c running the */
/* test_and_set test on a dual-core ARMv7 processor using LDREX/STREX */
/* showed around 35 times lower performance than the SWP variant. */
/* To force use of the SWP instruction, pass the -D AO_FORCE_USE_SWP */
/* option (ignored if the SWP instruction is unsupported). */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
AO_TS_VAL_t oldval;
int flag;
__asm__ __volatile__("@AO_test_and_set\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%3")
" ldrex %0, [%3]\n"
AO_MASK_PTR("%3")
" strex %1, %4, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(oldval), "=&r"(flag), "+m"(*addr)
: "r"(addr), "r"(1)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return oldval;
}
# define AO_HAVE_test_and_set
#endif /* !AO_FORCE_USE_SWP */
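The usual consumer of this primitive is a spinlock. A hedged sketch (not from the upstream file), relying on the acquire variant and AO_CLEAR that the generalization layer derives from the definition above:
static volatile AO_TS_t lock = AO_TS_INITIALIZER;
void spin_lock(void)
{
  while (AO_test_and_set_acquire(&lock) == AO_TS_SET)
    ;                                    /* spin until the flag is observed clear */
}
void spin_unlock(void)
{
  AO_CLEAR(&lock);                       /* release store of AO_TS_CLEAR */
}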
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *p, AO_t incr)
{
AO_t result, tmp;
int flag;
__asm__ __volatile__("@AO_fetch_and_add\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%5")
" ldrex %0, [%5]\n" /* get original */
" add %2, %0, %4\n" /* sum up in incr */
AO_MASK_PTR("%5")
" strex %1, %2, [%5]\n" /* store them */
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(flag), "=&r"(tmp), "+m"(*p) /* 0..3 */
: "r"(incr), "r"(p) /* 4..5 */
: AO_THUMB_SWITCH_CLOBBERS "cc");
return result;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_t
AO_fetch_and_add1(volatile AO_t *p)
{
AO_t result, tmp;
int flag;
__asm__ __volatile__("@AO_fetch_and_add1\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n" /* get original */
" add %1, %0, #1\n" /* increment */
AO_MASK_PTR("%4")
" strex %2, %1, [%4]\n" /* store them */
" teq %2, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p)
: "r"(p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return result;
}
#define AO_HAVE_fetch_and_add1
AO_INLINE AO_t
AO_fetch_and_sub1(volatile AO_t *p)
{
AO_t result, tmp;
int flag;
__asm__ __volatile__("@AO_fetch_and_sub1\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n" /* get original */
" sub %1, %0, #1\n" /* decrement */
AO_MASK_PTR("%4")
" strex %2, %1, [%4]\n" /* store them */
" teq %2, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p)
: "r"(p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return result;
}
#define AO_HAVE_fetch_and_sub1
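A hedged example of what these two primitives are typically used for (hypothetical types, not part of the upstream file): reference counting, with the full-barrier decrement taken from the generalization layer rather than the relaxed versions defined here:
typedef struct { volatile AO_t refs; /* ... payload ... */ } object_t;
void obj_retain(object_t *o)
{
  AO_fetch_and_add1(&o->refs);           /* a relaxed bump is enough */
}
void obj_release(object_t *o)
{
  if (AO_fetch_and_sub1_full(&o->refs) == 1) {
    /* the last reference was dropped; safe to reclaim */
    /* free(o); */
  }
}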
AO_INLINE void
AO_and(volatile AO_t *p, AO_t value)
{
AO_t tmp, result;
__asm__ __volatile__("@AO_and\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n"
" and %1, %0, %3\n"
AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (tmp), "=&r" (result), "+m" (*p)
: "r" (value), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
#define AO_HAVE_and
AO_INLINE void
AO_or(volatile AO_t *p, AO_t value)
{
AO_t tmp, result;
__asm__ __volatile__("@AO_or\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n"
" orr %1, %0, %3\n"
AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (tmp), "=&r" (result), "+m" (*p)
: "r" (value), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
#define AO_HAVE_or
AO_INLINE void
AO_xor(volatile AO_t *p, AO_t value)
{
AO_t tmp, result;
__asm__ __volatile__("@AO_xor\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n"
" eor %1, %0, %3\n"
AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (tmp), "=&r" (result), "+m" (*p)
: "r" (value), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
#define AO_HAVE_xor
#endif /* !AO_PREFER_GENERALIZED */
#ifdef AO_ARM_HAVE_LDREXBH
AO_INLINE unsigned char
AO_char_fetch_and_add(volatile unsigned char *p, unsigned char incr)
{
unsigned result, tmp;
int flag;
__asm__ __volatile__("@AO_char_fetch_and_add\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%5")
" ldrexb %0, [%5]\n"
" add %2, %0, %4\n"
AO_MASK_PTR("%5")
" strexb %1, %2, [%5]\n"
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (result), "=&r" (flag), "=&r" (tmp), "+m" (*p)
: "r" ((unsigned)incr), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return (unsigned char)result;
}
# define AO_HAVE_char_fetch_and_add
AO_INLINE unsigned short
AO_short_fetch_and_add(volatile unsigned short *p, unsigned short incr)
{
unsigned result, tmp;
int flag;
__asm__ __volatile__("@AO_short_fetch_and_add\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%5")
" ldrexh %0, [%5]\n"
" add %2, %0, %4\n"
AO_MASK_PTR("%5")
" strexh %1, %2, [%5]\n"
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (result), "=&r" (flag), "=&r" (tmp), "+m" (*p)
: "r" ((unsigned)incr), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return (unsigned short)result;
}
# define AO_HAVE_short_fetch_and_add
#endif /* AO_ARM_HAVE_LDREXBH */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t result, tmp;
__asm__ __volatile__("@AO_compare_and_swap\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: mov %0, #2\n" /* store a flag */
AO_MASK_PTR("%3")
" ldrex %1, [%3]\n" /* get original */
" teq %1, %4\n" /* see if match */
AO_MASK_PTR("%3")
# ifdef __thumb2__
/* TODO: Eliminate warning: it blocks containing wide Thumb */
/* instructions are deprecated in ARMv8. */
" it eq\n"
# endif
" strexeq %0, %5, [%3]\n" /* store new one if matched */
" teq %0, #1\n"
" beq 1b\n" /* if update failed, repeat */
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(tmp), "+m"(*addr)
: "r"(addr), "r"(old_val), "r"(new_val)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return !(result&2); /* if succeeded then return 1 else 0 */
}
# define AO_HAVE_compare_and_swap
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t fetched_val;
int flag;
__asm__ __volatile__("@AO_fetch_compare_and_swap\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: mov %0, #2\n" /* store a flag */
AO_MASK_PTR("%3")
" ldrex %1, [%3]\n" /* get original */
" teq %1, %4\n" /* see if match */
AO_MASK_PTR("%3")
# ifdef __thumb2__
" it eq\n"
# endif
" strexeq %0, %5, [%3]\n" /* store new one if matched */
" teq %0, #1\n"
" beq 1b\n" /* if update failed, repeat */
AO_THUMB_RESTORE_MODE
: "=&r"(flag), "=&r"(fetched_val), "+m"(*addr)
: "r"(addr), "r"(old_val), "r"(new_val)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
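Compared with the boolean CAS above, this variant hands back the value it observed, so a retry loop can reuse it instead of reloading the location. A hedged sketch (hypothetical helper, not from the upstream file):
AO_t atomic_max(volatile AO_t *loc, AO_t v)   /* raise *loc to at least v */
{
  AO_t cur = AO_load(loc);
  while (cur < v) {
    AO_t seen = AO_fetch_compare_and_swap(loc, cur, v);
    if (seen == cur)
      return v;                          /* the swap succeeded */
    cur = seen;                          /* retry with the freshly observed value */
  }
  return cur;
}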
#ifdef AO_ARM_HAVE_LDREXD
# include "../standard_ao_double_t.h"
/* "ARM Architecture Reference Manual ARMv7-A/R edition" (chapter */
/* A3.5.3) says that memory accesses caused by LDREXD and STREXD */
/* instructions to doubleword-aligned locations are single-copy */
/* atomic; accesses to 64-bit elements by other instructions might */
/* not be single-copy atomic as they are executed as a sequence of */
/* 32-bit accesses. */
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_load\n"
AO_MASK_PTR("%1")
" ldrexd %0, %H0, [%1]"
: "=&r" (result.AO_whole)
: "r" (addr)
/* : no clobber */);
return result;
}
# define AO_HAVE_double_load
AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t new_val)
{
AO_double_t old_val;
int status;
do {
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_store\n"
AO_MASK_PTR("%3")
" ldrexd %0, %H0, [%3]\n"
AO_MASK_PTR("%3")
" strexd %1, %4, %H4, [%3]"
: "=&r" (old_val.AO_whole), "=&r" (status), "+m" (*addr)
: "r" (addr), "r" (new_val.AO_whole)
: "cc");
} while (AO_EXPECT_FALSE(status));
}
# define AO_HAVE_double_store
AO_INLINE int
AO_double_compare_and_swap(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
double_ptr_storage tmp;
int result = 1;
do {
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_compare_and_swap\n"
AO_MASK_PTR("%1")
" ldrexd %0, %H0, [%1]\n" /* get original to r1 & r2 */
: "=&r"(tmp)
: "r"(addr)
/* : no clobber */);
if (tmp != old_val.AO_whole)
break;
__asm__ __volatile__(
AO_MASK_PTR("%2")
" strexd %0, %3, %H3, [%2]\n" /* store new one if matched */
: "=&r"(result), "+m"(*addr)
: "r" (addr), "r" (new_val.AO_whole)
: "cc");
} while (AO_EXPECT_FALSE(result));
return !result; /* if succeeded then return 1 else 0 */
}
# define AO_HAVE_double_compare_and_swap
#endif /* AO_ARM_HAVE_LDREXD */
#else
/* pre ARMv6 architectures ... */
/* I found a slide set that, if I read it correctly, claims that */
/* Loads followed by either a Load or Store are ordered, but nothing */
/* else is. */
/* It appears that SWP is the only simple memory barrier. */
#include "../all_aligned_atomic_load_store.h"
/* The code should run correctly on a multi-core ARMv6+ as well. */
#endif /* !AO_ARM_HAVE_LDREX */
#if !defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_test_and_set) \
&& defined (AO_ARM_HAVE_SWP) && (!defined(AO_PREFER_GENERALIZED) \
|| !defined(AO_HAVE_fetch_compare_and_swap))
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
AO_TS_VAL_t oldval;
/* SWP on ARM is very similar to XCHG on x86. */
/* The first operand is the result, the second the value */
/* to be stored. Both registers must be different from addr. */
/* Make the address operand an early clobber output so it */
/* doesn't overlap with the other operands. The early clobber */
/* on oldval is necessary to prevent the compiler allocating */
/* them to the same register if they are both unused. */
__asm__ __volatile__("@AO_test_and_set_full\n"
AO_THUMB_GO_ARM
AO_MASK_PTR("%3")
" swp %0, %2, [%3]\n"
/* Ignore GCC "SWP is deprecated for this architecture" */
/* warning here (for ARMv6+). */
AO_THUMB_RESTORE_MODE
: "=&r"(oldval), "=&r"(addr)
: "r"(1), "1"(addr)
: AO_THUMB_SWITCH_CLOBBERS "memory");
return oldval;
}
# define AO_HAVE_test_and_set_full
#endif /* !AO_HAVE_test_and_set[_full] && AO_ARM_HAVE_SWP */
#define AO_T_IS_INT
#else /* AO_GCC_ATOMIC_TEST_AND_SET */
# if defined(__clang__) && !defined(AO_ARM_HAVE_LDREX)
/* As of clang-3.8, it cannot compile __atomic_and/or/xor_fetch */
/* library calls yet for pre ARMv6. */
# define AO_SKIPATOMIC_ANY_and_ANY
# define AO_SKIPATOMIC_ANY_or_ANY
# define AO_SKIPATOMIC_ANY_xor_ANY
# endif
# ifdef AO_ARM_HAVE_LDREXD
# include "../standard_ao_double_t.h"
# endif
# include "generic.h"
#endif /* AO_GCC_ATOMIC_TEST_AND_SET */
#undef AO_ARM_HAVE_DMB
#undef AO_ARM_HAVE_LDREX
#undef AO_ARM_HAVE_LDREXBH
#undef AO_ARM_HAVE_LDREXD
#undef AO_ARM_HAVE_SWP
#undef AO_BR_ALIGN
#undef AO_MASK_PTR
#undef AO_SKIPATOMIC_ANY_and_ANY
#undef AO_SKIPATOMIC_ANY_or_ANY
#undef AO_SKIPATOMIC_ANY_xor_ANY
#undef AO_SKIPATOMIC_char_store
#undef AO_SKIPATOMIC_char_store_release
#undef AO_SKIPATOMIC_int_store
#undef AO_SKIPATOMIC_int_store_release
#undef AO_SKIPATOMIC_short_store
#undef AO_SKIPATOMIC_short_store_release
#undef AO_SKIPATOMIC_store
#undef AO_SKIPATOMIC_store_release
#undef AO_THUMB_GO_ARM
#undef AO_THUMB_RESTORE_MODE
#undef AO_THUMB_SWITCH_CLOBBERS


@@ -0,0 +1,71 @@
/*
* Copyright (C) 2009 Bradley Smith <brad@brad-smith.co.uk>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "../all_atomic_load_store.h"
#include "../ordered.h" /* There are no multiprocessor implementations. */
#include "../test_and_set_t_is_ao_t.h"
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
register long ret;
__asm__ __volatile__(
"xchg %[oldval], %[mem], %[newval]"
: [oldval] "=&r"(ret)
: [mem] "r"(addr), [newval] "r"(1)
: "memory");
return (AO_TS_VAL_t)ret;
}
# define AO_HAVE_test_and_set_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
register long ret;
__asm__ __volatile__(
"1: ssrf 5\n"
" ld.w %[res], %[mem]\n"
" eor %[res], %[oldval]\n"
" brne 2f\n"
" stcond %[mem], %[newval]\n"
" brne 1b\n"
"2:\n"
: [res] "=&r"(ret), [mem] "=m"(*addr)
: "m"(*addr), [newval] "r"(new_val), [oldval] "r"(old)
: "cc", "memory");
return (int)ret;
}
#define AO_HAVE_compare_and_swap_full
/* TODO: implement AO_fetch_compare_and_swap. */
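A hedged sketch of how the missing operation could be emulated on top of the boolean CAS above (hypothetical, not part of the upstream file or of this commit):
AO_INLINE AO_t
fetch_compare_and_swap_emulated(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
  AO_t cur;
  do {
    cur = AO_load(addr);
    if (cur != old_val)
      return cur;                        /* mismatch: report what was seen */
  } while (!AO_compare_and_swap_full(addr, old_val, new_val));
  return old_val;                        /* the swap succeeded */
}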
#define AO_T_IS_INT


@@ -0,0 +1,65 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* FIXME: seems to be untested. */
#include "../all_atomic_load_store.h"
#include "../ordered.h" /* There are no multiprocessor implementations. */
#include "../test_and_set_t_is_ao_t.h"
/*
* The architecture apparently supports an "f" flag which is
* set on preemption. This essentially gives us load-locked,
* store-conditional primitives, though I'm not quite sure how
* this would work on a hypothetical multiprocessor. -HB
*
* For details, see
* http://developer.axis.com/doc/hardware/etrax100lx/prog_man/
* 1_architectural_description.pdf
*
* TODO: Presumably many other primitives (notably CAS, including the double-
* width versions) could be implemented in this manner, if someone got
* around to it.
*/
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
/* Ripped from linuxthreads/sysdeps/cris/pt-machine.h */
register unsigned long int ret;
/* Note the use of a dummy output of *addr to expose the write. The
memory barrier is to stop *other* writes being moved past this code. */
__asm__ __volatile__("clearf\n"
"0:\n\t"
"movu.b [%2],%0\n\t"
"ax\n\t"
"move.b %3,[%2]\n\t"
"bwf 0b\n\t"
"clearf"
: "=&r" (ret), "=m" (*addr)
: "r" (addr), "r" ((int) 1), "m" (*addr)
: "memory");
return ret;
}
#define AO_HAVE_test_and_set_full
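As a hedged usage sketch (hypothetical names, not from the upstream file), the full-barrier test-and-set above is enough to build a run-once guard for idempotent setup (it does not make later callers wait for fn to finish):
static volatile AO_TS_t once = AO_TS_INITIALIZER;
void init_once(void (*fn)(void))
{
  if (AO_test_and_set_full(&once) == AO_TS_CLEAR)
    fn();                                /* only the first caller sees AO_TS_CLEAR */
}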


@@ -0,0 +1,864 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_char_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_xor
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_short_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_xor
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_int_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_xor
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_xor
#endif
#endif /* !AO_NO_ARITHM */
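These wrappers are thin shims over the compiler built-ins, so the two calls below should compile to the same relaxed atomic add. A hedged sketch (hypothetical function, not part of the generated header):
void add_two_ways(volatile AO_t *ctr)
{
  AO_fetch_and_add(ctr, 2);                              /* shim defined above */
  (void)__atomic_fetch_add(ctr, 2, __ATOMIC_RELAXED);    /* direct built-in */
}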
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_char_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_xor_acquire
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_short_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_xor_acquire
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_int_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and_acquire(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or_acquire(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor_acquire(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_xor_acquire
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and_acquire(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or_acquire(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor_acquire(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_xor_acquire
#endif
#endif /* !AO_NO_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_char_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_xor_release
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_release(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_short_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_xor_release
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_int_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and_release(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or_release(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor_release(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_xor_release
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and_release(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or_release(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor_release(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_xor_release
#endif
#endif /* !AO_NO_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_char_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_char_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_char_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_char_xor_full
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_full(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_short_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_short_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_short_xor_full
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_int_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and_full(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_int_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or_full(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_int_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor_full(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_int_xor_full
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and_full(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or_full(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor_full(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_xor_full
#endif
#endif /* !AO_NO_ARITHM */
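
The fetch-and-add and bitwise primitives above are thin wrappers over the GCC __atomic built-ins, so a client just picks the barrier suffix it needs. A minimal usage sketch, assuming the installed header is named atomic_ops.h and POSIX threads are available (neither is part of this diff):

#include <stdio.h>
#include <pthread.h>
#include "atomic_ops.h"   /* header name assumed; adjust to the local install */

static volatile AO_t counter = 0;

static void *worker(void *arg)
{
    int i;
    (void)arg;
    for (i = 0; i < 100000; ++i) {
        /* With the generic backend above this expands to
           __atomic_fetch_add(..., __ATOMIC_SEQ_CST). */
        (void)AO_fetch_and_add_full(&counter, 1);
    }
    return 0;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, 0, worker, 0);
    pthread_create(&t2, 0, worker, 0);
    pthread_join(t1, 0);
    pthread_join(t2, 0);
    printf("counter = %lu\n", (unsigned long)AO_load(&counter));
    return 0;
}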


@@ -0,0 +1,54 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_XSIZE_ARITHM
AO_INLINE XCTYPE
AO_XSIZE_fetch_and_add_XBAR(volatile XCTYPE *addr, XCTYPE incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_XGCCBAR);
}
#define AO_HAVE_XSIZE_fetch_and_add_XBAR
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_XSIZE_and_XBAR(volatile XCTYPE *addr, XCTYPE value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_XGCCBAR);
}
# define AO_HAVE_XSIZE_and_XBAR
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_XSIZE_or_XBAR(volatile XCTYPE *addr, XCTYPE value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_XGCCBAR);
}
# define AO_HAVE_XSIZE_or_XBAR
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_XSIZE_xor_XBAR(volatile XCTYPE *addr, XCTYPE value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_XGCCBAR);
}
# define AO_HAVE_XSIZE_xor_XBAR
#endif
#endif /* !AO_NO_XSIZE_ARITHM */


@@ -0,0 +1,632 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_char_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE unsigned/**/char
AO_char_load(const volatile unsigned/**/char *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_char_load
AO_INLINE unsigned/**/char
AO_char_load_acquire(const volatile unsigned/**/char *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_char_load_acquire
/* char_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* char_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* char_store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_char_store
AO_INLINE void
AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_store
#endif
#ifndef AO_SKIPATOMIC_char_store_release
AO_INLINE void
AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_store_release
#endif
#endif /* !AO_GCC_HAVE_char_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_char_SYNC_CAS
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap_acquire(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap_acquire
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap_release(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap_release
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap_full(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_char_compare_and_swap(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_compare_and_swap
AO_INLINE int
AO_char_compare_and_swap_acquire(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_compare_and_swap_acquire
AO_INLINE int
AO_char_compare_and_swap_release(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_char_compare_and_swap_release
AO_INLINE int
AO_char_compare_and_swap_full(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_char_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_char_SYNC_CAS */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_short_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE unsigned/**/short
AO_short_load(const volatile unsigned/**/short *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_short_load
AO_INLINE unsigned/**/short
AO_short_load_acquire(const volatile unsigned/**/short *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_short_load_acquire
/* short_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* short_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* short_store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_short_store
AO_INLINE void
AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_store
#endif
#ifndef AO_SKIPATOMIC_short_store_release
AO_INLINE void
AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_store_release
#endif
#endif /* !AO_GCC_HAVE_short_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_short_SYNC_CAS
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap_acquire(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap_acquire
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap_release(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap_release
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap_full(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_short_compare_and_swap(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_compare_and_swap
AO_INLINE int
AO_short_compare_and_swap_acquire(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_compare_and_swap_acquire
AO_INLINE int
AO_short_compare_and_swap_release(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_short_compare_and_swap_release
AO_INLINE int
AO_short_compare_and_swap_full(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_short_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_short_SYNC_CAS */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_int_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE unsigned
AO_int_load(const volatile unsigned *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_int_load
AO_INLINE unsigned
AO_int_load_acquire(const volatile unsigned *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_int_load_acquire
/* int_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* int_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* int_store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_int_store
AO_INLINE void
AO_int_store(volatile unsigned *addr, unsigned value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_store
#endif
#ifndef AO_SKIPATOMIC_int_store_release
AO_INLINE void
AO_int_store_release(volatile unsigned *addr, unsigned value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_store_release
#endif
#endif /* !AO_GCC_HAVE_int_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_int_SYNC_CAS
AO_INLINE unsigned
AO_int_fetch_compare_and_swap(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap
AO_INLINE unsigned
AO_int_fetch_compare_and_swap_acquire(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_acquire
AO_INLINE unsigned
AO_int_fetch_compare_and_swap_release(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_release
AO_INLINE unsigned
AO_int_fetch_compare_and_swap_full(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_int_compare_and_swap(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_compare_and_swap
AO_INLINE int
AO_int_compare_and_swap_acquire(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_compare_and_swap_acquire
AO_INLINE int
AO_int_compare_and_swap_release(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_int_compare_and_swap_release
AO_INLINE int
AO_int_compare_and_swap_full(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_int_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_int_SYNC_CAS */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_load
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_load_acquire
/* load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_store
AO_INLINE void
AO_store(volatile AO_t *addr, AO_t value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_store
#endif
#ifndef AO_SKIPATOMIC_store_release
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_store_release
#endif
#endif /* !AO_GCC_HAVE_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_SYNC_CAS
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_compare_and_swap
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_SYNC_CAS */
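
The relaxed, acquire and release load/store variants above map one-to-one onto __atomic_load_n/__atomic_store_n, and the acquire/release pair is exactly what a simple flag-based handoff needs. A minimal sketch, again assuming the installed header name:

#include "atomic_ops.h"   /* header name assumed */

static int payload;                 /* plain data handed between threads */
static volatile AO_t ready = 0;

/* Producer: publish the data, then set the flag with release ordering
   so the store to payload is visible before ready becomes 1. */
static void publish(int value)
{
    payload = value;
    AO_store_release(&ready, 1);
}

/* Consumer: poll the flag with acquire ordering; once it reads 1,
   the matching release guarantees payload is up to date. */
static int consume(void)
{
    while (AO_load_acquire(&ready) == 0) {
        /* spin; real code would block or back off */
    }
    return payload;
}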


@@ -0,0 +1,158 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_XSIZE_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE XCTYPE
AO_XSIZE_load(const volatile XCTYPE *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_XSIZE_load
AO_INLINE XCTYPE
AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_XSIZE_load_acquire
/* XSIZE_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* XSIZE_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* XSIZE_store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_XSIZE_store
AO_INLINE void
AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_XSIZE_store
#endif
#ifndef AO_SKIPATOMIC_XSIZE_store_release
AO_INLINE void
AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_XSIZE_store_release
#endif
#endif /* !AO_GCC_HAVE_XSIZE_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_XSIZE_SYNC_CAS
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap_acquire(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap_release(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap_release
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap_full(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_XSIZE_compare_and_swap(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_XSIZE_compare_and_swap
AO_INLINE int
AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_XSIZE_compare_and_swap_acquire
AO_INLINE int
AO_XSIZE_compare_and_swap_release(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_XSIZE_compare_and_swap_release
AO_INLINE int
AO_XSIZE_compare_and_swap_full(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_XSIZE_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_XSIZE_SYNC_CAS */
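
Both this template and the instantiations above define fetch_compare_and_swap to return the value actually observed at the address, so callers detect success by comparing that return value with the expected one. A hedged sketch of the usual retry idiom, here a lock-free stack push (the header name and node layout are illustrative, not part of the library):

#include "atomic_ops.h"   /* header name assumed */

struct node { struct node *next; };

static volatile AO_t stack_head = 0;   /* AO_t is word-sized, so it can hold the pointer */

/* Retry until the head we read is still the head when we CAS:
   the fetch-CAS returns the old value, so equality means success. */
static void stack_push(struct node *n)
{
    AO_t old_head;
    do {
        old_head = AO_load(&stack_head);
        n->next = (struct node *)old_head;
    } while (AO_fetch_compare_and_swap_release(&stack_head, old_head,
                                               (AO_t)n) != old_head);
}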


@@ -0,0 +1,239 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2013-2017 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* The following implementation assumes GCC 4.7 or later. */
/* For the details, see GNU Manual, chapter 6.52 (Built-in functions */
/* for memory model aware atomic operations). */
#define AO_GCC_ATOMIC_TEST_AND_SET
#include "../test_and_set_t_is_char.h"
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_char_SYNC_CAS
#endif
#if (__SIZEOF_SHORT__ == 2 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_short_SYNC_CAS
#endif
#if (__SIZEOF_INT__ == 4 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) \
|| (__SIZEOF_INT__ == 8 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_int_SYNC_CAS
#endif
#if (__SIZEOF_SIZE_T__ == 4 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) \
|| (__SIZEOF_SIZE_T__ == 8 \
&& defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_SYNC_CAS
#endif
#undef AO_compiler_barrier
#define AO_compiler_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)
#ifdef AO_UNIPROCESSOR
/* If only a single processor (core) is used, AO_UNIPROCESSOR could */
/* be defined by the client to avoid unnecessary memory barriers. */
AO_INLINE void
AO_nop_full(void)
{
AO_compiler_barrier();
}
# define AO_HAVE_nop_full
#else
AO_INLINE void
AO_nop_read(void)
{
__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
# define AO_HAVE_nop_read
# ifndef AO_HAVE_nop_write
AO_INLINE void
AO_nop_write(void)
{
__atomic_thread_fence(__ATOMIC_RELEASE);
}
# define AO_HAVE_nop_write
# endif
AO_INLINE void
AO_nop_full(void)
{
/* __sync_synchronize() could be used instead. */
__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
# define AO_HAVE_nop_full
#endif /* !AO_UNIPROCESSOR */
#include "generic-small.h"
#ifndef AO_PREFER_GENERALIZED
# include "generic-arithm.h"
# define AO_CLEAR(addr) __atomic_clear(addr, __ATOMIC_RELEASE)
# define AO_HAVE_CLEAR
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELAXED);
}
# define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELEASE);
}
# define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_test_and_set_full
#endif /* !AO_PREFER_GENERALIZED */
#ifdef AO_HAVE_DOUBLE_PTR_STORAGE
# if ((__SIZEOF_SIZE_T__ == 4 \
&& defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \
|| (__SIZEOF_SIZE_T__ == 8 /* half of AO_double_t */ \
&& defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16))) \
&& !defined(AO_SKIPATOMIC_double_compare_and_swap_ANY)
# define AO_GCC_HAVE_double_SYNC_CAS
# endif
# if !defined(AO_GCC_HAVE_double_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
# if !defined(AO_HAVE_double_load) && !defined(AO_SKIPATOMIC_double_load)
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_RELAXED);
return result;
}
# define AO_HAVE_double_load
# endif
# if !defined(AO_HAVE_double_load_acquire) \
&& !defined(AO_SKIPATOMIC_double_load_acquire)
AO_INLINE AO_double_t
AO_double_load_acquire(const volatile AO_double_t *addr)
{
AO_double_t result;
result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_ACQUIRE);
return result;
}
# define AO_HAVE_double_load_acquire
# endif
# if !defined(AO_HAVE_double_store) && !defined(AO_SKIPATOMIC_double_store)
AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t value)
{
__atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELAXED);
}
# define AO_HAVE_double_store
# endif
# if !defined(AO_HAVE_double_store_release) \
&& !defined(AO_SKIPATOMIC_double_store_release)
AO_INLINE void
AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
{
__atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELEASE);
}
# define AO_HAVE_double_store_release
# endif
#endif /* !AO_GCC_HAVE_double_SYNC_CAS || !AO_PREFER_GENERALIZED */
#endif /* AO_HAVE_DOUBLE_PTR_STORAGE */
#ifdef AO_GCC_HAVE_double_SYNC_CAS
# ifndef AO_HAVE_double_compare_and_swap
AO_INLINE int
AO_double_compare_and_swap(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole /* p_expected */,
new_val.AO_whole /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_double_compare_and_swap
# endif
# ifndef AO_HAVE_double_compare_and_swap_acquire
AO_INLINE int
AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
AO_double_t old_val,
AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole, new_val.AO_whole, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_double_compare_and_swap_acquire
# endif
# ifndef AO_HAVE_double_compare_and_swap_release
AO_INLINE int
AO_double_compare_and_swap_release(volatile AO_double_t *addr,
AO_double_t old_val,
AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole, new_val.AO_whole, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_double_compare_and_swap_release
# endif
# ifndef AO_HAVE_double_compare_and_swap_full
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole, new_val.AO_whole, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_double_compare_and_swap_full
# endif
#endif /* AO_GCC_HAVE_double_SYNC_CAS */
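
With this backend, AO_test_and_set is __atomic_test_and_set and AO_CLEAR is __atomic_clear (in the default, non-generalized configuration), so the classic test-and-set spinlock follows directly. A minimal sketch, assuming the installed header name; real code would add backoff or yielding:

#include "atomic_ops.h"   /* header name assumed */

static volatile AO_TS_t lock = AO_TS_INITIALIZER;

static void spin_lock(void)
{
    /* Acquire ordering on the read-modify-write that sets the flag. */
    while (AO_test_and_set_acquire(&lock) == AO_TS_SET) {
        /* busy-wait; insert a pause or backoff here in real code */
    }
}

static void spin_unlock(void)
{
    /* Maps to __atomic_clear(addr, __ATOMIC_RELEASE) above. */
    AO_CLEAR(&lock);
}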


@@ -0,0 +1,140 @@
/*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#if AO_CLANG_PREREQ(3, 9) && !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled for earlier clang versions as well. */
/* As of clang-3.9, the __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros are missing. */
# define AO_GCC_FORCE_HAVE_CAS
# define AO_GCC_HAVE_double_SYNC_CAS
# include "../standard_ao_double_t.h"
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_ao_t.h"
/* There's also "isync" and "barrier"; however, for all current CPU */
/* versions, "syncht" should suffice. Likewise, it seems that the */
/* auto-defined versions of *_acquire, *_release or *_full suffice for */
/* all current ISA implementations. */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("syncht" : : : "memory");
}
#define AO_HAVE_nop_full
/* The Hexagon has load-locked, store-conditional primitives, and so */
/* the resulting code is very nearly identical to that of PowerPC. */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
AO_t oldval;
AO_t newval;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%3);\n" /* load and reserve */
" %1 = add (%0,%4);\n" /* increment */
" memw_locked(%3,p1) = %1;\n" /* store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
: "=&r"(oldval), "=&r"(newval), "+m"(*addr)
: "r"(addr), "r"(incr)
: "memory", "p1");
return oldval;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
int oldval;
int locked_value = 1;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%2);\n" /* load and reserve */
" {\n"
" p2 = cmp.eq(%0,#0);\n" /* if load is not zero, */
" if (!p2.new) jump:nt 2f;\n" /* we are done */
" }\n"
" memw_locked(%2,p1) = %3;\n" /* else store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
"2:\n" /* oldval is zero if we set */
: "=&r"(oldval), "+m"(*addr)
: "r"(addr), "r"(locked_value)
: "memory", "p1", "p2");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t __oldval;
int result = 0;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%3);\n" /* load and reserve */
" {\n"
" p2 = cmp.eq(%0,%4);\n" /* if load is not equal to */
" if (!p2.new) jump:nt 2f;\n" /* old, fail */
" }\n"
" memw_locked(%3,p1) = %5;\n" /* else store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
" %1 = #1\n" /* success, result = 1 */
"2:\n"
: "=&r" (__oldval), "+r" (result), "+m"(*addr)
: "r" (addr), "r" (old), "r" (new_val)
: "p1", "p2", "memory"
);
return result;
}
# define AO_HAVE_compare_and_swap
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t __oldval;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%2);\n" /* load and reserve */
" {\n"
" p2 = cmp.eq(%0,%3);\n" /* if load is not equal to */
" if (!p2.new) jump:nt 2f;\n" /* old_val, fail */
" }\n"
" memw_locked(%2,p1) = %4;\n" /* else store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
"2:\n"
: "=&r" (__oldval), "+m"(*addr)
: "r" (addr), "r" (old_val), "r" (new_val)
: "p1", "p2", "memory"
);
return __oldval;
}
#define AO_HAVE_fetch_compare_and_swap
#define AO_T_IS_INT
#endif /* AO_DISABLE_GCC_ATOMICS */
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_GCC_HAVE_double_SYNC_CAS


@@ -0,0 +1,94 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "../all_atomic_load_store.h"
/* Some architecture set descriptions include special "ordered" memory */
/* operations. As far as we can tell, no existing processors actually */
/* require those. Nor does it appear likely that future processors */
/* will. */
#include "../ordered.h"
/* GCC will not guarantee the alignment we need, so we use four lock */
/* words and select the correctly aligned datum. See the glibc 2.3.2 */
/* linuxthread port for the original implementation. */
struct AO_pa_clearable_loc {
int data[4];
};
#undef AO_TS_INITIALIZER
#define AO_TS_t struct AO_pa_clearable_loc
#define AO_TS_INITIALIZER { { 1, 1, 1, 1 } }
/* Switch meaning of set and clear, since we only have an atomic clear */
/* instruction. */
typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
#define AO_TS_VAL_t AO_PA_TS_val
#define AO_TS_CLEAR AO_PA_TS_clear
#define AO_TS_SET AO_PA_TS_set
/* The hppa only has one atomic read and modify memory operation, */
/* load and clear, so hppa spinlocks must use zero to signify that */
/* someone is holding the lock. The address used for the ldcw */
/* semaphore must be 16-byte aligned. */
#define AO_ldcw(a, ret) \
__asm__ __volatile__("ldcw 0(%2), %0" \
: "=r" (ret), "=m" (*(a)) : "r" (a))
/* Because malloc only guarantees 8-byte alignment for malloc'd data, */
/* and GCC only guarantees 8-byte alignment for stack locals, we can't */
/* be assured of 16-byte alignment for atomic lock data even if we */
/* specify "__attribute ((aligned(16)))" in the type declaration. So, */
/* we use a struct containing an array of four ints for the atomic lock */
/* type and dynamically select the 16-byte aligned int from the array */
/* for the semaphore. */
#define AO_PA_LDCW_ALIGNMENT 16
#define AO_ldcw_align(addr) \
((volatile unsigned *)(((unsigned long)(addr) \
+ (AO_PA_LDCW_ALIGNMENT - 1)) \
& ~(AO_PA_LDCW_ALIGNMENT - 1)))
/* Works on PA 1.1 and PA 2.0 systems */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t * addr)
{
volatile unsigned int ret;
volatile unsigned *a = AO_ldcw_align(addr);
AO_ldcw(a, ret);
return (AO_TS_VAL_t)ret;
}
#define AO_HAVE_test_and_set_full
AO_INLINE void
AO_pa_clear(volatile AO_TS_t * addr)
{
volatile unsigned *a = AO_ldcw_align(addr);
AO_compiler_barrier();
*a = 1;
}
#define AO_CLEAR(addr) AO_pa_clear(addr)
#define AO_HAVE_CLEAR
#undef AO_PA_LDCW_ALIGNMENT
#undef AO_ldcw
#undef AO_ldcw_align
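
The comments above explain why the lock is a four-int struct whose 16-byte-aligned member is selected at run time. A small, self-contained illustration of that rounding; the names below are ours, not the library's:

#include <stdio.h>

#define LDCW_ALIGNMENT 16   /* same constant as AO_PA_LDCW_ALIGNMENT above */

/* Round an address up to the next 16-byte boundary.  Given the
   8-byte alignment guaranteed for the 16-byte struct, the rounded
   address always still falls inside the four-int array. */
static unsigned long align_up_16(unsigned long addr)
{
    return (addr + (LDCW_ALIGNMENT - 1)) & ~(unsigned long)(LDCW_ALIGNMENT - 1);
}

int main(void)
{
    int lock_words[4];   /* stand-in for struct AO_pa_clearable_loc */
    unsigned long base = (unsigned long)lock_words;
    printf("base = %#lx, ldcw operand = %#lx\n", base, align_up_16(base));
    return 0;
}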


@@ -0,0 +1,287 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "../all_atomic_load_store.h"
#include "../all_acquire_release_volatile.h"
#include "../test_and_set_t_is_char.h"
#ifdef _ILP32
/* 32-bit HP/UX code. */
/* This requires pointer "swizzling". Pointers need to be expanded */
/* to 64 bits using the addp4 instruction before use. This makes it */
/* hard to share code, but we try anyway. */
# define AO_LEN "4"
/* We assume that addr always appears in argument position 1 in asm */
/* code. If it is clobbered due to swizzling, we also need it in */
/* second position. Any later arguments are referenced symbolically, */
/* so that we don't have to worry about their position. This requires*/
/* gcc 3.1, but you shouldn't be using anything older than that on */
/* IA64 anyway. */
/* The AO_MASK macro is a workaround for the fact that HP/UX gcc */
/* appears to otherwise store 64-bit pointers in ar.ccv, i.e. it */
/* doesn't appear to clear high bits in a pointer value we pass into */
/* assembly code, even if it is supposedly of type AO_t. */
# define AO_IN_ADDR "1"(addr)
# define AO_OUT_ADDR , "=r"(addr)
# define AO_SWIZZLE "addp4 %1=0,%1;;\n"
# define AO_MASK(ptr) __asm__ __volatile__("zxt4 %1=%1": "=r"(ptr) : "0"(ptr))
#else
# define AO_LEN "8"
# define AO_IN_ADDR "r"(addr)
# define AO_OUT_ADDR
# define AO_SWIZZLE
# define AO_MASK(ptr) /* empty */
#endif /* !_ILP32 */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mf" : : : "memory");
}
#define AO_HAVE_nop_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".acq %0=[%1],1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_add1_acquire
AO_INLINE AO_t
AO_fetch_and_add1_release (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".rel %0=[%1],1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
AO_fetch_and_sub1_acquire (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".acq %0=[%1],-1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
AO_fetch_and_sub1_release (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".rel %0=[%1],-1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_sub1_release
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t fetched_val;
AO_MASK(old);
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
".acq %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
: "memory");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t fetched_val;
AO_MASK(old);
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
".rel %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
: "memory");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
unsigned char old, unsigned char new_val)
{
unsigned char fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_acquire
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
unsigned char old, unsigned char new_val)
{
unsigned char fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_release
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
unsigned short old, unsigned short new_val)
{
unsigned short fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_acquire
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
unsigned short old, unsigned short new_val)
{
unsigned short fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_release
#ifdef _ILP32
# define AO_T_IS_INT
/* TODO: Add compare_double_and_swap_double for the _ILP32 case. */
#else
# ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add1_acquire(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.acq %0=[%1],1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_add1_acquire
AO_INLINE unsigned int
AO_int_fetch_and_add1_release(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.rel %0=[%1],1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_add1_release
AO_INLINE unsigned int
AO_int_fetch_and_sub1_acquire(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.acq %0=[%1],-1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_sub1_acquire
AO_INLINE unsigned int
AO_int_fetch_and_sub1_release(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.rel %0=[%1],-1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_sub1_release
# endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
unsigned int old, unsigned int new_val)
{
unsigned int fetched_val;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.acq %0=[%1],%2,ar.ccv"
: "=r"(fetched_val)
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
: "memory");
return fetched_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_acquire
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr,
unsigned int old, unsigned int new_val)
{
unsigned int fetched_val;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%1],%2,ar.ccv"
: "=r"(fetched_val)
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
: "memory");
return fetched_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_release
#endif /* !_ILP32 */
/* TODO: Add compare_and_swap_double as soon as there is widely */
/* available hardware that implements it. */
#undef AO_IN_ADDR
#undef AO_LEN
#undef AO_MASK
#undef AO_OUT_ADDR
#undef AO_SWIZZLE
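
The fetchadd-based primitives above return the value seen before the update, which is exactly what a reference count needs. A hedged sketch of how a client might drive them through the portable AO_ interface (names are illustrative and the header name is assumed):

#include "atomic_ops.h"   /* header name assumed */

static volatile AO_t refcount = 1;

static void ref_get(void)
{
    (void)AO_fetch_and_add1(&refcount);
}

/* Returns nonzero when the last reference was dropped.  Release
   ordering makes earlier writes visible before the count can drop;
   fetch_and_sub1 returns the pre-decrement value. */
static int ref_put(void)
{
    return AO_fetch_and_sub1_release(&refcount) == 1;
}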


@@ -0,0 +1,68 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* The cas instruction causes an emulation trap for the */
/* 060 with a misaligned pointer, so let's avoid this. */
#undef AO_t
typedef unsigned long AO_t __attribute__((__aligned__(4)));
/* FIXME. Very incomplete. */
#include "../all_aligned_atomic_load_store.h"
/* Are there any m68k multiprocessors still around? */
/* AFAIK, Alliants were sequentially consistent. */
#include "../ordered.h"
#include "../test_and_set_t_is_char.h"
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_t oldval;
/* The value at addr is semi-phony. */
/* 'tas' sets bit 7 while the return */
/* value pretends all bits were set, */
/* which at least matches AO_TS_SET. */
__asm__ __volatile__(
"tas %1; sne %0"
: "=d" (oldval), "=m" (*addr)
: "m" (*addr)
: "memory");
/* This cast works due to the above. */
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
char result;
__asm__ __volatile__(
"cas.l %3,%4,%1; seq %0"
: "=d" (result), "=m" (*addr)
: "m" (*addr), "d" (old), "d" (new_val)
: "memory");
return -result;
}
#define AO_HAVE_compare_and_swap_full
/* TODO: implement AO_fetch_compare_and_swap. */
#define AO_T_IS_INT


@@ -0,0 +1,205 @@
/*
* Copyright (c) 2005,2007 Thiemo Seufer <ths@networkno.de>
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/*
* FIXME: This should probably make finer distinctions. SGI MIPS is
* much more strongly ordered, and in fact closer to sequentially
* consistent. This is really aimed at modern embedded implementations.
*/
/* Data dependence does not imply read ordering. */
#define AO_NO_DD_ORDERING
/* #include "../standard_ao_double_t.h" */
/* TODO: Implement double-wide operations if available. */
#if (AO_GNUC_PREREQ(4, 9) || AO_CLANG_PREREQ(3, 5)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled even for earlier gcc/clang versions. */
/* As of clang-3.6/mips[64], __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n are missing. */
# if defined(__clang__)
# define AO_GCC_FORCE_HAVE_CAS
# endif
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
# include "../test_and_set_t_is_ao_t.h"
# include "../all_aligned_atomic_load_store.h"
# if !defined(_ABI64) || _MIPS_SIM != _ABI64
# define AO_T_IS_INT
# if __mips_isa_rev >= 6
/* Encoding of ll/sc in mips rel6 differs from that of mips2/3. */
# define AO_MIPS_SET_ISA ""
# else
# define AO_MIPS_SET_ISA " .set mips2\n"
# endif
# define AO_MIPS_LL_1(args) " ll " args "\n"
# define AO_MIPS_SC(args) " sc " args "\n"
# else
# if __mips_isa_rev >= 6
# define AO_MIPS_SET_ISA ""
# else
# define AO_MIPS_SET_ISA " .set mips3\n"
# endif
# define AO_MIPS_LL_1(args) " lld " args "\n"
# define AO_MIPS_SC(args) " scd " args "\n"
# endif /* _MIPS_SIM == _ABI64 */
#ifdef AO_ICE9A1_LLSC_WAR
/* ICE9 rev A1 chip (used in very few systems) is reported to */
/* have a low-frequency bug that causes LL to fail. */
/* To work around it, just issue a second 'LL'. */
# define AO_MIPS_LL(args) AO_MIPS_LL_1(args) AO_MIPS_LL_1(args)
#else
# define AO_MIPS_LL(args) AO_MIPS_LL_1(args)
#endif
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
" sync\n"
" .set pop"
: : : "memory");
}
#define AO_HAVE_nop_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
register int result;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %2")
" addu %1, %0, %3\n"
AO_MIPS_SC("%1, %2")
" beqz %1, 1b\n"
" nop\n"
" .set pop"
: "=&r" (result), "=&r" (temp), "+m" (*addr)
: "Ir" (incr)
: "memory");
return (AO_t)result;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
register int oldval;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %2")
" move %1, %3\n"
AO_MIPS_SC("%1, %2")
" beqz %1, 1b\n"
" nop\n"
" .set pop"
: "=&r" (oldval), "=&r" (temp), "+m" (*addr)
: "r" (1)
: "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
/* TODO: Implement AO_and/or/xor primitives directly. */
#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
register int was_equal = 0;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %1")
" bne %0, %4, 2f\n"
" move %0, %3\n"
AO_MIPS_SC("%0, %1")
" .set pop\n"
" beqz %0, 1b\n"
" li %2, 1\n"
"2:"
: "=&r" (temp), "+m" (*addr), "+r" (was_equal)
: "r" (new_val), "r" (old)
: "memory");
return was_equal;
}
# define AO_HAVE_compare_and_swap
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
register int fetched_val;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %2")
" bne %0, %4, 2f\n"
" move %1, %3\n"
AO_MIPS_SC("%1, %2")
" beqz %1, 1b\n"
" nop\n"
" .set pop\n"
"2:"
: "=&r" (fetched_val), "=&r" (temp), "+m" (*addr)
: "r" (new_val), "Jr" (old)
: "memory");
return (AO_t)fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
#endif /* AO_DISABLE_GCC_ATOMICS */
/* CAS primitives with acquire, release and full semantics are */
/* generated automatically (and AO_int_... primitives are */
/* defined properly after the first generalization pass). */
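/* A rough sketch, not part of this file, of what that generalization */
/* amounts to for the "full" variant: a full barrier on each side of  */
/* the plain primitive defined above (the _sketch name is             */
/* hypothetical).                                                      */
#if 0 /* example only */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full_sketch(volatile AO_t *addr, AO_t old,
                                      AO_t new_val)
{
  AO_t result;

  AO_nop_full();        /* "sync" before the CAS attempt */
  result = AO_fetch_compare_and_swap(addr, old, new_val);
  AO_nop_full();        /* "sync" afterwards, whether or not it succeeded */
  return result;
}
#endif /* example only */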
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_MIPS_LL
#undef AO_MIPS_LL_1
#undef AO_MIPS_SC
#undef AO_MIPS_SET_ISA

View File

@@ -0,0 +1,348 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* Memory model documented at http://www-106.ibm.com/developerworks/ */
/* eserver/articles/archguide.html and (clearer) */
/* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */
/* There appears to be no implicit ordering between any kind of */
/* independent memory references. */
/* TODO: Implement double-wide operations if available. */
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 8)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled even for earlier gcc/clang versions. */
/* TODO: As of clang-3.8.1, it emits lwsync in AO_load_acquire */
/* (i.e., the code is less efficient than the one given below). */
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
/* Architecture enforces some ordering based on control dependence. */
/* I don't know if that could help. */
/* Data-dependent loads are always ordered. */
/* Based on the above references, eieio is intended for use on */
/* uncached memory, which we don't support. It does not order loads */
/* from cached memory. */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_ao_t.h"
/* There seems to be no byte equivalent of lwarx, so this */
/* may really be what we want, at least in the 32-bit case. */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("sync" : : : "memory");
}
#define AO_HAVE_nop_full
/* lwsync apparently works for everything but a StoreLoad barrier. */
AO_INLINE void
AO_lwsync(void)
{
#ifdef __NO_LWSYNC__
__asm__ __volatile__("sync" : : : "memory");
#else
__asm__ __volatile__("lwsync" : : : "memory");
#endif
}
#define AO_nop_write() AO_lwsync()
#define AO_HAVE_nop_write
#define AO_nop_read() AO_lwsync()
#define AO_HAVE_nop_read
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* ppc64 uses ld not lwz */
# define AO_PPC_LD "ld"
# define AO_PPC_LxARX "ldarx"
# define AO_PPC_CMPx "cmpd"
# define AO_PPC_STxCXd "stdcx."
# define AO_PPC_LOAD_CLOBBER "cr0"
#else
# define AO_PPC_LD "lwz"
# define AO_PPC_LxARX "lwarx"
# define AO_PPC_CMPx "cmpw"
# define AO_PPC_STxCXd "stwcx."
# define AO_PPC_LOAD_CLOBBER "cc"
/* FIXME: We should get gcc to allocate one of the condition */
/* registers. I always got "impossible constraint" when I */
/* tried the "y" constraint. */
# define AO_T_IS_INT
#endif
#ifdef _AIX
/* Labels are not supported on AIX. */
/* ppc64 instructions are the same size as 32-bit ones, so the */
/* hard-coded branch displacements used below work in both modes. */
# define AO_PPC_L(label) /* empty */
# define AO_PPC_BR_A(labelBF, addr) addr
#else
# define AO_PPC_L(label) label ": "
# define AO_PPC_BR_A(labelBF, addr) labelBF
#endif
/* We explicitly specify load_acquire, since it is important, and can */
/* be implemented relatively cheaply. It could be implemented */
/* with an ordinary load followed by a lwsync. But the general wisdom */
/* seems to be that a data dependent branch followed by an isync is */
/* cheaper. And the documentation is fairly explicit that this also */
/* has acquire semantics. */
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (
AO_PPC_LD "%U1%X1 %0,%1\n"
"cmpw %0,%0\n"
"bne- " AO_PPC_BR_A("1f", "$+4") "\n"
AO_PPC_L("1") "isync\n"
: "=r" (result)
: "m"(*addr) : "memory", AO_PPC_LOAD_CLOBBER);
return result;
}
#define AO_HAVE_load_acquire
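/* For comparison, a minimal sketch (not part of this file) of the     */
/* "ordinary load followed by a lwsync" alternative mentioned above;   */
/* lwsync orders the load before all subsequent loads and stores.      */
#if 0 /* example only */
AO_INLINE AO_t
AO_load_acquire_lwsync_sketch(const volatile AO_t *addr)
{
  AO_t result = *addr;

  AO_lwsync();
  return result;
}
#endif /* example only */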
/* We explicitly specify store_release, since it relies */
/* on the fact that lwsync is also a LoadStore barrier. */
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t value)
{
AO_lwsync();
*addr = value;
}
#define AO_HAVE_store_release
#ifndef AO_PREFER_GENERALIZED
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
/* TODO: And we should be using smaller objects anyway. */
AO_t oldval;
AO_t temp = 1; /* locked value */
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%1\n"
/* load and reserve */
AO_PPC_CMPx "i %0, 0\n" /* if load is */
"bne " AO_PPC_BR_A("2f", "$+12") "\n"
/* non-zero, return already set */
AO_PPC_STxCXd " %2,0,%1\n" /* else store conditional */
"bne- " AO_PPC_BR_A("1b", "$-16") "\n"
/* retry if lost reservation */
AO_PPC_L("2") "\n" /* oldval is zero if we set */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
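/* A sketch, not part of this file, of the compare_and_swap based       */
/* synthesis alluded to above (AO_TS_t is AO_t here; AO_compare_and_swap */
/* is provided later in this file or by the generalization in           */
/* atomic_ops.h):                                                        */
#if 0 /* example only */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_via_cas_sketch(volatile AO_TS_t *addr)
{
  /* If the CAS succeeds, the flag was clear and is now set; otherwise */
  /* it was already set.                                               */
  return AO_compare_and_swap((volatile AO_t *)addr,
                             (AO_t)AO_TS_CLEAR, (AO_t)AO_TS_SET)
           ? AO_TS_CLEAR : AO_TS_SET;
}
#endif /* example only */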
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr) {
AO_TS_VAL_t result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr) {
AO_lwsync();
return AO_test_and_set(addr);
}
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_VAL_t result;
AO_lwsync();
result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_full
#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t oldval;
int result = 0;
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%2\n" /* load and reserve */
AO_PPC_CMPx " %0, %4\n" /* if load is not equal to */
"bne " AO_PPC_BR_A("2f", "$+16") "\n" /* old, fail */
AO_PPC_STxCXd " %3,0,%2\n" /* else store conditional */
"bne- " AO_PPC_BR_A("1b", "$-16") "\n"
/* retry if lost reservation */
"li %1,1\n" /* result = 1; */
AO_PPC_L("2") "\n"
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
return result;
}
# define AO_HAVE_compare_and_swap
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
{
int result = AO_compare_and_swap(addr, old, new_val);
AO_lwsync();
return result;
}
# define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_lwsync();
return AO_compare_and_swap(addr, old, new_val);
}
# define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
int result;
AO_lwsync();
result = AO_compare_and_swap(addr, old, new_val);
if (result)
AO_lwsync();
return result;
}
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t fetched_val;
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%1\n" /* load and reserve */
AO_PPC_CMPx " %0, %3\n" /* if load is not equal to */
"bne " AO_PPC_BR_A("2f", "$+12") "\n" /* old_val, fail */
AO_PPC_STxCXd " %2,0,%1\n" /* else store conditional */
"bne- " AO_PPC_BR_A("1b", "$-16") "\n"
/* retry if lost reservation */
AO_PPC_L("2") "\n"
: "=&r"(fetched_val)
: "r"(addr), "r"(new_val), "r"(old_val)
: "memory", "cr0");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_lwsync();
return AO_fetch_compare_and_swap(addr, old_val, new_val);
}
#define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t result;
AO_lwsync();
result = AO_fetch_compare_and_swap(addr, old_val, new_val);
if (result == old_val)
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_compare_and_swap_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
AO_t newval;
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
AO_PPC_STxCXd " %1,0,%2\n" /* store conditional */
"bne- " AO_PPC_BR_A("1b", "$-12") "\n"
/* retry if lost reservation */
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
return oldval;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) {
AO_t result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_and_add_acquire
AO_INLINE AO_t
AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) {
AO_lwsync();
return AO_fetch_and_add(addr, incr);
}
#define AO_HAVE_fetch_and_add_release
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) {
AO_t result;
AO_lwsync();
result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
#undef AO_PPC_BR_A
#undef AO_PPC_CMPx
#undef AO_PPC_L
#undef AO_PPC_LD
#undef AO_PPC_LOAD_CLOBBER
#undef AO_PPC_LxARX
#undef AO_PPC_STxCXd
#endif /* AO_DISABLE_GCC_ATOMICS */

View File

@@ -0,0 +1,32 @@
/*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#if defined(__clang__) || defined(AO_PREFER_BUILTIN_ATOMICS)
/* All __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros are still missing. */
/* The operations are lock-free even for types smaller than a word. */
# define AO_GCC_FORCE_HAVE_CAS
#else
/* As of gcc-7.5, CAS and arithmetic atomic operations for char and */
/* short are supported by the compiler but require the -latomic flag. */
# if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
# define AO_NO_char_ARITHM
# endif
# if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
# define AO_NO_short_ARITHM
# endif
#endif /* !__clang__ */
#include "generic.h"
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_NO_char_ARITHM
#undef AO_NO_short_ARITHM

View File

@@ -0,0 +1,92 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if (AO_GNUC_PREREQ(5, 4) || AO_CLANG_PREREQ(8, 0)) && defined(__s390x__) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled for earlier clang/gcc versions. */
/* But, e.g., clang-3.8.0 produces a backend error for AtomicFence. */
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
/* The relevant documentation appears to be at */
/* http://publibz.boulder.ibm.com/epubs/pdf/dz9zr003.pdf */
/* around page 5-96. Apparently: */
/* - Memory references in general are atomic only for a single */
/* byte. But it appears that the most common load/store */
/* instructions also guarantee atomicity for aligned */
/* operands of standard types. WE FOOLISHLY ASSUME that */
/* compilers only generate those. If that turns out to be */
/* wrong, we need inline assembly code for AO_load and */
/* AO_store. */
/* - A store followed by a load is unordered since the store */
/* may be delayed. Otherwise everything is ordered. */
/* - There is a hardware compare-and-swap (CS) instruction. */
#include "../all_aligned_atomic_load_store.h"
#include "../ordered_except_wr.h"
#include "../test_and_set_t_is_ao_t.h"
/* TODO: Is there a way to do byte-sized test-and-set? */
/* TODO: AO_nop_full should probably be implemented directly. */
/* It appears that certain BCR instructions have that effect. */
/* Presumably they're cheaper than CS? */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
int retval;
__asm__ __volatile__ (
# ifndef __s390x__
" cs %1,%2,0(%3)\n"
# else
" csg %1,%2,0(%3)\n"
# endif
" ipm %0\n"
" srl %0,28\n"
: "=&d" (retval), "+d" (old)
: "d" (new_val), "a" (addr)
: "cc", "memory");
return retval == 0;
}
#define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
__asm__ __volatile__ (
# ifndef __s390x__
" cs %0,%2,%1\n"
# else
" csg %0,%2,%1\n"
# endif
: "+d" (old), "=Q" (*addr)
: "d" (new_val), "m" (*addr)
: "cc", "memory");
return old;
}
#define AO_HAVE_fetch_compare_and_swap_full
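/* Essentially everything else is generalized from the CS-based CAS    */
/* above.  A rough sketch, not part of this file, of such a            */
/* generalization (the _sketch name is hypothetical):                  */
#if 0 /* example only */
AO_INLINE AO_t
AO_fetch_and_add_full_sketch(volatile AO_t *addr, AO_t incr)
{
  AO_t old;

  do {
    old = *addr;        /* plain aligned load, atomic per the notes above */
  } while (AO_fetch_compare_and_swap_full(addr, old, old + incr) != old);
  return old;
}
#endif /* example only */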
#endif /* AO_DISABLE_GCC_ATOMICS */
/* TODO: Add double-wide operations for 32-bit executables. */

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2009 by Takashi YOSHII. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "../all_atomic_load_store.h"
#include "../ordered.h"
/* SH provides only a byte-sized test-and-set instruction (tas.b). */
#include "../test_and_set_t_is_char.h"
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
int oldval;
__asm__ __volatile__(
"tas.b @%1; movt %0"
: "=r" (oldval)
: "r" (addr)
: "t", "memory");
return oldval? AO_TS_CLEAR : AO_TS_SET;
}
#define AO_HAVE_test_and_set_full
/* TODO: Very incomplete. */

View File

@@ -0,0 +1,87 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* TODO: Very incomplete; Add support for sparc64. */
/* Non-ancient SPARCs provide compare-and-swap (casa). */
#include "../all_atomic_load_store.h"
/* Real SPARC code uses TSO: */
#include "../ordered_except_wr.h"
/* Test_and_set location is just a byte. */
#include "../test_and_set_t_is_char.h"
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_VAL_t oldval;
__asm__ __volatile__("ldstub %1,%0"
: "=r"(oldval), "=m"(*addr)
: "m"(*addr) : "memory");
return oldval;
}
#define AO_HAVE_test_and_set_full
#ifndef AO_NO_SPARC_V9
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_t ret;
__asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t"
# if defined(__arch64__)
"casx [%2],%0,%1\n\t"
# else
"cas [%2],%0,%1\n\t" /* 32-bit version */
# endif
"membar #StoreLoad | #StoreStore\n\t"
"cmp %0,%1\n\t"
"be,a 0f\n\t"
"mov 1,%0\n\t"/* one insn after branch always executed */
"clr %0\n\t"
"0:\n\t"
: "=r" (ret), "+r" (new_val)
: "r" (addr), "0" (old)
: "memory", "cc");
return (int)ret;
}
# define AO_HAVE_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
__asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t"
# if defined(__arch64__)
"casx [%1],%2,%0\n\t"
# else
"cas [%1],%2,%0\n\t" /* 32-bit version */
# endif
"membar #StoreLoad | #StoreStore\n\t"
: "+r" (new_val)
: "r" (addr), "r" (old)
: "memory");
return new_val;
}
#define AO_HAVE_fetch_compare_and_swap_full
#endif /* !AO_NO_SPARC_V9 */
/* TODO: Extend this for SPARC v8 and v9 (V8 also has swap, V9 has CAS, */
/* there are barriers like membar #LoadStore, CASA (32-bit) and */
/* CASXA (64-bit) instructions added in V9). */

View File

@@ -0,0 +1,48 @@
/*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Minimal support for tile. */
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 4)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
# include "../all_atomic_load_store.h"
# include "../test_and_set_t_is_ao_t.h"
AO_INLINE void
AO_nop_full(void)
{
__sync_synchronize();
}
# define AO_HAVE_nop_full
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
{
return __sync_fetch_and_add(p, incr);
}
# define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
}
# define AO_HAVE_fetch_compare_and_swap_full
#endif /* AO_DISABLE_GCC_ATOMICS */

View File

@@ -0,0 +1,657 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
* Copyright (c) 2008-2018 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
* Some of the machine specific code was borrowed from our GC distribution.
*/
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 4)) \
&& !defined(__INTEL_COMPILER) /* TODO: test and enable icc */ \
&& !defined(AO_DISABLE_GCC_ATOMICS)
# define AO_GCC_ATOMIC_TEST_AND_SET
# if defined(__APPLE_CC__)
/* OS X 10.7 clang-425 lacks __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n */
/* predefined macro (unlike e.g. OS X 10.11 clang-703). */
# define AO_GCC_FORCE_HAVE_CAS
# ifdef __x86_64__
# if !AO_CLANG_PREREQ(9, 0) /* < Apple clang-900 */
/* Older Apple clang (e.g., clang-600 based on LLVM 3.5svn) had */
/* some bug in the double word CAS implementation for x64. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# endif
# elif defined(__MACH__)
/* OS X 10.8 lacks __atomic_load/store symbols for arch i386 */
/* (even with a non-Apple clang). */
# ifndef MAC_OS_X_VERSION_MIN_REQUIRED
/* Include this header just to import the version macro. */
# include <AvailabilityMacros.h>
# endif
# if MAC_OS_X_VERSION_MIN_REQUIRED < 1090 /* MAC_OS_X_VERSION_10_9 */
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif
# endif /* __i386__ */
# elif defined(__clang__)
# if !defined(__x86_64__)
# if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(__CYGWIN__) \
&& !AO_CLANG_PREREQ(5, 0)
/* At least clang-3.8/i686 (from NDK r11c) required -latomic to be */
/* specified when double-word atomic operations are used. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif /* !AO_PREFER_BUILTIN_ATOMICS */
# elif !defined(__ILP32__)
# if (!AO_CLANG_PREREQ(3, 5) && !defined(AO_PREFER_BUILTIN_ATOMICS)) \
|| (!AO_CLANG_PREREQ(4, 0) && defined(AO_ADDRESS_SANITIZER)) \
|| defined(AO_THREAD_SANITIZER)
/* clang-3.4/x64 required -latomic. clang-3.9/x64 seems to */
/* pass double-wide arguments to atomic operations incorrectly */
/* in case of ASan/TSan. */
/* TODO: As of clang-4.0, lock-free test_stack fails if TSan. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif
# endif /* __x86_64__ */
# elif AO_GNUC_PREREQ(7, 0) && !defined(AO_PREFER_BUILTIN_ATOMICS) \
&& !defined(AO_THREAD_SANITIZER) && !defined(__MINGW32__)
/* gcc-7.x/x64 (gcc-7.2, at least) requires the -latomic flag when */
/* double-word atomic operations are used (but not under TSan). */
/* TODO: Revise it for the future gcc-7 releases. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif /* __GNUC__ && !__clang__ */
# ifdef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# define AO_SKIPATOMIC_double_load
# define AO_SKIPATOMIC_double_load_acquire
# define AO_SKIPATOMIC_double_store
# define AO_SKIPATOMIC_double_store_release
# undef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif
#else /* AO_DISABLE_GCC_ATOMICS */
/* The following really assume we have a 486 or better. Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command */
/* line options. */
/* We should perhaps test dynamically. */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_char.h"
#if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS)
/* "mfence" is a part of SSE2 set (introduced on Intel Pentium 4). */
# define AO_USE_PENTIUM4_INSTRS
#endif
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mfence" : : : "memory");
}
# define AO_HAVE_nop_full
#else
/* We could use the cpuid instruction. But that seems to be slower */
/* than the default implementation based on test_and_set_full. Thus */
/* we omit that bit of misinformation here. */
#endif /* !AO_USE_PENTIUM4_INSTRS */
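/* A rough sketch, not part of this file, of the test_and_set based     */
/* fallback mentioned above: any locked RMW (here the implicitly locked */
/* "xchgb" inside AO_test_and_set_full, defined further below) acts as  */
/* a full fence on x86, whatever location it targets.                   */
#if 0 /* example only */
AO_INLINE void
AO_nop_full_via_tas_sketch(void)
{
  AO_TS_t dummy = AO_TS_INITIALIZER;

  (void)AO_test_and_set_full(&dummy);
}
#endif /* example only */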
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
AO_t result;
__asm__ __volatile__ ("lock; xadd %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
unsigned char result;
__asm__ __volatile__ ("lock; xaddb %0, %1"
: "=q" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
unsigned short result;
__asm__ __volatile__ ("lock; xaddw %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; and %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_and_full
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; or %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_or_full
AO_INLINE void
AO_xor_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; xor %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_xor_full
/* AO_store_full could be implemented directly using "xchg" but it */
/* could be generalized efficiently as an ordinary store accomplished */
/* with AO_nop_full ("mfence" instruction). */
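/* A minimal sketch, not part of this file, of the direct "xchg" variant */
/* mentioned above; the exchanged-out old value is simply discarded, and */
/* the implicit lock of "xchg" with a memory operand makes the store a   */
/* full barrier.                                                         */
#if 0 /* example only */
AO_INLINE void
AO_store_full_xchg_sketch(volatile AO_t *addr, AO_t value)
{
  __asm__ __volatile__ ("xchg %0, %1"
                        : "+r" (value), "+m" (*addr)
                        : : "memory");
}
#endif /* example only */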
AO_INLINE void
AO_char_and_full (volatile unsigned char *p, unsigned char value)
{
__asm__ __volatile__ ("lock; andb %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_char_and_full
AO_INLINE void
AO_char_or_full (volatile unsigned char *p, unsigned char value)
{
__asm__ __volatile__ ("lock; orb %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_char_or_full
AO_INLINE void
AO_char_xor_full (volatile unsigned char *p, unsigned char value)
{
__asm__ __volatile__ ("lock; xorb %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_char_xor_full
AO_INLINE void
AO_short_and_full (volatile unsigned short *p, unsigned short value)
{
__asm__ __volatile__ ("lock; andw %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_short_and_full
AO_INLINE void
AO_short_or_full (volatile unsigned short *p, unsigned short value)
{
__asm__ __volatile__ ("lock; orw %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_short_or_full
AO_INLINE void
AO_short_xor_full (volatile unsigned short *p, unsigned short value)
{
__asm__ __volatile__ ("lock; xorw %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_short_xor_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
unsigned char oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
__asm__ __volatile__ ("xchgb %0, %1"
: "=q" (oldval), "+m" (*addr)
: "0" ((unsigned char)0xff)
: "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return (int)__sync_bool_compare_and_swap(addr, old, new_val
/* empty protection list */);
/* Note: an empty list of variables protected by the */
/* memory barrier should mean all globally accessible */
/* variables are protected. */
# else
char result;
# if defined(__GCC_ASM_FLAG_OUTPUTS__)
AO_t dummy;
__asm__ __volatile__ ("lock; cmpxchg %3, %0"
: "+m" (*addr), "=@ccz" (result), "=a" (dummy)
: "r" (new_val), "a" (old)
: "memory");
# else
__asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1"
: "+m" (*addr), "=a" (result)
: "r" (new_val), "a" (old)
: "memory");
# endif
return (int)result;
# endif
}
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
AO_t fetched_val;
__asm__ __volatile__ ("lock; cmpxchg %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "r" (new_val)
: "memory");
return fetched_val;
# endif
}
#define AO_HAVE_fetch_compare_and_swap_full
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
unsigned char old_val,
unsigned char new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
unsigned char fetched_val;
__asm__ __volatile__ ("lock; cmpxchgb %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "q" (new_val)
: "memory");
return fetched_val;
# endif
}
# define AO_HAVE_char_fetch_compare_and_swap_full
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
unsigned short fetched_val;
__asm__ __volatile__ ("lock; cmpxchgw %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "r" (new_val)
: "memory");
return fetched_val;
# endif
}
# define AO_HAVE_short_fetch_compare_and_swap_full
# if defined(__x86_64__) && !defined(__ILP32__)
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_full(volatile unsigned int *addr,
unsigned int old_val,
unsigned int new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
unsigned int fetched_val;
__asm__ __volatile__ ("lock; cmpxchgl %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "r" (new_val)
: "memory");
return fetched_val;
# endif
}
# define AO_HAVE_int_fetch_compare_and_swap_full
# ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
unsigned int result;
__asm__ __volatile__ ("lock; xaddl %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
AO_int_and_full (volatile unsigned int *p, unsigned int value)
{
__asm__ __volatile__ ("lock; andl %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_int_and_full
AO_INLINE void
AO_int_or_full (volatile unsigned int *p, unsigned int value)
{
__asm__ __volatile__ ("lock; orl %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_int_or_full
AO_INLINE void
AO_int_xor_full (volatile unsigned int *p, unsigned int value)
{
__asm__ __volatile__ ("lock; xorl %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_int_xor_full
# endif /* !AO_PREFER_GENERALIZED */
# else
# define AO_T_IS_INT
# endif /* !x86_64 || ILP32 */
/* Real X86 implementations, except for some old 32-bit WinChips, */
/* appear to enforce ordering between memory operations, EXCEPT that */
/* a later read can pass earlier writes, presumably due to the */
/* visible presence of store buffers. */
/* We ignore both the WinChips and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
# include "../ordered_except_wr.h"
#endif /* AO_DISABLE_GCC_ATOMICS */
#if defined(AO_GCC_ATOMIC_TEST_AND_SET) \
&& !defined(AO_SKIPATOMIC_double_compare_and_swap_ANY)
# if defined(__ILP32__) || !defined(__x86_64__) /* 32-bit AO_t */ \
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) /* 64-bit AO_t */
# include "../standard_ao_double_t.h"
# endif
#elif !defined(__x86_64__) && (!defined(AO_USE_SYNC_CAS_BUILTIN) \
|| defined(AO_GCC_ATOMIC_TEST_AND_SET))
# include "../standard_ao_double_t.h"
/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically on at least a Pentium according to */
/* Chapter 8.1.1 of Volume 3A Part 1 of Intel processor manuals. */
# ifndef AO_PREFER_GENERALIZED
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"
# endif
/* Returns nonzero if the comparison succeeded. */
/* Really requires at least a Pentium. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
char result;
# if defined(__PIC__) && !(AO_GNUC_PREREQ(5, 1) || AO_CLANG_PREREQ(4, 0))
AO_t saved_ebx;
AO_t dummy;
/* The following applies to an ancient GCC (and, probably, it was */
/* never needed for Clang): */
/* If PIC is turned on, we cannot use ebx as it is reserved for the */
/* GOT pointer. We should save and restore ebx. The proposed */
/* solution is not so efficient as the older alternatives using */
/* push ebx or edi as new_val1 (w/o clobbering edi and temporary */
/* local variable usage) but it is more portable (it works even if */
/* ebx is not used as GOT pointer, and it works for the buggy GCC */
/* releases that incorrectly evaluate memory operands offset in the */
/* inline assembly after push). */
# ifdef __OPTIMIZE__
__asm__ __volatile__("mov %%ebx, %2\n\t" /* save ebx */
"lea %0, %%edi\n\t" /* in case addr is in ebx */
"mov %7, %%ebx\n\t" /* load new_val1 */
"lock; cmpxchg8b (%%edi)\n\t"
"mov %2, %%ebx\n\t" /* restore ebx */
"setz %1"
: "+m" (*addr), "=a" (result),
"=m" (saved_ebx), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "m" (new_val1)
: "%edi", "memory");
# else
/* Less efficient code that manually preserves edi when GCC is */
/* invoked with -O0 (otherwise it fails to find a register in */
/* class 'GENERAL_REGS'). */
AO_t saved_edi;
__asm__ __volatile__("mov %%edi, %3\n\t" /* save edi */
"mov %%ebx, %2\n\t" /* save ebx */
"lea %0, %%edi\n\t" /* in case addr is in ebx */
"mov %8, %%ebx\n\t" /* load new_val1 */
"lock; cmpxchg8b (%%edi)\n\t"
"mov %2, %%ebx\n\t" /* restore ebx */
"mov %3, %%edi\n\t" /* restore edi */
"setz %1"
: "+m" (*addr), "=a" (result),
"=m" (saved_ebx), "=m" (saved_edi), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "m" (new_val1)
: "memory");
# endif
# else
/* For non-PIC mode, this operation could be simplified (and be */
/* faster) by using ebx as new_val1. Reuse of the PIC hard */
/* register, instead of using a fixed register, is implemented */
/* in Clang and GCC 5.1+, at least. (Older GCC refused to compile */
/* such code for PIC mode). */
# if defined(__GCC_ASM_FLAG_OUTPUTS__)
__asm__ __volatile__ ("lock; cmpxchg8b %0"
: "+m" (*addr), "=@ccz" (result),
"+d" (old_val2), "+a" (old_val1)
: "c" (new_val2), "b" (new_val1)
: "memory");
# else
AO_t dummy; /* an output for clobbered edx */
__asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
: "+m" (*addr), "=a" (result), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1)
: "memory");
# endif
# endif
return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
#elif defined(__ILP32__) || !defined(__x86_64__)
# include "../standard_ao_double_t.h"
/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically (requires at least a Pentium). */
# ifndef AO_PREFER_GENERALIZED
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"
# endif
/* X32 has native support for 64-bit integer operations (AO_double_t */
/* is a 64-bit integer and we could use 64-bit cmpxchg). */
/* This primitive is used by compare_double_and_swap_double_full. */
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
/* It is safe to use __sync CAS built-in here. */
return __sync_bool_compare_and_swap(&addr->AO_whole,
old_val.AO_whole, new_val.AO_whole
/* empty protection list */);
}
# define AO_HAVE_double_compare_and_swap_full
#elif defined(AO_CMPXCHG16B_AVAILABLE) \
|| (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) \
&& !defined(AO_THREAD_SANITIZER))
# include "../standard_ao_double_t.h"
/* The Intel and AMD Architecture Programmer Manuals state roughly */
/* the following: */
/* - CMPXCHG16B (with a LOCK prefix) can be used to perform 16-byte */
/* atomic accesses in 64-bit mode (with certain alignment */
/* restrictions); */
/* - SSE instructions that access data larger than a quadword (like */
/* MOVDQA) may be implemented using multiple memory accesses; */
/* - LOCK prefix causes an invalid-opcode exception when used with */
/* 128-bit media (SSE) instructions. */
/* Thus, currently, the only way to implement lock-free double_load */
/* and double_store on x86_64 is to use CMPXCHG16B (if available). */
/* NEC LE-IT: older AMD Opterons are missing this instruction. */
/* On these machines SIGILL will be thrown. */
/* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated (lock */
/* based) version available. */
/* HB: Changed this to not define either by default. There are */
/* enough machines and tool chains around on which cmpxchg16b */
/* doesn't work. And the emulation is unsafe by our usual rules. */
/* However both are clearly useful in certain cases. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
char result;
# if defined(__GCC_ASM_FLAG_OUTPUTS__)
__asm__ __volatile__("lock; cmpxchg16b %0"
: "+m" (*addr), "=@ccz" (result),
"+d" (old_val2), "+a" (old_val1)
: "c" (new_val2), "b" (new_val1)
: "memory");
# else
AO_t dummy; /* an output for clobbered rdx */
__asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
: "+m" (*addr), "=a" (result), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1)
: "memory");
# endif
return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
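/* A rough sketch, not part of this file, of how a lock-free double-word */
/* load can be derived from the CAS above: read the pair non-atomically, */
/* then confirm it with a CAS that replaces the value with itself; a     */
/* successful CAS proves the snapshot was consistent (at the cost of a   */
/* locked write that dirties the cache line).                            */
#if 0 /* example only */
AO_INLINE AO_double_t
AO_double_load_via_cas16b_sketch(volatile AO_double_t *addr)
{
  AO_double_t result;

  do {
    result.AO_val1 = addr->AO_val1;
    result.AO_val2 = addr->AO_val2;
  } while (!AO_compare_double_and_swap_double_full(addr,
                                result.AO_val1, result.AO_val2,
                                result.AO_val1, result.AO_val2));
  return result;
}
#endif /* example only */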
#elif defined(AO_WEAK_DOUBLE_CAS_EMULATION)
# include "../standard_ao_double_t.h"
# ifdef __cplusplus
extern "C" {
# endif
/* This one provides spinlock based emulation of CAS implemented in */
/* atomic_ops.c. We probably do not want to do this here, since it */
/* is not atomic with respect to other kinds of updates of *addr. */
/* On the other hand, this may be a useful facility on occasion. */
int AO_compare_double_and_swap_double_emulation(
volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2);
# ifdef __cplusplus
} /* extern "C" */
# endif
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
return AO_compare_double_and_swap_double_emulation(addr,
old_val1, old_val2, new_val1, new_val2);
}
# define AO_HAVE_compare_double_and_swap_double_full
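/* Conceptually, the emulation guards a plain 16-byte compare-and-store  */
/* with a test-and-set spin lock, roughly as sketched below (the real    */
/* code in atomic_ops.c is more involved); that is also why it is not    */
/* atomic with respect to updates performed by the other primitives.     */
/* The example_* names are hypothetical.                                 */
#if 0 /* example only */
static AO_TS_t example_dcas_lock = AO_TS_INITIALIZER;

AO_INLINE int
example_dcas_emulation(volatile AO_double_t *addr,
                       AO_t old_val1, AO_t old_val2,
                       AO_t new_val1, AO_t new_val2)
{
  int result = 0;

  while (AO_test_and_set_acquire(&example_dcas_lock) == AO_TS_SET) {
    /* spin */
  }
  if (addr->AO_val1 == old_val1 && addr->AO_val2 == old_val2) {
    addr->AO_val1 = new_val1;
    addr->AO_val2 = new_val2;
    result = 1;
  }
  AO_CLEAR(&example_dcas_lock);
  return result;
}
#endif /* example only */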
#endif /* x86_64 && !ILP32 && CAS_EMULATION && !AO_CMPXCHG16B_AVAILABLE */
#ifdef AO_GCC_ATOMIC_TEST_AND_SET
# include "generic.h"
#endif
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_SKIPATOMIC_double_compare_and_swap_ANY
#undef AO_SKIPATOMIC_double_load
#undef AO_SKIPATOMIC_double_load_acquire
#undef AO_SKIPATOMIC_double_store
#undef AO_SKIPATOMIC_double_store_release