
gc: fix msvc not using libatomic_ops (#15418)

Authored by Emily Hudson on 2022-08-14 11:16:52 +01:00; committed by GitHub
parent 8f98f1db9e
commit 90d9b200f9
78 changed files with 18859 additions and 2 deletions

thirdparty/libatomic_ops/LICENSE vendored Normal file

@@ -0,0 +1,76 @@
MIT License (core library) / GPL-2.0 (gpl extension library)
Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
Copyright (c) 2005, 2007 Thiemo Seufer
Copyright (c) 2007 by NEC LE-IT. All rights reserved.
Copyright (c) 2008-2022 Ivan Maidanski
Copyright (c) 2009 Bradley Smith
Copyright (c) 2009 by Takashi Yoshii. All rights reserved.
Our intent is to make it easy to use libatomic_ops, in both free and
proprietary software. Hence most of the code (core library) that we expect
to be linked into a client application is covered by an MIT or MIT-style
license. However, a few library routines (the gpl extension library) are
covered by the GNU General Public License. These are put into a separate
library file, libatomic_ops_gpl.a.
Most of the test code is covered by the GNU General Public License too.
The low-level (core) part of the library (libatomic_ops.a) is mostly covered
by the MIT license:
----------------------------------------
Copyright (c) ...
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------
Some files in the atomic_ops/sysdeps directory (part of core library) were
inherited in part from the Boehm-Demers-Weiser conservative garbage collector,
and are covered by its license, which is similar in spirit to the MIT license:
--------------------------------
Copyright (c) ...
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
Permission is hereby granted to use or copy this program
for any purpose, provided the above notices are retained on all copies.
Permission to modify the code and to distribute modified code is granted,
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
----------------------------------
A few files are covered by the GNU General Public License. (See file
"COPYING".) This applies only to the test code and the atomic_ops_gpl
portion of the library. Thus, atomic_ops_gpl should generally not be
linked into proprietary code. (This distinction was motivated by patent
considerations.)

thirdparty/libatomic_ops/atomic_ops.c vendored Normal file

@@ -0,0 +1,265 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Initialized data and out-of-line functions to support atomic_ops.h
* go here. Currently this is needed only for pthread-based atomics
* emulation, or for compare-and-swap emulation.
* Pthreads emulation isn't useful on a native Windows platform, and
* cas emulation is not needed. Thus we skip this on Windows.
*/
#if defined(HAVE_CONFIG_H)
# include "config.h"
#endif
#if (defined(__hexagon__) || defined(__native_client__)) \
&& !defined(AO_USE_NO_SIGNALS) && !defined(AO_USE_NANOSLEEP)
/* Hexagon QuRT does not have sigprocmask (but Hexagon does not need */
/* emulation, so it is OK not to bother about signals blocking). */
/* Since NaCl is not recognized by configure yet, we do it here. */
# define AO_USE_NO_SIGNALS
# define AO_USE_NANOSLEEP
#endif
#if defined(AO_USE_WIN32_PTHREADS) && !defined(AO_USE_NO_SIGNALS)
# define AO_USE_NO_SIGNALS
#endif
#if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__)) \
&& !defined(AO_USE_NO_SIGNALS) && !defined(_GNU_SOURCE)
# define _GNU_SOURCE 1
#endif
#undef AO_REQUIRE_CAS
#include "atomic_ops.h" /* Without cas emulation! */
#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__BORLANDC__) \
|| defined(AO_USE_NO_SIGNALS)
#ifndef AO_NO_PTHREADS
# include <pthread.h>
#endif
#ifndef AO_USE_NO_SIGNALS
# include <signal.h>
#endif
#ifdef AO_USE_NANOSLEEP
/* This requires _POSIX_TIMERS feature. */
# include <sys/time.h>
# include <time.h>
#elif defined(AO_USE_WIN32_PTHREADS)
# include <windows.h> /* for Sleep() */
#elif defined(_HPUX_SOURCE)
# include <sys/time.h>
#else
# include <sys/select.h>
#endif
#ifndef AO_HAVE_double_t
# include "atomic_ops/sysdeps/standard_ao_double_t.h"
#endif
/* Lock for pthreads-based implementation. */
#ifndef AO_NO_PTHREADS
pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
/*
* Out of line compare-and-swap emulation based on test and set.
*
* We use a small table of locks for different compare_and_swap locations.
* Before we perform a compare-and-swap, we grab the corresponding
* lock. Different locations may hash to the same lock, but since we
* never acquire more than one lock at a time, this can't deadlock.
* We explicitly disable signals while we perform this operation.
*
* TODO: Probably also support emulation based on Lamport
* locks, since we may not have test_and_set either.
*/
#define AO_HASH_SIZE 16
#define AO_HASH(x) (((unsigned long)(x) >> 12) & (AO_HASH_SIZE-1))
static AO_TS_t AO_locks[AO_HASH_SIZE] = {
AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
};
void AO_pause(int); /* defined below */
static void lock_ool(volatile AO_TS_t *l)
{
int i = 0;
while (AO_test_and_set_acquire(l) == AO_TS_SET)
AO_pause(++i);
}
AO_INLINE void lock(volatile AO_TS_t *l)
{
if (AO_EXPECT_FALSE(AO_test_and_set_acquire(l) == AO_TS_SET))
lock_ool(l);
}
AO_INLINE void unlock(volatile AO_TS_t *l)
{
AO_CLEAR(l);
}
#ifndef AO_USE_NO_SIGNALS
static sigset_t all_sigs;
static volatile AO_t initialized = 0;
static volatile AO_TS_t init_lock = AO_TS_INITIALIZER;
AO_INLINE void block_all_signals(sigset_t *old_sigs_ptr)
{
if (AO_EXPECT_FALSE(!AO_load_acquire(&initialized)))
{
lock(&init_lock);
if (!initialized)
sigfillset(&all_sigs);
unlock(&init_lock);
AO_store_release(&initialized, 1);
}
sigprocmask(SIG_BLOCK, &all_sigs, old_sigs_ptr);
/* Neither sigprocmask nor pthread_sigmask is 100% */
/* guaranteed to work here. Sigprocmask is not */
/* guaranteed to be thread-safe, and pthread_sigmask */
/* is not async-signal-safe. Under linuxthreads, */
/* sigprocmask may block some pthreads-internal */
/* signals. So long as we do that for short periods, */
/* we should be OK. */
}
#endif /* !AO_USE_NO_SIGNALS */
AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
AO_t fetched_val;
# ifndef AO_USE_NO_SIGNALS
sigset_t old_sigs;
block_all_signals(&old_sigs);
# endif
lock(my_lock);
fetched_val = *addr;
if (fetched_val == old_val)
*addr = new_val;
unlock(my_lock);
# ifndef AO_USE_NO_SIGNALS
sigprocmask(SIG_SETMASK, &old_sigs, NULL);
# endif
return fetched_val;
}
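/* Illustrative sketch, not part of the upstream file: how a client could */
/* drive the emulated fetch-CAS above, e.g. to increment a shared counter */
/* on a platform with no hardware CAS. The names below are hypothetical;  */
/* in practice clients call AO_fetch_compare_and_swap, which the headers  */
/* map to this emulation when AO_REQUIRE_CAS selects it.                  */
#if 0 /* example only */
static volatile AO_t example_counter = 0;

static void example_increment(void)
{
  AO_t old;
  do
    {
      old = example_counter;
    }
  while (AO_fetch_compare_and_swap_emulation(&example_counter, old, old + 1)
         != old); /* the CAS failed; retry with a fresh snapshot */
}
#endif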
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
int result;
# ifndef AO_USE_NO_SIGNALS
sigset_t old_sigs;
block_all_signals(&old_sigs);
# endif
lock(my_lock);
if (addr -> AO_val1 == old_val1 && addr -> AO_val2 == old_val2)
{
addr -> AO_val1 = new_val1;
addr -> AO_val2 = new_val2;
result = 1;
}
else
result = 0;
unlock(my_lock);
# ifndef AO_USE_NO_SIGNALS
sigprocmask(SIG_SETMASK, &old_sigs, NULL);
# endif
return result;
}
void AO_store_full_emulation(volatile AO_t *addr, AO_t val)
{
AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
lock(my_lock);
*addr = val;
unlock(my_lock);
}
#else /* Non-posix platform */
# include <windows.h>
# define AO_USE_WIN32_PTHREADS
/* define to use Sleep() */
extern int AO_non_posix_implementation_is_entirely_in_headers;
#endif
static AO_t spin_dummy = 1;
/* Spin for 2**n units. */
static void AO_spin(int n)
{
AO_t j = AO_load(&spin_dummy);
int i = 2 << n;
while (i-- > 0)
j += (j - 1) << 2;
/* Given 'spin_dummy' is initialized to 1, j is 1 after the loop. */
AO_store(&spin_dummy, j);
}
void AO_pause(int n)
{
if (n < 12)
AO_spin(n);
else
{
# ifdef AO_USE_NANOSLEEP
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = n > 28 ? 100000L * 1000 : 1L << (n - 2);
nanosleep(&ts, 0);
# elif defined(AO_USE_WIN32_PTHREADS)
Sleep(n > 28 ? 100 /* millis */
: n < 22 ? 1 : (DWORD)1 << (n - 22));
# else
struct timeval tv;
/* Short async-signal-safe sleep. */
int usec = n > 28 ? 100000 : 1 << (n - 12);
/* Use an intermediate variable (of int type) to avoid */
/* "shift followed by widening conversion" warning. */
tv.tv_sec = 0;
tv.tv_usec = usec;
(void)select(0, 0, 0, 0, &tv);
# endif
}
}

thirdparty/libatomic_ops/atomic_ops.h vendored Normal file

@@ -0,0 +1,481 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2008-2021 Ivan Maidanski
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef AO_ATOMIC_OPS_H
#define AO_ATOMIC_OPS_H
#include "atomic_ops/ao_version.h"
/* Define version numbers here to allow */
/* testing on build machines for cross-builds. */
#include <assert.h>
#include <stddef.h>
/* We define various atomic operations on memory in a */
/* machine-specific way. Unfortunately, this is complicated */
/* by the fact that these may or may not be combined with */
/* various memory barriers. Thus the actual operations we */
/* define have the form AO_<atomic-op>_<barrier>, for all */
/* plausible combinations of <atomic-op> and <barrier>. */
/* This of course results in a mild combinatorial explosion. */
/* To deal with it, we try to generate derived */
/* definitions for as many of the combinations as we can, as */
/* automatically as possible. */
/* */
/* Our assumption throughout is that the programmer will */
/* specify the least demanding operation and memory barrier */
/* that will guarantee correctness for the implementation. */
/* Our job is to find the least expensive way to implement it */
/* on the applicable hardware. In many cases that will */
/* involve, for example, a stronger memory barrier, or a */
/* combination of hardware primitives. */
/* */
/* Conventions: */
/* "plain" atomic operations are not guaranteed to include */
/* a barrier. The suffix in the name specifies the barrier */
/* type. Suffixes are: */
/* _release: Earlier operations may not be delayed past it. */
/* _acquire: Later operations may not move ahead of it. */
/* _read: Subsequent reads must follow this operation and */
/* preceding reads. */
/* _write: Earlier writes precede both this operation and */
/* later writes. */
/* _full: Ordered with respect to both earlier and later memory */
/* operations. */
/* _release_write: Ordered with respect to earlier writes. */
/* _acquire_read: Ordered with respect to later reads. */
/* */
/* Currently we try to define the following atomic memory */
/* operations, in combination with the above barriers: */
/* AO_nop */
/* AO_load */
/* AO_store */
/* AO_test_and_set (binary) */
/* AO_fetch_and_add */
/* AO_fetch_and_add1 */
/* AO_fetch_and_sub1 */
/* AO_and */
/* AO_or */
/* AO_xor */
/* AO_compare_and_swap */
/* AO_fetch_compare_and_swap */
/* */
/* Note that atomicity guarantees are valid only if both */
/* readers and writers use AO_ operations to access the */
/* shared value, while ordering constraints are intended to */
/* apply to all memory operations. If a location can potentially */
/* be accessed simultaneously from multiple threads, and one of */
/* those accesses may be a write access, then all such */
/* accesses to that location should be through AO_ primitives. */
/* However if AO_ operations enforce sufficient ordering to */
/* ensure that a location x cannot be accessed concurrently, */
/* or can only be read concurrently, then x can be accessed */
/* via ordinary references and assignments. */
/* */
/* AO_compare_and_swap takes an address and an expected old */
/* value and a new value, and returns an int. Non-zero result */
/* indicates that it succeeded. */
/* AO_fetch_compare_and_swap takes an address and an expected */
/* old value and a new value, and returns the real old value. */
/* The operation succeeded if and only if the expected old */
/* value matches the old value returned. */
/* */
/* Test_and_set takes an address, atomically replaces it by */
/* AO_TS_SET, and returns the prior value. */
/* An AO_TS_t location can be reset with the */
/* AO_CLEAR macro, which normally uses AO_store_release. */
/* AO_fetch_and_add takes an address and an AO_t increment */
/* value. The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */
/* are provided, since they allow faster implementations on */
/* some hardware. AO_and, AO_or, AO_xor do atomically and, or, */
/* xor (respectively) an AO_t value into a memory location, */
/* but do not provide access to the original. */
/* */
/* We expect this list to grow slowly over time. */
/* */
/* Note that AO_nop_full is a full memory barrier. */
/* */
/* Note that if some data is initialized with */
/* data.x = ...; data.y = ...; ... */
/* AO_store_release_write(&data_is_initialized, 1) */
/* then data is guaranteed to be initialized after the test */
/* if (AO_load_acquire_read(&data_is_initialized)) ... */
/* succeeds. Furthermore, this should generate near-optimal */
/* code on all common platforms. */
/* */
/* All operations operate on unsigned AO_t, which */
/* is the natural word size, and usually unsigned long. */
/* It is possible to check whether a particular operation op */
/* is available on a particular platform by checking whether */
/* AO_HAVE_op is defined. We make heavy use of these macros */
/* internally. */
/* The rest of this file basically has three sections: */
/* */
/* Some utility and default definitions. */
/* */
/* The architecture dependent section: */
/* This defines atomic operations that have direct hardware */
/* support on a particular platform, mostly by including the */
/* appropriate compiler- and hardware-dependent file. */
/* */
/* The synthesis section: */
/* This tries to define other atomic operations in terms of */
/* those that are explicitly available on the platform. */
/* This section is hardware independent. */
/* We make no attempt to synthesize operations in ways that */
/* effectively introduce locks, except for the debugging/demo */
/* pthread-based implementation at the beginning. A more */
/* realistic implementation that falls back to locks could be */
/* added as a higher layer. But that would sacrifice */
/* usability from signal handlers. */
/* The synthesis section is implemented almost entirely in */
/* atomic_ops/generalize.h. */
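/* Illustrative sketch, not part of the upstream header: the publication  */
/* pattern described above, written out. All names here are hypothetical. */
#if 0 /* example only */
struct example_data { int x; int y; };
static struct example_data data;
static volatile AO_t data_is_initialized = 0;

static void example_publish(void)
{
  data.x = 1;
  data.y = 2;
  /* Order the flag store after the data stores. */
  AO_store_release_write(&data_is_initialized, 1);
}

static void example_consume(void)
{
  if (AO_load_acquire_read(&data_is_initialized))
    {
      /* data.x and data.y are guaranteed to be initialized here. */
    }
}
#endif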
/* Some common defaults. Overridden for some architectures. */
#define AO_t size_t
/* The test_and_set primitive returns an AO_TS_VAL_t value. */
/* AO_TS_t is the type of an in-memory test-and-set location. */
#define AO_TS_INITIALIZER ((AO_TS_t)AO_TS_CLEAR)
/* Convenient internal macro to test version of GCC. */
#if defined(__GNUC__) && defined(__GNUC_MINOR__)
# define AO_GNUC_PREREQ(major, minor) \
((__GNUC__ << 16) + __GNUC_MINOR__ >= ((major) << 16) + (minor))
#else
# define AO_GNUC_PREREQ(major, minor) 0 /* false */
#endif
/* Convenient internal macro to test version of Clang. */
#if defined(__clang__) && defined(__clang_major__)
# define AO_CLANG_PREREQ(major, minor) \
((__clang_major__ << 16) + __clang_minor__ >= ((major) << 16) + (minor))
#else
# define AO_CLANG_PREREQ(major, minor) 0 /* false */
#endif
/* Platform-dependent stuff: */
#if (defined(__GNUC__) || defined(_MSC_VER) || defined(__INTEL_COMPILER) \
|| defined(__DMC__) || defined(__WATCOMC__)) && !defined(AO_NO_INLINE)
# define AO_INLINE static __inline
#elif defined(__sun) && !defined(AO_NO_INLINE)
# define AO_INLINE static inline
#else
# define AO_INLINE static
#endif
#if AO_GNUC_PREREQ(3, 0) && !defined(LINT2)
# define AO_EXPECT_FALSE(expr) __builtin_expect(expr, 0)
/* Equivalent to (expr) but predict that usually (expr) == 0. */
#else
# define AO_EXPECT_FALSE(expr) (expr)
#endif /* !__GNUC__ */
#if defined(__has_feature)
/* __has_feature() is supported. */
# if __has_feature(address_sanitizer)
# define AO_ADDRESS_SANITIZER
# endif
# if __has_feature(memory_sanitizer)
# define AO_MEMORY_SANITIZER
# endif
# if __has_feature(thread_sanitizer)
# define AO_THREAD_SANITIZER
# endif
#else
# ifdef __SANITIZE_ADDRESS__
/* GCC v4.8+ */
# define AO_ADDRESS_SANITIZER
# endif
#endif /* !__has_feature */
#ifndef AO_ATTR_NO_SANITIZE_MEMORY
# ifndef AO_MEMORY_SANITIZER
# define AO_ATTR_NO_SANITIZE_MEMORY /* empty */
# elif AO_CLANG_PREREQ(3, 8)
# define AO_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory")))
# else
# define AO_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
# endif
#endif /* !AO_ATTR_NO_SANITIZE_MEMORY */
#ifndef AO_ATTR_NO_SANITIZE_THREAD
# ifndef AO_THREAD_SANITIZER
# define AO_ATTR_NO_SANITIZE_THREAD /* empty */
# elif AO_CLANG_PREREQ(3, 8)
# define AO_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
# else
# define AO_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
# endif
#endif /* !AO_ATTR_NO_SANITIZE_THREAD */
#if (AO_GNUC_PREREQ(7, 5) || __STDC_VERSION__ >= 201112L) && !defined(LINT2)
# define AO_ALIGNOF_SUPPORTED 1
#endif
#ifdef AO_ALIGNOF_SUPPORTED
# define AO_ASSERT_ADDR_ALIGNED(addr) \
assert(((size_t)(addr) & (__alignof__(*(addr)) - 1)) == 0)
#else
# define AO_ASSERT_ADDR_ALIGNED(addr) \
assert(((size_t)(addr) & (sizeof(*(addr)) - 1)) == 0)
#endif /* !AO_ALIGNOF_SUPPORTED */
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
# define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
#elif defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
|| defined(__WATCOMC__)
# if defined(_AMD64_) || defined(_M_X64) || _MSC_VER >= 1400
# if defined(_WIN32_WCE)
/* # include <cmnintrin.h> */
# elif defined(_MSC_VER)
# include <intrin.h>
# endif
# pragma intrinsic(_ReadWriteBarrier)
# define AO_compiler_barrier() _ReadWriteBarrier()
/* We assume this does not generate a fence instruction. */
/* The documentation is a bit unclear. */
# else
# define AO_compiler_barrier() __asm { }
/* The preceding implementation may be preferable here too. */
/* But the documentation warns about VC++ 2003 and earlier. */
# endif
#elif defined(__INTEL_COMPILER)
# define AO_compiler_barrier() __memory_barrier()
/* FIXME: Too strong? IA64-only? */
#elif defined(_HPUX_SOURCE)
# if defined(__ia64)
# include <machine/sys/inline.h>
# define AO_compiler_barrier() _Asm_sched_fence()
# else
/* FIXME - We do not know how to do this. This is a guess. */
/* And probably a bad one. */
static volatile int AO_barrier_dummy;
# define AO_compiler_barrier() (void)(AO_barrier_dummy = AO_barrier_dummy)
# endif
#else
/* We conjecture that the following usually gives us the right */
/* semantics or an error. */
# define AO_compiler_barrier() asm("")
#endif
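/* Illustrative sketch, not part of the upstream header: AO_compiler_barrier */
/* only constrains compiler reordering, not the CPU; a hardware fence such   */
/* as AO_nop_full is needed for inter-processor ordering. Names are          */
/* hypothetical.                                                             */
#if 0 /* example only */
static int example_value;
static int example_ready;

static void example_store_pair(void)
{
  example_value = 42;
  AO_compiler_barrier(); /* the compiler may not move the stores across  */
  example_ready = 1;     /* this point, but the hardware still may       */
}
#endif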
#if defined(AO_USE_PTHREAD_DEFS)
# include "atomic_ops/sysdeps/generic_pthread.h"
#endif /* AO_USE_PTHREAD_DEFS */
#if (defined(__CC_ARM) || defined(__ARMCC__)) && !defined(__GNUC__) \
&& !defined(AO_USE_PTHREAD_DEFS)
# include "atomic_ops/sysdeps/armcc/arm_v6.h"
# define AO_GENERALIZE_TWICE
#endif
#if defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS) \
&& !defined(__INTEL_COMPILER)
# if defined(__i386__)
/* We don't define AO_USE_SYNC_CAS_BUILTIN for x86 here because */
/* it might require specifying additional options (like -march) */
/* or additional link libraries (if -march is not specified). */
# include "atomic_ops/sysdeps/gcc/x86.h"
# elif defined(__x86_64__)
# if AO_GNUC_PREREQ(4, 2) && !defined(AO_USE_SYNC_CAS_BUILTIN)
/* It is safe to use __sync CAS built-in on this architecture. */
# define AO_USE_SYNC_CAS_BUILTIN
# endif
# include "atomic_ops/sysdeps/gcc/x86.h"
# elif defined(__ia64__)
# include "atomic_ops/sysdeps/gcc/ia64.h"
# define AO_GENERALIZE_TWICE
# elif defined(__hppa__)
# include "atomic_ops/sysdeps/gcc/hppa.h"
# define AO_CAN_EMUL_CAS
# elif defined(__alpha__)
# include "atomic_ops/sysdeps/gcc/alpha.h"
# define AO_GENERALIZE_TWICE
# elif defined(__s390__)
# include "atomic_ops/sysdeps/gcc/s390.h"
# elif defined(__sparc__)
# include "atomic_ops/sysdeps/gcc/sparc.h"
# define AO_CAN_EMUL_CAS
# elif defined(__m68k__)
# include "atomic_ops/sysdeps/gcc/m68k.h"
# elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
|| defined(__powerpc64__) || defined(__ppc64__) || defined(_ARCH_PPC)
# include "atomic_ops/sysdeps/gcc/powerpc.h"
# elif defined(__aarch64__)
# include "atomic_ops/sysdeps/gcc/aarch64.h"
# define AO_CAN_EMUL_CAS
# elif defined(__arm__)
# include "atomic_ops/sysdeps/gcc/arm.h"
# define AO_CAN_EMUL_CAS
# elif defined(__cris__) || defined(CRIS)
# include "atomic_ops/sysdeps/gcc/cris.h"
# define AO_CAN_EMUL_CAS
# define AO_GENERALIZE_TWICE
# elif defined(__mips__)
# include "atomic_ops/sysdeps/gcc/mips.h"
# elif defined(__sh__) || defined(SH4)
# include "atomic_ops/sysdeps/gcc/sh.h"
# define AO_CAN_EMUL_CAS
# elif defined(__avr32__)
# include "atomic_ops/sysdeps/gcc/avr32.h"
# elif defined(__hexagon__)
# include "atomic_ops/sysdeps/gcc/hexagon.h"
# elif defined(__nios2__)
# include "atomic_ops/sysdeps/gcc/generic.h"
# define AO_CAN_EMUL_CAS
# elif defined(__riscv)
# include "atomic_ops/sysdeps/gcc/riscv.h"
# elif defined(__tile__)
# include "atomic_ops/sysdeps/gcc/tile.h"
# else /* etc. */
# include "atomic_ops/sysdeps/gcc/generic.h"
# endif
#endif /* __GNUC__ && !AO_USE_PTHREAD_DEFS */
#if (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__) \
&& !defined(AO_USE_PTHREAD_DEFS)
# if defined(__powerpc__) || defined(__powerpc) || defined(__ppc__) \
|| defined(__PPC__) || defined(_M_PPC) || defined(_ARCH_PPC) \
|| defined(_ARCH_PWR)
# include "atomic_ops/sysdeps/ibmc/powerpc.h"
# define AO_GENERALIZE_TWICE
# endif
#endif
#if defined(__INTEL_COMPILER) && !defined(AO_USE_PTHREAD_DEFS)
# if defined(__ia64__)
# include "atomic_ops/sysdeps/icc/ia64.h"
# define AO_GENERALIZE_TWICE
# endif
# if defined(__GNUC__)
/* Intel Compiler in GCC compatible mode */
# if defined(__i386__)
# include "atomic_ops/sysdeps/gcc/x86.h"
# endif /* __i386__ */
# if defined(__x86_64__)
# if (__INTEL_COMPILER > 1110) && !defined(AO_USE_SYNC_CAS_BUILTIN)
# define AO_USE_SYNC_CAS_BUILTIN
# endif
# include "atomic_ops/sysdeps/gcc/x86.h"
# endif /* __x86_64__ */
# endif
#endif
#if defined(_HPUX_SOURCE) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
# if defined(__ia64)
# include "atomic_ops/sysdeps/hpc/ia64.h"
# define AO_GENERALIZE_TWICE
# else
# include "atomic_ops/sysdeps/hpc/hppa.h"
# define AO_CAN_EMUL_CAS
# endif
#endif
#if defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
|| (defined(__WATCOMC__) && defined(__NT__))
# if defined(_AMD64_) || defined(_M_X64) || defined(_M_ARM64)
# include "atomic_ops/sysdeps/msftc/x86_64.h"
# elif defined(_M_IX86) || defined(x86)
# include "atomic_ops/sysdeps/msftc/x86.h"
# elif defined(_M_ARM) || defined(ARM) || defined(_ARM_)
# include "atomic_ops/sysdeps/msftc/arm.h"
# define AO_GENERALIZE_TWICE
# endif
#endif
#if defined(__sun) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
/* Note: use -DAO_USE_PTHREAD_DEFS if Sun CC does not handle inline asm. */
# if defined(__i386) || defined(__x86_64) || defined(__amd64)
# include "atomic_ops/sysdeps/sunc/x86.h"
# endif
#endif
#if !defined(__GNUC__) && (defined(sparc) || defined(__sparc)) \
&& !defined(AO_USE_PTHREAD_DEFS)
# include "atomic_ops/sysdeps/sunc/sparc.h"
# define AO_CAN_EMUL_CAS
#endif
#if (defined(AO_REQUIRE_CAS) && !defined(AO_HAVE_compare_and_swap) \
&& !defined(AO_HAVE_fetch_compare_and_swap) \
&& !defined(AO_HAVE_compare_and_swap_full) \
&& !defined(AO_HAVE_fetch_compare_and_swap_full) \
&& !defined(AO_HAVE_compare_and_swap_acquire) \
&& !defined(AO_HAVE_fetch_compare_and_swap_acquire)) || defined(CPPCHECK)
# if defined(AO_CAN_EMUL_CAS)
# include "atomic_ops/sysdeps/emul_cas.h"
# elif !defined(CPPCHECK)
# error Cannot implement AO_compare_and_swap_full on this architecture.
# endif
#endif /* AO_REQUIRE_CAS && !AO_HAVE_compare_and_swap ... */
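/* Illustrative sketch, not part of the upstream header: a client that */
/* needs compare-and-swap even where it must be emulated defines       */
/* AO_REQUIRE_CAS before including this header.                        */
#if 0 /* example only */
#define AO_REQUIRE_CAS
#include "atomic_ops.h"
#endif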
/* The most common way to clear a test-and-set location */
/* at the end of a critical section. */
#if defined(AO_AO_TS_T) && !defined(AO_HAVE_CLEAR)
# define AO_CLEAR(addr) AO_store_release((AO_TS_t *)(addr), AO_TS_CLEAR)
# define AO_HAVE_CLEAR
#endif
#if defined(AO_CHAR_TS_T) && !defined(AO_HAVE_CLEAR)
# define AO_CLEAR(addr) AO_char_store_release((AO_TS_t *)(addr), AO_TS_CLEAR)
# define AO_HAVE_CLEAR
#endif
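/* Illustrative sketch, not part of the upstream header: the usual  */
/* test-and-set spinlock built from AO_test_and_set_acquire and the */
/* AO_CLEAR macro defined above. The names are hypothetical.        */
#if 0 /* example only */
static AO_TS_t example_lock = AO_TS_INITIALIZER;

static void example_with_lock_held(void)
{
  while (AO_test_and_set_acquire(&example_lock) == AO_TS_SET)
    {
      /* spin: another thread holds the lock */
    }
  /* ... critical section ... */
  AO_CLEAR(&example_lock); /* normally an AO_store_release of AO_TS_CLEAR */
}
#endif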
/* The generalization section. */
#if !defined(AO_GENERALIZE_TWICE) && defined(AO_CAN_EMUL_CAS) \
&& !defined(AO_HAVE_compare_and_swap_full) \
&& !defined(AO_HAVE_fetch_compare_and_swap_full)
# define AO_GENERALIZE_TWICE
#endif
/* Theoretically we should repeatedly include atomic_ops/generalize.h. */
/* In fact, we observe that this converges after a small fixed number */
/* of iterations, usually one. */
#include "atomic_ops/generalize.h"
#if !defined(AO_GENERALIZE_TWICE) \
&& defined(AO_HAVE_compare_double_and_swap_double) \
&& (!defined(AO_HAVE_double_load) || !defined(AO_HAVE_double_store))
# define AO_GENERALIZE_TWICE
#endif
#ifdef AO_T_IS_INT
/* Included after the first generalization pass. */
# include "atomic_ops/sysdeps/ao_t_is_int.h"
# ifndef AO_GENERALIZE_TWICE
/* Always generalize again. */
# define AO_GENERALIZE_TWICE
# endif
#endif /* AO_T_IS_INT */
#ifdef AO_GENERALIZE_TWICE
# include "atomic_ops/generalize.h"
#endif
/* For compatibility with version 0.4 and earlier */
#define AO_TS_T AO_TS_t
#define AO_T AO_t
#define AO_TS_VAL AO_TS_VAL_t
#endif /* !AO_ATOMIC_OPS_H */

thirdparty/libatomic_ops/atomic_ops/ao_version.h vendored Normal file

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2011-2018 Ivan Maidanski
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef AO_ATOMIC_OPS_H
# error This file should not be included directly.
#endif
/* The policy regarding version numbers: development code has odd */
/* "minor" number (and "micro" part is 0); when development is finished */
/* and a release is prepared, "minor" number is incremented (keeping */
/* "micro" number still zero), whenever a defect is fixed a new release */
/* is prepared incrementing "micro" part to odd value (the most stable */
/* release has the biggest "micro" number). */
/* The version here should match that in configure.ac and README. */
#define AO_VERSION_MAJOR 7
#define AO_VERSION_MINOR 6
#define AO_VERSION_MICRO 12 /* 7.6.12 */
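/* Illustrative sketch, not part of the upstream header: a client could */
/* test these macros at compile time, e.g. to require release 7.6+.     */
#if 0 /* example only */
#if AO_VERSION_MAJOR < 7 \
    || (AO_VERSION_MAJOR == 7 && AO_VERSION_MINOR < 6)
# error libatomic_ops 7.6 or newer is required.
#endif
#endif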

File diff suppressed because it is too large

thirdparty/libatomic_ops/atomic_ops/generalize-small.template vendored Normal file

@@ -0,0 +1,852 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* XSIZE_compare_and_swap (based on fetch_compare_and_swap) */
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_full)
AO_INLINE int
AO_XSIZE_compare_and_swap_full(volatile XCTYPE *addr, XCTYPE old_val,
XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_full
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
AO_INLINE int
AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old_val,
XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_acquire
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_release)
AO_INLINE int
AO_XSIZE_compare_and_swap_release(volatile XCTYPE *addr, XCTYPE old_val,
XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_release
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_write)
AO_INLINE int
AO_XSIZE_compare_and_swap_write(volatile XCTYPE *addr, XCTYPE old_val,
XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_write
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_read)
AO_INLINE int
AO_XSIZE_compare_and_swap_read(volatile XCTYPE *addr, XCTYPE old_val,
XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_read
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap)
AO_INLINE int
AO_XSIZE_compare_and_swap(volatile XCTYPE *addr, XCTYPE old_val,
XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_release_write)
AO_INLINE int
AO_XSIZE_compare_and_swap_release_write(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_release_write(addr, old_val,
new_val) == old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_release_write
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read)
AO_INLINE int
AO_XSIZE_compare_and_swap_acquire_read(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_acquire_read(addr, old_val,
new_val) == old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_acquire_read
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read)
AO_INLINE int
AO_XSIZE_compare_and_swap_dd_acquire_read(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr, old_val,
new_val) == old_val;
}
# define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read
#endif
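/* Illustrative sketch, not part of the template: XSIZE and XCTYPE are    */
/* placeholders that the build substitutes once per operand size.         */
/* Assuming XSIZE=short and XCTYPE=unsigned short, the first block above  */
/* would expand to something like:                                        */
#if 0 /* example only; the real code is generated, not written by hand */
AO_INLINE int
AO_short_compare_and_swap_full(volatile unsigned short *addr,
                               unsigned short old_val,
                               unsigned short new_val)
{
  return AO_short_fetch_compare_and_swap_full(addr, old_val, new_val)
         == old_val;
}
# define AO_HAVE_short_compare_and_swap_full
#endif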
/* XSIZE_fetch_and_add */
/* We first try to implement fetch_and_add variants in terms of the */
/* corresponding compare_and_swap variants to minimize adding barriers. */
#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_fetch_and_add_full(volatile XCTYPE *addr, XCTYPE incr)
{
XCTYPE old;
do
{
old = *(XCTYPE *)addr;
}
while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
old + incr)));
return old;
}
# define AO_HAVE_XSIZE_fetch_and_add_full
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_fetch_and_add_acquire(volatile XCTYPE *addr, XCTYPE incr)
{
XCTYPE old;
do
{
old = *(XCTYPE *)addr;
}
while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_acquire(addr, old,
old + incr)));
return old;
}
# define AO_HAVE_XSIZE_fetch_and_add_acquire
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_release) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add_release)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_fetch_and_add_release(volatile XCTYPE *addr, XCTYPE incr)
{
XCTYPE old;
do
{
old = *(XCTYPE *)addr;
}
while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_release(addr, old,
old + incr)));
return old;
}
# define AO_HAVE_XSIZE_fetch_and_add_release
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_fetch_and_add(volatile XCTYPE *addr, XCTYPE incr)
{
XCTYPE old;
do
{
old = *(XCTYPE *)addr;
}
while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(addr, old,
old + incr)));
return old;
}
# define AO_HAVE_XSIZE_fetch_and_add
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_full)
# if !defined(AO_HAVE_XSIZE_fetch_and_add_release)
# define AO_XSIZE_fetch_and_add_release(addr, val) \
AO_XSIZE_fetch_and_add_full(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_release
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
# define AO_XSIZE_fetch_and_add_acquire(addr, val) \
AO_XSIZE_fetch_and_add_full(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_acquire
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_add_write)
# define AO_XSIZE_fetch_and_add_write(addr, val) \
AO_XSIZE_fetch_and_add_full(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_write
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_add_read)
# define AO_XSIZE_fetch_and_add_read(addr, val) \
AO_XSIZE_fetch_and_add_full(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_read
# endif
#endif /* AO_HAVE_XSIZE_fetch_and_add_full */
#if defined(AO_HAVE_XSIZE_fetch_and_add) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
AO_INLINE XCTYPE
AO_XSIZE_fetch_and_add_acquire(volatile XCTYPE *addr, XCTYPE incr)
{
XCTYPE result = AO_XSIZE_fetch_and_add(addr, incr);
AO_nop_full();
return result;
}
# define AO_HAVE_XSIZE_fetch_and_add_acquire
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add_release)
# define AO_XSIZE_fetch_and_add_release(addr, incr) \
(AO_nop_full(), AO_XSIZE_fetch_and_add(addr, incr))
# define AO_HAVE_XSIZE_fetch_and_add_release
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_release)
# define AO_XSIZE_fetch_and_add(addr, val) \
AO_XSIZE_fetch_and_add_release(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
# define AO_XSIZE_fetch_and_add(addr, val) \
AO_XSIZE_fetch_and_add_acquire(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_write)
# define AO_XSIZE_fetch_and_add(addr, val) \
AO_XSIZE_fetch_and_add_write(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_read)
# define AO_XSIZE_fetch_and_add(addr, val) \
AO_XSIZE_fetch_and_add_read(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \
&& defined(AO_HAVE_nop_full) && !defined(AO_HAVE_XSIZE_fetch_and_add_full)
# define AO_XSIZE_fetch_and_add_full(addr, val) \
(AO_nop_full(), AO_XSIZE_fetch_and_add_acquire(addr, val))
# define AO_HAVE_XSIZE_fetch_and_add_full
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_write)
# define AO_XSIZE_fetch_and_add_release_write(addr, val) \
AO_XSIZE_fetch_and_add_write(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_release)
# define AO_XSIZE_fetch_and_add_release_write(addr, val) \
AO_XSIZE_fetch_and_add_release(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_read)
# define AO_XSIZE_fetch_and_add_acquire_read(addr, val) \
AO_XSIZE_fetch_and_add_read(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_and_add_acquire)
# define AO_XSIZE_fetch_and_add_acquire_read(addr, val) \
AO_XSIZE_fetch_and_add_acquire(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read)
# define AO_XSIZE_fetch_and_add_dd_acquire_read(addr, val) \
AO_XSIZE_fetch_and_add_acquire_read(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_XSIZE_fetch_and_add)
# define AO_XSIZE_fetch_and_add_dd_acquire_read(addr, val) \
AO_XSIZE_fetch_and_add(addr, val)
# define AO_HAVE_XSIZE_fetch_and_add_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
/* XSIZE_fetch_and_add1 */
#if defined(AO_HAVE_XSIZE_fetch_and_add_full) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_full)
# define AO_XSIZE_fetch_and_add1_full(addr) \
AO_XSIZE_fetch_and_add_full(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1_full
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_release) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_release)
# define AO_XSIZE_fetch_and_add1_release(addr) \
AO_XSIZE_fetch_and_add_release(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1_release
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
# define AO_XSIZE_fetch_and_add1_acquire(addr) \
AO_XSIZE_fetch_and_add_acquire(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1_acquire
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_write) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_write)
# define AO_XSIZE_fetch_and_add1_write(addr) \
AO_XSIZE_fetch_and_add_write(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1_write
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_read) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_read)
# define AO_XSIZE_fetch_and_add1_read(addr) \
AO_XSIZE_fetch_and_add_read(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1_read
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write)
# define AO_XSIZE_fetch_and_add1_release_write(addr) \
AO_XSIZE_fetch_and_add_release_write(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1_release_write
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read)
# define AO_XSIZE_fetch_and_add1_acquire_read(addr) \
AO_XSIZE_fetch_and_add_acquire_read(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1_acquire_read
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1)
# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add(addr, 1)
# define AO_HAVE_XSIZE_fetch_and_add1
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add1_full)
# if !defined(AO_HAVE_XSIZE_fetch_and_add1_release)
# define AO_XSIZE_fetch_and_add1_release(addr) \
AO_XSIZE_fetch_and_add1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_release
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
# define AO_XSIZE_fetch_and_add1_acquire(addr) \
AO_XSIZE_fetch_and_add1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_acquire
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_add1_write)
# define AO_XSIZE_fetch_and_add1_write(addr) \
AO_XSIZE_fetch_and_add1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_write
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_add1_read)
# define AO_XSIZE_fetch_and_add1_read(addr) \
AO_XSIZE_fetch_and_add1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_read
# endif
#endif /* AO_HAVE_XSIZE_fetch_and_add1_full */
#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_release)
# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_release(addr)
# define AO_HAVE_XSIZE_fetch_and_add1
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_acquire(addr)
# define AO_HAVE_XSIZE_fetch_and_add1
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_write)
# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_write(addr)
# define AO_HAVE_XSIZE_fetch_and_add1
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add1) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_read)
# define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_read(addr)
# define AO_HAVE_XSIZE_fetch_and_add1
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add1_acquire) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_fetch_and_add1_full)
# define AO_XSIZE_fetch_and_add1_full(addr) \
(AO_nop_full(), AO_XSIZE_fetch_and_add1_acquire(addr))
# define AO_HAVE_XSIZE_fetch_and_add1_full
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_write)
# define AO_XSIZE_fetch_and_add1_release_write(addr) \
AO_XSIZE_fetch_and_add1_write(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_release)
# define AO_XSIZE_fetch_and_add1_release_write(addr) \
AO_XSIZE_fetch_and_add1_release(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_read)
# define AO_XSIZE_fetch_and_add1_acquire_read(addr) \
AO_XSIZE_fetch_and_add1_read(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_and_add1_acquire)
# define AO_XSIZE_fetch_and_add1_acquire_read(addr) \
AO_XSIZE_fetch_and_add1_acquire(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read)
# define AO_XSIZE_fetch_and_add1_dd_acquire_read(addr) \
AO_XSIZE_fetch_and_add1_acquire_read(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_XSIZE_fetch_and_add1)
# define AO_XSIZE_fetch_and_add1_dd_acquire_read(addr) \
AO_XSIZE_fetch_and_add1(addr)
# define AO_HAVE_XSIZE_fetch_and_add1_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
/* XSIZE_fetch_and_sub1 */
#if defined(AO_HAVE_XSIZE_fetch_and_add_full) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_full)
# define AO_XSIZE_fetch_and_sub1_full(addr) \
AO_XSIZE_fetch_and_add_full(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1_full
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_release) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
# define AO_XSIZE_fetch_and_sub1_release(addr) \
AO_XSIZE_fetch_and_add_release(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1_release
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
# define AO_XSIZE_fetch_and_sub1_acquire(addr) \
AO_XSIZE_fetch_and_add_acquire(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1_acquire
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_write) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
# define AO_XSIZE_fetch_and_sub1_write(addr) \
AO_XSIZE_fetch_and_add_write(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1_write
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_read) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
# define AO_XSIZE_fetch_and_sub1_read(addr) \
AO_XSIZE_fetch_and_add_read(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1_read
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write)
# define AO_XSIZE_fetch_and_sub1_release_write(addr) \
AO_XSIZE_fetch_and_add_release_write(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1_release_write
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read)
# define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \
AO_XSIZE_fetch_and_add_acquire_read(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_add) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1)
# define AO_XSIZE_fetch_and_sub1(addr) \
AO_XSIZE_fetch_and_add(addr, (XCTYPE)(-1))
# define AO_HAVE_XSIZE_fetch_and_sub1
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_sub1_full)
# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
# define AO_XSIZE_fetch_and_sub1_release(addr) \
AO_XSIZE_fetch_and_sub1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_release
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
# define AO_XSIZE_fetch_and_sub1_acquire(addr) \
AO_XSIZE_fetch_and_sub1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_acquire
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
# define AO_XSIZE_fetch_and_sub1_write(addr) \
AO_XSIZE_fetch_and_sub1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_write
# endif
# if !defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
# define AO_XSIZE_fetch_and_sub1_read(addr) \
AO_XSIZE_fetch_and_sub1_full(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_read
# endif
#endif /* AO_HAVE_XSIZE_fetch_and_sub1_full */
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_release(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_acquire(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_write(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
# define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_read(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1
#endif
#if defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_fetch_and_sub1_full)
# define AO_XSIZE_fetch_and_sub1_full(addr) \
(AO_nop_full(), AO_XSIZE_fetch_and_sub1_acquire(addr))
# define AO_HAVE_XSIZE_fetch_and_sub1_full
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_write)
# define AO_XSIZE_fetch_and_sub1_release_write(addr) \
AO_XSIZE_fetch_and_sub1_write(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_release)
# define AO_XSIZE_fetch_and_sub1_release_write(addr) \
AO_XSIZE_fetch_and_sub1_release(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_read)
# define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \
AO_XSIZE_fetch_and_sub1_read(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire)
# define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \
AO_XSIZE_fetch_and_sub1_acquire(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read)
# define AO_XSIZE_fetch_and_sub1_dd_acquire_read(addr) \
AO_XSIZE_fetch_and_sub1_acquire_read(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_XSIZE_fetch_and_sub1)
# define AO_XSIZE_fetch_and_sub1_dd_acquire_read(addr) \
AO_XSIZE_fetch_and_sub1(addr)
# define AO_HAVE_XSIZE_fetch_and_sub1_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
/* XSIZE_and */
#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
&& !defined(AO_HAVE_XSIZE_and_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_XSIZE_and_full(volatile XCTYPE *addr, XCTYPE value)
{
XCTYPE old;
do
{
old = *(XCTYPE *)addr;
}
while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
old & value)));
}
# define AO_HAVE_XSIZE_and_full
#endif
#if defined(AO_HAVE_XSIZE_and_full)
# if !defined(AO_HAVE_XSIZE_and_release)
# define AO_XSIZE_and_release(addr, val) AO_XSIZE_and_full(addr, val)
# define AO_HAVE_XSIZE_and_release
# endif
# if !defined(AO_HAVE_XSIZE_and_acquire)
# define AO_XSIZE_and_acquire(addr, val) AO_XSIZE_and_full(addr, val)
# define AO_HAVE_XSIZE_and_acquire
# endif
# if !defined(AO_HAVE_XSIZE_and_write)
# define AO_XSIZE_and_write(addr, val) AO_XSIZE_and_full(addr, val)
# define AO_HAVE_XSIZE_and_write
# endif
# if !defined(AO_HAVE_XSIZE_and_read)
# define AO_XSIZE_and_read(addr, val) AO_XSIZE_and_full(addr, val)
# define AO_HAVE_XSIZE_and_read
# endif
#endif /* AO_HAVE_XSIZE_and_full */
#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_release)
# define AO_XSIZE_and(addr, val) AO_XSIZE_and_release(addr, val)
# define AO_HAVE_XSIZE_and
#endif
#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_acquire)
# define AO_XSIZE_and(addr, val) AO_XSIZE_and_acquire(addr, val)
# define AO_HAVE_XSIZE_and
#endif
#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_write)
# define AO_XSIZE_and(addr, val) AO_XSIZE_and_write(addr, val)
# define AO_HAVE_XSIZE_and
#endif
#if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_read)
# define AO_XSIZE_and(addr, val) AO_XSIZE_and_read(addr, val)
# define AO_HAVE_XSIZE_and
#endif
#if defined(AO_HAVE_XSIZE_and_acquire) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_and_full)
# define AO_XSIZE_and_full(addr, val) \
(AO_nop_full(), AO_XSIZE_and_acquire(addr, val))
# define AO_HAVE_XSIZE_and_full
#endif
#if !defined(AO_HAVE_XSIZE_and_release_write) \
&& defined(AO_HAVE_XSIZE_and_write)
# define AO_XSIZE_and_release_write(addr, val) AO_XSIZE_and_write(addr, val)
# define AO_HAVE_XSIZE_and_release_write
#endif
#if !defined(AO_HAVE_XSIZE_and_release_write) \
&& defined(AO_HAVE_XSIZE_and_release)
# define AO_XSIZE_and_release_write(addr, val) AO_XSIZE_and_release(addr, val)
# define AO_HAVE_XSIZE_and_release_write
#endif
#if !defined(AO_HAVE_XSIZE_and_acquire_read) \
&& defined(AO_HAVE_XSIZE_and_read)
# define AO_XSIZE_and_acquire_read(addr, val) AO_XSIZE_and_read(addr, val)
# define AO_HAVE_XSIZE_and_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_and_acquire_read) \
&& defined(AO_HAVE_XSIZE_and_acquire)
# define AO_XSIZE_and_acquire_read(addr, val) AO_XSIZE_and_acquire(addr, val)
# define AO_HAVE_XSIZE_and_acquire_read
#endif
/* XSIZE_or */
#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
&& !defined(AO_HAVE_XSIZE_or_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_XSIZE_or_full(volatile XCTYPE *addr, XCTYPE value)
{
XCTYPE old;
do
{
old = *(XCTYPE *)addr;
}
while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
old | value)));
}
# define AO_HAVE_XSIZE_or_full
#endif
#if defined(AO_HAVE_XSIZE_or_full)
# if !defined(AO_HAVE_XSIZE_or_release)
# define AO_XSIZE_or_release(addr, val) AO_XSIZE_or_full(addr, val)
# define AO_HAVE_XSIZE_or_release
# endif
# if !defined(AO_HAVE_XSIZE_or_acquire)
# define AO_XSIZE_or_acquire(addr, val) AO_XSIZE_or_full(addr, val)
# define AO_HAVE_XSIZE_or_acquire
# endif
# if !defined(AO_HAVE_XSIZE_or_write)
# define AO_XSIZE_or_write(addr, val) AO_XSIZE_or_full(addr, val)
# define AO_HAVE_XSIZE_or_write
# endif
# if !defined(AO_HAVE_XSIZE_or_read)
# define AO_XSIZE_or_read(addr, val) AO_XSIZE_or_full(addr, val)
# define AO_HAVE_XSIZE_or_read
# endif
#endif /* AO_HAVE_XSIZE_or_full */
#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_release)
# define AO_XSIZE_or(addr, val) AO_XSIZE_or_release(addr, val)
# define AO_HAVE_XSIZE_or
#endif
#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_acquire)
# define AO_XSIZE_or(addr, val) AO_XSIZE_or_acquire(addr, val)
# define AO_HAVE_XSIZE_or
#endif
#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_write)
# define AO_XSIZE_or(addr, val) AO_XSIZE_or_write(addr, val)
# define AO_HAVE_XSIZE_or
#endif
#if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_read)
# define AO_XSIZE_or(addr, val) AO_XSIZE_or_read(addr, val)
# define AO_HAVE_XSIZE_or
#endif
#if defined(AO_HAVE_XSIZE_or_acquire) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_or_full)
# define AO_XSIZE_or_full(addr, val) \
(AO_nop_full(), AO_XSIZE_or_acquire(addr, val))
# define AO_HAVE_XSIZE_or_full
#endif
#if !defined(AO_HAVE_XSIZE_or_release_write) \
&& defined(AO_HAVE_XSIZE_or_write)
# define AO_XSIZE_or_release_write(addr, val) AO_XSIZE_or_write(addr, val)
# define AO_HAVE_XSIZE_or_release_write
#endif
#if !defined(AO_HAVE_XSIZE_or_release_write) \
&& defined(AO_HAVE_XSIZE_or_release)
# define AO_XSIZE_or_release_write(addr, val) AO_XSIZE_or_release(addr, val)
# define AO_HAVE_XSIZE_or_release_write
#endif
#if !defined(AO_HAVE_XSIZE_or_acquire_read) && defined(AO_HAVE_XSIZE_or_read)
# define AO_XSIZE_or_acquire_read(addr, val) AO_XSIZE_or_read(addr, val)
# define AO_HAVE_XSIZE_or_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_or_acquire_read) \
&& defined(AO_HAVE_XSIZE_or_acquire)
# define AO_XSIZE_or_acquire_read(addr, val) AO_XSIZE_or_acquire(addr, val)
# define AO_HAVE_XSIZE_or_acquire_read
#endif
/* XSIZE_xor */
#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
&& !defined(AO_HAVE_XSIZE_xor_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_XSIZE_xor_full(volatile XCTYPE *addr, XCTYPE value)
{
XCTYPE old;
do
{
old = *(XCTYPE *)addr;
}
while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old,
old ^ value)));
}
# define AO_HAVE_XSIZE_xor_full
#endif
#if defined(AO_HAVE_XSIZE_xor_full)
# if !defined(AO_HAVE_XSIZE_xor_release)
# define AO_XSIZE_xor_release(addr, val) AO_XSIZE_xor_full(addr, val)
# define AO_HAVE_XSIZE_xor_release
# endif
# if !defined(AO_HAVE_XSIZE_xor_acquire)
# define AO_XSIZE_xor_acquire(addr, val) AO_XSIZE_xor_full(addr, val)
# define AO_HAVE_XSIZE_xor_acquire
# endif
# if !defined(AO_HAVE_XSIZE_xor_write)
# define AO_XSIZE_xor_write(addr, val) AO_XSIZE_xor_full(addr, val)
# define AO_HAVE_XSIZE_xor_write
# endif
# if !defined(AO_HAVE_XSIZE_xor_read)
# define AO_XSIZE_xor_read(addr, val) AO_XSIZE_xor_full(addr, val)
# define AO_HAVE_XSIZE_xor_read
# endif
#endif /* AO_HAVE_XSIZE_xor_full */
#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_release)
# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_release(addr, val)
# define AO_HAVE_XSIZE_xor
#endif
#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_acquire)
# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_acquire(addr, val)
# define AO_HAVE_XSIZE_xor
#endif
#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_write)
# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_write(addr, val)
# define AO_HAVE_XSIZE_xor
#endif
#if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_read)
# define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_read(addr, val)
# define AO_HAVE_XSIZE_xor
#endif
#if defined(AO_HAVE_XSIZE_xor_acquire) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_xor_full)
# define AO_XSIZE_xor_full(addr, val) \
(AO_nop_full(), AO_XSIZE_xor_acquire(addr, val))
# define AO_HAVE_XSIZE_xor_full
#endif
#if !defined(AO_HAVE_XSIZE_xor_release_write) \
&& defined(AO_HAVE_XSIZE_xor_write)
# define AO_XSIZE_xor_release_write(addr, val) AO_XSIZE_xor_write(addr, val)
# define AO_HAVE_XSIZE_xor_release_write
#endif
#if !defined(AO_HAVE_XSIZE_xor_release_write) \
&& defined(AO_HAVE_XSIZE_xor_release)
# define AO_XSIZE_xor_release_write(addr, val) AO_XSIZE_xor_release(addr, val)
# define AO_HAVE_XSIZE_xor_release_write
#endif
#if !defined(AO_HAVE_XSIZE_xor_acquire_read) \
&& defined(AO_HAVE_XSIZE_xor_read)
# define AO_XSIZE_xor_acquire_read(addr, val) AO_XSIZE_xor_read(addr, val)
# define AO_HAVE_XSIZE_xor_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_xor_acquire_read) \
&& defined(AO_HAVE_XSIZE_xor_acquire)
# define AO_XSIZE_xor_acquire_read(addr, val) AO_XSIZE_xor_acquire(addr, val)
# define AO_HAVE_XSIZE_xor_acquire_read
#endif
/* XSIZE_and/or/xor_dd_acquire_read are meaningless. */

File diff suppressed because it is too large.

@ -0,0 +1,528 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* XSIZE_fetch_compare_and_swap */
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap_acquire(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
XCTYPE result = AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val);
AO_nop_full();
return result;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
# define AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) \
(AO_nop_full(), \
AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val))
# define AO_HAVE_XSIZE_fetch_compare_and_swap_release
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full)
# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
# define AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_release
# endif
# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
# define AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire
# endif
# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write)
# define AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_write
# endif
# if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read)
# define AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_read
# endif
#endif /* AO_HAVE_XSIZE_fetch_compare_and_swap_full */
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap
#endif
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap
#endif
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write)
# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap
#endif
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read)
# define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \
AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap
#endif
#if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full)
# define AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) \
(AO_nop_full(), \
AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val))
# define AO_HAVE_XSIZE_fetch_compare_and_swap_full
#endif
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write)
# define AO_XSIZE_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release)
# define AO_XSIZE_fetch_compare_and_swap_release_write(addr,old_val,new_val) \
AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_release_write
#endif
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read)
# define AO_XSIZE_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \
&& defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire)
# define AO_XSIZE_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \
AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read)
# define AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
AO_XSIZE_fetch_compare_and_swap_acquire_read(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_XSIZE_fetch_compare_and_swap)
# define AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \
AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val)
# define AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
/* XSIZE_compare_and_swap */
#if defined(AO_HAVE_XSIZE_compare_and_swap) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
AO_INLINE int
AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old,
XCTYPE new_val)
{
int result = AO_XSIZE_compare_and_swap(addr, old, new_val);
AO_nop_full();
return result;
}
# define AO_HAVE_XSIZE_compare_and_swap_acquire
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_release)
# define AO_XSIZE_compare_and_swap_release(addr, old, new_val) \
(AO_nop_full(), AO_XSIZE_compare_and_swap(addr, old, new_val))
# define AO_HAVE_XSIZE_compare_and_swap_release
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_full)
# if !defined(AO_HAVE_XSIZE_compare_and_swap_release)
# define AO_XSIZE_compare_and_swap_release(addr, old, new_val) \
AO_XSIZE_compare_and_swap_full(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_release
# endif
# if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
# define AO_XSIZE_compare_and_swap_acquire(addr, old, new_val) \
AO_XSIZE_compare_and_swap_full(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_acquire
# endif
# if !defined(AO_HAVE_XSIZE_compare_and_swap_write)
# define AO_XSIZE_compare_and_swap_write(addr, old, new_val) \
AO_XSIZE_compare_and_swap_full(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_write
# endif
# if !defined(AO_HAVE_XSIZE_compare_and_swap_read)
# define AO_XSIZE_compare_and_swap_read(addr, old, new_val) \
AO_XSIZE_compare_and_swap_full(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_read
# endif
#endif /* AO_HAVE_XSIZE_compare_and_swap_full */
#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_release)
# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
AO_XSIZE_compare_and_swap_release(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap
#endif
#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
AO_XSIZE_compare_and_swap_acquire(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap
#endif
#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_write)
# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
AO_XSIZE_compare_and_swap_write(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap
#endif
#if !defined(AO_HAVE_XSIZE_compare_and_swap) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_read)
# define AO_XSIZE_compare_and_swap(addr, old, new_val) \
AO_XSIZE_compare_and_swap_read(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_compare_and_swap_full)
# define AO_XSIZE_compare_and_swap_full(addr, old, new_val) \
(AO_nop_full(), \
AO_XSIZE_compare_and_swap_acquire(addr, old, new_val))
# define AO_HAVE_XSIZE_compare_and_swap_full
#endif
#if !defined(AO_HAVE_XSIZE_compare_and_swap_release_write) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_write)
# define AO_XSIZE_compare_and_swap_release_write(addr, old, new_val) \
AO_XSIZE_compare_and_swap_write(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_release_write
#endif
#if !defined(AO_HAVE_XSIZE_compare_and_swap_release_write) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_release)
# define AO_XSIZE_compare_and_swap_release_write(addr, old, new_val) \
AO_XSIZE_compare_and_swap_release(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_release_write
#endif
#if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_read)
# define AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val) \
AO_XSIZE_compare_and_swap_read(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_acquire_read
#endif
#if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) \
&& defined(AO_HAVE_XSIZE_compare_and_swap_acquire)
# define AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val) \
AO_XSIZE_compare_and_swap_acquire(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read)
# define AO_XSIZE_compare_and_swap_dd_acquire_read(addr, old, new_val) \
AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_XSIZE_compare_and_swap)
# define AO_XSIZE_compare_and_swap_dd_acquire_read(addr, old, new_val) \
AO_XSIZE_compare_and_swap(addr, old, new_val)
# define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
/* XSIZE_load */
#if defined(AO_HAVE_XSIZE_load_full) && !defined(AO_HAVE_XSIZE_load_acquire)
# define AO_XSIZE_load_acquire(addr) AO_XSIZE_load_full(addr)
# define AO_HAVE_XSIZE_load_acquire
#endif
#if defined(AO_HAVE_XSIZE_load_acquire) && !defined(AO_HAVE_XSIZE_load)
# define AO_XSIZE_load(addr) AO_XSIZE_load_acquire(addr)
# define AO_HAVE_XSIZE_load
#endif
#if defined(AO_HAVE_XSIZE_load_full) && !defined(AO_HAVE_XSIZE_load_read)
# define AO_XSIZE_load_read(addr) AO_XSIZE_load_full(addr)
# define AO_HAVE_XSIZE_load_read
#endif
#if !defined(AO_HAVE_XSIZE_load_acquire_read) \
&& defined(AO_HAVE_XSIZE_load_acquire)
# define AO_XSIZE_load_acquire_read(addr) AO_XSIZE_load_acquire(addr)
# define AO_HAVE_XSIZE_load_acquire_read
#endif
#if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_load_acquire)
AO_INLINE XCTYPE
AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
{
XCTYPE result = AO_XSIZE_load(addr);
/* Acquire barrier would be useless, since the load could be delayed */
/* beyond it. */
AO_nop_full();
return result;
}
# define AO_HAVE_XSIZE_load_acquire
#endif
#if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_read) \
&& !defined(AO_HAVE_XSIZE_load_read)
AO_INLINE XCTYPE
AO_XSIZE_load_read(const volatile XCTYPE *addr)
{
XCTYPE result = AO_XSIZE_load(addr);
AO_nop_read();
return result;
}
# define AO_HAVE_XSIZE_load_read
#endif
#if defined(AO_HAVE_XSIZE_load_acquire) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_load_full)
# define AO_XSIZE_load_full(addr) (AO_nop_full(), AO_XSIZE_load_acquire(addr))
# define AO_HAVE_XSIZE_load_full
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_read) \
&& !defined(AO_HAVE_XSIZE_load_read)
# define AO_XSIZE_CAS_BASED_LOAD_READ
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_load_read(const volatile XCTYPE *addr)
{
XCTYPE result;
do {
result = *(const XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_read(
(volatile XCTYPE *)addr,
result, result)));
return result;
}
# define AO_HAVE_XSIZE_load_read
#endif
#if !defined(AO_HAVE_XSIZE_load_acquire_read) \
&& defined(AO_HAVE_XSIZE_load_read)
# define AO_XSIZE_load_acquire_read(addr) AO_XSIZE_load_read(addr)
# define AO_HAVE_XSIZE_load_acquire_read
#endif
#if defined(AO_HAVE_XSIZE_load_acquire_read) && !defined(AO_HAVE_XSIZE_load) \
&& (!defined(AO_XSIZE_CAS_BASED_LOAD_READ) \
|| !defined(AO_HAVE_XSIZE_compare_and_swap))
# define AO_XSIZE_load(addr) AO_XSIZE_load_acquire_read(addr)
# define AO_HAVE_XSIZE_load
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
&& !defined(AO_HAVE_XSIZE_load_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_load_full(const volatile XCTYPE *addr)
{
XCTYPE result;
do {
result = *(const XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(
(volatile XCTYPE *)addr,
result, result)));
return result;
}
# define AO_HAVE_XSIZE_load_full
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \
&& !defined(AO_HAVE_XSIZE_load_acquire)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
{
XCTYPE result;
do {
result = *(const XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_acquire(
(volatile XCTYPE *)addr,
result, result)));
return result;
}
# define AO_HAVE_XSIZE_load_acquire
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap) && !defined(AO_HAVE_XSIZE_load)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE XCTYPE
AO_XSIZE_load(const volatile XCTYPE *addr)
{
XCTYPE result;
do {
result = *(const XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(
(volatile XCTYPE *)addr,
result, result)));
return result;
}
# define AO_HAVE_XSIZE_load
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_XSIZE_load_acquire_read)
# define AO_XSIZE_load_dd_acquire_read(addr) \
AO_XSIZE_load_acquire_read(addr)
# define AO_HAVE_XSIZE_load_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_XSIZE_load)
# define AO_XSIZE_load_dd_acquire_read(addr) AO_XSIZE_load(addr)
# define AO_HAVE_XSIZE_load_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
/* XSIZE_store */
#if defined(AO_HAVE_XSIZE_store_full) && !defined(AO_HAVE_XSIZE_store_release)
# define AO_XSIZE_store_release(addr, val) AO_XSIZE_store_full(addr, val)
# define AO_HAVE_XSIZE_store_release
#endif
#if defined(AO_HAVE_XSIZE_store_release) && !defined(AO_HAVE_XSIZE_store)
# define AO_XSIZE_store(addr, val) AO_XSIZE_store_release(addr, val)
# define AO_HAVE_XSIZE_store
#endif
#if defined(AO_HAVE_XSIZE_store_full) && !defined(AO_HAVE_XSIZE_store_write)
# define AO_XSIZE_store_write(addr, val) AO_XSIZE_store_full(addr, val)
# define AO_HAVE_XSIZE_store_write
#endif
#if defined(AO_HAVE_XSIZE_store_release) \
&& !defined(AO_HAVE_XSIZE_store_release_write)
# define AO_XSIZE_store_release_write(addr, val) \
AO_XSIZE_store_release(addr, val)
# define AO_HAVE_XSIZE_store_release_write
#endif
#if defined(AO_HAVE_XSIZE_store_write) && !defined(AO_HAVE_XSIZE_store)
# define AO_XSIZE_store(addr, val) AO_XSIZE_store_write(addr, val)
# define AO_HAVE_XSIZE_store
#endif
#if defined(AO_HAVE_XSIZE_store) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_store_release)
# define AO_XSIZE_store_release(addr, val) \
(AO_nop_full(), AO_XSIZE_store(addr, val))
# define AO_HAVE_XSIZE_store_release
#endif
#if defined(AO_HAVE_XSIZE_store) && defined(AO_HAVE_nop_write) \
&& !defined(AO_HAVE_XSIZE_store_write)
# define AO_XSIZE_store_write(addr, val) \
(AO_nop_write(), AO_XSIZE_store(addr, val))
# define AO_HAVE_XSIZE_store_write
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_write) \
&& !defined(AO_HAVE_XSIZE_store_write)
AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_XSIZE_store_write(volatile XCTYPE *addr, XCTYPE new_val)
{
XCTYPE old_val;
do {
old_val = *(XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_write(addr, old_val,
new_val)));
}
# define AO_HAVE_XSIZE_store_write
#endif
#if defined(AO_HAVE_XSIZE_store_write) \
&& !defined(AO_HAVE_XSIZE_store_release_write)
# define AO_XSIZE_store_release_write(addr, val) \
AO_XSIZE_store_write(addr, val)
# define AO_HAVE_XSIZE_store_release_write
#endif
#if defined(AO_HAVE_XSIZE_store_release) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_XSIZE_store_full)
# define AO_XSIZE_store_full(addr, val) \
(AO_XSIZE_store_release(addr, val), \
AO_nop_full())
# define AO_HAVE_XSIZE_store_full
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap) && !defined(AO_HAVE_XSIZE_store)
AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val)
{
XCTYPE old_val;
do {
old_val = *(XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(addr,
old_val, new_val)));
}
# define AO_HAVE_XSIZE_store
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_release) \
&& !defined(AO_HAVE_XSIZE_store_release)
AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val)
{
XCTYPE old_val;
do {
old_val = *(XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_release(addr, old_val,
new_val)));
}
# define AO_HAVE_XSIZE_store_release
#endif
#if defined(AO_HAVE_XSIZE_compare_and_swap_full) \
&& !defined(AO_HAVE_XSIZE_store_full)
AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_XSIZE_store_full(volatile XCTYPE *addr, XCTYPE new_val)
{
XCTYPE old_val;
do {
old_val = *(XCTYPE *)addr;
} while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old_val,
new_val)));
}
# define AO_HAVE_XSIZE_store_full
#endif
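One detail in the template above deserves a note: the CAS-based loads install the value they just read back over itself. A successful compare-and-swap then serves two purposes at once, supplying the barrier implied by the suffix and proving that the plainly-read value was still current at that point. Expanded for the plain AO_t instantiation (a sketch under the same substitution assumptions as before):

AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE AO_t
AO_load_full(const volatile AO_t *addr)
{
  AO_t result;
  do {
    result = *(const AO_t *)addr;              /* unordered read */
  } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full((volatile AO_t *)addr,
                                                     result, result)));
  return result;                    /* value was current at the barrier point */
}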


@ -0,0 +1,729 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Generalize atomic operations for atomic_ops.h.
* Should not be included directly.
*
* We make no attempt to define useless operations, such as
* AO_nop_acquire
* AO_nop_release
*
* We have also so far neglected to define some others, which
* do not appear likely to be useful, e.g. stores with acquire
* or read barriers.
*
* This file is sometimes included twice by atomic_ops.h.
* All definitions include explicit checks that we are not replacing
* an earlier definition. In general, more desirable expansions
* appear earlier so that we are more likely to use them.
*
* We only make safe generalizations, except that by default we define
* the ...dd_acquire_read operations to be equivalent to those without
* a barrier. On platforms for which this is unsafe, the platform-specific
* file must define AO_NO_DD_ORDERING.
*/
#ifndef AO_ATOMIC_OPS_H
# error This file should not be included directly.
#endif
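To make the preference ordering concrete, here is a minimal client-side sketch of the classic message-passing idiom; it assumes only that the platform provides some acquire/release or full-barrier primitive, from which the rules in this file derive the rest (the variable names are illustrative):

#include <atomic_ops.h>

static volatile AO_t payload;  /* data handed from producer to consumer */
static volatile AO_t ready;    /* publication flag */

void produce(void)
{
  AO_store(&payload, 42);
  AO_store_release(&ready, 1);    /* release: payload visible before flag */
}

int consume(void)
{
  if (AO_load_acquire(&ready))    /* acquire: pairs with the release above */
    return (int)AO_load(&payload);
  return -1;                      /* not published yet */
}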
/* Generate test_and_set_full, if necessary and possible. */
#if !defined(AO_HAVE_test_and_set) && !defined(AO_HAVE_test_and_set_release) \
&& !defined(AO_HAVE_test_and_set_acquire) \
&& !defined(AO_HAVE_test_and_set_read) \
&& !defined(AO_HAVE_test_and_set_full)
/* Emulate AO_compare_and_swap() via AO_fetch_compare_and_swap(). */
# if defined(AO_HAVE_fetch_compare_and_swap) \
&& !defined(AO_HAVE_compare_and_swap)
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
return AO_fetch_compare_and_swap(addr, old_val, new_val) == old_val;
}
# define AO_HAVE_compare_and_swap
# endif
# if defined(AO_HAVE_fetch_compare_and_swap_full) \
&& !defined(AO_HAVE_compare_and_swap_full)
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
return AO_fetch_compare_and_swap_full(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_compare_and_swap_full
# endif
# if defined(AO_HAVE_fetch_compare_and_swap_acquire) \
&& !defined(AO_HAVE_compare_and_swap_acquire)
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
return AO_fetch_compare_and_swap_acquire(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_compare_and_swap_acquire
# endif
# if defined(AO_HAVE_fetch_compare_and_swap_release) \
&& !defined(AO_HAVE_compare_and_swap_release)
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
return AO_fetch_compare_and_swap_release(addr, old_val, new_val)
== old_val;
}
# define AO_HAVE_compare_and_swap_release
# endif
# if defined(AO_CHAR_TS_T)
# define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) \
AO_char_compare_and_swap_full(a,o,n)
# define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \
AO_char_compare_and_swap_acquire(a,o,n)
# define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \
AO_char_compare_and_swap_release(a,o,n)
# define AO_TS_COMPARE_AND_SWAP(a,o,n) AO_char_compare_and_swap(a,o,n)
# endif
# if defined(AO_AO_TS_T)
# define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) AO_compare_and_swap_full(a,o,n)
# define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \
AO_compare_and_swap_acquire(a,o,n)
# define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \
AO_compare_and_swap_release(a,o,n)
# define AO_TS_COMPARE_AND_SWAP(a,o,n) AO_compare_and_swap(a,o,n)
# endif
# if (defined(AO_AO_TS_T) && defined(AO_HAVE_compare_and_swap_full)) \
|| (defined(AO_CHAR_TS_T) && defined(AO_HAVE_char_compare_and_swap_full))
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
if (AO_TS_COMPARE_AND_SWAP_FULL(addr, AO_TS_CLEAR, AO_TS_SET))
return AO_TS_CLEAR;
else
return AO_TS_SET;
}
# define AO_HAVE_test_and_set_full
# endif /* AO_HAVE_compare_and_swap_full */
# if (defined(AO_AO_TS_T) && defined(AO_HAVE_compare_and_swap_acquire)) \
|| (defined(AO_CHAR_TS_T) \
&& defined(AO_HAVE_char_compare_and_swap_acquire))
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr)
{
if (AO_TS_COMPARE_AND_SWAP_ACQUIRE(addr, AO_TS_CLEAR, AO_TS_SET))
return AO_TS_CLEAR;
else
return AO_TS_SET;
}
# define AO_HAVE_test_and_set_acquire
# endif /* AO_HAVE_compare_and_swap_acquire */
# if (defined(AO_AO_TS_T) && defined(AO_HAVE_compare_and_swap_release)) \
|| (defined(AO_CHAR_TS_T) \
&& defined(AO_HAVE_char_compare_and_swap_release))
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr)
{
if (AO_TS_COMPARE_AND_SWAP_RELEASE(addr, AO_TS_CLEAR, AO_TS_SET))
return AO_TS_CLEAR;
else
return AO_TS_SET;
}
# define AO_HAVE_test_and_set_release
# endif /* AO_HAVE_compare_and_swap_release */
# if (defined(AO_AO_TS_T) && defined(AO_HAVE_compare_and_swap)) \
|| (defined(AO_CHAR_TS_T) && defined(AO_HAVE_char_compare_and_swap))
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
if (AO_TS_COMPARE_AND_SWAP(addr, AO_TS_CLEAR, AO_TS_SET))
return AO_TS_CLEAR;
else
return AO_TS_SET;
}
# define AO_HAVE_test_and_set
# endif /* AO_HAVE_compare_and_swap */
#endif /* No prior test and set */
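The payoff of synthesizing test-and-set from whatever compare-and-swap flavor exists is that clients can build the classic test-and-set spinlock unconditionally. A minimal sketch using the public API (the lock name and busy-wait policy are illustrative):

#include <atomic_ops.h>

static volatile AO_TS_t lock = AO_TS_INITIALIZER;

void spin_lock(void)
{
  while (AO_test_and_set_full(&lock) == AO_TS_SET)
    {
      /* Busy-wait; a production lock would back off or yield here. */
    }
}

void spin_unlock(void)
{
  AO_CLEAR(&lock);  /* store the cleared state with release semantics */
}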
/* Nop */
#if !defined(AO_HAVE_nop)
AO_INLINE void AO_nop(void) {}
# define AO_HAVE_nop
#endif
#if defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_nop_full)
AO_INLINE void
AO_nop_full(void)
{
AO_TS_t dummy = AO_TS_INITIALIZER;
AO_test_and_set_full(&dummy);
}
# define AO_HAVE_nop_full
#endif
#if defined(AO_HAVE_nop_acquire) && !defined(CPPCHECK)
# error AO_nop_acquire is useless: do not define.
#endif
#if defined(AO_HAVE_nop_release) && !defined(CPPCHECK)
# error AO_nop_release is useless: do not define.
#endif
#if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_read)
# define AO_nop_read() AO_nop_full()
# define AO_HAVE_nop_read
#endif
#if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_write)
# define AO_nop_write() AO_nop_full()
# define AO_HAVE_nop_write
#endif
/* Test_and_set */
#if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_test_and_set_release)
# define AO_test_and_set_release(addr) (AO_nop_full(), AO_test_and_set(addr))
# define AO_HAVE_test_and_set_release
#endif
#if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_test_and_set_acquire)
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr)
{
AO_TS_VAL_t result = AO_test_and_set(addr);
AO_nop_full();
return result;
}
# define AO_HAVE_test_and_set_acquire
#endif
#if defined(AO_HAVE_test_and_set_full)
# if !defined(AO_HAVE_test_and_set_release)
# define AO_test_and_set_release(addr) AO_test_and_set_full(addr)
# define AO_HAVE_test_and_set_release
# endif
# if !defined(AO_HAVE_test_and_set_acquire)
# define AO_test_and_set_acquire(addr) AO_test_and_set_full(addr)
# define AO_HAVE_test_and_set_acquire
# endif
# if !defined(AO_HAVE_test_and_set_write)
# define AO_test_and_set_write(addr) AO_test_and_set_full(addr)
# define AO_HAVE_test_and_set_write
# endif
# if !defined(AO_HAVE_test_and_set_read)
# define AO_test_and_set_read(addr) AO_test_and_set_full(addr)
# define AO_HAVE_test_and_set_read
# endif
#endif /* AO_HAVE_test_and_set_full */
#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_release)
# define AO_test_and_set(addr) AO_test_and_set_release(addr)
# define AO_HAVE_test_and_set
#endif
#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_acquire)
# define AO_test_and_set(addr) AO_test_and_set_acquire(addr)
# define AO_HAVE_test_and_set
#endif
#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_write)
# define AO_test_and_set(addr) AO_test_and_set_write(addr)
# define AO_HAVE_test_and_set
#endif
#if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_read)
# define AO_test_and_set(addr) AO_test_and_set_read(addr)
# define AO_HAVE_test_and_set
#endif
#if defined(AO_HAVE_test_and_set_acquire) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_test_and_set_full)
# define AO_test_and_set_full(addr) \
(AO_nop_full(), AO_test_and_set_acquire(addr))
# define AO_HAVE_test_and_set_full
#endif
#if !defined(AO_HAVE_test_and_set_release_write) \
&& defined(AO_HAVE_test_and_set_write)
# define AO_test_and_set_release_write(addr) AO_test_and_set_write(addr)
# define AO_HAVE_test_and_set_release_write
#endif
#if !defined(AO_HAVE_test_and_set_release_write) \
&& defined(AO_HAVE_test_and_set_release)
# define AO_test_and_set_release_write(addr) AO_test_and_set_release(addr)
# define AO_HAVE_test_and_set_release_write
#endif
#if !defined(AO_HAVE_test_and_set_acquire_read) \
&& defined(AO_HAVE_test_and_set_read)
# define AO_test_and_set_acquire_read(addr) AO_test_and_set_read(addr)
# define AO_HAVE_test_and_set_acquire_read
#endif
#if !defined(AO_HAVE_test_and_set_acquire_read) \
&& defined(AO_HAVE_test_and_set_acquire)
# define AO_test_and_set_acquire_read(addr) AO_test_and_set_acquire(addr)
# define AO_HAVE_test_and_set_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_test_and_set_acquire_read)
# define AO_test_and_set_dd_acquire_read(addr) \
AO_test_and_set_acquire_read(addr)
# define AO_HAVE_test_and_set_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_test_and_set)
# define AO_test_and_set_dd_acquire_read(addr) AO_test_and_set(addr)
# define AO_HAVE_test_and_set_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
#include "generalize-small.h"
#include "generalize-arithm.h"
/* Compare_double_and_swap_double based on double_compare_and_swap. */
#ifdef AO_HAVE_DOUBLE_PTR_STORAGE
# if defined(AO_HAVE_double_compare_and_swap) \
&& !defined(AO_HAVE_compare_double_and_swap_double)
AO_INLINE int
AO_compare_double_and_swap_double(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
AO_double_t old_w;
AO_double_t new_w;
old_w.AO_val1 = old_val1;
old_w.AO_val2 = old_val2;
new_w.AO_val1 = new_val1;
new_w.AO_val2 = new_val2;
return AO_double_compare_and_swap(addr, old_w, new_w);
}
# define AO_HAVE_compare_double_and_swap_double
# endif
# if defined(AO_HAVE_double_compare_and_swap_acquire) \
&& !defined(AO_HAVE_compare_double_and_swap_double_acquire)
AO_INLINE int
AO_compare_double_and_swap_double_acquire(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
AO_double_t old_w;
AO_double_t new_w;
old_w.AO_val1 = old_val1;
old_w.AO_val2 = old_val2;
new_w.AO_val1 = new_val1;
new_w.AO_val2 = new_val2;
return AO_double_compare_and_swap_acquire(addr, old_w, new_w);
}
# define AO_HAVE_compare_double_and_swap_double_acquire
# endif
# if defined(AO_HAVE_double_compare_and_swap_release) \
&& !defined(AO_HAVE_compare_double_and_swap_double_release)
AO_INLINE int
AO_compare_double_and_swap_double_release(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
AO_double_t old_w;
AO_double_t new_w;
old_w.AO_val1 = old_val1;
old_w.AO_val2 = old_val2;
new_w.AO_val1 = new_val1;
new_w.AO_val2 = new_val2;
return AO_double_compare_and_swap_release(addr, old_w, new_w);
}
# define AO_HAVE_compare_double_and_swap_double_release
# endif
# if defined(AO_HAVE_double_compare_and_swap_full) \
&& !defined(AO_HAVE_compare_double_and_swap_double_full)
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
AO_double_t old_w;
AO_double_t new_w;
old_w.AO_val1 = old_val1;
old_w.AO_val2 = old_val2;
new_w.AO_val1 = new_val1;
new_w.AO_val2 = new_val2;
return AO_double_compare_and_swap_full(addr, old_w, new_w);
}
# define AO_HAVE_compare_double_and_swap_double_full
# endif
#endif /* AO_HAVE_DOUBLE_PTR_STORAGE */
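Where double-width CAS exists, the typical client use is an ABA-resistant pointer-plus-counter pair. A sketch built on the wide primitive generalized above (the slot layout, with the pointer in AO_val1 and a version counter in AO_val2, is illustrative):

#include <atomic_ops.h>

/* Attempt to replace the pointer while bumping the version; returns nonzero
   on success. The initial snapshot is a plain read and may be stale or torn;
   the full-barrier CAS validates it. */
int try_replace(volatile AO_double_t *slot, AO_t new_ptr)
{
  AO_double_t snap = *(AO_double_t *)slot;
  return AO_compare_double_and_swap_double_full(slot,
                                                snap.AO_val1, snap.AO_val2,
                                                new_ptr, snap.AO_val2 + 1);
}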
/* Compare_double_and_swap_double */
#if defined(AO_HAVE_compare_double_and_swap_double) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_compare_double_and_swap_double_acquire)
AO_INLINE int
AO_compare_double_and_swap_double_acquire(volatile AO_double_t *addr,
AO_t o1, AO_t o2,
AO_t n1, AO_t n2)
{
int result = AO_compare_double_and_swap_double(addr, o1, o2, n1, n2);
AO_nop_full();
return result;
}
# define AO_HAVE_compare_double_and_swap_double_acquire
#endif
#if defined(AO_HAVE_compare_double_and_swap_double) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_compare_double_and_swap_double_release)
# define AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) \
(AO_nop_full(), AO_compare_double_and_swap_double(addr,o1,o2,n1,n2))
# define AO_HAVE_compare_double_and_swap_double_release
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_full)
# if !defined(AO_HAVE_compare_double_and_swap_double_release)
# define AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_release
# endif
# if !defined(AO_HAVE_compare_double_and_swap_double_acquire)
# define AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_acquire
# endif
# if !defined(AO_HAVE_compare_double_and_swap_double_write)
# define AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_write
# endif
# if !defined(AO_HAVE_compare_double_and_swap_double_read)
# define AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_read
# endif
#endif /* AO_HAVE_compare_double_and_swap_double_full */
#if !defined(AO_HAVE_compare_double_and_swap_double) \
&& defined(AO_HAVE_compare_double_and_swap_double_release)
# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double
#endif
#if !defined(AO_HAVE_compare_double_and_swap_double) \
&& defined(AO_HAVE_compare_double_and_swap_double_acquire)
# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double
#endif
#if !defined(AO_HAVE_compare_double_and_swap_double) \
&& defined(AO_HAVE_compare_double_and_swap_double_write)
# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double
#endif
#if !defined(AO_HAVE_compare_double_and_swap_double) \
&& defined(AO_HAVE_compare_double_and_swap_double_read)
# define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_acquire) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_compare_double_and_swap_double_full)
# define AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2) \
(AO_nop_full(), \
AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2))
# define AO_HAVE_compare_double_and_swap_double_full
#endif
#if !defined(AO_HAVE_compare_double_and_swap_double_release_write) \
&& defined(AO_HAVE_compare_double_and_swap_double_write)
# define AO_compare_double_and_swap_double_release_write(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_release_write
#endif
#if !defined(AO_HAVE_compare_double_and_swap_double_release_write) \
&& defined(AO_HAVE_compare_double_and_swap_double_release)
# define AO_compare_double_and_swap_double_release_write(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_release_write
#endif
#if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \
&& defined(AO_HAVE_compare_double_and_swap_double_read)
# define AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_acquire_read
#endif
#if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \
&& defined(AO_HAVE_compare_double_and_swap_double_acquire)
# define AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_compare_double_and_swap_double_acquire_read)
# define AO_compare_double_and_swap_double_dd_acquire_read(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_compare_double_and_swap_double)
# define AO_compare_double_and_swap_double_dd_acquire_read(addr,o1,o2,n1,n2) \
AO_compare_double_and_swap_double(addr,o1,o2,n1,n2)
# define AO_HAVE_compare_double_and_swap_double_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */
/* Compare_and_swap_double */
#if defined(AO_HAVE_compare_and_swap_double) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_compare_and_swap_double_acquire)
AO_INLINE int
AO_compare_and_swap_double_acquire(volatile AO_double_t *addr,
AO_t o1,
AO_t n1, AO_t n2)
{
int result = AO_compare_and_swap_double(addr, o1, n1, n2);
AO_nop_full();
return result;
}
# define AO_HAVE_compare_and_swap_double_acquire
#endif
#if defined(AO_HAVE_compare_and_swap_double) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_compare_and_swap_double_release)
# define AO_compare_and_swap_double_release(addr,o1,n1,n2) \
(AO_nop_full(), AO_compare_and_swap_double(addr,o1,n1,n2))
# define AO_HAVE_compare_and_swap_double_release
#endif
#if defined(AO_HAVE_compare_and_swap_double_full)
# if !defined(AO_HAVE_compare_and_swap_double_release)
# define AO_compare_and_swap_double_release(addr,o1,n1,n2) \
AO_compare_and_swap_double_full(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_release
# endif
# if !defined(AO_HAVE_compare_and_swap_double_acquire)
# define AO_compare_and_swap_double_acquire(addr,o1,n1,n2) \
AO_compare_and_swap_double_full(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_acquire
# endif
# if !defined(AO_HAVE_compare_and_swap_double_write)
# define AO_compare_and_swap_double_write(addr,o1,n1,n2) \
AO_compare_and_swap_double_full(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_write
# endif
# if !defined(AO_HAVE_compare_and_swap_double_read)
# define AO_compare_and_swap_double_read(addr,o1,n1,n2) \
AO_compare_and_swap_double_full(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_read
# endif
#endif /* AO_HAVE_compare_and_swap_double_full */
#if !defined(AO_HAVE_compare_and_swap_double) \
&& defined(AO_HAVE_compare_and_swap_double_release)
# define AO_compare_and_swap_double(addr,o1,n1,n2) \
AO_compare_and_swap_double_release(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double
#endif
#if !defined(AO_HAVE_compare_and_swap_double) \
&& defined(AO_HAVE_compare_and_swap_double_acquire)
# define AO_compare_and_swap_double(addr,o1,n1,n2) \
AO_compare_and_swap_double_acquire(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double
#endif
#if !defined(AO_HAVE_compare_and_swap_double) \
&& defined(AO_HAVE_compare_and_swap_double_write)
# define AO_compare_and_swap_double(addr,o1,n1,n2) \
AO_compare_and_swap_double_write(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double
#endif
#if !defined(AO_HAVE_compare_and_swap_double) \
&& defined(AO_HAVE_compare_and_swap_double_read)
# define AO_compare_and_swap_double(addr,o1,n1,n2) \
AO_compare_and_swap_double_read(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double
#endif
#if defined(AO_HAVE_compare_and_swap_double_acquire) \
&& defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_compare_and_swap_double_full)
# define AO_compare_and_swap_double_full(addr,o1,n1,n2) \
(AO_nop_full(), AO_compare_and_swap_double_acquire(addr,o1,n1,n2))
# define AO_HAVE_compare_and_swap_double_full
#endif
#if !defined(AO_HAVE_compare_and_swap_double_release_write) \
&& defined(AO_HAVE_compare_and_swap_double_write)
# define AO_compare_and_swap_double_release_write(addr,o1,n1,n2) \
AO_compare_and_swap_double_write(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_release_write
#endif
#if !defined(AO_HAVE_compare_and_swap_double_release_write) \
&& defined(AO_HAVE_compare_and_swap_double_release)
# define AO_compare_and_swap_double_release_write(addr,o1,n1,n2) \
AO_compare_and_swap_double_release(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_release_write
#endif
#if !defined(AO_HAVE_compare_and_swap_double_acquire_read) \
&& defined(AO_HAVE_compare_and_swap_double_read)
# define AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2) \
AO_compare_and_swap_double_read(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_acquire_read
#endif
#if !defined(AO_HAVE_compare_and_swap_double_acquire_read) \
&& defined(AO_HAVE_compare_and_swap_double_acquire)
# define AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2) \
AO_compare_and_swap_double_acquire(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_acquire_read
#endif
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_compare_and_swap_double_acquire_read)
# define AO_compare_and_swap_double_dd_acquire_read(addr,o1,n1,n2) \
AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_compare_and_swap_double)
# define AO_compare_and_swap_double_dd_acquire_read(addr,o1,n1,n2) \
AO_compare_and_swap_double(addr,o1,n1,n2)
# define AO_HAVE_compare_and_swap_double_dd_acquire_read
# endif
#endif
/* Convenience functions for AO_double compare-and-swap, which type-check */
/* and read more easily in client code. */
#if defined(AO_HAVE_compare_double_and_swap_double) \
&& !defined(AO_HAVE_double_compare_and_swap)
AO_INLINE int
AO_double_compare_and_swap(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_release) \
&& !defined(AO_HAVE_double_compare_and_swap_release)
AO_INLINE int
AO_double_compare_and_swap_release(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double_release(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap_release
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_acquire) \
&& !defined(AO_HAVE_double_compare_and_swap_acquire)
AO_INLINE int
AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double_acquire(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap_acquire
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_read) \
&& !defined(AO_HAVE_double_compare_and_swap_read)
AO_INLINE int
AO_double_compare_and_swap_read(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double_read(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap_read
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_write) \
&& !defined(AO_HAVE_double_compare_and_swap_write)
AO_INLINE int
AO_double_compare_and_swap_write(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double_write(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap_write
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_release_write) \
&& !defined(AO_HAVE_double_compare_and_swap_release_write)
AO_INLINE int
AO_double_compare_and_swap_release_write(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double_release_write(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap_release_write
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \
&& !defined(AO_HAVE_double_compare_and_swap_acquire_read)
AO_INLINE int
AO_double_compare_and_swap_acquire_read(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double_acquire_read(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap_acquire_read
#endif
#if defined(AO_HAVE_compare_double_and_swap_double_full) \
&& !defined(AO_HAVE_double_compare_and_swap_full)
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return AO_compare_double_and_swap_double_full(addr,
old_val.AO_val1, old_val.AO_val2,
new_val.AO_val1, new_val.AO_val2);
}
# define AO_HAVE_double_compare_and_swap_full
#endif
#ifndef AO_HAVE_double_compare_and_swap_dd_acquire_read
/* Duplicated from generalize-small because double CAS might be */
/* defined after the include. */
# ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_double_compare_and_swap_acquire_read)
# define AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \
AO_double_compare_and_swap_acquire_read(addr, old, new_val)
# define AO_HAVE_double_compare_and_swap_dd_acquire_read
# endif
# elif defined(AO_HAVE_double_compare_and_swap)
# define AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \
AO_double_compare_and_swap(addr, old, new_val)
# define AO_HAVE_double_compare_and_swap_dd_acquire_read
# endif /* !AO_NO_DD_ORDERING */
#endif


@ -0,0 +1,30 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Describes architectures on which volatile AO_t, unsigned char, */
/* unsigned short, and unsigned int loads and stores have */
/* acquire/release semantics for all normally legal alignments. */
#include "loadstore/acquire_release_volatile.h"
#include "loadstore/char_acquire_release_volatile.h"
#include "loadstore/short_acquire_release_volatile.h"
#include "loadstore/int_acquire_release_volatile.h"


@ -0,0 +1,38 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Describes architectures on which AO_t, unsigned char, unsigned */
/* short, and unsigned int loads and stores are atomic but only if data */
/* is suitably aligned. */
#if defined(__m68k__) && !defined(AO_ALIGNOF_SUPPORTED)
/* Even though AO_t is redefined in m68k.h, some clients use AO */
/* pointer size primitives to access variables not declared as AO_t. */
/* Such variables may have 2-byte alignment, while their sizeof is 4. */
#else
# define AO_ACCESS_CHECK_ALIGNED
#endif
/* An alignment check for the char type would be a misnomer, since */
/* single-byte accesses are always suitably aligned. */
#define AO_ACCESS_short_CHECK_ALIGNED
#define AO_ACCESS_int_CHECK_ALIGNED
#include "all_atomic_load_store.h"


@ -0,0 +1,32 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Describes architectures on which AO_t, unsigned char, unsigned */
/* short, and unsigned int loads and stores are atomic for all normally */
/* legal alignments. */
#include "all_atomic_only_load.h"
#include "loadstore/atomic_store.h"
#include "loadstore/char_atomic_store.h"
#include "loadstore/short_atomic_store.h"
#include "loadstore/int_atomic_store.h"


@ -0,0 +1,30 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Describes architectures on which AO_t, unsigned char, unsigned */
/* short, and unsigned int loads are atomic for all normally legal */
/* alignments. */
#include "loadstore/atomic_load.h"
#include "loadstore/char_atomic_load.h"
#include "loadstore/short_atomic_load.h"
#include "loadstore/int_atomic_load.h"


@ -0,0 +1,552 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Inclusion of this file signifies that AO_t is in fact int. */
/* Hence any AO_... operation can also serve as AO_int_... operation. */
#if defined(AO_HAVE_load) && !defined(AO_HAVE_int_load)
# define AO_int_load(addr) \
(unsigned)AO_load((const volatile AO_t *)(addr))
# define AO_HAVE_int_load
#endif
#if defined(AO_HAVE_store) && !defined(AO_HAVE_int_store)
# define AO_int_store(addr, val) \
AO_store((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store
#endif
#if defined(AO_HAVE_fetch_and_add) \
&& !defined(AO_HAVE_int_fetch_and_add)
# define AO_int_fetch_and_add(addr, incr) \
(unsigned)AO_fetch_and_add((volatile AO_t *)(addr), \
(AO_t)(incr))
# define AO_HAVE_int_fetch_and_add
#endif
#if defined(AO_HAVE_fetch_and_add1) \
&& !defined(AO_HAVE_int_fetch_and_add1)
# define AO_int_fetch_and_add1(addr) \
(unsigned)AO_fetch_and_add1((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1
#endif
#if defined(AO_HAVE_fetch_and_sub1) \
&& !defined(AO_HAVE_int_fetch_and_sub1)
# define AO_int_fetch_and_sub1(addr) \
(unsigned)AO_fetch_and_sub1((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1
#endif
#if defined(AO_HAVE_and) && !defined(AO_HAVE_int_and)
# define AO_int_and(addr, val) \
AO_and((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_and
#endif
#if defined(AO_HAVE_or) && !defined(AO_HAVE_int_or)
# define AO_int_or(addr, val) \
AO_or((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_or
#endif
#if defined(AO_HAVE_xor) && !defined(AO_HAVE_int_xor)
# define AO_int_xor(addr, val) \
AO_xor((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_xor
#endif
#if defined(AO_HAVE_fetch_compare_and_swap) \
&& !defined(AO_HAVE_int_fetch_compare_and_swap)
# define AO_int_fetch_compare_and_swap(addr, old, new_val) \
(unsigned)AO_fetch_compare_and_swap((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_fetch_compare_and_swap
#endif
#if defined(AO_HAVE_compare_and_swap) \
&& !defined(AO_HAVE_int_compare_and_swap)
# define AO_int_compare_and_swap(addr, old, new_val) \
AO_compare_and_swap((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap
#endif
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Inclusion of this file signifies that AO_t is in fact int. */
/* Hence any AO_... operation can also serve as AO_int_... operation. */
#if defined(AO_HAVE_load_full) && !defined(AO_HAVE_int_load_full)
# define AO_int_load_full(addr) \
(unsigned)AO_load_full((const volatile AO_t *)(addr))
# define AO_HAVE_int_load_full
#endif
#if defined(AO_HAVE_store_full) && !defined(AO_HAVE_int_store_full)
# define AO_int_store_full(addr, val) \
AO_store_full((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store_full
#endif
#if defined(AO_HAVE_fetch_and_add_full) \
&& !defined(AO_HAVE_int_fetch_and_add_full)
# define AO_int_fetch_and_add_full(addr, incr) \
(unsigned)AO_fetch_and_add_full((volatile AO_t *)(addr), \
(AO_t)(incr))
# define AO_HAVE_int_fetch_and_add_full
#endif
#if defined(AO_HAVE_fetch_and_add1_full) \
&& !defined(AO_HAVE_int_fetch_and_add1_full)
# define AO_int_fetch_and_add1_full(addr) \
(unsigned)AO_fetch_and_add1_full((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_full
#endif
#if defined(AO_HAVE_fetch_and_sub1_full) \
&& !defined(AO_HAVE_int_fetch_and_sub1_full)
# define AO_int_fetch_and_sub1_full(addr) \
(unsigned)AO_fetch_and_sub1_full((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_full
#endif
#if defined(AO_HAVE_and_full) && !defined(AO_HAVE_int_and_full)
# define AO_int_and_full(addr, val) \
AO_and_full((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_and_full
#endif
#if defined(AO_HAVE_or_full) && !defined(AO_HAVE_int_or_full)
# define AO_int_or_full(addr, val) \
AO_or_full((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_or_full
#endif
#if defined(AO_HAVE_xor_full) && !defined(AO_HAVE_int_xor_full)
# define AO_int_xor_full(addr, val) \
AO_xor_full((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_xor_full
#endif
#if defined(AO_HAVE_fetch_compare_and_swap_full) \
&& !defined(AO_HAVE_int_fetch_compare_and_swap_full)
# define AO_int_fetch_compare_and_swap_full(addr, old, new_val) \
(unsigned)AO_fetch_compare_and_swap_full((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_fetch_compare_and_swap_full
#endif
#if defined(AO_HAVE_compare_and_swap_full) \
&& !defined(AO_HAVE_int_compare_and_swap_full)
# define AO_int_compare_and_swap_full(addr, old, new_val) \
AO_compare_and_swap_full((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_full
#endif
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Inclusion of this file signifies that AO_t is in fact int. */
/* Hence any AO_... operation can also serve as AO_int_... operation. */
#if defined(AO_HAVE_load_acquire) && !defined(AO_HAVE_int_load_acquire)
# define AO_int_load_acquire(addr) \
(unsigned)AO_load_acquire((const volatile AO_t *)(addr))
# define AO_HAVE_int_load_acquire
#endif
#if defined(AO_HAVE_store_acquire) && !defined(AO_HAVE_int_store_acquire)
# define AO_int_store_acquire(addr, val) \
AO_store_acquire((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store_acquire
#endif
#if defined(AO_HAVE_fetch_and_add_acquire) \
&& !defined(AO_HAVE_int_fetch_and_add_acquire)
# define AO_int_fetch_and_add_acquire(addr, incr) \
(unsigned)AO_fetch_and_add_acquire((volatile AO_t *)(addr), \
(AO_t)(incr))
# define AO_HAVE_int_fetch_and_add_acquire
#endif
#if defined(AO_HAVE_fetch_and_add1_acquire) \
&& !defined(AO_HAVE_int_fetch_and_add1_acquire)
# define AO_int_fetch_and_add1_acquire(addr) \
(unsigned)AO_fetch_and_add1_acquire((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_acquire
#endif
#if defined(AO_HAVE_fetch_and_sub1_acquire) \
&& !defined(AO_HAVE_int_fetch_and_sub1_acquire)
# define AO_int_fetch_and_sub1_acquire(addr) \
(unsigned)AO_fetch_and_sub1_acquire((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_acquire
#endif
#if defined(AO_HAVE_and_acquire) && !defined(AO_HAVE_int_and_acquire)
# define AO_int_and_acquire(addr, val) \
AO_and_acquire((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_and_acquire
#endif
#if defined(AO_HAVE_or_acquire) && !defined(AO_HAVE_int_or_acquire)
# define AO_int_or_acquire(addr, val) \
AO_or_acquire((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_or_acquire
#endif
#if defined(AO_HAVE_xor_acquire) && !defined(AO_HAVE_int_xor_acquire)
# define AO_int_xor_acquire(addr, val) \
AO_xor_acquire((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_xor_acquire
#endif
#if defined(AO_HAVE_fetch_compare_and_swap_acquire) \
&& !defined(AO_HAVE_int_fetch_compare_and_swap_acquire)
# define AO_int_fetch_compare_and_swap_acquire(addr, old, new_val) \
(unsigned)AO_fetch_compare_and_swap_acquire((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_fetch_compare_and_swap_acquire
#endif
#if defined(AO_HAVE_compare_and_swap_acquire) \
&& !defined(AO_HAVE_int_compare_and_swap_acquire)
# define AO_int_compare_and_swap_acquire(addr, old, new_val) \
AO_compare_and_swap_acquire((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_acquire
#endif
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Inclusion of this file signifies that AO_t is in fact int. */
/* Hence any AO_... operation can also serve as AO_int_... operation. */
#if defined(AO_HAVE_load_release) && !defined(AO_HAVE_int_load_release)
# define AO_int_load_release(addr) \
(unsigned)AO_load_release((const volatile AO_t *)(addr))
# define AO_HAVE_int_load_release
#endif
#if defined(AO_HAVE_store_release) && !defined(AO_HAVE_int_store_release)
# define AO_int_store_release(addr, val) \
AO_store_release((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store_release
#endif
#if defined(AO_HAVE_fetch_and_add_release) \
&& !defined(AO_HAVE_int_fetch_and_add_release)
# define AO_int_fetch_and_add_release(addr, incr) \
(unsigned)AO_fetch_and_add_release((volatile AO_t *)(addr), \
(AO_t)(incr))
# define AO_HAVE_int_fetch_and_add_release
#endif
#if defined(AO_HAVE_fetch_and_add1_release) \
&& !defined(AO_HAVE_int_fetch_and_add1_release)
# define AO_int_fetch_and_add1_release(addr) \
(unsigned)AO_fetch_and_add1_release((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_release
#endif
#if defined(AO_HAVE_fetch_and_sub1_release) \
&& !defined(AO_HAVE_int_fetch_and_sub1_release)
# define AO_int_fetch_and_sub1_release(addr) \
(unsigned)AO_fetch_and_sub1_release((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_release
#endif
#if defined(AO_HAVE_and_release) && !defined(AO_HAVE_int_and_release)
# define AO_int_and_release(addr, val) \
AO_and_release((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_and_release
#endif
#if defined(AO_HAVE_or_release) && !defined(AO_HAVE_int_or_release)
# define AO_int_or_release(addr, val) \
AO_or_release((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_or_release
#endif
#if defined(AO_HAVE_xor_release) && !defined(AO_HAVE_int_xor_release)
# define AO_int_xor_release(addr, val) \
AO_xor_release((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_xor_release
#endif
#if defined(AO_HAVE_fetch_compare_and_swap_release) \
&& !defined(AO_HAVE_int_fetch_compare_and_swap_release)
# define AO_int_fetch_compare_and_swap_release(addr, old, new_val) \
(unsigned)AO_fetch_compare_and_swap_release((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_fetch_compare_and_swap_release
#endif
#if defined(AO_HAVE_compare_and_swap_release) \
&& !defined(AO_HAVE_int_compare_and_swap_release)
# define AO_int_compare_and_swap_release(addr, old, new_val) \
AO_compare_and_swap_release((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_release
#endif
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Inclusion of this file signifies that AO_t is in fact int. */
/* Hence any AO_... operation can also serve as AO_int_... operation. */
#if defined(AO_HAVE_load_write) && !defined(AO_HAVE_int_load_write)
# define AO_int_load_write(addr) \
(unsigned)AO_load_write((const volatile AO_t *)(addr))
# define AO_HAVE_int_load_write
#endif
#if defined(AO_HAVE_store_write) && !defined(AO_HAVE_int_store_write)
# define AO_int_store_write(addr, val) \
AO_store_write((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store_write
#endif
#if defined(AO_HAVE_fetch_and_add_write) \
&& !defined(AO_HAVE_int_fetch_and_add_write)
# define AO_int_fetch_and_add_write(addr, incr) \
(unsigned)AO_fetch_and_add_write((volatile AO_t *)(addr), \
(AO_t)(incr))
# define AO_HAVE_int_fetch_and_add_write
#endif
#if defined(AO_HAVE_fetch_and_add1_write) \
&& !defined(AO_HAVE_int_fetch_and_add1_write)
# define AO_int_fetch_and_add1_write(addr) \
(unsigned)AO_fetch_and_add1_write((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_write
#endif
#if defined(AO_HAVE_fetch_and_sub1_write) \
&& !defined(AO_HAVE_int_fetch_and_sub1_write)
# define AO_int_fetch_and_sub1_write(addr) \
(unsigned)AO_fetch_and_sub1_write((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_write
#endif
#if defined(AO_HAVE_and_write) && !defined(AO_HAVE_int_and_write)
# define AO_int_and_write(addr, val) \
AO_and_write((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_and_write
#endif
#if defined(AO_HAVE_or_write) && !defined(AO_HAVE_int_or_write)
# define AO_int_or_write(addr, val) \
AO_or_write((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_or_write
#endif
#if defined(AO_HAVE_xor_write) && !defined(AO_HAVE_int_xor_write)
# define AO_int_xor_write(addr, val) \
AO_xor_write((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_xor_write
#endif
#if defined(AO_HAVE_fetch_compare_and_swap_write) \
&& !defined(AO_HAVE_int_fetch_compare_and_swap_write)
# define AO_int_fetch_compare_and_swap_write(addr, old, new_val) \
(unsigned)AO_fetch_compare_and_swap_write((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_fetch_compare_and_swap_write
#endif
#if defined(AO_HAVE_compare_and_swap_write) \
&& !defined(AO_HAVE_int_compare_and_swap_write)
# define AO_int_compare_and_swap_write(addr, old, new_val) \
AO_compare_and_swap_write((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_write
#endif
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Inclusion of this file signifies that AO_t is in fact int. */
/* Hence any AO_... operation can also serve as AO_int_... operation. */
#if defined(AO_HAVE_load_read) && !defined(AO_HAVE_int_load_read)
# define AO_int_load_read(addr) \
(unsigned)AO_load_read((const volatile AO_t *)(addr))
# define AO_HAVE_int_load_read
#endif
#if defined(AO_HAVE_store_read) && !defined(AO_HAVE_int_store_read)
# define AO_int_store_read(addr, val) \
AO_store_read((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store_read
#endif
#if defined(AO_HAVE_fetch_and_add_read) \
&& !defined(AO_HAVE_int_fetch_and_add_read)
# define AO_int_fetch_and_add_read(addr, incr) \
(unsigned)AO_fetch_and_add_read((volatile AO_t *)(addr), \
(AO_t)(incr))
# define AO_HAVE_int_fetch_and_add_read
#endif
#if defined(AO_HAVE_fetch_and_add1_read) \
&& !defined(AO_HAVE_int_fetch_and_add1_read)
# define AO_int_fetch_and_add1_read(addr) \
(unsigned)AO_fetch_and_add1_read((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_read
#endif
#if defined(AO_HAVE_fetch_and_sub1_read) \
&& !defined(AO_HAVE_int_fetch_and_sub1_read)
# define AO_int_fetch_and_sub1_read(addr) \
(unsigned)AO_fetch_and_sub1_read((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_read
#endif
#if defined(AO_HAVE_and_read) && !defined(AO_HAVE_int_and_read)
# define AO_int_and_read(addr, val) \
AO_and_read((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_and_read
#endif
#if defined(AO_HAVE_or_read) && !defined(AO_HAVE_int_or_read)
# define AO_int_or_read(addr, val) \
AO_or_read((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_or_read
#endif
#if defined(AO_HAVE_xor_read) && !defined(AO_HAVE_int_xor_read)
# define AO_int_xor_read(addr, val) \
AO_xor_read((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_xor_read
#endif
#if defined(AO_HAVE_fetch_compare_and_swap_read) \
&& !defined(AO_HAVE_int_fetch_compare_and_swap_read)
# define AO_int_fetch_compare_and_swap_read(addr, old, new_val) \
(unsigned)AO_fetch_compare_and_swap_read((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_fetch_compare_and_swap_read
#endif
#if defined(AO_HAVE_compare_and_swap_read) \
&& !defined(AO_HAVE_int_compare_and_swap_read)
# define AO_int_compare_and_swap_read(addr, old, new_val) \
AO_compare_and_swap_read((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_read
#endif
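/* A minimal usage sketch, assuming a target that defines AO_T_IS_INT   */
/* and reaches this header via atomic_ops.h: the generalizations above  */
/* make the AO_int_... forms usable on a plain unsigned counter. The    */
/* variable and function names below are illustrative only.             */
#include "atomic_ops.h"

static volatile unsigned event_count;

#ifdef AO_HAVE_int_fetch_and_add1
  AO_INLINE unsigned
  next_event_id(void)
  {
    /* Atomically increments event_count; returns its prior value. */
    return AO_int_fetch_and_add1(&event_count);
  }
#endif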


@@ -0,0 +1,92 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Inclusion of this file signifies that AO_t is in fact int. */
/* Hence any AO_... operation can also serve as AO_int_... operation. */
#if defined(AO_HAVE_load_XBAR) && !defined(AO_HAVE_int_load_XBAR)
# define AO_int_load_XBAR(addr) \
(unsigned)AO_load_XBAR((const volatile AO_t *)(addr))
# define AO_HAVE_int_load_XBAR
#endif
#if defined(AO_HAVE_store_XBAR) && !defined(AO_HAVE_int_store_XBAR)
# define AO_int_store_XBAR(addr, val) \
AO_store_XBAR((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store_XBAR
#endif
#if defined(AO_HAVE_fetch_and_add_XBAR) \
&& !defined(AO_HAVE_int_fetch_and_add_XBAR)
# define AO_int_fetch_and_add_XBAR(addr, incr) \
(unsigned)AO_fetch_and_add_XBAR((volatile AO_t *)(addr), \
(AO_t)(incr))
# define AO_HAVE_int_fetch_and_add_XBAR
#endif
#if defined(AO_HAVE_fetch_and_add1_XBAR) \
&& !defined(AO_HAVE_int_fetch_and_add1_XBAR)
# define AO_int_fetch_and_add1_XBAR(addr) \
(unsigned)AO_fetch_and_add1_XBAR((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_XBAR
#endif
#if defined(AO_HAVE_fetch_and_sub1_XBAR) \
&& !defined(AO_HAVE_int_fetch_and_sub1_XBAR)
# define AO_int_fetch_and_sub1_XBAR(addr) \
(unsigned)AO_fetch_and_sub1_XBAR((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_XBAR
#endif
#if defined(AO_HAVE_and_XBAR) && !defined(AO_HAVE_int_and_XBAR)
# define AO_int_and_XBAR(addr, val) \
AO_and_XBAR((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_and_XBAR
#endif
#if defined(AO_HAVE_or_XBAR) && !defined(AO_HAVE_int_or_XBAR)
# define AO_int_or_XBAR(addr, val) \
AO_or_XBAR((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_or_XBAR
#endif
#if defined(AO_HAVE_xor_XBAR) && !defined(AO_HAVE_int_xor_XBAR)
# define AO_int_xor_XBAR(addr, val) \
AO_xor_XBAR((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_xor_XBAR
#endif
#if defined(AO_HAVE_fetch_compare_and_swap_XBAR) \
&& !defined(AO_HAVE_int_fetch_compare_and_swap_XBAR)
# define AO_int_fetch_compare_and_swap_XBAR(addr, old, new_val) \
(unsigned)AO_fetch_compare_and_swap_XBAR((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_fetch_compare_and_swap_XBAR
#endif
#if defined(AO_HAVE_compare_and_swap_XBAR) \
&& !defined(AO_HAVE_int_compare_and_swap_XBAR)
# define AO_int_compare_and_swap_XBAR(addr, old, new_val) \
AO_compare_and_swap_XBAR((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_XBAR
#endif


@@ -0,0 +1,264 @@
/*
* Copyright (c) 2007 by NEC LE-IT: All rights reserved.
 * A transcription of ARMv6 atomic operations for the ARM RealView Toolchain.
 * This code works with armcc from RVDS 3.1.
* This is based on work in gcc/arm.h by
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
#if __TARGET_ARCH_ARM < 6
# if !defined(CPPCHECK)
# error Do not use with ARM instruction sets lower than v6
# endif
#else
#define AO_ACCESS_CHECK_ALIGNED
#define AO_ACCESS_short_CHECK_ALIGNED
#define AO_ACCESS_int_CHECK_ALIGNED
#include "../all_atomic_only_load.h"
#include "../standard_ao_double_t.h"
/* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC.
 * A data memory barrier must be raised via a CP15 command (see documentation).
 *
 * ARMv7 is compatible with ARMv6 but has a simpler instruction for issuing a
 * memory barrier (DMB). Raising it via CP15 should still work, as I was told
 * by the support engineers. If it turns out to be much quicker, then we should
 * implement custom code for ARMv7 using the asm { dmb } command.
 *
 * If only a single processor is used, we can define AO_UNIPROCESSOR
 * and need not access CP15 to issue a DMB at all.
 */
AO_INLINE void
AO_nop_full(void)
{
# ifndef AO_UNIPROCESSOR
unsigned int dest=0;
/* Issue a data memory barrier (keeps ordering of memory transactions */
/* before and after this operation). */
__asm {
mcr p15,0,dest,c7,c10,5
};
# else
AO_compiler_barrier();
# endif
}
#define AO_HAVE_nop_full
/* NEC LE-IT: atomic "store" - according to ARM documentation this is
 * the only safe way to set variables that are also used in an LL/SC
 * environment. A direct write won't be recognized by the LL/SC
 * construct on other CPUs.
*
* HB: Based on subsequent discussion, I think it would be OK to use an
* ordinary store here if we knew that interrupt handlers always cleared
* the reservation. They should, but there is some doubt that this is
* currently always the case for e.g. Linux.
*/
AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
{
unsigned long tmp;
retry:
__asm {
ldrex tmp, [addr]
strex tmp, value, [addr]
teq tmp, #0
bne retry
};
}
#define AO_HAVE_store
/* NEC LE-IT: replace the SWAP as recommended by ARM:
"Applies to: ARM11 Cores
Though the SWP instruction will still work with ARM V6 cores, it is recommended
to use the new V6 synchronization instructions. The SWP instruction produces
locked read and write accesses which are atomic, i.e. another operation cannot
be done between these locked accesses which ties up external bus (AHB,AXI)
bandwidth and can increase worst case interrupt latencies. LDREX,STREX are
more flexible, other instructions can be done between the LDREX and STREX accesses.
"
*/
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
AO_TS_VAL_t oldval;
unsigned long tmp;
unsigned long one = 1;
retry:
__asm {
ldrex oldval, [addr]
strex tmp, one, [addr]
teq tmp, #0
bne retry
}
return oldval;
}
#define AO_HAVE_test_and_set
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *p, AO_t incr)
{
unsigned long tmp,tmp2;
AO_t result;
retry:
__asm {
ldrex result, [p]
add tmp, incr, result
strex tmp2, tmp, [p]
teq tmp2, #0
bne retry
}
return result;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_t
AO_fetch_and_add1(volatile AO_t *p)
{
unsigned long tmp,tmp2;
AO_t result;
retry:
__asm {
ldrex result, [p]
add tmp, result, #1
strex tmp2, tmp, [p]
teq tmp2, #0
bne retry
}
return result;
}
#define AO_HAVE_fetch_and_add1
AO_INLINE AO_t
AO_fetch_and_sub1(volatile AO_t *p)
{
unsigned long tmp,tmp2;
AO_t result;
retry:
__asm {
ldrex result, [p]
sub tmp, result, #1
strex tmp2, tmp, [p]
teq tmp2, #0
bne retry
}
return result;
}
#define AO_HAVE_fetch_and_sub1
#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t result, tmp;
retry:
__asm {
mov result, #2
ldrex tmp, [addr]
teq tmp, old_val
# ifdef __thumb__
it eq
# endif
strexeq result, new_val, [addr]
teq result, #1
beq retry
}
return !(result&2);
}
# define AO_HAVE_compare_and_swap
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t fetched_val, tmp;
retry:
__asm {
mov tmp, #2
ldrex fetched_val, [addr]
teq fetched_val, old_val
# ifdef __thumb__
it eq
# endif
strexeq tmp, new_val, [addr]
teq tmp, #1
beq retry
}
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
/* Helper functions for the RealView compiler: LDREXD is not usable
 * with the inline assembler, so use the "embedded" assembler as
 * suggested by ARM Dev. support (June 2008). */
__asm inline double_ptr_storage AO_load_ex(const volatile AO_double_t *addr) {
LDREXD r0,r1,[r0]
}
__asm inline int AO_store_ex(AO_t val1, AO_t val2, volatile AO_double_t *addr) {
STREXD r3,r0,r1,[r2]
MOV r0,r3
}
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
result.AO_whole = AO_load_ex(addr);
return result;
}
#define AO_HAVE_double_load
AO_INLINE int
AO_compare_double_and_swap_double(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
double_ptr_storage old_val =
((double_ptr_storage)old_val2 << 32) | old_val1;
double_ptr_storage tmp;
int result;
  while (1) {
    tmp = AO_load_ex(addr);
    if (tmp != old_val) return 0;
    result = AO_store_ex(new_val1, new_val2, addr);
    if (!result) return 1;
  }
}
#define AO_HAVE_compare_double_and_swap_double
#endif /* __TARGET_ARCH_ARM >= 6 */
#define AO_T_IS_INT
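/* A minimal client-side sketch built on the primitives above (the      */
/* function name and the saturation logic are illustrative only): a     */
/* classic fetch-compare-and-swap retry loop, usable once this header   */
/* has been pulled in via atomic_ops.h under armcc.                     */
#include "atomic_ops.h"

AO_INLINE AO_t
saturating_inc(volatile AO_t *p, AO_t limit)
{
  AO_t old;
  do {
    old = AO_load(p);
    if (old >= limit)
      return old;       /* already at the limit; leave it unchanged */
    /* AO_fetch_compare_and_swap returns the prior value; it equals */
    /* old exactly when our increment was installed.                */
  } while (AO_fetch_compare_and_swap(p, old, old + 1) != old);
  return old + 1;
}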


@@ -0,0 +1,86 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Ensure, if at all possible, that AO_compare_and_swap_full() is
* available. The emulation should be brute-force signal-safe, even
* though it actually blocks.
* Including this file will generate an error if AO_compare_and_swap_full()
* cannot be made available.
* This will be included from platform-specific atomic_ops files
* if appropriate, and if AO_REQUIRE_CAS is defined. It should not be
* included directly, especially since it affects the implementation
* of other atomic update primitives.
* The implementation assumes that only AO_store_XXX and AO_test_and_set_XXX
* variants are defined, and that AO_test_and_set_XXX is not used to
* operate on compare_and_swap locations.
*/
#ifndef AO_ATOMIC_OPS_H
# error This file should not be included directly.
#endif
#ifndef AO_HAVE_double_t
# include "standard_ao_double_t.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val,
AO_t new_val);
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2);
void AO_store_full_emulation(volatile AO_t *addr, AO_t val);
#ifndef AO_HAVE_fetch_compare_and_swap_full
# define AO_fetch_compare_and_swap_full(addr, old, newval) \
AO_fetch_compare_and_swap_emulation(addr, old, newval)
# define AO_HAVE_fetch_compare_and_swap_full
#endif
#ifndef AO_HAVE_compare_double_and_swap_double_full
# define AO_compare_double_and_swap_double_full(addr, old1, old2, \
newval1, newval2) \
AO_compare_double_and_swap_double_emulation(addr, old1, old2, \
newval1, newval2)
# define AO_HAVE_compare_double_and_swap_double_full
#endif
#undef AO_store
#undef AO_HAVE_store
#undef AO_store_write
#undef AO_HAVE_store_write
#undef AO_store_release
#undef AO_HAVE_store_release
#undef AO_store_full
#undef AO_HAVE_store_full
#define AO_store_full(addr, val) AO_store_full_emulation(addr, val)
#define AO_HAVE_store_full
#ifdef __cplusplus
} /* extern "C" */
#endif
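/* A sketch of the intended client pattern (the function name is        */
/* illustrative): with AO_REQUIRE_CAS defined, the emulation above      */
/* guarantees AO_fetch_compare_and_swap_full exists, so portable code   */
/* can rely on it even where no hardware CAS is available.              */
#include "atomic_ops.h"

AO_INLINE int
try_claim(volatile AO_t *flag)
{
  /* Returns nonzero for exactly one caller: whoever swaps 0 -> 1. */
  return AO_fetch_compare_and_swap_full(flag, 0, 1) == 0;
}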


@@ -0,0 +1,282 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
* Copyright (c) 2013-2017 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* As of clang-5.0 (and gcc-5.4), __atomic_thread_fence is always */
/* translated to DMB (which is inefficient for AO_nop_write). */
/* TODO: Update it for newer Clang and GCC releases. */
#if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(AO_THREAD_SANITIZER) \
&& !defined(AO_UNIPROCESSOR)
AO_INLINE void
AO_nop_write(void)
{
__asm__ __volatile__("dmb ishst" : : : "memory");
}
# define AO_HAVE_nop_write
#endif
/* There were some bugs in the older clang releases (related to */
/* optimization of functions dealing with __int128 values, supposedly), */
/* so even the asm-based implementation did not work correctly. */
#if !defined(__clang__) || AO_CLANG_PREREQ(3, 9)
# include "../standard_ao_double_t.h"
/* As of gcc-5.4, all double-word built-in load/store and CAS atomics */
/* require -latomic, are not lock-free, and cause a test_stack */
/* failure, so the asm-based implementation is used for now. */
/* TODO: Update it for newer GCC releases. */
#if (!defined(__ILP32__) && !defined(__clang__)) \
|| defined(AO_AARCH64_ASM_LOAD_STORE_CAS)
# ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
int status;
/* Note that STXP cannot be discarded because LD[A]XP is not */
/* single-copy atomic (unlike LDREXD for 32-bit ARM). */
do {
__asm__ __volatile__("//AO_double_load\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %3\n"
" stxp %w2, %w0, %w1, %3"
# else
" ldxp %0, %1, %3\n"
" stxp %w2, %0, %1, %3"
# endif
: "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
: "Q" (*addr));
} while (AO_EXPECT_FALSE(status));
return result;
}
# define AO_HAVE_double_load
AO_INLINE AO_double_t
AO_double_load_acquire(const volatile AO_double_t *addr)
{
AO_double_t result;
int status;
do {
__asm__ __volatile__("//AO_double_load_acquire\n"
# ifdef __ILP32__
" ldaxp %w0, %w1, %3\n"
" stxp %w2, %w0, %w1, %3"
# else
" ldaxp %0, %1, %3\n"
" stxp %w2, %0, %1, %3"
# endif
: "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
: "Q" (*addr));
} while (AO_EXPECT_FALSE(status));
return result;
}
# define AO_HAVE_double_load_acquire
AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t value)
{
AO_double_t old_val;
int status;
do {
__asm__ __volatile__("//AO_double_store\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %3\n"
" stxp %w2, %w4, %w5, %3"
# else
" ldxp %0, %1, %3\n"
" stxp %w2, %4, %5, %3"
# endif
: "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
"=Q" (*addr)
: "r" (value.AO_val1), "r" (value.AO_val2));
/* Compared to the arm.h implementation, the 'cc' (flags) are */
/* not clobbered because A64 has no concept of conditional */
/* execution. */
} while (AO_EXPECT_FALSE(status));
}
# define AO_HAVE_double_store
AO_INLINE void
AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
{
AO_double_t old_val;
int status;
do {
__asm__ __volatile__("//AO_double_store_release\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %3\n"
" stlxp %w2, %w4, %w5, %3"
# else
" ldxp %0, %1, %3\n"
" stlxp %w2, %4, %5, %3"
# endif
: "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
"=Q" (*addr)
: "r" (value.AO_val1), "r" (value.AO_val2));
} while (AO_EXPECT_FALSE(status));
}
# define AO_HAVE_double_store_release
# endif /* !AO_PREFER_GENERALIZED */
AO_INLINE int
AO_double_compare_and_swap(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %2\n"
# else
" ldxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stxp %w0, %w2, %w3, %1\n"
# else
" stxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap
AO_INLINE int
AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap_acquire\n"
# ifdef __ILP32__
" ldaxp %w0, %w1, %2\n"
# else
" ldaxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stxp %w0, %w2, %w3, %1\n"
# else
" stxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap_acquire
AO_INLINE int
AO_double_compare_and_swap_release(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap_release\n"
# ifdef __ILP32__
" ldxp %w0, %w1, %2\n"
# else
" ldxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stlxp %w0, %w2, %w3, %1\n"
# else
" stlxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap_release
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_double_t tmp;
int result = 1;
do {
__asm__ __volatile__("//AO_double_compare_and_swap_full\n"
# ifdef __ILP32__
" ldaxp %w0, %w1, %2\n"
# else
" ldaxp %0, %1, %2\n"
# endif
: "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
: "Q" (*addr));
if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
break;
__asm__ __volatile__(
# ifdef __ILP32__
" stlxp %w0, %w2, %w3, %1\n"
# else
" stlxp %w0, %2, %3, %1\n"
# endif
: "=&r" (result), "=Q" (*addr)
: "r" (new_val.AO_val1), "r" (new_val.AO_val2));
} while (AO_EXPECT_FALSE(result));
return !result;
}
# define AO_HAVE_double_compare_and_swap_full
#endif /* !__ILP32__ && !__clang__ || AO_AARCH64_ASM_LOAD_STORE_CAS */
/* As of clang-5.0 and gcc-8.1, __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 */
/* macro is still missing (while the double-word CAS is available). */
# ifndef __ILP32__
# define AO_GCC_HAVE_double_SYNC_CAS
# endif
#endif /* !__clang__ || AO_CLANG_PREREQ(3, 9) */
#if (defined(__clang__) && !AO_CLANG_PREREQ(3, 8)) || defined(__APPLE_CC__)
/* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros are missing. */
# define AO_GCC_FORCE_HAVE_CAS
#endif
#include "generic.h"
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_GCC_HAVE_double_SYNC_CAS
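/* A hedged usage sketch of the double-word CAS defined above (the      */
/* "versioned slot" idea and all names below are illustrative): pairing */
/* a payload with a version counter is the usual way to sidestep ABA.   */
#include "atomic_ops.h"

#if defined(AO_HAVE_double_load) && defined(AO_HAVE_double_compare_and_swap)
  AO_INLINE int
  update_versioned(volatile AO_double_t *slot, AO_t new_value)
  {
    AO_double_t old_val, new_val;
    old_val = AO_double_load(slot);
    new_val.AO_val1 = new_value;               /* payload      */
    new_val.AO_val2 = old_val.AO_val2 + 1;     /* version tag  */
    /* Returns 0 if another thread won the race; caller retries. */
    return AO_double_compare_and_swap(slot, old_val, new_val);
  }
#endif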


@@ -0,0 +1,67 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#include "../loadstore/atomic_load.h"
#include "../loadstore/atomic_store.h"
#include "../test_and_set_t_is_ao_t.h"
#define AO_NO_DD_ORDERING
/* Data dependence does not imply read ordering. */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mb" : : : "memory");
}
#define AO_HAVE_nop_full
AO_INLINE void
AO_nop_write(void)
{
__asm__ __volatile__("wmb" : : : "memory");
}
#define AO_HAVE_nop_write
/* mb should be used for AO_nop_read(). That's the default. */
/* TODO: implement AO_fetch_and_add explicitly. */
/* We believe that ldq_l ... stq_c does not imply any memory barrier. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
unsigned long was_equal;
unsigned long temp;
__asm__ __volatile__(
"1: ldq_l %0,%1\n"
" cmpeq %0,%4,%2\n"
" mov %3,%0\n"
" beq %2,2f\n"
" stq_c %0,%1\n"
" beq %0,1b\n"
"2:\n"
: "=&r" (temp), "+m" (*addr), "=&r" (was_equal)
: "r" (new_val), "Ir" (old)
:"memory");
return (int)was_equal;
}
#define AO_HAVE_compare_and_swap
/* TODO: implement AO_fetch_compare_and_swap */
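/* A minimal sketch layered on the boolean CAS above (the function name */
/* is illustrative): an add implemented as a load/CAS retry loop, much  */
/* as the generalization layer would synthesize it.                     */
#include "atomic_ops.h"

AO_INLINE AO_t
add_then_fetch(volatile AO_t *p, AO_t delta)
{
  AO_t old;
  do {
    old = AO_load(p);
  } while (!AO_compare_and_swap(p, old, old + delta));
  return old + delta;  /* the value we installed */
}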


@@ -0,0 +1,742 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
* Copyright (c) 2008-2017 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 5)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled even for earlier gcc/clang versions. */
# define AO_GCC_ATOMIC_TEST_AND_SET
#endif
#ifdef __native_client__
/* Mask instruction should immediately precede access instruction. */
# define AO_MASK_PTR(reg) " bical " reg ", " reg ", #0xc0000000\n"
# define AO_BR_ALIGN " .align 4\n"
#else
# define AO_MASK_PTR(reg) /* empty */
# define AO_BR_ALIGN /* empty */
#endif
#if defined(__thumb__) && !defined(__thumb2__)
/* Thumb-1 mode does not have the ARM "mcr", "swp" and some load/store */
/* instructions, so we temporarily switch to ARM mode and go back */
/* afterwards (clobbering the "r3" register). */
# define AO_THUMB_GO_ARM \
" adr r3, 4f\n" \
" bx r3\n" \
" .align\n" \
" .arm\n" \
AO_BR_ALIGN \
"4:\n"
# define AO_THUMB_RESTORE_MODE \
" adr r3, 5f + 1\n" \
" bx r3\n" \
" .thumb\n" \
AO_BR_ALIGN \
"5:\n"
# define AO_THUMB_SWITCH_CLOBBERS "r3",
#else
# define AO_THUMB_GO_ARM /* empty */
# define AO_THUMB_RESTORE_MODE /* empty */
# define AO_THUMB_SWITCH_CLOBBERS /* empty */
#endif /* !__thumb__ */
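/* For illustration (a sketch, not generated code): with these macros,  */
/* an asm template compiled for Thumb-1 expands roughly to              */
/*   adr r3, 4f; bx r3; .align; .arm; 4: <ARM instructions>;            */
/*   adr r3, 5f + 1; bx r3; .thumb; 5:                                  */
/* i.e. it branches into ARM mode, runs the ARM-only instructions, and  */
/* branches back to Thumb (the "+ 1" sets the Thumb bit in the target). */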
/* NEC LE-IT: gcc provides no easy way to check the ARM architecture */
/* version; instead, it defines one (or several) of the __ARM_ARCH_x__ */
/* macros to be true. */
#if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_3__) \
&& !defined(__ARM_ARCH_3M__) && !defined(__ARM_ARCH_4__) \
&& !defined(__ARM_ARCH_4T__) \
&& ((!defined(__ARM_ARCH_5__) && !defined(__ARM_ARCH_5E__) \
&& !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__) \
&& !defined(__ARM_ARCH_5TEJ__) && !defined(__ARM_ARCH_6M__)) \
|| defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|| defined(__ARM_ARCH_8A__))
# define AO_ARM_HAVE_LDREX
# if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \
&& !defined(__ARM_ARCH_6T2__)
/* LDREXB/STREXB and LDREXH/STREXH are present in ARMv6K/Z+. */
# define AO_ARM_HAVE_LDREXBH
# endif
# if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \
&& !defined(__ARM_ARCH_6T2__) && !defined(__ARM_ARCH_6Z__) \
&& !defined(__ARM_ARCH_6ZT2__)
# if !defined(__ARM_ARCH_6K__) && !defined(__ARM_ARCH_6KZ__) \
&& !defined(__ARM_ARCH_6ZK__)
/* DMB is present in ARMv6M and ARMv7+. */
# define AO_ARM_HAVE_DMB
# endif
# if (!defined(__thumb__) \
|| (defined(__thumb2__) && !defined(__ARM_ARCH_7__) \
&& !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__))) \
&& (!defined(__clang__) || AO_CLANG_PREREQ(3, 3))
/* LDREXD/STREXD present in ARMv6K/M+ (see gas/config/tc-arm.c). */
/* In Thumb mode, this works only starting from ARMv7 (except */
/* for the base and 'M' models). Clang 3.2 (and earlier) does not */
/* allocate register pairs for LDREXD/STREXD properly (besides, */
/* Clang 3.1 does not support the "%H<r>" operand specification). */
# define AO_ARM_HAVE_LDREXD
# endif /* !thumb || ARMv7A || ARMv7R+ */
# endif /* ARMv7+ */
#endif /* ARMv6+ */
#if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_6M__) \
&& !defined(__ARM_ARCH_8A__) && !defined(__thumb2__)
# define AO_ARM_HAVE_SWP
/* Note: ARMv6M is excluded due to no ARM mode support. */
/* Also, SWP is obsoleted for ARMv8+. */
#endif /* !__thumb2__ */
#if !defined(AO_UNIPROCESSOR) && defined(AO_ARM_HAVE_DMB) \
&& !defined(AO_PREFER_BUILTIN_ATOMICS)
AO_INLINE void
AO_nop_write(void)
{
/* AO_THUMB_GO_ARM is empty. */
/* This will target the system domain and thus be overly */
/* conservative as the CPUs (even in case of big.LITTLE SoC) will */
/* occupy the inner shareable domain. */
/* The plain variant (dmb st) is theoretically slower, and should */
/* not be needed. That said, with limited experimentation, a CPU */
/* implementation for which it actually matters has not been found */
/* yet, though they should already exist. */
/* Anyway, note that the "st" and "ishst" barriers are actually */
/* quite weak and, as the libatomic_ops documentation states, */
/* usually not what you really want. */
__asm__ __volatile__("dmb ishst" : : : "memory");
}
# define AO_HAVE_nop_write
#endif /* AO_ARM_HAVE_DMB */
#ifndef AO_GCC_ATOMIC_TEST_AND_SET
#ifdef AO_UNIPROCESSOR
/* If only a single processor (core) is used, AO_UNIPROCESSOR could */
/* be defined by the client to avoid unnecessary memory barriers. */
AO_INLINE void
AO_nop_full(void)
{
AO_compiler_barrier();
}
# define AO_HAVE_nop_full
#elif defined(AO_ARM_HAVE_DMB)
/* ARMv7 is compatible with ARMv6 but has a simpler instruction for */
/* issuing a memory barrier (DMB). Raising it via CP15 should still */
/* work (though slightly less efficiently, because it requires the */
/* use of a general-purpose register). */
AO_INLINE void
AO_nop_full(void)
{
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("dmb" : : : "memory");
}
# define AO_HAVE_nop_full
#elif defined(AO_ARM_HAVE_LDREX)
/* ARMv6 is the first architecture providing support for a simple */
/* LL/SC. A data memory barrier must be raised via CP15 command. */
AO_INLINE void
AO_nop_full(void)
{
unsigned dest = 0;
/* Issue a data memory barrier (keeps ordering of memory */
/* transactions before and after this operation). */
__asm__ __volatile__("@AO_nop_full\n"
AO_THUMB_GO_ARM
" mcr p15,0,%0,c7,c10,5\n"
AO_THUMB_RESTORE_MODE
: "=&r"(dest)
: /* empty */
: AO_THUMB_SWITCH_CLOBBERS "memory");
}
# define AO_HAVE_nop_full
#else
/* AO_nop_full() is emulated using AO_test_and_set_full(). */
#endif /* !AO_UNIPROCESSOR && !AO_ARM_HAVE_LDREX */
#endif /* !AO_GCC_ATOMIC_TEST_AND_SET */
#ifdef AO_ARM_HAVE_LDREX
/* "ARM Architecture Reference Manual" (chapter A3.5.3) says that the */
/* single-copy atomic processor accesses are all byte accesses, all */
/* halfword accesses to halfword-aligned locations, all word accesses */
/* to word-aligned locations. */
/* There is only a single concern related to AO store operations: */
/* a direct write (by STR[B/H] instruction) will not be recognized */
/* by the LL/SC construct on the same CPU (i.e., according to ARM */
/* documentation, e.g., see CortexA8 TRM reference, point 8.5, */
/* atomic "store" (using LDREX/STREX[B/H]) is the only safe way to */
/* set variables also used in LL/SC environment). */
/* This is only a problem if interrupt handlers do not clear the */
/* reservation (by a CLREX instruction or a dummy STREX one), as they */
/* almost certainly should (e.g., see restore_user_regs defined in */
/* arch/arm/kernel/entry-header.S of Linux). Nonetheless, there is */
/* some doubt that this was properly implemented in some ancient OS */
/* releases. */
# ifdef AO_BROKEN_TASKSWITCH_CLREX
# define AO_SKIPATOMIC_store
# define AO_SKIPATOMIC_store_release
# define AO_SKIPATOMIC_char_store
# define AO_SKIPATOMIC_char_store_release
# define AO_SKIPATOMIC_short_store
# define AO_SKIPATOMIC_short_store_release
# define AO_SKIPATOMIC_int_store
# define AO_SKIPATOMIC_int_store_release
# ifndef AO_PREFER_BUILTIN_ATOMICS
AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
{
int flag;
__asm__ __volatile__("@AO_store\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%2")
" ldrex %0, [%2]\n"
AO_MASK_PTR("%2")
" strex %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (flag), "+m" (*addr)
: "r" (addr), "r" (value)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
# define AO_HAVE_store
# ifdef AO_ARM_HAVE_LDREXBH
AO_INLINE void AO_char_store(volatile unsigned char *addr,
unsigned char value)
{
int flag;
__asm__ __volatile__("@AO_char_store\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%2")
" ldrexb %0, [%2]\n"
AO_MASK_PTR("%2")
" strexb %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (flag), "+m" (*addr)
: "r" (addr), "r" (value)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
# define AO_HAVE_char_store
AO_INLINE void AO_short_store(volatile unsigned short *addr,
unsigned short value)
{
int flag;
__asm__ __volatile__("@AO_short_store\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%2")
" ldrexh %0, [%2]\n"
AO_MASK_PTR("%2")
" strexh %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (flag), "+m" (*addr)
: "r" (addr), "r" (value)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
# define AO_HAVE_short_store
# endif /* AO_ARM_HAVE_LDREXBH */
# endif /* !AO_PREFER_BUILTIN_ATOMICS */
# elif !defined(AO_GCC_ATOMIC_TEST_AND_SET)
# include "../loadstore/atomic_store.h"
/* AO_int_store is defined in ao_t_is_int.h. */
# endif /* !AO_BROKEN_TASKSWITCH_CLREX */
#endif /* AO_ARM_HAVE_LDREX */
#ifndef AO_GCC_ATOMIC_TEST_AND_SET
# include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
#ifdef AO_ARM_HAVE_LDREX
/* AO_t/char/short/int load is simple reading. */
/* Unaligned accesses are not guaranteed to be atomic. */
# define AO_ACCESS_CHECK_ALIGNED
# define AO_ACCESS_short_CHECK_ALIGNED
# define AO_ACCESS_int_CHECK_ALIGNED
# include "../all_atomic_only_load.h"
# ifndef AO_HAVE_char_store
# include "../loadstore/char_atomic_store.h"
# include "../loadstore/short_atomic_store.h"
# endif
/* NEC LE-IT: replace the SWAP as recommended by ARM:
"Applies to: ARM11 Cores
Though the SWP instruction will still work with ARM V6 cores, it is
recommended to use the new V6 synchronization instructions. The SWP
instruction produces 'locked' read and write accesses which are atomic,
i.e. another operation cannot be done between these locked accesses which
ties up external bus (AHB, AXI) bandwidth and can increase worst case
interrupt latencies. LDREX, STREX are more flexible, other instructions
can be done between the LDREX and STREX accesses."
*/
#ifndef AO_PREFER_GENERALIZED
#if !defined(AO_FORCE_USE_SWP) || !defined(AO_ARM_HAVE_SWP)
/* But, on the other hand, there could be a considerable performance */
/* degradation in case of a race. E.g., test_atomic.c executing the */
/* test_and_set test on a dual-core ARMv7 processor using LDREX/STREX */
/* showed around 35 times lower performance than when using SWP. */
/* To force use of the SWP instruction, pass the -D AO_FORCE_USE_SWP */
/* option (it is ignored if the SWP instruction is unsupported). */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
AO_TS_VAL_t oldval;
int flag;
__asm__ __volatile__("@AO_test_and_set\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%3")
" ldrex %0, [%3]\n"
AO_MASK_PTR("%3")
" strex %1, %4, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(oldval), "=&r"(flag), "+m"(*addr)
: "r"(addr), "r"(1)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return oldval;
}
# define AO_HAVE_test_and_set
#endif /* !AO_FORCE_USE_SWP */
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *p, AO_t incr)
{
AO_t result, tmp;
int flag;
__asm__ __volatile__("@AO_fetch_and_add\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%5")
" ldrex %0, [%5]\n" /* get original */
" add %2, %0, %4\n" /* sum up in incr */
AO_MASK_PTR("%5")
" strex %1, %2, [%5]\n" /* store them */
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(flag), "=&r"(tmp), "+m"(*p) /* 0..3 */
: "r"(incr), "r"(p) /* 4..5 */
: AO_THUMB_SWITCH_CLOBBERS "cc");
return result;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_t
AO_fetch_and_add1(volatile AO_t *p)
{
AO_t result, tmp;
int flag;
__asm__ __volatile__("@AO_fetch_and_add1\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n" /* get original */
" add %1, %0, #1\n" /* increment */
AO_MASK_PTR("%4")
" strex %2, %1, [%4]\n" /* store them */
" teq %2, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p)
: "r"(p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return result;
}
#define AO_HAVE_fetch_and_add1
AO_INLINE AO_t
AO_fetch_and_sub1(volatile AO_t *p)
{
AO_t result, tmp;
int flag;
__asm__ __volatile__("@AO_fetch_and_sub1\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n" /* get original */
" sub %1, %0, #1\n" /* decrement */
AO_MASK_PTR("%4")
" strex %2, %1, [%4]\n" /* store them */
" teq %2, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p)
: "r"(p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return result;
}
#define AO_HAVE_fetch_and_sub1
AO_INLINE void
AO_and(volatile AO_t *p, AO_t value)
{
AO_t tmp, result;
__asm__ __volatile__("@AO_and\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n"
" and %1, %0, %3\n"
AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (tmp), "=&r" (result), "+m" (*p)
: "r" (value), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
#define AO_HAVE_and
AO_INLINE void
AO_or(volatile AO_t *p, AO_t value)
{
AO_t tmp, result;
__asm__ __volatile__("@AO_or\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n"
" orr %1, %0, %3\n"
AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (tmp), "=&r" (result), "+m" (*p)
: "r" (value), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
#define AO_HAVE_or
AO_INLINE void
AO_xor(volatile AO_t *p, AO_t value)
{
AO_t tmp, result;
__asm__ __volatile__("@AO_xor\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%4")
" ldrex %0, [%4]\n"
" eor %1, %0, %3\n"
AO_MASK_PTR("%4")
" strex %0, %1, [%4]\n"
" teq %0, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (tmp), "=&r" (result), "+m" (*p)
: "r" (value), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
}
#define AO_HAVE_xor
#endif /* !AO_PREFER_GENERALIZED */
#ifdef AO_ARM_HAVE_LDREXBH
AO_INLINE unsigned char
AO_char_fetch_and_add(volatile unsigned char *p, unsigned char incr)
{
unsigned result, tmp;
int flag;
__asm__ __volatile__("@AO_char_fetch_and_add\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%5")
" ldrexb %0, [%5]\n"
" add %2, %0, %4\n"
AO_MASK_PTR("%5")
" strexb %1, %2, [%5]\n"
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (result), "=&r" (flag), "=&r" (tmp), "+m" (*p)
: "r" ((unsigned)incr), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return (unsigned char)result;
}
# define AO_HAVE_char_fetch_and_add
AO_INLINE unsigned short
AO_short_fetch_and_add(volatile unsigned short *p, unsigned short incr)
{
unsigned result, tmp;
int flag;
__asm__ __volatile__("@AO_short_fetch_and_add\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: " AO_MASK_PTR("%5")
" ldrexh %0, [%5]\n"
" add %2, %0, %4\n"
AO_MASK_PTR("%5")
" strexh %1, %2, [%5]\n"
" teq %1, #0\n"
" bne 1b\n"
AO_THUMB_RESTORE_MODE
: "=&r" (result), "=&r" (flag), "=&r" (tmp), "+m" (*p)
: "r" ((unsigned)incr), "r" (p)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return (unsigned short)result;
}
# define AO_HAVE_short_fetch_and_add
#endif /* AO_ARM_HAVE_LDREXBH */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t result, tmp;
__asm__ __volatile__("@AO_compare_and_swap\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: mov %0, #2\n" /* store a flag */
AO_MASK_PTR("%3")
" ldrex %1, [%3]\n" /* get original */
" teq %1, %4\n" /* see if match */
AO_MASK_PTR("%3")
# ifdef __thumb2__
/* TODO: Eliminate the deprecation warning: "IT blocks containing */
/* wide Thumb instructions are deprecated in ARMv8". */
" it eq\n"
# endif
" strexeq %0, %5, [%3]\n" /* store new one if matched */
" teq %0, #1\n"
" beq 1b\n" /* if update failed, repeat */
AO_THUMB_RESTORE_MODE
: "=&r"(result), "=&r"(tmp), "+m"(*addr)
: "r"(addr), "r"(old_val), "r"(new_val)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return !(result&2); /* if succeeded then return 1 else 0 */
}
# define AO_HAVE_compare_and_swap
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t fetched_val;
int flag;
__asm__ __volatile__("@AO_fetch_compare_and_swap\n"
AO_THUMB_GO_ARM
AO_BR_ALIGN
"1: mov %0, #2\n" /* store a flag */
AO_MASK_PTR("%3")
" ldrex %1, [%3]\n" /* get original */
" teq %1, %4\n" /* see if match */
AO_MASK_PTR("%3")
# ifdef __thumb2__
" it eq\n"
# endif
" strexeq %0, %5, [%3]\n" /* store new one if matched */
" teq %0, #1\n"
" beq 1b\n" /* if update failed, repeat */
AO_THUMB_RESTORE_MODE
: "=&r"(flag), "=&r"(fetched_val), "+m"(*addr)
: "r"(addr), "r"(old_val), "r"(new_val)
: AO_THUMB_SWITCH_CLOBBERS "cc");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
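/* Illustrative sketch (not part of the library): a one-time claim */
/* built on the fetch-CAS defined above; the function name is an */
/* example, not an AO API. Exactly one caller observes 0 and wins, */
/* assuming the flag is zero-initialized. */
AO_INLINE int
AO_example_claim_once(volatile AO_t *once_flag)
{
  return AO_fetch_compare_and_swap(once_flag, 0, 1) == 0;
}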
#ifdef AO_ARM_HAVE_LDREXD
# include "../standard_ao_double_t.h"
/* "ARM Architecture Reference Manual ARMv7-A/R edition" (chapter */
/* A3.5.3) says that memory accesses caused by LDREXD and STREXD */
/* instructions to doubleword-aligned locations are single-copy */
/* atomic; accesses to 64-bit elements by other instructions might */
/* not be single-copy atomic as they are executed as a sequence of */
/* 32-bit accesses. */
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_load\n"
AO_MASK_PTR("%1")
" ldrexd %0, %H0, [%1]"
: "=&r" (result.AO_whole)
: "r" (addr)
/* : no clobber */);
return result;
}
# define AO_HAVE_double_load
AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t new_val)
{
AO_double_t old_val;
int status;
do {
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_store\n"
AO_MASK_PTR("%3")
" ldrexd %0, %H0, [%3]\n"
AO_MASK_PTR("%3")
" strexd %1, %4, %H4, [%3]"
: "=&r" (old_val.AO_whole), "=&r" (status), "+m" (*addr)
: "r" (addr), "r" (new_val.AO_whole)
: "cc");
} while (AO_EXPECT_FALSE(status));
}
# define AO_HAVE_double_store
AO_INLINE int
AO_double_compare_and_swap(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
double_ptr_storage tmp;
int result = 1;
do {
/* AO_THUMB_GO_ARM is empty. */
__asm__ __volatile__("@AO_double_compare_and_swap\n"
AO_MASK_PTR("%1")
" ldrexd %0, %H0, [%1]\n" /* get original to r1 & r2 */
: "=&r"(tmp)
: "r"(addr)
/* : no clobber */);
if (tmp != old_val.AO_whole)
break;
__asm__ __volatile__(
AO_MASK_PTR("%2")
" strexd %0, %3, %H3, [%2]\n" /* store new one if matched */
: "=&r"(result), "+m"(*addr)
: "r" (addr), "r" (new_val.AO_whole)
: "cc");
} while (AO_EXPECT_FALSE(result));
return !result; /* if succeeded then return 1 else 0 */
}
# define AO_HAVE_double_compare_and_swap
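  /* Illustrative sketch (not part of the library): advance a */
  /* double-width counter using the primitives above; the function */
  /* name is an example. AO_whole is an unsigned integral type of */
  /* twice the word size, so plain arithmetic on it is well defined. */
  AO_INLINE void
  AO_example_double_incr(volatile AO_double_t *counter)
  {
    AO_double_t old_val, new_val;
    do {
      old_val = AO_double_load(counter);
      new_val.AO_whole = old_val.AO_whole + 1;
    } while (!AO_double_compare_and_swap(counter, old_val, new_val));
  }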
#endif /* AO_ARM_HAVE_LDREXD */
#else
/* pre ARMv6 architectures ... */
/* I found a slide set that, if I read it correctly, claims that */
/* loads followed by either a load or a store are ordered, but */
/* nothing else is. */
/* It appears that SWP is the only simple memory barrier. */
#include "../all_aligned_atomic_load_store.h"
/* The code should run correctly on a multi-core ARMv6+ as well. */
#endif /* !AO_ARM_HAVE_LDREX */
#if !defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_test_and_set) \
&& defined (AO_ARM_HAVE_SWP) && (!defined(AO_PREFER_GENERALIZED) \
|| !defined(AO_HAVE_fetch_compare_and_swap))
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
AO_TS_VAL_t oldval;
/* SWP on ARM is very similar to XCHG on x86. */
/* The first operand is the result, the second the value */
/* to be stored. Both registers must be different from addr. */
/* Make the address operand an early clobber output so it */
/* doesn't overlap with the other operands. The early clobber */
/* on oldval is necessary to prevent the compiler from allocating */
/* them to the same register if they are both unused. */
__asm__ __volatile__("@AO_test_and_set_full\n"
AO_THUMB_GO_ARM
AO_MASK_PTR("%3")
" swp %0, %2, [%3]\n"
/* Ignore GCC "SWP is deprecated for this architecture" */
/* warning here (for ARMv6+). */
AO_THUMB_RESTORE_MODE
: "=&r"(oldval), "=&r"(addr)
: "r"(1), "1"(addr)
: AO_THUMB_SWITCH_CLOBBERS "memory");
return oldval;
}
# define AO_HAVE_test_and_set_full
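/* Illustrative sketch (not part of the library): a minimal */
/* spinlock acquire loop on top of the primitive above; the */
/* function name is an example, and AO_TS_SET is assumed from the */
/* test-and-set headers. The holder is expected to release the */
/* lock elsewhere via AO_CLEAR(). */
AO_INLINE void
AO_example_spin_lock(volatile AO_TS_t *lock)
{
  while (AO_test_and_set_full(lock) == AO_TS_SET) {
    /* Busy-wait; a real lock would yield or back off here. */
  }
}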
#endif /* !AO_HAVE_test_and_set[_full] && AO_ARM_HAVE_SWP */
#define AO_T_IS_INT
#else /* AO_GCC_ATOMIC_TEST_AND_SET */
# if defined(__clang__) && !defined(AO_ARM_HAVE_LDREX)
/* As of clang-3.8, it cannot compile __atomic_and/or/xor_fetch */
/* library calls yet for pre ARMv6. */
# define AO_SKIPATOMIC_ANY_and_ANY
# define AO_SKIPATOMIC_ANY_or_ANY
# define AO_SKIPATOMIC_ANY_xor_ANY
# endif
# ifdef AO_ARM_HAVE_LDREXD
# include "../standard_ao_double_t.h"
# endif
# include "generic.h"
#endif /* AO_GCC_ATOMIC_TEST_AND_SET */
#undef AO_ARM_HAVE_DMB
#undef AO_ARM_HAVE_LDREX
#undef AO_ARM_HAVE_LDREXBH
#undef AO_ARM_HAVE_LDREXD
#undef AO_ARM_HAVE_SWP
#undef AO_BR_ALIGN
#undef AO_MASK_PTR
#undef AO_SKIPATOMIC_ANY_and_ANY
#undef AO_SKIPATOMIC_ANY_or_ANY
#undef AO_SKIPATOMIC_ANY_xor_ANY
#undef AO_SKIPATOMIC_char_store
#undef AO_SKIPATOMIC_char_store_release
#undef AO_SKIPATOMIC_int_store
#undef AO_SKIPATOMIC_int_store_release
#undef AO_SKIPATOMIC_short_store
#undef AO_SKIPATOMIC_short_store_release
#undef AO_SKIPATOMIC_store
#undef AO_SKIPATOMIC_store_release
#undef AO_THUMB_GO_ARM
#undef AO_THUMB_RESTORE_MODE
#undef AO_THUMB_SWITCH_CLOBBERS

View File

@ -0,0 +1,71 @@
/*
* Copyright (C) 2009 Bradley Smith <brad@brad-smith.co.uk>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "../all_atomic_load_store.h"
#include "../ordered.h" /* There are no multiprocessor implementations. */
#include "../test_and_set_t_is_ao_t.h"
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
register long ret;
__asm__ __volatile__(
"xchg %[oldval], %[mem], %[newval]"
: [oldval] "=&r"(ret)
: [mem] "r"(addr), [newval] "r"(1)
: "memory");
return (AO_TS_VAL_t)ret;
}
# define AO_HAVE_test_and_set_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
register long ret;
__asm__ __volatile__(
"1: ssrf 5\n"
" ld.w %[res], %[mem]\n"
" eor %[res], %[oldval]\n"
" brne 2f\n"
" stcond %[mem], %[newval]\n"
" brne 1b\n"
"2:\n"
: [res] "=&r"(ret), [mem] "=m"(*addr)
: "m"(*addr), [newval] "r"(new_val), [oldval] "r"(old)
: "cc", "memory");
return (int)ret == 0; /* ret is 0 iff the store was performed */
}
#define AO_HAVE_compare_and_swap_full
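/* Illustrative sketch (not part of the library): a fetch-and-add */
/* retry loop built on the CAS above; the function name is an */
/* example. AO_load comes from all_atomic_load_store.h, included */
/* at the top of this file. */
AO_INLINE AO_t
AO_example_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
  AO_t old_val;
  do {
    old_val = AO_load(addr);
  } while (!AO_compare_and_swap_full(addr, old_val, old_val + incr));
  return old_val; /* the value seen before the successful add */
}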
/* TODO: implement AO_fetch_compare_and_swap. */
#define AO_T_IS_INT

View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* FIXME: seems to be untested. */
#include "../all_atomic_load_store.h"
#include "../ordered.h" /* There are no multiprocessor implementations. */
#include "../test_and_set_t_is_ao_t.h"
/*
* The architecture apparently supports an "f" flag which is
* set on preemption. This essentially gives us load-locked,
* store-conditional primitives, though I'm not quite sure how
* this would work on a hypothetical multiprocessor. -HB
*
* For details, see
* http://developer.axis.com/doc/hardware/etrax100lx/prog_man/
* 1_architectural_description.pdf
*
* TODO: Presumably many other primitives (notably CAS, including the double-
* width versions) could be implemented in this manner, if someone got
* around to it.
*/
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
/* Ripped from linuxthreads/sysdeps/cris/pt-machine.h */
register unsigned long int ret;
/* Note the use of a dummy output of *addr to expose the write. */
/* The memory barrier is to stop *other* writes being moved past */
/* this code. */
__asm__ __volatile__("clearf\n"
"0:\n\t"
"movu.b [%2],%0\n\t"
"ax\n\t"
"move.b %3,[%2]\n\t"
"bwf 0b\n\t"
"clearf"
: "=&r" (ret), "=m" (*addr)
: "r" (addr), "r" ((int) 1), "m" (*addr)
: "memory");
return ret;
}
#define AO_HAVE_test_and_set_full

View File

@ -0,0 +1,864 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_char_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_xor
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_short_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_xor
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_int_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_xor
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
}
#define AO_HAVE_fetch_and_add
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_and
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_or
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_xor
#endif
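/* Illustrative sketch (not part of the library): a relaxed */
/* statistics counter using the primitive above; the function name */
/* is an example. Relaxed ordering suffices when no other data is */
/* published through the counter value. */
AO_INLINE AO_t
AO_example_count_event(volatile AO_t *counter)
{
  return AO_fetch_and_add(counter, 1); /* pre-increment value */
}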
#endif /* !AO_NO_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_char_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_xor_acquire
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_short_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_xor_acquire
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_int_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and_acquire(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or_acquire(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor_acquire(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_xor_acquire
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_fetch_and_add_acquire
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and_acquire(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_and_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or_acquire(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_or_acquire
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor_acquire(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_xor_acquire
#endif
#endif /* !AO_NO_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_char_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_xor_release
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_release(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_short_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_xor_release
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_int_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and_release(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or_release(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor_release(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_xor_release
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
}
#define AO_HAVE_fetch_and_add_release
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and_release(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_and_release
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or_release(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_or_release
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor_release(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_xor_release
#endif
#endif /* !AO_NO_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_char_ARITHM
AO_INLINE unsigned/**/char
AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_char_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_char_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_char_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_char_xor_full
#endif
#endif /* !AO_NO_char_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_short_ARITHM
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_full(volatile unsigned/**/short *addr, unsigned/**/short incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_short_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_short_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_short_xor_full
#endif
#endif /* !AO_NO_short_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_int_ARITHM
AO_INLINE unsigned
AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_int_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_int_and_full(volatile unsigned *addr, unsigned value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_int_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_int_or_full(volatile unsigned *addr, unsigned value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_int_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_int_xor_full(volatile unsigned *addr, unsigned value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_int_xor_full
#endif
#endif /* !AO_NO_int_ARITHM */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_ARITHM
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
}
#define AO_HAVE_fetch_and_add_full
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_and_full(volatile AO_t *addr, AO_t value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_and_full
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_or_full(volatile AO_t *addr, AO_t value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_or_full
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_xor_full(volatile AO_t *addr, AO_t value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_xor_full
#endif
#endif /* !AO_NO_ARITHM */

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#ifndef AO_NO_XSIZE_ARITHM
AO_INLINE XCTYPE
AO_XSIZE_fetch_and_add_XBAR(volatile XCTYPE *addr, XCTYPE incr)
{
return __atomic_fetch_add(addr, incr, __ATOMIC_XGCCBAR);
}
#define AO_HAVE_XSIZE_fetch_and_add_XBAR
#ifndef AO_SKIPATOMIC_ANY_and_ANY
AO_INLINE void
AO_XSIZE_and_XBAR(volatile XCTYPE *addr, XCTYPE value)
{
(void)__atomic_and_fetch(addr, value, __ATOMIC_XGCCBAR);
}
# define AO_HAVE_XSIZE_and_XBAR
#endif
#ifndef AO_SKIPATOMIC_ANY_or_ANY
AO_INLINE void
AO_XSIZE_or_XBAR(volatile XCTYPE *addr, XCTYPE value)
{
(void)__atomic_or_fetch(addr, value, __ATOMIC_XGCCBAR);
}
# define AO_HAVE_XSIZE_or_XBAR
#endif
#ifndef AO_SKIPATOMIC_ANY_xor_ANY
AO_INLINE void
AO_XSIZE_xor_XBAR(volatile XCTYPE *addr, XCTYPE value)
{
(void)__atomic_xor_fetch(addr, value, __ATOMIC_XGCCBAR);
}
# define AO_HAVE_XSIZE_xor_XBAR
#endif
#endif /* !AO_NO_XSIZE_ARITHM */
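/* Note: the XSIZE, XCTYPE and XBAR placeholders above are expanded */
/* at build time into the char/short/int/AO_t and relaxed/acquire/ */
/* release/full variants seen elsewhere in this file, presumably by */
/* plain textual substitution (this note is an editorial */
/* assumption, not taken from the build scripts themselves). */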

View File

@ -0,0 +1,632 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_char_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE unsigned/**/char
AO_char_load(const volatile unsigned/**/char *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_char_load
AO_INLINE unsigned/**/char
AO_char_load_acquire(const volatile unsigned/**/char *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_char_load_acquire
/* char_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* char_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* char_store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_char_store
AO_INLINE void
AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_store
#endif
#ifndef AO_SKIPATOMIC_char_store_release
AO_INLINE void
AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_char_store_release
#endif
#endif /* !AO_GCC_HAVE_char_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_char_SYNC_CAS
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
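    /* On failure, __atomic_compare_exchange_n writes the value */
    /* actually observed into old_val, so returning old_val yields */
    /* correct fetch-CAS semantics in both outcomes. */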
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap_acquire(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap_acquire
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap_release(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap_release
AO_INLINE unsigned/**/char
AO_char_fetch_compare_and_swap_full(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_char_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_char_compare_and_swap(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_char_compare_and_swap
AO_INLINE int
AO_char_compare_and_swap_acquire(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_char_compare_and_swap_acquire
AO_INLINE int
AO_char_compare_and_swap_release(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_char_compare_and_swap_release
AO_INLINE int
AO_char_compare_and_swap_full(volatile unsigned/**/char *addr,
unsigned/**/char old_val, unsigned/**/char new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_char_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_char_SYNC_CAS */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_short_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE unsigned/**/short
AO_short_load(const volatile unsigned/**/short *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_short_load
AO_INLINE unsigned/**/short
AO_short_load_acquire(const volatile unsigned/**/short *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_short_load_acquire
/* short_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* short_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* short_store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_short_store
AO_INLINE void
AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_store
#endif
#ifndef AO_SKIPATOMIC_short_store_release
AO_INLINE void
AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_short_store_release
#endif
#endif /* !AO_GCC_HAVE_short_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_short_SYNC_CAS
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap_acquire(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap_acquire
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap_release(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap_release
AO_INLINE unsigned/**/short
AO_short_fetch_compare_and_swap_full(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_short_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_short_compare_and_swap(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_short_compare_and_swap
AO_INLINE int
AO_short_compare_and_swap_acquire(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_short_compare_and_swap_acquire
AO_INLINE int
AO_short_compare_and_swap_release(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_short_compare_and_swap_release
AO_INLINE int
AO_short_compare_and_swap_full(volatile unsigned/**/short *addr,
unsigned/**/short old_val, unsigned/**/short new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_short_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_short_SYNC_CAS */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_int_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE unsigned
AO_int_load(const volatile unsigned *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_int_load
AO_INLINE unsigned
AO_int_load_acquire(const volatile unsigned *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_int_load_acquire
/* int_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* int_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* int_store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_int_store
AO_INLINE void
AO_int_store(volatile unsigned *addr, unsigned value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_store
#endif
#ifndef AO_SKIPATOMIC_int_store_release
AO_INLINE void
AO_int_store_release(volatile unsigned *addr, unsigned value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_int_store_release
#endif
#endif /* !AO_GCC_HAVE_int_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_int_SYNC_CAS
AO_INLINE unsigned
AO_int_fetch_compare_and_swap(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap
AO_INLINE unsigned
AO_int_fetch_compare_and_swap_acquire(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_acquire
AO_INLINE unsigned
AO_int_fetch_compare_and_swap_release(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_release
AO_INLINE unsigned
AO_int_fetch_compare_and_swap_full(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_int_compare_and_swap(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_int_compare_and_swap
AO_INLINE int
AO_int_compare_and_swap_acquire(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_int_compare_and_swap_acquire
AO_INLINE int
AO_int_compare_and_swap_release(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_int_compare_and_swap_release
AO_INLINE int
AO_int_compare_and_swap_full(volatile unsigned *addr,
unsigned old_val, unsigned new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_int_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_int_SYNC_CAS */
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_load
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_load_acquire
/* load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* store_full definition is omitted similar to load_full reason. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_store
AO_INLINE void
AO_store(volatile AO_t *addr, AO_t value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_store
#endif
#ifndef AO_SKIPATOMIC_store_release
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_store_release
#endif
#endif /* !AO_GCC_HAVE_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_SYNC_CAS
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_fetch_compare_and_swap_full
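  /* Illustrative sketch (not part of the library): an atomic-max */
  /* update built on the fetch-CAS above; the function name is an */
  /* example. The relaxed initial load matches the usage of */
  /* __atomic_load_n elsewhere in this file. */
  AO_INLINE void
  AO_example_store_max(volatile AO_t *addr, AO_t value)
  {
    AO_t old_val = __atomic_load_n(addr, __ATOMIC_RELAXED);
    while (old_val < value) {
      AO_t seen = AO_fetch_compare_and_swap(addr, old_val, value);
      if (seen == old_val)
        break; /* swap succeeded */
      old_val = seen; /* lost a race; retry against the new value */
    }
  }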
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_compare_and_swap
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr,
AO_t old_val, AO_t new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_SYNC_CAS */

View File

@ -0,0 +1,158 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if !defined(AO_GCC_HAVE_XSIZE_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
AO_INLINE XCTYPE
AO_XSIZE_load(const volatile XCTYPE *addr)
{
return __atomic_load_n(addr, __ATOMIC_RELAXED);
}
#define AO_HAVE_XSIZE_load
AO_INLINE XCTYPE
AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
{
return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_XSIZE_load_acquire
/* XSIZE_load_read is defined using load and nop_read. */
/* TODO: Map it to ACQUIRE. We should be strengthening the read and */
/* write stuff to the more general acquire/release versions. It almost */
/* never makes a difference and is much less error-prone. */
/* XSIZE_load_full is generalized using load and nop_full. */
/* TODO: Map it to SEQ_CST and clarify the documentation. */
/* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */
/* mapped to CONSUME, but the latter is currently broken. */
/* The XSIZE_store_full definition is omitted for the same reason as */
/* load_full. */
/* TODO: Map store_write to RELEASE. */
#ifndef AO_SKIPATOMIC_XSIZE_store
AO_INLINE void
AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE value)
{
__atomic_store_n(addr, value, __ATOMIC_RELAXED);
}
# define AO_HAVE_XSIZE_store
#endif
#ifndef AO_SKIPATOMIC_XSIZE_store_release
AO_INLINE void
AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE value)
{
__atomic_store_n(addr, value, __ATOMIC_RELEASE);
}
# define AO_HAVE_XSIZE_store_release
#endif
#endif /* !AO_GCC_HAVE_XSIZE_SYNC_CAS || !AO_PREFER_GENERALIZED */
#ifdef AO_GCC_HAVE_XSIZE_SYNC_CAS
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr,
&old_val /* p_expected */,
new_val /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap_acquire(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap_release(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap_release
AO_INLINE XCTYPE
AO_XSIZE_fetch_compare_and_swap_full(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
return old_val;
}
# define AO_HAVE_XSIZE_fetch_compare_and_swap_full
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_XSIZE_compare_and_swap(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
# define AO_HAVE_XSIZE_compare_and_swap
AO_INLINE int
AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_XSIZE_compare_and_swap_acquire
AO_INLINE int
AO_XSIZE_compare_and_swap_release(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_XSIZE_compare_and_swap_release
AO_INLINE int
AO_XSIZE_compare_and_swap_full(volatile XCTYPE *addr,
XCTYPE old_val, XCTYPE new_val)
{
return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_XSIZE_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
#endif /* AO_GCC_HAVE_XSIZE_SYNC_CAS */
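
The XSIZE/XCTYPE placeholders above are substituted per operand size when the
library headers are generated, presumably yielding the char, short and int
variants. Purely as an illustration (this is a sketch of the expected
expansion, not the actual generated file), the short instantiation of the
acquire load would read:

AO_INLINE unsigned short
AO_short_load_acquire(const volatile unsigned short *addr)
{
  return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
}
#define AO_HAVE_short_load_acquire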


@ -0,0 +1,239 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2013-2017 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* The following implementation assumes GCC 4.7 or later. */
/* For the details, see GNU Manual, chapter 6.52 (Built-in functions */
/* for memory model aware atomic operations). */
#define AO_GCC_ATOMIC_TEST_AND_SET
#include "../test_and_set_t_is_char.h"
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_char_SYNC_CAS
#endif
#if (__SIZEOF_SHORT__ == 2 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_short_SYNC_CAS
#endif
#if (__SIZEOF_INT__ == 4 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) \
|| (__SIZEOF_INT__ == 8 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_int_SYNC_CAS
#endif
#if (__SIZEOF_SIZE_T__ == 4 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) \
|| (__SIZEOF_SIZE_T__ == 8 \
&& defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \
|| defined(AO_GCC_FORCE_HAVE_CAS)
# define AO_GCC_HAVE_SYNC_CAS
#endif
#undef AO_compiler_barrier
#define AO_compiler_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)
#ifdef AO_UNIPROCESSOR
/* If only a single processor (core) is used, AO_UNIPROCESSOR could */
/* be defined by the client to avoid unnecessary memory barriers. */
AO_INLINE void
AO_nop_full(void)
{
AO_compiler_barrier();
}
# define AO_HAVE_nop_full
#else
AO_INLINE void
AO_nop_read(void)
{
__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
# define AO_HAVE_nop_read
# ifndef AO_HAVE_nop_write
AO_INLINE void
AO_nop_write(void)
{
__atomic_thread_fence(__ATOMIC_RELEASE);
}
# define AO_HAVE_nop_write
# endif
AO_INLINE void
AO_nop_full(void)
{
/* __sync_synchronize() could be used instead. */
__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
# define AO_HAVE_nop_full
#endif /* !AO_UNIPROCESSOR */
#include "generic-small.h"
#ifndef AO_PREFER_GENERALIZED
# include "generic-arithm.h"
# define AO_CLEAR(addr) __atomic_clear(addr, __ATOMIC_RELEASE)
# define AO_HAVE_CLEAR
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELAXED);
}
# define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELEASE);
}
# define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_SEQ_CST);
}
# define AO_HAVE_test_and_set_full
#endif /* !AO_PREFER_GENERALIZED */
#ifdef AO_HAVE_DOUBLE_PTR_STORAGE
# if ((__SIZEOF_SIZE_T__ == 4 \
&& defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \
|| (__SIZEOF_SIZE_T__ == 8 /* half of AO_double_t */ \
&& defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16))) \
&& !defined(AO_SKIPATOMIC_double_compare_and_swap_ANY)
# define AO_GCC_HAVE_double_SYNC_CAS
# endif
# if !defined(AO_GCC_HAVE_double_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED)
# if !defined(AO_HAVE_double_load) && !defined(AO_SKIPATOMIC_double_load)
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_RELAXED);
return result;
}
# define AO_HAVE_double_load
# endif
# if !defined(AO_HAVE_double_load_acquire) \
&& !defined(AO_SKIPATOMIC_double_load_acquire)
AO_INLINE AO_double_t
AO_double_load_acquire(const volatile AO_double_t *addr)
{
AO_double_t result;
result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_ACQUIRE);
return result;
}
# define AO_HAVE_double_load_acquire
# endif
# if !defined(AO_HAVE_double_store) && !defined(AO_SKIPATOMIC_double_store)
AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t value)
{
__atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELAXED);
}
# define AO_HAVE_double_store
# endif
# if !defined(AO_HAVE_double_store_release) \
&& !defined(AO_SKIPATOMIC_double_store_release)
AO_INLINE void
AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
{
__atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELEASE);
}
# define AO_HAVE_double_store_release
# endif
#endif /* !AO_GCC_HAVE_double_SYNC_CAS || !AO_PREFER_GENERALIZED */
#endif /* AO_HAVE_DOUBLE_PTR_STORAGE */
#ifdef AO_GCC_HAVE_double_SYNC_CAS
# ifndef AO_HAVE_double_compare_and_swap
AO_INLINE int
AO_double_compare_and_swap(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole /* p_expected */,
new_val.AO_whole /* desired */,
0 /* is_weak: false */,
__ATOMIC_RELAXED /* success */,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_double_compare_and_swap
# endif
# ifndef AO_HAVE_double_compare_and_swap_acquire
AO_INLINE int
AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
AO_double_t old_val,
AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole, new_val.AO_whole, 0,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}
# define AO_HAVE_double_compare_and_swap_acquire
# endif
# ifndef AO_HAVE_double_compare_and_swap_release
AO_INLINE int
AO_double_compare_and_swap_release(volatile AO_double_t *addr,
AO_double_t old_val,
AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole, new_val.AO_whole, 0,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED /* failure */);
}
# define AO_HAVE_double_compare_and_swap_release
# endif
# ifndef AO_HAVE_double_compare_and_swap_full
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
return (int)__atomic_compare_exchange_n(&addr->AO_whole,
&old_val.AO_whole, new_val.AO_whole, 0,
__ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE /* failure */);
}
# define AO_HAVE_double_compare_and_swap_full
# endif
#endif /* AO_GCC_HAVE_double_SYNC_CAS */
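
Where the double-width CAS above is available, the same behavior can be
exercised portably through a twice-pointer-width integer. A sketch assuming a
64-bit target with unsigned __int128 and a lock-free 16-byte CAS (e.g.
gcc -mcx16 on x86_64; the call may otherwise be routed through libatomic):

#include <assert.h>

typedef unsigned __int128 dword_t;   /* plays the role of AO_double_t.AO_whole */

static int double_cas(volatile dword_t *addr, dword_t old_val, dword_t new_val)
{
  return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

int main(void)
{
  static volatile dword_t v;          /* statics get the required alignment */
  v = ((dword_t)1 << 64) | 2;
  assert(double_cas(&v, ((dword_t)1 << 64) | 2, 42) && v == 42);
  assert(!double_cas(&v, 2, 3) && v == 42);   /* mismatch: no store */
  return 0;
}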


@ -0,0 +1,140 @@
/*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#if AO_CLANG_PREREQ(3, 9) && !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled for earlier clang versions as well. */
/* As of clang-3.9, __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n are missing. */
# define AO_GCC_FORCE_HAVE_CAS
# define AO_GCC_HAVE_double_SYNC_CAS
# include "../standard_ao_double_t.h"
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_ao_t.h"
/* There's also "isync" and "barrier"; however, for all current CPU */
/* versions, "syncht" should suffice. Likewise, it seems that the */
/* auto-defined versions of *_acquire, *_release or *_full suffice for */
/* all current ISA implementations. */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("syncht" : : : "memory");
}
#define AO_HAVE_nop_full
/* The Hexagon has load-locked, store-conditional primitives, and so */
/* the resulting code is very nearly identical to that of PowerPC. */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
AO_t oldval;
AO_t newval;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%3);\n" /* load and reserve */
" %1 = add (%0,%4);\n" /* increment */
" memw_locked(%3,p1) = %1;\n" /* store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
: "=&r"(oldval), "=&r"(newval), "+m"(*addr)
: "r"(addr), "r"(incr)
: "memory", "p1");
return oldval;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
int oldval;
int locked_value = 1;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%2);\n" /* load and reserve */
" {\n"
" p2 = cmp.eq(%0,#0);\n" /* if load is not zero, */
" if (!p2.new) jump:nt 2f;\n" /* we are done */
" }\n"
" memw_locked(%2,p1) = %3;\n" /* else store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
"2:\n" /* oldval is zero if we set */
: "=&r"(oldval), "+m"(*addr)
: "r"(addr), "r"(locked_value)
: "memory", "p1", "p2");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t __oldval;
int result = 0;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%3);\n" /* load and reserve */
" {\n"
" p2 = cmp.eq(%0,%4);\n" /* if load is not equal to */
" if (!p2.new) jump:nt 2f;\n" /* old, fail */
" }\n"
" memw_locked(%3,p1) = %5;\n" /* else store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
" %1 = #1\n" /* success, result = 1 */
"2:\n"
: "=&r" (__oldval), "+r" (result), "+m"(*addr)
: "r" (addr), "r" (old), "r" (new_val)
: "p1", "p2", "memory"
);
return result;
}
# define AO_HAVE_compare_and_swap
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t __oldval;
__asm__ __volatile__(
"1:\n"
" %0 = memw_locked(%2);\n" /* load and reserve */
" {\n"
" p2 = cmp.eq(%0,%3);\n" /* if load is not equal to */
" if (!p2.new) jump:nt 2f;\n" /* old_val, fail */
" }\n"
" memw_locked(%2,p1) = %4;\n" /* else store conditional */
" if (!p1) jump 1b;\n" /* retry if lost reservation */
"2:\n"
: "=&r" (__oldval), "+m"(*addr)
: "r" (addr), "r" (old_val), "r" (new_val)
: "p1", "p2", "memory"
);
return __oldval;
}
#define AO_HAVE_fetch_compare_and_swap
#define AO_T_IS_INT
#endif /* AO_DISABLE_GCC_ATOMICS */
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_GCC_HAVE_double_SYNC_CAS
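
The memw_locked loops above follow the classic LL/SC shape: load with a
reservation, compute, store conditionally, retry if the reservation was lost.
A portable paraphrase (not Hexagon-specific; hypothetical name) is a weak
compare-exchange loop, since a weak CAS may fail spuriously exactly like a
store-conditional:

static unsigned long
fetch_and_add_llsc_style(volatile unsigned long *addr, unsigned long incr)
{
  unsigned long oldval = __atomic_load_n(addr, __ATOMIC_RELAXED);
  /* A failed weak exchange refreshes oldval with the observed value, */
  /* mirroring the re-executed "load and reserve".                    */
  while (!__atomic_compare_exchange_n(addr, &oldval, oldval + incr,
                                      1 /* weak */, __ATOMIC_RELAXED,
                                      __ATOMIC_RELAXED)) {
    /* lost the race (or failed spuriously); retry */
  }
  return oldval;
}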


@ -0,0 +1,94 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "../all_atomic_load_store.h"
/* Some architecture set descriptions include special "ordered" memory */
/* operations. As far as we can tell, no existing processors actually */
/* require those. Nor does it appear likely that future processors */
/* will. */
#include "../ordered.h"
/* GCC will not guarantee the alignment we need, so we use four lock */
/* words and select the correctly aligned datum. See the glibc 2.3.2 */
/* linuxthread port for the original implementation. */
struct AO_pa_clearable_loc {
int data[4];
};
#undef AO_TS_INITIALIZER
#define AO_TS_t struct AO_pa_clearable_loc
#define AO_TS_INITIALIZER { { 1, 1, 1, 1 } }
/* Switch meaning of set and clear, since we only have an atomic clear */
/* instruction. */
typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
#define AO_TS_VAL_t AO_PA_TS_val
#define AO_TS_CLEAR AO_PA_TS_clear
#define AO_TS_SET AO_PA_TS_set
/* The hppa only has one atomic read and modify memory operation, */
/* load and clear, so hppa spinlocks must use zero to signify that */
/* someone is holding the lock. The address used for the ldcw */
/* semaphore must be 16-byte aligned. */
#define AO_ldcw(a, ret) \
__asm__ __volatile__("ldcw 0(%2), %0" \
: "=r" (ret), "=m" (*(a)) : "r" (a))
/* Because malloc only guarantees 8-byte alignment for malloc'd data, */
/* and GCC only guarantees 8-byte alignment for stack locals, we can't */
/* be assured of 16-byte alignment for atomic lock data even if we */
/* specify "__attribute ((aligned(16)))" in the type declaration. So, */
/* we use a struct containing an array of four ints for the atomic lock */
/* type and dynamically select the 16-byte aligned int from the array */
/* for the semaphore. */
#define AO_PA_LDCW_ALIGNMENT 16
#define AO_ldcw_align(addr) \
((volatile unsigned *)(((unsigned long)(addr) \
+ (AO_PA_LDCW_ALIGNMENT - 1)) \
& ~(AO_PA_LDCW_ALIGNMENT - 1)))
/* Works on PA 1.1 and PA 2.0 systems */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t * addr)
{
volatile unsigned int ret;
volatile unsigned *a = AO_ldcw_align(addr);
AO_ldcw(a, ret);
return (AO_TS_VAL_t)ret;
}
#define AO_HAVE_test_and_set_full
AO_INLINE void
AO_pa_clear(volatile AO_TS_t * addr)
{
volatile unsigned *a = AO_ldcw_align(addr);
AO_compiler_barrier();
*a = 1;
}
#define AO_CLEAR(addr) AO_pa_clear(addr)
#define AO_HAVE_CLEAR
#undef AO_PA_LDCW_ALIGNMENT
#undef AO_ldcw
#undef AO_ldcw_align
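
The AO_ldcw_align trick above can be checked in isolation: rounding up inside
a four-int struct always yields a 16-byte-aligned word that still lies within
the struct, whatever the struct's own address. A small self-test under that
assumption (hypothetical names):

#include <assert.h>

struct pa_loc { int data[4]; };                /* 16 bytes total */
#define LDCW_ALIGNMENT 16

static volatile unsigned *align_loc(volatile struct pa_loc *p)
{
  return (volatile unsigned *)(((unsigned long)p + (LDCW_ALIGNMENT - 1))
                               & ~(unsigned long)(LDCW_ALIGNMENT - 1));
}

int main(void)
{
  volatile struct pa_loc loc;
  volatile unsigned *a = align_loc(&loc);
  assert(((unsigned long)a & (LDCW_ALIGNMENT - 1)) == 0);   /* aligned */
  assert((void *)a >= (void *)&loc
         && (void *)(a + 1) <= (void *)(&loc + 1));         /* in bounds */
  return 0;
}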


@ -0,0 +1,287 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "../all_atomic_load_store.h"
#include "../all_acquire_release_volatile.h"
#include "../test_and_set_t_is_char.h"
#ifdef _ILP32
/* 32-bit HP/UX code. */
/* This requires pointer "swizzling". Pointers need to be expanded */
/* to 64 bits using the addp4 instruction before use. This makes it */
/* hard to share code, but we try anyway. */
# define AO_LEN "4"
/* We assume that addr always appears in argument position 1 in asm */
/* code. If it is clobbered due to swizzling, we also need it in */
/* second position. Any later arguments are referenced symbolically, */
/* so that we don't have to worry about their position. This requires */
/* gcc 3.1, but you shouldn't be using anything older than that on */
/* IA64 anyway. */
/* The AO_MASK macro is a workaround for the fact that HP/UX gcc */
/* appears to otherwise store 64-bit pointers in ar.ccv, i.e. it */
/* doesn't appear to clear high bits in a pointer value we pass into */
/* assembly code, even if it is supposedly of type AO_t. */
# define AO_IN_ADDR "1"(addr)
# define AO_OUT_ADDR , "=r"(addr)
# define AO_SWIZZLE "addp4 %1=0,%1;;\n"
# define AO_MASK(ptr) __asm__ __volatile__("zxt4 %1=%1": "=r"(ptr) : "0"(ptr))
#else
# define AO_LEN "8"
# define AO_IN_ADDR "r"(addr)
# define AO_OUT_ADDR
# define AO_SWIZZLE
# define AO_MASK(ptr) /* empty */
#endif /* !_ILP32 */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mf" : : : "memory");
}
#define AO_HAVE_nop_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".acq %0=[%1],1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_add1_acquire
AO_INLINE AO_t
AO_fetch_and_add1_release (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".rel %0=[%1],1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
AO_fetch_and_sub1_acquire (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".acq %0=[%1],-1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
AO_fetch_and_sub1_release (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".rel %0=[%1],-1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_sub1_release
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t fetched_val;
AO_MASK(old);
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
".acq %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
: "memory");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t fetched_val;
AO_MASK(old);
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
".rel %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
: "memory");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
unsigned char old, unsigned char new_val)
{
unsigned char fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_acquire
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
unsigned char old, unsigned char new_val)
{
unsigned char fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_release
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
unsigned short old, unsigned short new_val)
{
unsigned short fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_acquire
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
unsigned short old, unsigned short new_val)
{
unsigned short fetched_val;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv"
: "=r"(fetched_val) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_release
#ifdef _ILP32
# define AO_T_IS_INT
/* TODO: Add compare_double_and_swap_double for the _ILP32 case. */
#else
# ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add1_acquire(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.acq %0=[%1],1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_add1_acquire
AO_INLINE unsigned int
AO_int_fetch_and_add1_release(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.rel %0=[%1],1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_add1_release
AO_INLINE unsigned int
AO_int_fetch_and_sub1_acquire(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.acq %0=[%1],-1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_sub1_acquire
AO_INLINE unsigned int
AO_int_fetch_and_sub1_release(volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__("fetchadd4.rel %0=[%1],-1"
: "=r" (result) : AO_IN_ADDR
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_sub1_release
# endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
unsigned int old, unsigned int new_val)
{
unsigned int fetched_val;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.acq %0=[%1],%2,ar.ccv"
: "=r"(fetched_val)
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
: "memory");
return fetched_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_acquire
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr,
unsigned int old, unsigned int new_val)
{
unsigned int fetched_val;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%1],%2,ar.ccv"
: "=r"(fetched_val)
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
: "memory");
return fetched_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_release
#endif /* !_ILP32 */
/* TODO: Add compare_and_swap_double as soon as there is widely */
/* available hardware that implements it. */
#undef AO_IN_ADDR
#undef AO_LEN
#undef AO_MASK
#undef AO_OUT_ADDR
#undef AO_SWIZZLE
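
The AO_MASK (zxt4) workaround above exists because a 32-bit comparand that
reaches a 64-bit register with its high bits set can never match the
zero-extended memory word. The hazard can be restated in portable terms:

#include <assert.h>
#include <stdint.h>

int main(void)
{
  uint32_t old32 = 0x80000000u;
  uint64_t sign_extended = (uint64_t)(int32_t)old32;  /* high bits set */
  uint64_t zero_extended = (uint64_t)old32;           /* what zxt4 enforces */
  assert(sign_extended != zero_extended);
  assert(zero_extended == 0x80000000u);
  return 0;
}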


@ -0,0 +1,68 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* The cas instruction causes an emulation trap for the */
/* 060 with a misaligned pointer, so let's avoid this. */
#undef AO_t
typedef unsigned long AO_t __attribute__((__aligned__(4)));
/* FIXME. Very incomplete. */
#include "../all_aligned_atomic_load_store.h"
/* Are there any m68k multiprocessors still around? */
/* AFAIK, Alliants were sequentially consistent. */
#include "../ordered.h"
#include "../test_and_set_t_is_char.h"
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_t oldval;
/* The value at addr is semi-phony. */
/* 'tas' sets bit 7 while the return */
/* value pretends all bits were set, */
/* which at least matches AO_TS_SET. */
__asm__ __volatile__(
"tas %1; sne %0"
: "=d" (oldval), "=m" (*addr)
: "m" (*addr)
: "memory");
/* This cast works due to the above. */
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
char result;
__asm__ __volatile__(
"cas.l %3,%4,%1; seq %0"
: "=d" (result), "=m" (*addr)
: "m" (*addr), "d" (old), "d" (new_val)
: "memory");
return -result;
}
#define AO_HAVE_compare_and_swap_full
/* TODO: implement AO_fetch_compare_and_swap. */
#define AO_T_IS_INT
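
The realigned AO_t above only matters on m68k itself, but the intent is easy
to state as a compile-time check (a sketch assuming a gcc-compatible C11
compiler; the aligned attribute can only raise alignment, hence the >=):

typedef unsigned long aligned_word __attribute__((__aligned__(4)));

_Static_assert(_Alignof(aligned_word) >= 4,
               "atomic word must be at least 4-byte aligned for cas");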


@ -0,0 +1,205 @@
/*
* Copyright (c) 2005,2007 Thiemo Seufer <ths@networkno.de>
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/*
* FIXME: This should probably make finer distinctions. SGI MIPS is
* much more strongly ordered, and in fact closer to sequentially
* consistent. This is really aimed at modern embedded implementations.
*/
/* Data dependence does not imply read ordering. */
#define AO_NO_DD_ORDERING
/* #include "../standard_ao_double_t.h" */
/* TODO: Implement double-wide operations if available. */
#if (AO_GNUC_PREREQ(4, 9) || AO_CLANG_PREREQ(3, 5)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled even for earlier gcc/clang versions. */
/* As of clang-3.6/mips[64], __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n are missing. */
# if defined(__clang__)
# define AO_GCC_FORCE_HAVE_CAS
# endif
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
# include "../test_and_set_t_is_ao_t.h"
# include "../all_aligned_atomic_load_store.h"
# if !defined(_ABI64) || _MIPS_SIM != _ABI64
# define AO_T_IS_INT
# if __mips_isa_rev >= 6
/* Encoding of ll/sc in mips rel6 differs from that of mips2/3. */
# define AO_MIPS_SET_ISA ""
# else
# define AO_MIPS_SET_ISA " .set mips2\n"
# endif
# define AO_MIPS_LL_1(args) " ll " args "\n"
# define AO_MIPS_SC(args) " sc " args "\n"
# else
# if __mips_isa_rev >= 6
# define AO_MIPS_SET_ISA ""
# else
# define AO_MIPS_SET_ISA " .set mips3\n"
# endif
# define AO_MIPS_LL_1(args) " lld " args "\n"
# define AO_MIPS_SC(args) " scd " args "\n"
# endif /* _MIPS_SIM == _ABI64 */
#ifdef AO_ICE9A1_LLSC_WAR
/* ICE9 rev A1 chip (used in very few systems) is reported to */
/* have a low-frequency bug that causes LL to fail. */
/* As a workaround, just issue a second 'LL'. */
# define AO_MIPS_LL(args) AO_MIPS_LL_1(args) AO_MIPS_LL_1(args)
#else
# define AO_MIPS_LL(args) AO_MIPS_LL_1(args)
#endif
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
" sync\n"
" .set pop"
: : : "memory");
}
#define AO_HAVE_nop_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
register int result;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %2")
" addu %1, %0, %3\n"
AO_MIPS_SC("%1, %2")
" beqz %1, 1b\n"
" nop\n"
" .set pop"
: "=&r" (result), "=&r" (temp), "+m" (*addr)
: "Ir" (incr)
: "memory");
return (AO_t)result;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr)
{
register int oldval;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %2")
" move %1, %3\n"
AO_MIPS_SC("%1, %2")
" beqz %1, 1b\n"
" nop\n"
" .set pop"
: "=&r" (oldval), "=&r" (temp), "+m" (*addr)
: "r" (1)
: "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
/* TODO: Implement AO_and/or/xor primitives directly. */
#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
register int was_equal = 0;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %1")
" bne %0, %4, 2f\n"
" move %0, %3\n"
AO_MIPS_SC("%0, %1")
" .set pop\n"
" beqz %0, 1b\n"
" li %2, 1\n"
"2:"
: "=&r" (temp), "+m" (*addr), "+r" (was_equal)
: "r" (new_val), "r" (old)
: "memory");
return was_equal;
}
# define AO_HAVE_compare_and_swap
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
register int fetched_val;
register int temp;
__asm__ __volatile__(
" .set push\n"
AO_MIPS_SET_ISA
" .set noreorder\n"
" .set nomacro\n"
"1: "
AO_MIPS_LL("%0, %2")
" bne %0, %4, 2f\n"
" move %1, %3\n"
AO_MIPS_SC("%1, %2")
" beqz %1, 1b\n"
" nop\n"
" .set pop\n"
"2:"
: "=&r" (fetched_val), "=&r" (temp), "+m" (*addr)
: "r" (new_val), "Jr" (old)
: "memory");
return (AO_t)fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
#endif /* AO_DISABLE_GCC_ATOMICS */
/* CAS primitives with acquire, release and full semantics are */
/* generated automatically (and AO_int_... primitives are */
/* defined properly after the first generalization pass). */
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_MIPS_LL
#undef AO_MIPS_LL_1
#undef AO_MIPS_SC
#undef AO_MIPS_SET_ISA
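
The closing comment above refers to the library's generalization pass: once a
relaxed CAS and AO_nop_full exist, the ordered variants follow mechanically.
A sketch of that derivation with portable fences (hypothetical names; the
real definitions are generated):

static void nop_full(void) { __atomic_thread_fence(__ATOMIC_SEQ_CST); }

static int cas(volatile unsigned long *a, unsigned long o, unsigned long n)
{
  return __atomic_compare_exchange_n(a, &o, n, 0,
                                     __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static int cas_acquire(volatile unsigned long *a, unsigned long o,
                       unsigned long n)
{
  int ok = cas(a, o, n);
  nop_full();               /* later accesses cannot move above the CAS */
  return ok;
}

static int cas_release(volatile unsigned long *a, unsigned long o,
                       unsigned long n)
{
  nop_full();               /* earlier accesses cannot move below the CAS */
  return cas(a, o, n);
}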


@ -0,0 +1,348 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* Memory model documented at http://www-106.ibm.com/developerworks/ */
/* eserver/articles/archguide.html and (clearer) */
/* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */
/* There appears to be no implicit ordering between any kind of */
/* independent memory references. */
/* TODO: Implement double-wide operations if available. */
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 8)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled even for earlier gcc/clang versions. */
/* TODO: As of clang-3.8.1, it emits lwsync in AO_load_acquire */
/* (i.e., the code is less efficient than the one given below). */
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
/* Architecture enforces some ordering based on control dependence. */
/* I don't know if that could help. */
/* Data-dependent loads are always ordered. */
/* Based on the above references, eieio is intended for use on */
/* uncached memory, which we don't support. It does not order loads */
/* from cached memory. */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_ao_t.h"
/* There seems to be no byte equivalent of lwarx, so this */
/* may really be what we want, at least in the 32-bit case. */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("sync" : : : "memory");
}
#define AO_HAVE_nop_full
/* lwsync apparently works for everything but a StoreLoad barrier. */
AO_INLINE void
AO_lwsync(void)
{
#ifdef __NO_LWSYNC__
__asm__ __volatile__("sync" : : : "memory");
#else
__asm__ __volatile__("lwsync" : : : "memory");
#endif
}
#define AO_nop_write() AO_lwsync()
#define AO_HAVE_nop_write
#define AO_nop_read() AO_lwsync()
#define AO_HAVE_nop_read
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* ppc64 uses ld not lwz */
# define AO_PPC_LD "ld"
# define AO_PPC_LxARX "ldarx"
# define AO_PPC_CMPx "cmpd"
# define AO_PPC_STxCXd "stdcx."
# define AO_PPC_LOAD_CLOBBER "cr0"
#else
# define AO_PPC_LD "lwz"
# define AO_PPC_LxARX "lwarx"
# define AO_PPC_CMPx "cmpw"
# define AO_PPC_STxCXd "stwcx."
# define AO_PPC_LOAD_CLOBBER "cc"
/* FIXME: We should get gcc to allocate one of the condition */
/* registers. I always got "impossible constraint" when I */
/* tried the "y" constraint. */
# define AO_T_IS_INT
#endif
#ifdef _AIX
/* Labels are not supported on AIX. */
/* ppc64 instructions are the same size as 32-bit ones. */
# define AO_PPC_L(label) /* empty */
# define AO_PPC_BR_A(labelBF, addr) addr
#else
# define AO_PPC_L(label) label ": "
# define AO_PPC_BR_A(labelBF, addr) labelBF
#endif
/* We explicitly specify load_acquire, since it is important, and can */
/* be implemented relatively cheaply. It could be implemented */
/* with an ordinary load followed by a lwsync. But the general wisdom */
/* seems to be that a data dependent branch followed by an isync is */
/* cheaper. And the documentation is fairly explicit that this also */
/* has acquire semantics. */
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (
AO_PPC_LD "%U1%X1 %0,%1\n"
"cmpw %0,%0\n"
"bne- " AO_PPC_BR_A("1f", "$+4") "\n"
AO_PPC_L("1") "isync\n"
: "=r" (result)
: "m"(*addr) : "memory", AO_PPC_LOAD_CLOBBER);
return result;
}
#define AO_HAVE_load_acquire
/* We explicitly specify store_release, since it relies */
/* on the fact that lwsync is also a LoadStore barrier. */
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t value)
{
AO_lwsync();
*addr = value;
}
#define AO_HAVE_store_release
#ifndef AO_PREFER_GENERALIZED
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
/* TODO: And we should be using smaller objects anyway. */
AO_t oldval;
AO_t temp = 1; /* locked value */
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%1\n"
/* load and reserve */
AO_PPC_CMPx "i %0, 0\n" /* if load is */
"bne " AO_PPC_BR_A("2f", "$+12") "\n"
/* non-zero, return already set */
AO_PPC_STxCXd " %2,0,%1\n" /* else store conditional */
"bne- " AO_PPC_BR_A("1b", "$-16") "\n"
/* retry if lost reservation */
AO_PPC_L("2") "\n" /* oldval is zero if we set */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr) {
AO_TS_VAL_t result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr) {
AO_lwsync();
return AO_test_and_set(addr);
}
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_VAL_t result;
AO_lwsync();
result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_full
#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_t oldval;
int result = 0;
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%2\n" /* load and reserve */
AO_PPC_CMPx " %0, %4\n" /* if load is not equal to */
"bne " AO_PPC_BR_A("2f", "$+16") "\n" /* old, fail */
AO_PPC_STxCXd " %3,0,%2\n" /* else store conditional */
"bne- " AO_PPC_BR_A("1b", "$-16") "\n"
/* retry if lost reservation */
"li %1,1\n" /* result = 1; */
AO_PPC_L("2") "\n"
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
return result;
}
# define AO_HAVE_compare_and_swap
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
{
int result = AO_compare_and_swap(addr, old, new_val);
AO_lwsync();
return result;
}
# define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val)
{
AO_lwsync();
return AO_compare_and_swap(addr, old, new_val);
}
# define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
int result;
AO_lwsync();
result = AO_compare_and_swap(addr, old, new_val);
if (result)
AO_lwsync();
return result;
}
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t fetched_val;
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%1\n" /* load and reserve */
AO_PPC_CMPx " %0, %3\n" /* if load is not equal to */
"bne " AO_PPC_BR_A("2f", "$+12") "\n" /* old_val, fail */
AO_PPC_STxCXd " %2,0,%1\n" /* else store conditional */
"bne- " AO_PPC_BR_A("1b", "$-16") "\n"
/* retry if lost reservation */
AO_PPC_L("2") "\n"
: "=&r"(fetched_val)
: "r"(addr), "r"(new_val), "r"(old_val)
: "memory", "cr0");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_lwsync();
return AO_fetch_compare_and_swap(addr, old_val, new_val);
}
#define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t result;
AO_lwsync();
result = AO_fetch_compare_and_swap(addr, old_val, new_val);
if (result == old_val)
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_compare_and_swap_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
AO_t newval;
__asm__ __volatile__(
AO_PPC_L("1") AO_PPC_LxARX " %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
AO_PPC_STxCXd " %1,0,%2\n" /* store conditional */
"bne- " AO_PPC_BR_A("1b", "$-12") "\n"
/* retry if lost reservation */
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
return oldval;
}
#define AO_HAVE_fetch_and_add
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) {
AO_t result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_and_add_acquire
AO_INLINE AO_t
AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) {
AO_lwsync();
return AO_fetch_and_add(addr, incr);
}
#define AO_HAVE_fetch_and_add_release
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) {
AO_t result;
AO_lwsync();
result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
#undef AO_PPC_BR_A
#undef AO_PPC_CMPx
#undef AO_PPC_L
#undef AO_PPC_LD
#undef AO_PPC_LOAD_CLOBBER
#undef AO_PPC_LxARX
#undef AO_PPC_STxCXd
#endif /* AO_DISABLE_GCC_ATOMICS */
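
A detail worth noting in the _full variants above: the trailing lwsync is
issued only when the primitive succeeded, since a failed CAS made no store
for later accesses to overtake. The same shape with portable fences standing
in for lwsync (a paraphrase, not the PowerPC encoding):

static int cas_full(volatile unsigned long *addr, unsigned long old_val,
                    unsigned long new_val)
{
  int result;
  __atomic_thread_fence(__ATOMIC_RELEASE);      /* leading "lwsync" */
  result = __atomic_compare_exchange_n(addr, &old_val, new_val, 0,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  if (result)
    __atomic_thread_fence(__ATOMIC_ACQUIRE);    /* trailing "lwsync" */
  return result;
}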


@ -0,0 +1,32 @@
/*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#if defined(__clang__) || defined(AO_PREFER_BUILTIN_ATOMICS)
/* All __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros are still missing. */
/* The operations are lock-free even for the types smaller than word. */
# define AO_GCC_FORCE_HAVE_CAS
#else
/* As of gcc-7.5, CAS and arithmetic atomic operations for char and */
/* short are supported by the compiler but require the -latomic flag. */
# if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
# define AO_NO_char_ARITHM
# endif
# if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
# define AO_NO_short_ARITHM
# endif
#endif /* !__clang__ */
#include "generic.h"
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_NO_char_ARITHM
#undef AO_NO_short_ARITHM
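
The sub-word cases gated off above are easy to probe: the builtins compile
everywhere, but on RISC-V with the gcc versions mentioned the char/short
forms may only link with -latomic. A quick check:

#include <assert.h>

int main(void)
{
  unsigned char c = 10;
  unsigned short s = 20;
  assert(__atomic_fetch_add(&c, 1, __ATOMIC_RELAXED) == 10 && c == 11);
  assert(__atomic_fetch_add(&s, 1, __ATOMIC_RELAXED) == 20 && s == 21);
  return 0;
}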


@ -0,0 +1,92 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#if (AO_GNUC_PREREQ(5, 4) || AO_CLANG_PREREQ(8, 0)) && defined(__s390x__) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
/* Probably, it could be enabled for earlier clang/gcc versions. */
/* But, e.g., clang-3.8.0 produces a backend error for AtomicFence. */
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
/* The relevant documentation appears to be at */
/* http://publibz.boulder.ibm.com/epubs/pdf/dz9zr003.pdf */
/* around page 5-96. Apparently: */
/* - Memory references in general are atomic only for a single */
/* byte. But it appears that the most common load/store */
/* instructions also guarantee atomicity for aligned */
/* operands of standard types. WE FOOLISHLY ASSUME that */
/* compilers only generate those. If that turns out to be */
/* wrong, we need inline assembly code for AO_load and */
/* AO_store. */
/* - A store followed by a load is unordered since the store */
/* may be delayed. Otherwise everything is ordered. */
/* - There is a hardware compare-and-swap (CS) instruction. */
#include "../all_aligned_atomic_load_store.h"
#include "../ordered_except_wr.h"
#include "../test_and_set_t_is_ao_t.h"
/* TODO: Is there a way to do byte-sized test-and-set? */
/* TODO: AO_nop_full should probably be implemented directly. */
/* It appears that certain BCR instructions have that effect. */
/* Presumably they're cheaper than CS? */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
int retval;
__asm__ __volatile__ (
# ifndef __s390x__
" cs %1,%2,0(%3)\n"
# else
" csg %1,%2,0(%3)\n"
# endif
" ipm %0\n"
" srl %0,28\n"
: "=&d" (retval), "+d" (old)
: "d" (new_val), "a" (addr)
: "cc", "memory");
return retval == 0;
}
#define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
__asm__ __volatile__ (
# ifndef __s390x__
" cs %0,%2,%1\n"
# else
" csg %0,%2,%1\n"
# endif
: "+d" (old), "=Q" (*addr)
: "d" (new_val), "m" (*addr)
: "cc", "memory");
return old;
}
#define AO_HAVE_fetch_compare_and_swap_full
#endif /* AO_DISABLE_GCC_ATOMICS */
/* TODO: Add double-wide operations for 32-bit executables. */
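
Since CS/CSG is the one read-modify-write primitive here, every other atomic
update is built as a retry loop around the value-returning CAS. A sketch of
that pattern (hypothetical helper, portable builtins):

static unsigned long
fetch_and_add_via_cas(volatile unsigned long *addr, unsigned long incr)
{
  unsigned long old_val = __atomic_load_n(addr, __ATOMIC_RELAXED);
  for (;;) {
    unsigned long seen = old_val;
    /* On failure the builtin writes the observed value into seen. */
    if (__atomic_compare_exchange_n(addr, &seen, old_val + incr, 0,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
      return old_val;       /* our increment was installed */
    old_val = seen;         /* lost a race; recompute and retry */
  }
}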


@ -0,0 +1,34 @@
/*
* Copyright (c) 2009 by Takashi YOSHII. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "../all_atomic_load_store.h"
#include "../ordered.h"
/* SH has only the byte-wide tas.b. */
#include "../test_and_set_t_is_char.h"
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
int oldval;
__asm__ __volatile__(
"tas.b @%1; movt %0"
: "=r" (oldval)
: "r" (addr)
: "t", "memory");
return oldval ? AO_TS_CLEAR : AO_TS_SET;
}
#define AO_HAVE_test_and_set_full
/* TODO: Very incomplete. */
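
A byte-wide test-and-set like tas.b is already enough for a lock; the usual
construction, restated with the portable builtins rather than SH assembly
(hypothetical names):

static volatile char lock_byte;     /* zero means "clear" */

static void spin_lock(void)
{
  /* Loop until the previous value reads as clear. */
  while (__atomic_test_and_set(&lock_byte, __ATOMIC_ACQUIRE))
    ;
}

static void spin_unlock(void)
{
  __atomic_clear(&lock_byte, __ATOMIC_RELEASE);
}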


@ -0,0 +1,87 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* TODO: Very incomplete; Add support for sparc64. */
/* Non-ancient SPARCs provide compare-and-swap (casa). */
#include "../all_atomic_load_store.h"
/* Real SPARC code uses TSO: */
#include "../ordered_except_wr.h"
/* Test_and_set location is just a byte. */
#include "../test_and_set_t_is_char.h"
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_VAL_t oldval;
__asm__ __volatile__("ldstub %1,%0"
: "=r"(oldval), "=m"(*addr)
: "m"(*addr) : "memory");
return oldval;
}
#define AO_HAVE_test_and_set_full
#ifndef AO_NO_SPARC_V9
# ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_t ret;
__asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t"
# if defined(__arch64__)
"casx [%2],%0,%1\n\t"
# else
"cas [%2],%0,%1\n\t" /* 32-bit version */
# endif
"membar #StoreLoad | #StoreStore\n\t"
"cmp %0,%1\n\t"
"be,a 0f\n\t"
"mov 1,%0\n\t"/* one insn after branch always executed */
"clr %0\n\t"
"0:\n\t"
: "=r" (ret), "+r" (new_val)
: "r" (addr), "0" (old)
: "memory", "cc");
return (int)ret;
}
# define AO_HAVE_compare_and_swap_full
# endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
__asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t"
# if defined(__arch64__)
"casx [%1],%2,%0\n\t"
# else
"cas [%1],%2,%0\n\t" /* 32-bit version */
# endif
"membar #StoreLoad | #StoreStore\n\t"
: "+r" (new_val)
: "r" (addr), "r" (old)
: "memory");
return new_val;
}
#define AO_HAVE_fetch_compare_and_swap_full
#endif /* !AO_NO_SPARC_V9 */
/* TODO: Extend this for SPARC v8 and v9 (V8 also has swap, V9 has CAS, */
/* there are barriers like membar #LoadStore, CASA (32-bit) and */
/* CASXA (64-bit) instructions added in V9). */
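
The two primitives above also show why only one of them is strictly needed:
a boolean CAS falls out of the value-returning one by comparing the fetched
word with the expected one. In portable form (hypothetical name):

static int
cas_bool_from_fetch(volatile unsigned long *addr, unsigned long old_val,
                    unsigned long new_val)
{
  unsigned long fetched = old_val;
  (void)__atomic_compare_exchange_n(addr, &fetched, new_val, 0,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return fetched == old_val;  /* swapped exactly when the values matched */
}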


@ -0,0 +1,48 @@
/*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Minimal support for tile. */
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 4)) \
&& !defined(AO_DISABLE_GCC_ATOMICS)
# include "generic.h"
#else /* AO_DISABLE_GCC_ATOMICS */
# include "../all_atomic_load_store.h"
# include "../test_and_set_t_is_ao_t.h"
AO_INLINE void
AO_nop_full(void)
{
__sync_synchronize();
}
# define AO_HAVE_nop_full
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
{
return __sync_fetch_and_add(p, incr);
}
# define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
}
# define AO_HAVE_fetch_compare_and_swap_full
#endif /* AO_DISABLE_GCC_ATOMICS */
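
The legacy __sync builtins used above come in two CAS flavors; their
contracts, demonstrated side by side:

#include <assert.h>

int main(void)
{
  unsigned long x = 3;
  /* The "val" form returns the fetched value... */
  assert(__sync_val_compare_and_swap(&x, 3, 4) == 3 && x == 4);
  /* ...the "bool" form returns whether the swap happened. */
  assert(!__sync_bool_compare_and_swap(&x, 3, 5) && x == 4);
  assert(__sync_bool_compare_and_swap(&x, 4, 5) && x == 5);
  return 0;
}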


@ -0,0 +1,657 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
* Copyright (c) 2008-2018 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
* Some of the machine specific code was borrowed from our GC distribution.
*/
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 4)) \
&& !defined(__INTEL_COMPILER) /* TODO: test and enable icc */ \
&& !defined(AO_DISABLE_GCC_ATOMICS)
# define AO_GCC_ATOMIC_TEST_AND_SET
# if defined(__APPLE_CC__)
/* OS X 10.7 clang-425 lacks __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n */
/* predefined macro (unlike e.g. OS X 10.11 clang-703). */
# define AO_GCC_FORCE_HAVE_CAS
# ifdef __x86_64__
# if !AO_CLANG_PREREQ(9, 0) /* < Apple clang-900 */
/* Older Apple clang (e.g., clang-600 based on LLVM 3.5svn) had */
/* some bug in the double word CAS implementation for x64. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# endif
# elif defined(__MACH__)
/* OS X 10.8 lacks __atomic_load/store symbols for arch i386 */
/* (even with a non-Apple clang). */
# ifndef MAC_OS_X_VERSION_MIN_REQUIRED
/* Include this header just to import the version macro. */
# include <AvailabilityMacros.h>
# endif
# if MAC_OS_X_VERSION_MIN_REQUIRED < 1090 /* MAC_OS_X_VERSION_10_9 */
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif
# endif /* __i386__ */
# elif defined(__clang__)
# if !defined(__x86_64__)
# if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(__CYGWIN__) \
&& !AO_CLANG_PREREQ(5, 0)
/* At least clang-3.8/i686 (from NDK r11c) requires -latomic to be */
/* specified in case a double-word atomic operation is used. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif /* !AO_PREFER_BUILTIN_ATOMICS */
# elif !defined(__ILP32__)
# if (!AO_CLANG_PREREQ(3, 5) && !defined(AO_PREFER_BUILTIN_ATOMICS)) \
|| (!AO_CLANG_PREREQ(4, 0) && defined(AO_ADDRESS_SANITIZER)) \
|| defined(AO_THREAD_SANITIZER)
/* clang-3.4/x64 required -latomic. clang-3.9/x64 seems to */
/* pass double-wide arguments to atomic operations incorrectly */
/* in case of ASan/TSan. */
/* TODO: As of clang-4.0, lock-free test_stack fails if TSan. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif
# endif /* __x86_64__ */
# elif AO_GNUC_PREREQ(7, 0) && !defined(AO_PREFER_BUILTIN_ATOMICS) \
&& !defined(AO_THREAD_SANITIZER) && !defined(__MINGW32__)
/* gcc-7.x/x64 (gcc-7.2, at least) requires -latomic flag in case */
/* of double-word atomic operations use (but not in case of TSan). */
/* TODO: Revise it for the future gcc-7 releases. */
# define AO_SKIPATOMIC_double_compare_and_swap_ANY
# define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif /* __GNUC__ && !__clang__ */
# ifdef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# define AO_SKIPATOMIC_double_load
# define AO_SKIPATOMIC_double_load_acquire
# define AO_SKIPATOMIC_double_store
# define AO_SKIPATOMIC_double_store_release
# undef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif
#else /* AO_DISABLE_GCC_ATOMICS */
/* The following really assume we have a 486 or better. Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command */
/* line options. */
/* We should perhaps test dynamically. */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_char.h"
#if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS)
/* "mfence" is a part of SSE2 set (introduced on Intel Pentium 4). */
# define AO_USE_PENTIUM4_INSTRS
#endif
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mfence" : : : "memory");
}
# define AO_HAVE_nop_full
#else
/* We could use the cpuid instruction. But that seems to be slower */
/* than the default implementation based on test_and_set_full. Thus */
/* we omit that bit of misinformation here. */
#endif /* !AO_USE_PENTIUM4_INSTRS */
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
AO_t result;
__asm__ __volatile__ ("lock; xadd %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
unsigned char result;
__asm__ __volatile__ ("lock; xaddb %0, %1"
: "=q" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
unsigned short result;
__asm__ __volatile__ ("lock; xaddw %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; and %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_and_full
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; or %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_or_full
AO_INLINE void
AO_xor_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; xor %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_xor_full
/* AO_store_full could be implemented directly using "xchg" but it */
/* could be generalized efficiently as an ordinary store accomplished */
/* with AO_nop_full ("mfence" instruction). */
AO_INLINE void
AO_char_and_full (volatile unsigned char *p, unsigned char value)
{
__asm__ __volatile__ ("lock; andb %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_char_and_full
AO_INLINE void
AO_char_or_full (volatile unsigned char *p, unsigned char value)
{
__asm__ __volatile__ ("lock; orb %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_char_or_full
AO_INLINE void
AO_char_xor_full (volatile unsigned char *p, unsigned char value)
{
__asm__ __volatile__ ("lock; xorb %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_char_xor_full
AO_INLINE void
AO_short_and_full (volatile unsigned short *p, unsigned short value)
{
__asm__ __volatile__ ("lock; andw %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_short_and_full
AO_INLINE void
AO_short_or_full (volatile unsigned short *p, unsigned short value)
{
__asm__ __volatile__ ("lock; orw %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_short_or_full
AO_INLINE void
AO_short_xor_full (volatile unsigned short *p, unsigned short value)
{
__asm__ __volatile__ ("lock; xorw %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
#define AO_HAVE_short_xor_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
unsigned char oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
__asm__ __volatile__ ("xchgb %0, %1"
: "=q" (oldval), "+m" (*addr)
: "0" ((unsigned char)0xff)
: "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
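/* Illustration only: a minimal test-and-set spinlock built on the */
/* primitive above. The helper names are hypothetical; AO_CLEAR is */
/* assumed to be supplied by the library's generalization layer. */
#if 0
static void example_spin_lock(volatile AO_TS_t *lock)
{
  while (AO_test_and_set_full(lock) == AO_TS_SET) {
    /* busy-wait until the prior value was AO_TS_CLEAR */
  }
}

static void example_spin_unlock(volatile AO_TS_t *lock)
{
  AO_CLEAR(lock); /* release store back to the clear state */
}
#endif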
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return (int)__sync_bool_compare_and_swap(addr, old, new_val
/* empty protection list */);
/* Note: an empty list of variables protected by the */
/* memory barrier should mean all globally accessible */
/* variables are protected. */
# else
char result;
# if defined(__GCC_ASM_FLAG_OUTPUTS__)
AO_t dummy;
__asm__ __volatile__ ("lock; cmpxchg %3, %0"
: "+m" (*addr), "=@ccz" (result), "=a" (dummy)
: "r" (new_val), "a" (old)
: "memory");
# else
__asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1"
: "+m" (*addr), "=a" (result)
: "r" (new_val), "a" (old)
: "memory");
# endif
return (int)result;
# endif
}
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
AO_t fetched_val;
__asm__ __volatile__ ("lock; cmpxchg %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "r" (new_val)
: "memory");
return fetched_val;
# endif
}
#define AO_HAVE_fetch_compare_and_swap_full
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
unsigned char old_val,
unsigned char new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
unsigned char fetched_val;
__asm__ __volatile__ ("lock; cmpxchgb %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "q" (new_val)
: "memory");
return fetched_val;
# endif
}
# define AO_HAVE_char_fetch_compare_and_swap_full
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
unsigned short fetched_val;
__asm__ __volatile__ ("lock; cmpxchgw %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "r" (new_val)
: "memory");
return fetched_val;
# endif
}
# define AO_HAVE_short_fetch_compare_and_swap_full
# if defined(__x86_64__) && !defined(__ILP32__)
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_full(volatile unsigned int *addr,
unsigned int old_val,
unsigned int new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return __sync_val_compare_and_swap(addr, old_val, new_val
/* empty protection list */);
# else
unsigned int fetched_val;
__asm__ __volatile__ ("lock; cmpxchgl %3, %1"
: "=a" (fetched_val), "+m" (*addr)
: "a" (old_val), "r" (new_val)
: "memory");
return fetched_val;
# endif
}
# define AO_HAVE_int_fetch_compare_and_swap_full
# ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
unsigned int result;
__asm__ __volatile__ ("lock; xaddl %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
AO_int_and_full (volatile unsigned int *p, unsigned int value)
{
__asm__ __volatile__ ("lock; andl %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_int_and_full
AO_INLINE void
AO_int_or_full (volatile unsigned int *p, unsigned int value)
{
__asm__ __volatile__ ("lock; orl %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_int_or_full
AO_INLINE void
AO_int_xor_full (volatile unsigned int *p, unsigned int value)
{
__asm__ __volatile__ ("lock; xorl %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_int_xor_full
# endif /* !AO_PREFER_GENERALIZED */
# else
# define AO_T_IS_INT
# endif /* !x86_64 || ILP32 */
/* Real X86 implementations, except for some old 32-bit WinChips, */
/* appear to enforce ordering between memory operations, EXCEPT that */
/* a later read can pass earlier writes, presumably due to the */
/* visible presence of store buffers. */
/* We ignore both the WinChips and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
# include "../ordered_except_wr.h"
#endif /* AO_DISABLE_GCC_ATOMICS */
#if defined(AO_GCC_ATOMIC_TEST_AND_SET) \
&& !defined(AO_SKIPATOMIC_double_compare_and_swap_ANY)
# if defined(__ILP32__) || !defined(__x86_64__) /* 32-bit AO_t */ \
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) /* 64-bit AO_t */
# include "../standard_ao_double_t.h"
# endif
#elif !defined(__x86_64__) && (!defined(AO_USE_SYNC_CAS_BUILTIN) \
|| defined(AO_GCC_ATOMIC_TEST_AND_SET))
# include "../standard_ao_double_t.h"
/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically on at least a Pentium according to */
/* Chapter 8.1.1 of Volume 3A Part 1 of Intel processor manuals. */
# ifndef AO_PREFER_GENERALIZED
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"
# endif
/* Returns nonzero if the comparison succeeded. */
/* Really requires at least a Pentium. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
char result;
# if defined(__PIC__) && !(AO_GNUC_PREREQ(5, 1) || AO_CLANG_PREREQ(4, 0))
AO_t saved_ebx;
AO_t dummy;
/* The following applies to ancient GCC releases (and was probably */
/* never needed for Clang): if PIC is turned on, we cannot use ebx */
/* directly, as it is reserved for the GOT pointer, so we save and */
/* restore it. This solution is not as efficient as the older */
/* alternatives (push ebx, or using edi as new_val1 without */
/* clobbering edi or a temporary local variable), but it is more */
/* portable: it works even if ebx is not used as the GOT pointer, */
/* and it works with the buggy GCC releases that incorrectly */
/* evaluate memory-operand offsets in inline assembly after a push. */
# ifdef __OPTIMIZE__
__asm__ __volatile__("mov %%ebx, %2\n\t" /* save ebx */
"lea %0, %%edi\n\t" /* in case addr is in ebx */
"mov %7, %%ebx\n\t" /* load new_val1 */
"lock; cmpxchg8b (%%edi)\n\t"
"mov %2, %%ebx\n\t" /* restore ebx */
"setz %1"
: "+m" (*addr), "=a" (result),
"=m" (saved_ebx), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "m" (new_val1)
: "%edi", "memory");
# else
/* Less efficient code that manually preserves edi, for GCC */
/* invoked with the -O0 option (otherwise GCC fails to find a */
/* register in class 'GENERAL_REGS'). */
AO_t saved_edi;
__asm__ __volatile__("mov %%edi, %3\n\t" /* save edi */
"mov %%ebx, %2\n\t" /* save ebx */
"lea %0, %%edi\n\t" /* in case addr is in ebx */
"mov %8, %%ebx\n\t" /* load new_val1 */
"lock; cmpxchg8b (%%edi)\n\t"
"mov %2, %%ebx\n\t" /* restore ebx */
"mov %3, %%edi\n\t" /* restore edi */
"setz %1"
: "+m" (*addr), "=a" (result),
"=m" (saved_ebx), "=m" (saved_edi), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "m" (new_val1)
: "memory");
# endif
# else
/* In non-PIC mode, this operation can be simplified (and made */
/* faster) by using ebx to hold new_val1 directly. Clang and */
/* GCC 5.1+, at least, can also reuse the PIC hard register */
/* instead of reserving it, so they accept this code even in */
/* PIC mode (older GCC refused to compile such code for PIC mode). */
# if defined(__GCC_ASM_FLAG_OUTPUTS__)
__asm__ __volatile__ ("lock; cmpxchg8b %0"
: "+m" (*addr), "=@ccz" (result),
"+d" (old_val2), "+a" (old_val1)
: "c" (new_val2), "b" (new_val1)
: "memory");
# else
AO_t dummy; /* an output for clobbered edx */
__asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
: "+m" (*addr), "=a" (result), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1)
: "memory");
# endif
# endif
return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
#elif defined(__ILP32__) || !defined(__x86_64__)
# include "../standard_ao_double_t.h"
/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically (requires at least a Pentium). */
# ifndef AO_PREFER_GENERALIZED
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"
# endif
/* X32 has native support for 64-bit integer operations (AO_double_t */
/* is a 64-bit integer and we could use 64-bit cmpxchg). */
/* This primitive is used by compare_double_and_swap_double_full. */
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
/* It is safe to use __sync CAS built-in here. */
return __sync_bool_compare_and_swap(&addr->AO_whole,
old_val.AO_whole, new_val.AO_whole
/* empty protection list */);
}
# define AO_HAVE_double_compare_and_swap_full
#elif defined(AO_CMPXCHG16B_AVAILABLE) \
|| (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) \
&& !defined(AO_THREAD_SANITIZER))
# include "../standard_ao_double_t.h"
/* The Intel and AMD Architecture Programmer Manuals state roughly */
/* the following: */
/* - CMPXCHG16B (with a LOCK prefix) can be used to perform 16-byte */
/* atomic accesses in 64-bit mode (with certain alignment */
/* restrictions); */
/* - SSE instructions that access data larger than a quadword (like */
/* MOVDQA) may be implemented using multiple memory accesses; */
/* - LOCK prefix causes an invalid-opcode exception when used with */
/* 128-bit media (SSE) instructions. */
/* Thus, currently, the only way to implement lock-free double_load */
/* and double_store on x86_64 is to use CMPXCHG16B (if available). */
/* NEC LE-IT: older AMD Opterons are missing this instruction. */
/* On these machines a SIGILL is raised. */
/* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated (lock */
/* based) version available. */
/* HB: Changed this to not define either by default. There are */
/* enough machines and tool chains around on which cmpxchg16b */
/* doesn't work. And the emulation is unsafe by our usual rules. */
/* However both are clearly useful in certain cases. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
char result;
# if defined(__GCC_ASM_FLAG_OUTPUTS__)
__asm__ __volatile__("lock; cmpxchg16b %0"
: "+m" (*addr), "=@ccz" (result),
"+d" (old_val2), "+a" (old_val1)
: "c" (new_val2), "b" (new_val1)
: "memory");
# else
AO_t dummy; /* an output for clobbered rdx */
__asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
: "+m" (*addr), "=a" (result), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1)
: "memory");
# endif
return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
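/* Illustration only: as noted above, a lock-free double-wide load */
/* can be derived from CMPXCHG16B by "swapping in" the value that */
/* is already present. A hypothetical sketch (the library derives */
/* its real double_load elsewhere): */
#if 0
AO_INLINE AO_double_t
example_double_load(volatile AO_double_t *addr)
{
  AO_double_t result;
  do {
    result.AO_val1 = addr->AO_val1; /* may tear between the halves */
    result.AO_val2 = addr->AO_val2;
  } while (!AO_compare_double_and_swap_double_full(addr,
                result.AO_val1, result.AO_val2,
                result.AO_val1, result.AO_val2));
                /* CAS succeeds only if both halves were read consistently */
  return result;
}
#endif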
#elif defined(AO_WEAK_DOUBLE_CAS_EMULATION)
# include "../standard_ao_double_t.h"
# ifdef __cplusplus
extern "C" {
# endif
/* This one provides spinlock-based emulation of CAS, implemented */
/* in atomic_ops.c. We probably do not want to do this here, since */
/* it is not atomic with respect to other kinds of updates of */
/* *addr. On the other hand, it may be a useful facility on */
/* occasion. */
int AO_compare_double_and_swap_double_emulation(
volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2);
# ifdef __cplusplus
} /* extern "C" */
# endif
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
return AO_compare_double_and_swap_double_emulation(addr,
old_val1, old_val2, new_val1, new_val2);
}
# define AO_HAVE_compare_double_and_swap_double_full
#endif /* x86_64 && !ILP32 && CAS_EMULATION && !AO_CMPXCHG16B_AVAILABLE */
#ifdef AO_GCC_ATOMIC_TEST_AND_SET
# include "generic.h"
#endif
#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_SKIPATOMIC_double_compare_and_swap_ANY
#undef AO_SKIPATOMIC_double_load
#undef AO_SKIPATOMIC_double_load_acquire
#undef AO_SKIPATOMIC_double_store
#undef AO_SKIPATOMIC_double_store_release

View File

@ -0,0 +1,442 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* The following is useful primarily for debugging and documentation. */
/* We define various atomic operations by acquiring a global pthread */
/* lock. The resulting implementation will perform poorly, but should */
/* be correct unless it is used from signal handlers. */
/* We assume that all pthread operations act like full memory barriers. */
/* (We believe that is the intent of the specification.) */
#include <assert.h>
#include <pthread.h>
#include "test_and_set_t_is_ao_t.h"
/* This is not necessarily compatible with the native */
/* implementation. But those can't be safely mixed anyway. */
#ifdef __cplusplus
extern "C" {
#endif
/* We define only the full barrier variants, and count on the */
/* generalization section below to fill in the rest. */
extern pthread_mutex_t AO_pt_lock;
#ifdef __cplusplus
} /* extern "C" */
#endif
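/* AO_pt_lock itself is defined once in atomic_ops.c, presumably */
/* along the lines of the following (shown for illustration only): */
#if 0
pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER;
#endif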
AO_INLINE void
AO_nop_full(void)
{
pthread_mutex_lock(&AO_pt_lock);
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_nop_full
AO_INLINE AO_t
AO_load_full(const volatile AO_t *addr)
{
AO_t result;
pthread_mutex_lock(&AO_pt_lock);
result = *addr;
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
#define AO_HAVE_load_full
AO_INLINE void
AO_store_full(volatile AO_t *addr, AO_t val)
{
pthread_mutex_lock(&AO_pt_lock);
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_store_full
AO_INLINE unsigned char
AO_char_load_full(const volatile unsigned char *addr)
{
unsigned char result;
pthread_mutex_lock(&AO_pt_lock);
result = *addr;
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
#define AO_HAVE_char_load_full
AO_INLINE void
AO_char_store_full(volatile unsigned char *addr, unsigned char val)
{
pthread_mutex_lock(&AO_pt_lock);
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_store_full
AO_INLINE unsigned short
AO_short_load_full(const volatile unsigned short *addr)
{
unsigned short result;
pthread_mutex_lock(&AO_pt_lock);
result = *addr;
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
#define AO_HAVE_short_load_full
AO_INLINE void
AO_short_store_full(volatile unsigned short *addr, unsigned short val)
{
pthread_mutex_lock(&AO_pt_lock);
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_store_full
AO_INLINE unsigned int
AO_int_load_full(const volatile unsigned int *addr)
{
unsigned int result;
pthread_mutex_lock(&AO_pt_lock);
result = *addr;
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
#define AO_HAVE_int_load_full
AO_INLINE void
AO_int_store_full(volatile unsigned int *addr, unsigned int val)
{
pthread_mutex_lock(&AO_pt_lock);
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_store_full
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
AO_TS_VAL_t result;
pthread_mutex_lock(&AO_pt_lock);
result = (AO_TS_VAL_t)(*addr);
*addr = AO_TS_SET;
pthread_mutex_unlock(&AO_pt_lock);
assert(result == AO_TS_SET || result == AO_TS_CLEAR);
return result;
}
#define AO_HAVE_test_and_set_full
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
{
AO_t old_val;
pthread_mutex_lock(&AO_pt_lock);
old_val = *p;
*p = old_val + incr;
pthread_mutex_unlock(&AO_pt_lock);
return old_val;
}
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
{
unsigned char old_val;
pthread_mutex_lock(&AO_pt_lock);
old_val = *p;
*p = old_val + incr;
pthread_mutex_unlock(&AO_pt_lock);
return old_val;
}
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
{
unsigned short old_val;
pthread_mutex_lock(&AO_pt_lock);
old_val = *p;
*p = old_val + incr;
pthread_mutex_unlock(&AO_pt_lock);
return old_val;
}
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
{
unsigned int old_val;
pthread_mutex_lock(&AO_pt_lock);
old_val = *p;
*p = old_val + incr;
pthread_mutex_unlock(&AO_pt_lock);
return old_val;
}
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
AO_and_full(volatile AO_t *p, AO_t value)
{
pthread_mutex_lock(&AO_pt_lock);
*p &= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_and_full
AO_INLINE void
AO_or_full(volatile AO_t *p, AO_t value)
{
pthread_mutex_lock(&AO_pt_lock);
*p |= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_or_full
AO_INLINE void
AO_xor_full(volatile AO_t *p, AO_t value)
{
pthread_mutex_lock(&AO_pt_lock);
*p ^= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_xor_full
AO_INLINE void
AO_char_and_full(volatile unsigned char *p, unsigned char value)
{
pthread_mutex_lock(&AO_pt_lock);
*p &= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_and_full
AO_INLINE void
AO_char_or_full(volatile unsigned char *p, unsigned char value)
{
pthread_mutex_lock(&AO_pt_lock);
*p |= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_or_full
AO_INLINE void
AO_char_xor_full(volatile unsigned char *p, unsigned char value)
{
pthread_mutex_lock(&AO_pt_lock);
*p ^= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_xor_full
AO_INLINE void
AO_short_and_full(volatile unsigned short *p, unsigned short value)
{
pthread_mutex_lock(&AO_pt_lock);
*p &= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_and_full
AO_INLINE void
AO_short_or_full(volatile unsigned short *p, unsigned short value)
{
pthread_mutex_lock(&AO_pt_lock);
*p |= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_or_full
AO_INLINE void
AO_short_xor_full(volatile unsigned short *p, unsigned short value)
{
pthread_mutex_lock(&AO_pt_lock);
*p ^= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_xor_full
AO_INLINE void
AO_int_and_full(volatile unsigned *p, unsigned value)
{
pthread_mutex_lock(&AO_pt_lock);
*p &= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_and_full
AO_INLINE void
AO_int_or_full(volatile unsigned *p, unsigned value)
{
pthread_mutex_lock(&AO_pt_lock);
*p |= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_or_full
AO_INLINE void
AO_int_xor_full(volatile unsigned *p, unsigned value)
{
pthread_mutex_lock(&AO_pt_lock);
*p ^= value;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_xor_full
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t fetched_val;
pthread_mutex_lock(&AO_pt_lock);
fetched_val = *addr;
if (fetched_val == old_val)
*addr = new_val;
pthread_mutex_unlock(&AO_pt_lock);
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_full
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
unsigned char old_val,
unsigned char new_val)
{
unsigned char fetched_val;
pthread_mutex_lock(&AO_pt_lock);
fetched_val = *addr;
if (fetched_val == old_val)
*addr = new_val;
pthread_mutex_unlock(&AO_pt_lock);
return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_full
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
unsigned short fetched_val;
pthread_mutex_lock(&AO_pt_lock);
fetched_val = *addr;
if (fetched_val == old_val)
*addr = new_val;
pthread_mutex_unlock(&AO_pt_lock);
return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_full
AO_INLINE unsigned
AO_int_fetch_compare_and_swap_full(volatile unsigned *addr, unsigned old_val,
unsigned new_val)
{
unsigned fetched_val;
pthread_mutex_lock(&AO_pt_lock);
fetched_val = *addr;
if (fetched_val == old_val)
*addr = new_val;
pthread_mutex_unlock(&AO_pt_lock);
return fetched_val;
}
#define AO_HAVE_int_fetch_compare_and_swap_full
/* Unlike real architectures, we define both double-width CAS variants. */
typedef struct {
AO_t AO_val1;
AO_t AO_val2;
} AO_double_t;
#define AO_HAVE_double_t
#define AO_DOUBLE_T_INITIALIZER { (AO_t)0, (AO_t)0 }
AO_INLINE AO_double_t
AO_double_load_full(const volatile AO_double_t *addr)
{
AO_double_t result;
pthread_mutex_lock(&AO_pt_lock);
result.AO_val1 = addr->AO_val1;
result.AO_val2 = addr->AO_val2;
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
#define AO_HAVE_double_load_full
AO_INLINE void
AO_double_store_full(volatile AO_double_t *addr, AO_double_t value)
{
pthread_mutex_lock(&AO_pt_lock);
addr->AO_val1 = value.AO_val1;
addr->AO_val2 = value.AO_val2;
pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_double_store_full
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old1, AO_t old2,
AO_t new1, AO_t new2)
{
pthread_mutex_lock(&AO_pt_lock);
if (addr -> AO_val1 == old1 && addr -> AO_val2 == old2)
{
addr -> AO_val1 = new1;
addr -> AO_val2 = new2;
pthread_mutex_unlock(&AO_pt_lock);
return 1;
}
else
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
#define AO_HAVE_compare_double_and_swap_double_full
AO_INLINE int
AO_compare_and_swap_double_full(volatile AO_double_t *addr,
AO_t old1, AO_t new1, AO_t new2)
{
pthread_mutex_lock(&AO_pt_lock);
if (addr -> AO_val1 == old1)
{
addr -> AO_val1 = new1;
addr -> AO_val2 = new2;
pthread_mutex_unlock(&AO_pt_lock);
return 1;
}
else
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
#define AO_HAVE_compare_and_swap_double_full
/* We can't use hardware loads and stores, since they don't */
/* interact correctly with atomic updates. */

View File

@ -0,0 +1,104 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Derived from the corresponding header file for gcc.
*/
#include "../loadstore/atomic_load.h"
#include "../loadstore/atomic_store.h"
/* Some architecture set descriptions include special "ordered" memory */
/* operations. As far as we can tell, no existing processors actually */
/* require those. Nor does it appear likely that future processors */
/* will. */
/* FIXME: The PA emulator on Itanium may obey weaker restrictions. */
/* There should be a mode in which we don't assume sequential */
/* consistency here. */
#include "../ordered.h"
#include <machine/inline.h>
/* GCC will not guarantee the alignment we need, so we use four */
/* lock words and select the correctly aligned datum. See the */
/* glibc 2.3.2 linuxthread port for the original implementation. */
struct AO_pa_clearable_loc {
int data[4];
};
#undef AO_TS_INITIALIZER
#define AO_TS_t struct AO_pa_clearable_loc
#define AO_TS_INITIALIZER {1,1,1,1}
/* Switch meaning of set and clear, since we only have an atomic clear */
/* instruction. */
typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
#define AO_TS_VAL_t AO_PA_TS_val
#define AO_TS_CLEAR AO_PA_TS_clear
#define AO_TS_SET AO_PA_TS_set
/* The hppa has only one atomic read-and-modify memory operation, */
/* load-and-clear, so hppa spinlocks must use zero to signify that */
/* someone is holding the lock. The address used for the ldcw */
/* semaphore must be 16-byte aligned. */
#define AO_ldcw(a, ret) \
_LDCWX(0 /* index */, 0 /* s */, a /* base */, ret)
/* Because malloc only guarantees 8-byte alignment for malloc'd data, */
/* and GCC only guarantees 8-byte alignment for stack locals, we can't */
/* be assured of 16-byte alignment for atomic lock data even if we */
/* specify "__attribute ((aligned(16)))" in the type declaration. So, */
/* we use a struct containing an array of four ints for the atomic lock */
/* type and dynamically select the 16-byte aligned int from the array */
/* for the semaphore. */
#define AO_PA_LDCW_ALIGNMENT 16
#define AO_ldcw_align(addr) \
((volatile unsigned *)(((unsigned long)(addr) \
+ (AO_PA_LDCW_ALIGNMENT - 1)) \
& ~(AO_PA_LDCW_ALIGNMENT - 1)))
/* Works on PA 1.1 and PA 2.0 systems */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t * addr)
{
register unsigned int ret;
register unsigned long a = (unsigned long)AO_ldcw_align(addr);
# if defined(CPPCHECK)
ret = 0; /* to avoid 'uninitialized variable' warning */
# endif
AO_ldcw(a, ret);
return (AO_TS_VAL_t)ret;
}
#define AO_HAVE_test_and_set_full
AO_INLINE void
AO_pa_clear(volatile AO_TS_t * addr)
{
volatile unsigned *a = AO_ldcw_align(addr);
AO_compiler_barrier();
*a = 1;
}
#define AO_CLEAR(addr) AO_pa_clear(addr)
#define AO_HAVE_CLEAR
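/* Illustration only: because "set" and "clear" are inverted here, */
/* portable callers must compare against AO_TS_CLEAR/AO_TS_SET */
/* rather than literal 0/1. A hypothetical sketch: */
#if 0
AO_TS_t example_lock_word = AO_TS_INITIALIZER;

void example_lock(void)
{
  while (AO_test_and_set_full(&example_lock_word) != AO_TS_CLEAR) {
    /* spin: some other thread holds the lock */
  }
}

void example_unlock(void)
{
  AO_CLEAR(&example_lock_word);
}
#endif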
#undef AO_PA_LDCW_ALIGNMENT
#undef AO_ldcw
#undef AO_ldcw_align

View File

@ -0,0 +1,153 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file specifies Itanium primitives for use with the HP compiler
* under HP-UX. We use intrinsics instead of the inline assembly code in the
* gcc file.
*/
#include "../all_atomic_load_store.h"
#include "../all_acquire_release_volatile.h"
#include "../test_and_set_t_is_char.h"
#include <machine/sys/inline.h>
#ifdef __LP64__
# define AO_T_FASIZE _FASZ_D
# define AO_T_SIZE _SZ_D
#else
# define AO_T_FASIZE _FASZ_W
# define AO_T_SIZE _SZ_W
#endif
AO_INLINE void
AO_nop_full(void)
{
_Asm_mf();
}
#define AO_HAVE_nop_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *p)
{
return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, 1,
_LDHINT_NONE, _DOWN_MEM_FENCE);
}
#define AO_HAVE_fetch_and_add1_acquire
AO_INLINE AO_t
AO_fetch_and_add1_release (volatile AO_t *p)
{
return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, 1,
_LDHINT_NONE, _UP_MEM_FENCE);
}
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
AO_fetch_and_sub1_acquire (volatile AO_t *p)
{
return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, -1,
_LDHINT_NONE, _DOWN_MEM_FENCE);
}
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
AO_fetch_and_sub1_release (volatile AO_t *p)
{
return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, -1,
_LDHINT_NONE, _UP_MEM_FENCE);
}
#define AO_HAVE_fetch_and_sub1_release
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
_Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE);
return _Asm_cmpxchg(AO_T_SIZE, _SEM_ACQ, addr,
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
}
#define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
_Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE);
return _Asm_cmpxchg(AO_T_SIZE, _SEM_REL, addr,
new_val, _LDHINT_NONE, _UP_MEM_FENCE);
}
#define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
unsigned char old_val, unsigned char new_val)
{
_Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE);
return _Asm_cmpxchg(_SZ_B, _SEM_ACQ, addr,
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
}
#define AO_HAVE_char_fetch_compare_and_swap_acquire
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
unsigned char old_val, unsigned char new_val)
{
_Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE);
return _Asm_cmpxchg(_SZ_B, _SEM_REL, addr,
new_val, _LDHINT_NONE, _UP_MEM_FENCE);
}
#define AO_HAVE_char_fetch_compare_and_swap_release
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
_Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE);
return _Asm_cmpxchg(_SZ_H, _SEM_ACQ, addr,
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
}
#define AO_HAVE_short_fetch_compare_and_swap_acquire
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
_Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE);
return _Asm_cmpxchg(_SZ_H, _SEM_REL, addr,
new_val, _LDHINT_NONE, _UP_MEM_FENCE);
}
#define AO_HAVE_short_fetch_compare_and_swap_release
#ifndef __LP64__
# define AO_T_IS_INT
#endif
#undef AO_T_FASIZE
#undef AO_T_SIZE

View File

@ -0,0 +1,183 @@
/* Memory model documented at */
/* http://www-106.ibm.com/developerworks/eserver/articles/archguide.html */
/* and (clearer) at */
/* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */
/* There appears to be no implicit ordering between any kind of */
/* independent memory references. */
/* Architecture enforces some ordering based on control dependence. */
/* I don't know if that could help. */
/* Data-dependent loads are always ordered. */
/* Based on the above references, eieio is intended for use on */
/* uncached memory, which we don't support. It does not order loads */
/* from cached memory. */
/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */
/* track some of this down and correcting my misunderstandings. -HB */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_ao_t.h"
void AO_sync(void);
#pragma mc_func AO_sync { "7c0004ac" }
#ifdef __NO_LWSYNC__
# define AO_lwsync AO_sync
#else
void AO_lwsync(void);
#pragma mc_func AO_lwsync { "7c2004ac" }
#endif
#define AO_nop_write() AO_lwsync()
#define AO_HAVE_nop_write
#define AO_nop_read() AO_lwsync()
#define AO_HAVE_nop_read
/* We explicitly specify load_acquire and store_release, since these */
/* rely on the fact that lwsync is also a LoadStore barrier. */
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result = *addr;
AO_lwsync();
return result;
}
#define AO_HAVE_load_acquire
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t value)
{
AO_lwsync();
*addr = value;
}
#define AO_HAVE_store_release
#ifndef AO_PREFER_GENERALIZED
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* Completely untested. And we should be using smaller objects anyway. */
unsigned long oldval;
unsigned long temp = 1; /* locked value */
__asm__ __volatile__(
"1:ldarx %0,0,%1\n" /* load and reserve */
"cmpdi %0, 0\n" /* if load is */
"bne 2f\n" /* non-zero, return already set */
"stdcx. %2,0,%1\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"2:\n" /* oldval is zero if we set */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
#else
int oldval;
int temp = 1; /* locked value */
__asm__ __volatile__(
"1:lwarx %0,0,%1\n" /* load and reserve */
"cmpwi %0, 0\n" /* if load is */
"bne 2f\n" /* non-zero, return already set */
"stwcx. %2,0,%1\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"2:\n" /* oldval is zero if we set */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
#endif
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr) {
AO_TS_VAL_t result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr) {
AO_lwsync();
return AO_test_and_set(addr);
}
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_VAL_t result;
AO_lwsync();
result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
AO_t fetched_val;
# if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
__asm__ __volatile__(
"1:ldarx %0,0,%1\n" /* load and reserve */
"cmpd %0, %3\n" /* if load is not equal to */
"bne 2f\n" /* old_val, fail */
"stdcx. %2,0,%1\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"2:\n"
: "=&r"(fetched_val)
: "r"(addr), "r"(new_val), "r"(old_val)
: "memory", "cr0");
# else
__asm__ __volatile__(
"1:lwarx %0,0,%1\n" /* load and reserve */
"cmpw %0, %3\n" /* if load is not equal to */
"bne 2f\n" /* old_val, fail */
"stwcx. %2,0,%1\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"2:\n"
: "=&r"(fetched_val)
: "r"(addr), "r"(new_val), "r"(old_val)
: "memory", "cr0");
# endif
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_lwsync();
return AO_fetch_compare_and_swap(addr, old_val, new_val);
}
#define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t result;
AO_lwsync();
result = AO_fetch_compare_and_swap(addr, old_val, new_val);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_compare_and_swap_full
/* TODO: Implement AO_fetch_and_add, AO_and/or/xor directly. */
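/* Until then, a portable sketch of fetch_and_add in terms of the */
/* CAS above (illustration only, with a hypothetical name; the */
/* library's generalization layer derives an equivalent): */
#if 0
AO_INLINE AO_t
example_fetch_and_add(volatile AO_t *p, AO_t incr)
{
  AO_t old;
  do {
    old = *p;                 /* snapshot the current value */
  } while (AO_fetch_compare_and_swap(p, old, old + incr) != old);
  return old;                 /* the value before the addition */
}
#endif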

View File

@ -0,0 +1,207 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file specifies Itanium primitives for use with the Intel (ecc)
* compiler. We use intrinsics instead of the inline assembly code in the
* gcc file.
*/
#include "../all_atomic_load_store.h"
#include "../test_and_set_t_is_char.h"
#include <ia64intrin.h>
/* The acquire/release semantics of volatile can be turned off, and volatile */
/* operations in icc9 do not imply ordering with respect to other nonvolatile */
/* operations. */
#define AO_INTEL_PTR_t void *
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *p)
{
return (AO_t)(__ld8_acq((AO_INTEL_PTR_t)p));
}
#define AO_HAVE_load_acquire
AO_INLINE void
AO_store_release(volatile AO_t *p, AO_t val)
{
__st8_rel((AO_INTEL_PTR_t)p, (__int64)val);
}
#define AO_HAVE_store_release
AO_INLINE unsigned char
AO_char_load_acquire(const volatile unsigned char *p)
{
/* A normal volatile load generates an ld.acq */
return (__ld1_acq((AO_INTEL_PTR_t)p));
}
#define AO_HAVE_char_load_acquire
AO_INLINE void
AO_char_store_release(volatile unsigned char *p, unsigned char val)
{
__st1_rel((AO_INTEL_PTR_t)p, val);
}
#define AO_HAVE_char_store_release
AO_INLINE unsigned short
AO_short_load_acquire(const volatile unsigned short *p)
{
/* A normal volatile load generates an ld.acq */
return (__ld2_acq((AO_INTEL_PTR_t)p));
}
#define AO_HAVE_short_load_acquire
AO_INLINE void
AO_short_store_release(volatile unsigned short *p, unsigned short val)
{
__st2_rel((AO_INTEL_PTR_t)p, val);
}
#define AO_HAVE_short_store_release
AO_INLINE unsigned int
AO_int_load_acquire(const volatile unsigned int *p)
{
/* A normal volatile load generates an ld.acq */
return (__ld4_acq((AO_INTEL_PTR_t)p));
}
#define AO_HAVE_int_load_acquire
AO_INLINE void
AO_int_store_release(volatile unsigned int *p, unsigned int val)
{
__st4_rel((AO_INTEL_PTR_t)p, val);
}
#define AO_HAVE_int_store_release
AO_INLINE void
AO_nop_full(void)
{
__mf();
}
#define AO_HAVE_nop_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire(volatile AO_t *p)
{
return __fetchadd8_acq((unsigned __int64 *)p, 1);
}
#define AO_HAVE_fetch_and_add1_acquire
AO_INLINE AO_t
AO_fetch_and_add1_release(volatile AO_t *p)
{
return __fetchadd8_rel((unsigned __int64 *)p, 1);
}
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
AO_fetch_and_sub1_acquire(volatile AO_t *p)
{
return __fetchadd8_acq((unsigned __int64 *)p, -1);
}
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
AO_fetch_and_sub1_release(volatile AO_t *p)
{
return __fetchadd8_rel((unsigned __int64 *)p, -1);
}
#define AO_HAVE_fetch_and_sub1_release
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
return _InterlockedCompareExchange64_acq(addr, new_val, old_val);
}
#define AO_HAVE_fetch_compare_and_swap_acquire
AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
return _InterlockedCompareExchange64_rel(addr, new_val, old_val);
}
#define AO_HAVE_fetch_compare_and_swap_release
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
unsigned char old_val,
unsigned char new_val)
{
return _InterlockedCompareExchange8_acq(addr, new_val, old_val);
}
#define AO_HAVE_char_fetch_compare_and_swap_acquire
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
unsigned char old_val,
unsigned char new_val)
{
return _InterlockedCompareExchange8_rel(addr, new_val, old_val);
}
#define AO_HAVE_char_fetch_compare_and_swap_release
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
return _InterlockedCompareExchange16_acq(addr, new_val, old_val);
}
#define AO_HAVE_short_fetch_compare_and_swap_acquire
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
return _InterlockedCompareExchange16_rel(addr, new_val, old_val);
}
#define AO_HAVE_short_fetch_compare_and_swap_release
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
unsigned int old_val,
unsigned int new_val)
{
return _InterlockedCompareExchange_acq(addr, new_val, old_val);
}
#define AO_HAVE_int_fetch_compare_and_swap_acquire
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr,
unsigned int old_val,
unsigned int new_val)
{
return _InterlockedCompareExchange_rel(addr, new_val, old_val);
}
#define AO_HAVE_int_fetch_compare_and_swap_release
#undef AO_INTEL_PTR_t

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file adds definitions appropriate for environments in which */
/* volatile load of a given type has acquire semantics, and volatile */
/* store of a given type has release semantics. This is arguably */
/* supposed to be true with the standard Itanium software conventions. */
/* Empirically gcc/ia64 does some reordering of ordinary operations */
/* around volatiles even when we think it should not. GCC v3.3 and */
/* earlier could reorder a volatile store with another store. As of */
/* March 2005, gcc pre-4 reuses some previously computed common */
/* subexpressions across a volatile load; hence, we now add compiler */
/* barriers for gcc. */
#ifndef AO_HAVE_GCC_BARRIER
/* TODO: Check GCC version (if workaround not needed for modern GCC). */
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER() (void)0
# endif
# define AO_HAVE_GCC_BARRIER
#endif
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result = *addr;
/* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_load_acquire
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t new_val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel (on IA-64). */
*addr = new_val;
}
#define AO_HAVE_store_release
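/* Illustration only: the classic message-passing pattern these */
/* primitives are meant for (hypothetical variables and helpers): */
#if 0
AO_t payload;          /* ordinary data, published via the flag */
volatile AO_t ready;   /* flag accessed with release/acquire */

void example_producer(void)
{
  payload = 42;
  AO_store_release(&ready, 1); /* payload becomes visible first */
}

void example_consumer(void)
{
  while (AO_load_acquire(&ready) == 0) {
    /* spin until the producer publishes */
  }
  /* here payload is guaranteed to read as 42 */
}
#endif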

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file adds definitions appropriate for environments in which */
/* volatile load of a given type has acquire semantics, and volatile */
/* store of a given type has release semantics. This is arguably */
/* supposed to be true with the standard Itanium software conventions. */
/* Empirically gcc/ia64 does some reordering of ordinary operations */
/* around volatiles even when we think it should not. GCC v3.3 and */
/* earlier could reorder a volatile store with another store. As of */
/* March 2005, gcc pre-4 reuses some previously computed common */
/* subexpressions across a volatile load; hence, we now add compiler */
/* barriers for gcc. */
#ifndef AO_HAVE_GCC_BARRIER
/* TODO: Check GCC version (if workaround not needed for modern GCC). */
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER() (void)0
# endif
# define AO_HAVE_GCC_BARRIER
#endif
AO_INLINE XCTYPE
AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
{
XCTYPE result = *addr;
/* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_XSIZE_load_acquire
AO_INLINE void
AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel (on IA-64). */
*addr = new_val;
}
#define AO_HAVE_XSIZE_store_release

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which loads of given type are */
/* atomic (either for suitably aligned data only or for any legal */
/* alignment). */
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
{
# ifdef AO_ACCESS_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
return *(const AO_t *)addr;
}
#define AO_HAVE_load

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which loads of given type are */
/* atomic (either for suitably aligned data only or for any legal */
/* alignment). */
AO_INLINE XCTYPE
AO_XSIZE_load(const volatile XCTYPE *addr)
{
# ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
return *(const XCTYPE *)addr;
}
#define AO_HAVE_XSIZE_load

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which stores of given type are */
/* atomic (either for suitably aligned data only or for any legal */
/* alignment). */
AO_INLINE void
AO_store(volatile AO_t *addr, AO_t new_val)
{
# ifdef AO_ACCESS_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(AO_t *)addr = new_val;
}
#define AO_HAVE_store

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which stores of given type are */
/* atomic (either for suitably aligned data only or for any legal */
/* alignment). */
AO_INLINE void
AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val)
{
# ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(XCTYPE *)addr = new_val;
}
#define AO_HAVE_XSIZE_store

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file adds definitions appropriate for environments in which */
/* volatile load of a given type has acquire semantics, and volatile */
/* store of a given type has release semantics. This is arguably */
/* supposed to be true with the standard Itanium software conventions. */
/* Empirically gcc/ia64 does some reordering of ordinary operations */
/* around volatiles even when we think it should not. GCC v3.3 and */
/* earlier could reorder a volatile store with another store. As of */
/* March 2005, gcc pre-4 reuses some previously computed common */
/* subexpressions across a volatile load; hence, we now add compiler */
/* barriers for gcc. */
#ifndef AO_HAVE_GCC_BARRIER
/* TODO: Check GCC version (if workaround not needed for modern GCC). */
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER() (void)0
# endif
# define AO_HAVE_GCC_BARRIER
#endif
AO_INLINE unsigned/**/char
AO_char_load_acquire(const volatile unsigned/**/char *addr)
{
unsigned/**/char result = *addr;
/* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_char_load_acquire
AO_INLINE void
AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char new_val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel (on IA-64). */
*addr = new_val;
}
#define AO_HAVE_char_store_release
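
/* An illustrative usage sketch (not part of the original header):   */
/* the message-passing idiom these primitives exist for.  The        */
/* release store keeps the payload write from sinking below the      */
/* flag; the acquire load keeps the payload read from hoisting       */
/* above it.  Assumes the public atomic_ops.h header.                */
#include "atomic_ops.h"

static int payload;                    /* ordinary, unsynchronized data */
static volatile unsigned char ready;   /* synchronization flag          */

void producer(void)
{
  payload = 42;                        /* ordinary store                */
  AO_char_store_release(&ready, 1);    /* publish: an st.rel on IA-64   */
}

int consumer(void)
{
  while (!AO_char_load_acquire(&ready)) { /* an ld.acq on IA-64 */ }
  return payload;                      /* guaranteed to observe 42      */
}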

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which loads of a given type     */
/* are atomic (either for suitably aligned data only or for any     */
/* legal alignment).                                                 */
AO_INLINE unsigned/**/char
AO_char_load(const volatile unsigned/**/char *addr)
{
# ifdef AO_ACCESS_char_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
return *(const unsigned/**/char *)addr;
}
#define AO_HAVE_char_load

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which stores of a given type    */
/* are atomic (either for suitably aligned data only or for any     */
/* legal alignment).                                                 */
AO_INLINE void
AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val)
{
# ifdef AO_ACCESS_char_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(unsigned/**/char *)addr = new_val;
}
#define AO_HAVE_char_store

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2013 Ivan Maidanski
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which AO_double_t loads and stores */
/* are atomic (either for suitably aligned data only or for any legal */
/* alignment). */
AO_INLINE AO_double_t
AO_double_load(const volatile AO_double_t *addr)
{
AO_double_t result;
# ifdef AO_ACCESS_double_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile in case it adds fence semantics. */
result.AO_whole = ((const AO_double_t *)addr)->AO_whole;
return result;
}
#define AO_HAVE_double_load
AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t new_val)
{
# ifdef AO_ACCESS_double_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
((AO_double_t *)addr)->AO_whole = new_val.AO_whole;
}
#define AO_HAVE_double_store
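
/* An illustrative usage sketch (not part of the original header):   */
/* AO_double_t allows a consistent two-word snapshot, e.g. a value   */
/* paired with a version tag, without locking.  The AO_val1/AO_val2  */
/* field names come from standard_ao_double_t.h; assumes the public  */
/* atomic_ops.h header.                                              */
#include "atomic_ops.h"

static volatile AO_double_t slot;

void write_pair(AO_t value, AO_t version)
{
  AO_double_t d;
  d.AO_val1 = value;
  d.AO_val2 = version;
  AO_double_store(&slot, d);   /* both words become visible together */
}

AO_double_t read_pair(void)
{
  return AO_double_load(&slot);   /* never returns a torn pair */
}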

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file adds definitions appropriate for environments in which */
/* volatile load of a given type has acquire semantics, and volatile */
/* store of a given type has release semantics. This is arguably */
/* supposed to be true with the standard Itanium software conventions. */
/* Empirically gcc/ia64 does some reordering of ordinary operations */
/* around volatiles even when we think it should not. GCC v3.3 and */
/* earlier could reorder a volatile store with another store. As of */
/* March 2005, gcc pre-4 reuses some previously computed common */
/* subexpressions across a volatile load; hence, we now add compiler */
/* barriers for gcc. */
#ifndef AO_HAVE_GCC_BARRIER
/* TODO: Check GCC version (if workaround not needed for modern GCC). */
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER() (void)0
# endif
# define AO_HAVE_GCC_BARRIER
#endif
AO_INLINE unsigned
AO_int_load_acquire(const volatile unsigned *addr)
{
unsigned result = *addr;
/* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_int_load_acquire
AO_INLINE void
AO_int_store_release(volatile unsigned *addr, unsigned new_val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel (on IA-64). */
*addr = new_val;
}
#define AO_HAVE_int_store_release

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which loads of a given type     */
/* are atomic (either for suitably aligned data only or for any     */
/* legal alignment).                                                 */
AO_INLINE unsigned
AO_int_load(const volatile unsigned *addr)
{
# ifdef AO_ACCESS_int_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
return *(const unsigned *)addr;
}
#define AO_HAVE_int_load

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which stores of a given type    */
/* are atomic (either for suitably aligned data only or for any     */
/* legal alignment).                                                 */
AO_INLINE void
AO_int_store(volatile unsigned *addr, unsigned new_val)
{
# ifdef AO_ACCESS_int_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(unsigned *)addr = new_val;
}
#define AO_HAVE_int_store

View File

@ -0,0 +1,135 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_char_load
/* char_load_read is defined in generalize-small. */
# define AO_char_load_acquire(addr) AO_char_load_read(addr)
# define AO_HAVE_char_load_acquire
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_short_load
/* short_load_read is defined in generalize-small. */
# define AO_short_load_acquire(addr) AO_short_load_read(addr)
# define AO_HAVE_short_load_acquire
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_int_load
/* int_load_read is defined in generalize-small. */
# define AO_int_load_acquire(addr) AO_int_load_read(addr)
# define AO_HAVE_int_load_acquire
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_load
/* load_read is defined in generalize-small. */
# define AO_load_acquire(addr) AO_load_read(addr)
# define AO_HAVE_load_acquire
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_double_load
/* double_load_read is defined in generalize-small. */
# define AO_double_load_acquire(addr) AO_double_load_read(addr)
# define AO_HAVE_double_load_acquire
#endif
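
/* A sketch of what the generalizations above amount to (not part of */
/* the original header): on an architecture with ordered loads, an   */
/* acquire load is just a plain atomic load followed by a read       */
/* barrier, which is what AO_load_read provides.  Assumes the public */
/* atomic_ops.h header.                                              */
#include "atomic_ops.h"

AO_t sketch_load_acquire(const volatile AO_t *addr)
{
  AO_t result = AO_load(addr);   /* plain atomic load                 */
  AO_nop_read();                 /* read barrier; on read-ordered     */
                                 /* machines only a compiler barrier  */
  return result;
}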

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_XSIZE_load
/* XSIZE_load_read is defined in generalize-small. */
# define AO_XSIZE_load_acquire(addr) AO_XSIZE_load_read(addr)
# define AO_HAVE_XSIZE_load_acquire
#endif

View File

@ -0,0 +1,135 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_char_store
# define AO_char_store_release(addr, val) \
(AO_nop_write(), AO_char_store(addr, val))
# define AO_HAVE_char_store_release
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_short_store
# define AO_short_store_release(addr, val) \
(AO_nop_write(), AO_short_store(addr, val))
# define AO_HAVE_short_store_release
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_int_store
# define AO_int_store_release(addr, val) \
(AO_nop_write(), AO_int_store(addr, val))
# define AO_HAVE_int_store_release
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_store
# define AO_store_release(addr, val) \
(AO_nop_write(), AO_store(addr, val))
# define AO_HAVE_store_release
#endif
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_double_store
# define AO_double_store_release(addr, val) \
(AO_nop_write(), AO_double_store(addr, val))
# define AO_HAVE_double_store_release
#endif
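
/* A sketch of what the definitions above expand to (not part of the */
/* original header): on a write-ordered architecture a release store */
/* is a write barrier followed by a plain atomic store, so no        */
/* earlier write can be reordered past the published value.  Assumes */
/* the public atomic_ops.h header.                                   */
#include "atomic_ops.h"

void sketch_store_release(volatile AO_t *addr, AO_t val)
{
  AO_nop_write();        /* order all earlier stores first */
  AO_store(addr, val);   /* then publish the value         */
}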

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef AO_HAVE_XSIZE_store
# define AO_XSIZE_store_release(addr, val) \
(AO_nop_write(), AO_XSIZE_store(addr, val))
# define AO_HAVE_XSIZE_store_release
#endif

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file adds definitions appropriate for environments in which */
/* volatile load of a given type has acquire semantics, and volatile */
/* store of a given type has release semantics. This is arguably */
/* supposed to be true with the standard Itanium software conventions. */
/* Empirically gcc/ia64 does some reordering of ordinary operations */
/* around volatiles even when we think it should not. GCC v3.3 and */
/* earlier could reorder a volatile store with another store. As of */
/* March 2005, gcc pre-4 reuses some previously computed common */
/* subexpressions across a volatile load; hence, we now add compiler */
/* barriers for gcc. */
#ifndef AO_HAVE_GCC_BARRIER
/* TODO: Check GCC version (if workaround not needed for modern GCC). */
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER() (void)0
# endif
# define AO_HAVE_GCC_BARRIER
#endif
AO_INLINE unsigned/**/short
AO_short_load_acquire(const volatile unsigned/**/short *addr)
{
unsigned/**/short result = *addr;
/* A normal volatile load generates an ld.acq (on IA-64). */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_short_load_acquire
AO_INLINE void
AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short new_val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel (on IA-64). */
*addr = new_val;
}
#define AO_HAVE_short_store_release

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which loads of a given type     */
/* are atomic (either for suitably aligned data only or for any     */
/* legal alignment).                                                 */
AO_INLINE unsigned/**/short
AO_short_load(const volatile unsigned/**/short *addr)
{
# ifdef AO_ACCESS_short_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
return *(const unsigned/**/short *)addr;
}
#define AO_HAVE_short_load

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Definitions for architectures on which stores of a given type    */
/* are atomic (either for suitably aligned data only or for any     */
/* legal alignment).                                                 */
AO_INLINE void
AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val)
{
# ifdef AO_ACCESS_short_CHECK_ALIGNED
AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(unsigned/**/short *)addr = new_val;
}
#define AO_HAVE_short_store

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2009-2017 Ivan Maidanski
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef AO_ASSUME_WINDOWS98
/* CAS is always available */
# define AO_ASSUME_WINDOWS98
#endif
#include "common32_defs.h"
#include "../test_and_set_t_is_ao_t.h"
/* AO_test_and_set_full() is emulated using CAS. */
/* An ARM slide set, if read correctly, claims that a load followed  */
/* by either a load or a store is ordered, but nothing else is.      */
/* It is assumed that Windows interrupt handlers clear the LL/SC flag. */
/* Unaligned accesses are not guaranteed to be atomic. */
#include "../all_aligned_atomic_load_store.h"
/* If only a single processor is used, we can define AO_UNIPROCESSOR. */
#ifdef AO_UNIPROCESSOR
AO_INLINE void AO_nop_full(void)
{
AO_compiler_barrier();
}
# define AO_HAVE_nop_full
#else
/* AO_nop_full() is emulated using AO_test_and_set_full(). */
#endif
#if _M_ARM >= 6
/* ARMv6 is the first architecture providing support for simple LL/SC. */
/* #include "../standard_ao_double_t.h" */
/* TODO: implement double-wide operations (similar to x86). */
#else /* _M_ARM < 6 */
/* TODO: implement AO_test_and_set_full using SWP. */
#endif /* _M_ARM < 6 */
#define AO_T_IS_INT
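
/* A client-side configuration sketch (assumed usage, not part of    */
/* the header): on a known single-core ARM target the full barrier   */
/* can be reduced to a compiler barrier by defining AO_UNIPROCESSOR  */
/* before including the library header.                              */
#define AO_UNIPROCESSOR 1
#include "atomic_ops.h"

void publish_flag(volatile AO_t *flag)
{
  AO_store(flag, 1);
  AO_nop_full();   /* with AO_UNIPROCESSOR: just a compiler barrier */
}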

View File

@ -0,0 +1,212 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
* Copyright (c) 2009-2018 Ivan Maidanski
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file contains AO primitives based on VC++ built-in intrinsic */
/* functions commonly available across 32-bit architectures. */
/* This file should be included from arch-specific header files. */
/* Define AO_USE_INTERLOCKED_INTRINSICS if the _Interlocked          */
/* primitives (used below) are available as intrinsics for the       */
/* target architecture (otherwise the "Interlocked" function family  */
/* is used instead).                                                 */
/* Define AO_ASSUME_WINDOWS98 if CAS is available. */
#if _MSC_VER <= 1400 || !defined(AO_USE_INTERLOCKED_INTRINSICS) \
|| defined(_WIN32_WCE)
# include <windows.h>
/* Seems like over-kill, but that's what MSDN recommends. */
/* And apparently winbase.h is not always self-contained. */
#endif
#if _MSC_VER < 1310 || !defined(AO_USE_INTERLOCKED_INTRINSICS)
# define _InterlockedIncrement InterlockedIncrement
# define _InterlockedDecrement InterlockedDecrement
# define _InterlockedExchangeAdd InterlockedExchangeAdd
# define _InterlockedCompareExchange InterlockedCompareExchange
# define AO_INTERLOCKED_VOLATILE /**/
#else /* elif _MSC_VER >= 1310 */
# if _MSC_VER >= 1400
# ifndef _WIN32_WCE
# include <intrin.h>
# endif
# else /* elif _MSC_VER < 1400 */
# ifdef __cplusplus
extern "C" {
# endif
LONG __cdecl _InterlockedIncrement(LONG volatile *);
LONG __cdecl _InterlockedDecrement(LONG volatile *);
LONG __cdecl _InterlockedExchangeAdd(LONG volatile *, LONG);
LONG __cdecl _InterlockedCompareExchange(LONG volatile *,
LONG /* Exchange */, LONG /* Comp */);
# ifdef __cplusplus
} /* extern "C" */
# endif
# endif /* _MSC_VER < 1400 */
# if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98)
# pragma intrinsic (_InterlockedIncrement)
# pragma intrinsic (_InterlockedDecrement)
# pragma intrinsic (_InterlockedExchangeAdd)
# endif /* !AO_PREFER_GENERALIZED */
# pragma intrinsic (_InterlockedCompareExchange)
# define AO_INTERLOCKED_VOLATILE volatile
#endif /* _MSC_VER >= 1310 */
#if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98)
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
{
return _InterlockedExchangeAdd((long AO_INTERLOCKED_VOLATILE *)p, incr);
}
#define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
AO_fetch_and_add1_full(volatile AO_t *p)
{
return _InterlockedIncrement((long AO_INTERLOCKED_VOLATILE *)p) - 1;
}
#define AO_HAVE_fetch_and_add1_full
AO_INLINE AO_t
AO_fetch_and_sub1_full(volatile AO_t *p)
{
return _InterlockedDecrement((long AO_INTERLOCKED_VOLATILE *)p) + 1;
}
#define AO_HAVE_fetch_and_sub1_full
#endif /* !AO_PREFER_GENERALIZED */
#ifdef AO_ASSUME_WINDOWS98
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
# ifdef AO_OLD_STYLE_INTERLOCKED_COMPARE_EXCHANGE
return (AO_t)_InterlockedCompareExchange(
(void *AO_INTERLOCKED_VOLATILE *)addr,
(void *)new_val, (void *)old_val);
# else
return _InterlockedCompareExchange((long AO_INTERLOCKED_VOLATILE *)addr,
new_val, old_val);
# endif
}
# define AO_HAVE_fetch_compare_and_swap_full
#endif /* AO_ASSUME_WINDOWS98 */
#if (_MSC_VER > 1400) && (!defined(_M_ARM) || _MSC_VER >= 1800)
# pragma intrinsic (_InterlockedAnd8)
# pragma intrinsic (_InterlockedCompareExchange16)
# pragma intrinsic (_InterlockedOr8)
# pragma intrinsic (_InterlockedXor8)
AO_INLINE void
AO_char_and_full(volatile unsigned char *p, unsigned char value)
{
_InterlockedAnd8((char volatile *)p, value);
}
# define AO_HAVE_char_and_full
AO_INLINE void
AO_char_or_full(volatile unsigned char *p, unsigned char value)
{
_InterlockedOr8((char volatile *)p, value);
}
# define AO_HAVE_char_or_full
AO_INLINE void
AO_char_xor_full(volatile unsigned char *p, unsigned char value)
{
_InterlockedXor8((char volatile *)p, value);
}
# define AO_HAVE_char_xor_full
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
return _InterlockedCompareExchange16((short volatile *)addr,
new_val, old_val);
}
# define AO_HAVE_short_fetch_compare_and_swap_full
# ifndef AO_PREFER_GENERALIZED
# pragma intrinsic (_InterlockedIncrement16)
# pragma intrinsic (_InterlockedDecrement16)
AO_INLINE unsigned short
AO_short_fetch_and_add1_full(volatile unsigned short *p)
{
return _InterlockedIncrement16((short volatile *)p) - 1;
}
# define AO_HAVE_short_fetch_and_add1_full
AO_INLINE unsigned short
AO_short_fetch_and_sub1_full(volatile unsigned short *p)
{
return _InterlockedDecrement16((short volatile *)p) + 1;
}
# define AO_HAVE_short_fetch_and_sub1_full
# endif /* !AO_PREFER_GENERALIZED */
#endif /* _MSC_VER > 1400 */
#if _MSC_VER >= 1800 /* Visual Studio 2013+ */
# pragma intrinsic (_InterlockedCompareExchange8)
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
unsigned char old_val,
unsigned char new_val)
{
return _InterlockedCompareExchange8((char volatile *)addr,
new_val, old_val);
}
# define AO_HAVE_char_fetch_compare_and_swap_full
# if !defined(AO_PREFER_GENERALIZED) && !defined(_M_ARM)
# pragma intrinsic (_InterlockedExchangeAdd16)
# pragma intrinsic (_InterlockedExchangeAdd8)
AO_INLINE unsigned char
AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
{
return _InterlockedExchangeAdd8((char volatile *)p, incr);
}
# define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full(volatile unsigned short *p,
unsigned short incr)
{
return _InterlockedExchangeAdd16((short volatile *)p, incr);
}
# define AO_HAVE_short_fetch_and_add_full
# endif /* !AO_PREFER_GENERALIZED && !_M_ARM */
#endif /* _MSC_VER >= 1800 */
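
/* An illustrative usage sketch (not part of the original header):   */
/* the canonical compare-and-swap retry loop built on the            */
/* _InterlockedCompareExchange-based primitive above, here keeping   */
/* a running maximum.  Assumes the public atomic_ops.h header.       */
#include "atomic_ops.h"

void atomic_store_max(volatile AO_t *p, AO_t candidate)
{
  AO_t cur = AO_load(p);
  while (cur < candidate) {
    AO_t prev = AO_fetch_compare_and_swap_full(p, cur, candidate);
    if (prev == cur) break;   /* CAS succeeded; maximum installed  */
    cur = prev;               /* lost a race; retry with new value */
  }
}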

View File

@ -0,0 +1,145 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* If AO_ASSUME_VISTA is defined, we assume Windows Server 2003, Vista */
/* or later. */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_char.h"
#if !defined(AO_ASSUME_WINDOWS98) \
&& (defined(AO_ASSUME_VISTA) || _MSC_VER >= 1400)
/* Visual Studio 2005 (MS VC++ 8.0) discontinued support for Windows 95. */
# define AO_ASSUME_WINDOWS98
#endif
#ifndef AO_USE_INTERLOCKED_INTRINSICS
/* _Interlocked primitives (Inc, Dec, Xchg, Add) are always available */
# define AO_USE_INTERLOCKED_INTRINSICS
#endif
#include "common32_defs.h"
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
/* Unfortunately mfence doesn't exist everywhere. */
/* IsProcessorFeaturePresent(PF_COMPARE_EXCHANGE128) is probably a   */
/* conservative test for its availability.                           */
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
__asm { mfence }
}
#define AO_HAVE_nop_full
#else
/* We could use the cpuid instruction instead, but that seems to be  */
/* slower than the default implementation based on                   */
/* test_and_set_full, so we omit it here.                            */
#endif
#if !defined(AO_NO_ASM_XADD) && !defined(AO_HAVE_char_fetch_and_add_full)
AO_INLINE unsigned char
AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
{
__asm
{
mov al, incr
mov ebx, p
lock xadd byte ptr [ebx], al
}
/* Ignore possible "missing return value" warning here. */
}
# define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
{
__asm
{
mov ax, incr
mov ebx, p
lock xadd word ptr [ebx], ax
}
/* Ignore possible "missing return value" warning here. */
}
# define AO_HAVE_short_fetch_and_add_full
#endif /* !AO_NO_ASM_XADD */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
__asm
{
mov eax,0xff ; /* AO_TS_SET */
mov ebx,addr ;
xchg byte ptr [ebx],al ;
}
/* Ignore possible "missing return value" warning here. */
}
#define AO_HAVE_test_and_set_full
#if defined(_WIN64) && !defined(CPPCHECK)
# error wrong architecture
#endif
#ifdef AO_ASSUME_VISTA
# include "../standard_ao_double_t.h"
/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically (requires at least a Pentium). */
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"
/* On Pentium-class (and later) machines the cmpxchg8b instruction,  */
/* and hence _InterlockedCompareExchange64, is available.            */
# pragma intrinsic (_InterlockedCompareExchange64)
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
AO_ASSERT_ADDR_ALIGNED(addr);
return (double_ptr_storage)_InterlockedCompareExchange64(
(__int64 volatile *)addr,
new_val.AO_whole /* exchange */,
old_val.AO_whole) == old_val.AO_whole;
}
# define AO_HAVE_double_compare_and_swap_full
#endif /* AO_ASSUME_VISTA */
#define AO_T_IS_INT
/* Real X86 implementations, except for some old WinChips, appear */
/* to enforce ordering between memory operations, EXCEPT that a later */
/* read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
/* We ignore both the WinChips, and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
#include "../ordered_except_wr.h"

View File

@ -0,0 +1,313 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "../all_aligned_atomic_load_store.h"
/* Real X86 implementations appear */
/* to enforce ordering between memory operations, EXCEPT that a later */
/* read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
/* We ignore the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
#include "../ordered_except_wr.h"
#ifdef AO_ASM_X64_AVAILABLE
# include "../test_and_set_t_is_char.h"
#else
# include "../test_and_set_t_is_ao_t.h"
#endif
/* Assume _MSC_VER >= 1400 */
#include <intrin.h>
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedCompareExchange64)
#ifndef AO_PREFER_GENERALIZED
# pragma intrinsic (_InterlockedIncrement)
# pragma intrinsic (_InterlockedIncrement64)
# pragma intrinsic (_InterlockedDecrement)
# pragma intrinsic (_InterlockedDecrement64)
# pragma intrinsic (_InterlockedExchangeAdd)
# pragma intrinsic (_InterlockedExchangeAdd64)
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
return _InterlockedExchangeAdd64((__int64 volatile *)p, incr);
}
#define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
AO_fetch_and_add1_full (volatile AO_t *p)
{
return _InterlockedIncrement64((__int64 volatile *)p) - 1;
}
#define AO_HAVE_fetch_and_add1_full
AO_INLINE AO_t
AO_fetch_and_sub1_full (volatile AO_t *p)
{
return _InterlockedDecrement64((__int64 volatile *)p) + 1;
}
#define AO_HAVE_fetch_and_sub1_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
return (AO_t)_InterlockedCompareExchange64((__int64 volatile *)addr,
new_val, old_val);
}
#define AO_HAVE_fetch_compare_and_swap_full
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_full(volatile unsigned int *addr,
unsigned int old_val, unsigned int new_val)
{
return _InterlockedCompareExchange((long volatile *)addr, new_val, old_val);
}
#define AO_HAVE_int_fetch_compare_and_swap_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
{
return _InterlockedExchangeAdd((long volatile *)p, incr);
}
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE unsigned int
AO_int_fetch_and_add1_full(volatile unsigned int *p)
{
return _InterlockedIncrement((long volatile *)p) - 1;
}
# define AO_HAVE_int_fetch_and_add1_full
AO_INLINE unsigned int
AO_int_fetch_and_sub1_full(volatile unsigned int *p)
{
return _InterlockedDecrement((long volatile *)p) + 1;
}
# define AO_HAVE_int_fetch_and_sub1_full
#endif /* !AO_PREFER_GENERALIZED */
#if _MSC_VER > 1400
# pragma intrinsic (_InterlockedAnd8)
# pragma intrinsic (_InterlockedCompareExchange16)
# pragma intrinsic (_InterlockedOr8)
# pragma intrinsic (_InterlockedXor8)
AO_INLINE void
AO_char_and_full(volatile unsigned char *p, unsigned char value)
{
_InterlockedAnd8((char volatile *)p, value);
}
# define AO_HAVE_char_and_full
AO_INLINE void
AO_char_or_full(volatile unsigned char *p, unsigned char value)
{
_InterlockedOr8((char volatile *)p, value);
}
# define AO_HAVE_char_or_full
AO_INLINE void
AO_char_xor_full(volatile unsigned char *p, unsigned char value)
{
_InterlockedXor8((char volatile *)p, value);
}
# define AO_HAVE_char_xor_full
AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
unsigned short old_val,
unsigned short new_val)
{
return _InterlockedCompareExchange16((short volatile *)addr,
new_val, old_val);
}
# define AO_HAVE_short_fetch_compare_and_swap_full
# ifndef AO_PREFER_GENERALIZED
# pragma intrinsic (_InterlockedIncrement16)
# pragma intrinsic (_InterlockedDecrement16)
AO_INLINE unsigned short
AO_short_fetch_and_add1_full(volatile unsigned short *p)
{
return _InterlockedIncrement16((short volatile *)p) - 1;
}
# define AO_HAVE_short_fetch_and_add1_full
AO_INLINE unsigned short
AO_short_fetch_and_sub1_full(volatile unsigned short *p)
{
return _InterlockedDecrement16((short volatile *)p) + 1;
}
# define AO_HAVE_short_fetch_and_sub1_full
# endif /* !AO_PREFER_GENERALIZED */
#endif /* _MSC_VER > 1400 */
#if _MSC_VER >= 1800 /* Visual Studio 2013+ */
# pragma intrinsic (_InterlockedCompareExchange8)
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
unsigned char old_val,
unsigned char new_val)
{
return _InterlockedCompareExchange8((char volatile *)addr,
new_val, old_val);
}
# define AO_HAVE_char_fetch_compare_and_swap_full
# ifndef AO_PREFER_GENERALIZED
# pragma intrinsic (_InterlockedExchangeAdd16)
# pragma intrinsic (_InterlockedExchangeAdd8)
AO_INLINE unsigned char
AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
{
return _InterlockedExchangeAdd8((char volatile *)p, incr);
}
# define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full(volatile unsigned short *p,
unsigned short incr)
{
return _InterlockedExchangeAdd16((short volatile *)p, incr);
}
# define AO_HAVE_short_fetch_and_add_full
# endif /* !AO_PREFER_GENERALIZED */
#elif defined(AO_ASM_X64_AVAILABLE)
AO_INLINE unsigned char
AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
{
__asm
{
mov al, incr
mov rbx, p
lock xadd byte ptr [rbx], al
}
}
# define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
{
__asm
{
mov ax, incr
mov rbx, p
lock xadd word ptr [rbx], ax
}
}
# define AO_HAVE_short_fetch_and_add_full
#endif /* _MSC_VER < 1800 && AO_ASM_X64_AVAILABLE */
#ifdef AO_ASM_X64_AVAILABLE
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
AO_INLINE void
AO_nop_full(void)
{
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm { mfence }
}
# define AO_HAVE_nop_full
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
__asm
{
mov rax,AO_TS_SET ;
mov rbx,addr ;
xchg byte ptr [rbx],al ;
}
}
# define AO_HAVE_test_and_set_full
#endif /* AO_ASM_X64_AVAILABLE */
#ifdef AO_CMPXCHG16B_AVAILABLE
/* AO_compare_double_and_swap_double_full needs an implementation    */
/* for Win64.  Also see ../gcc/x86.h for a partial old Opteron       */
/* workaround.                                                       */
# if _MSC_VER >= 1500
# include "../standard_ao_double_t.h"
# pragma intrinsic (_InterlockedCompareExchange128)
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
__int64 comparandResult[2];
AO_ASSERT_ADDR_ALIGNED(addr);
comparandResult[0] = old_val1; /* low */
comparandResult[1] = old_val2; /* high */
return _InterlockedCompareExchange128((volatile __int64 *)addr,
new_val2 /* high */, new_val1 /* low */, comparandResult);
}
# define AO_HAVE_compare_double_and_swap_double_full
# elif defined(AO_ASM_X64_AVAILABLE)
# include "../standard_ao_double_t.h"
/* If the _InterlockedCompareExchange128 intrinsic is unavailable,   */
/* essentially the code below is needed.                             */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
__asm
{
mov rdx,QWORD PTR [old_val2] ;
mov rax,QWORD PTR [old_val1] ;
mov rcx,QWORD PTR [new_val2] ;
mov rbx,QWORD PTR [new_val1] ;
lock cmpxchg16b [addr] ;
setz al ;           /* setz takes an 8-bit register        */
movzx eax, al ;     /* zero-extend the success flag into   */
                    /* eax for the int return value        */
}
}
# define AO_HAVE_compare_double_and_swap_double_full
# endif /* AO_ASM_X64_AVAILABLE && (_MSC_VER < 1500) */
#endif /* AO_CMPXCHG16B_AVAILABLE */
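
/* An illustrative usage sketch (not part of the original header):   */
/* the usual reason to want a 16-byte CAS is an ABA-resistant        */
/* pointer-plus-tag pair, e.g. the head of a lock-free stack.  The   */
/* AO_val1/AO_val2 names come from standard_ao_double_t.h; assumes   */
/* the public atomic_ops.h header and that AO_double_load is also    */
/* available on this target.                                         */
#include "atomic_ops.h"

static volatile AO_double_t head;   /* AO_val1 = node, AO_val2 = tag */

int try_swing_head(AO_t new_node)
{
  AO_double_t old = AO_double_load(&head);
  /* Swing the pointer and bump the tag in one atomic step; a zero   */
  /* result means another thread got there first.                    */
  return AO_compare_double_and_swap_double_full(&head,
             old.AO_val1, old.AO_val2,
             new_node, old.AO_val2 + 1);
}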

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* These are common definitions for architectures that provide */
/* processor ordered memory operations. */
#include "ordered_except_wr.h"
AO_INLINE void
AO_nop_full(void)
{
AO_compiler_barrier();
}
#define AO_HAVE_nop_full
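For context, a typical AO_compiler_barrier() definition for GCC-compatible compilers (a sketch; the real definition lives in atomic_ops.h and varies by compiler) is an empty asm statement with a memory clobber, which stops the compiler from moving memory accesses across it while emitting no machine instructions:

#define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")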

@ -0,0 +1,42 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures that provide processor
* ordered memory operations except that a later read may pass an
* earlier write. Real x86 implementations seem to be in this category,
* except apparently for some IDT WinChips, which we ignore.
*/
#include "read_ordered.h"
AO_INLINE void
AO_nop_write(void)
{
/* The AO_nop_write implementation is the same as that of AO_nop_read. */
AO_compiler_barrier();
/* Intel docs would call for an sfence here (Pentium 3 and up), */
/* but it appears unnecessary for cached accesses. */
}
#define AO_HAVE_nop_write
#include "loadstore/ordered_stores_only.h"

@ -0,0 +1,37 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures that provide processor
* ordered memory operations except that a later read may pass an
* earlier write. Real x86 implementations seem to be in this category,
* except apparently for some IDT WinChips, which we ignore.
*/
AO_INLINE void
AO_nop_read(void)
{
AO_compiler_barrier();
}
#define AO_HAVE_nop_read
#include "loadstore/ordered_loads_only.h"

@ -0,0 +1,85 @@
/*
* Copyright (c) 2004-2011 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* For 64-bit systems, we expect the double type to hold two int64's. */
#if ((defined(__x86_64__) && defined(AO_GCC_ATOMIC_TEST_AND_SET)) \
|| defined(__aarch64__)) && !defined(__ILP32__)
/* x86-64: __m128 is not applicable to atomic intrinsics. */
# if AO_GNUC_PREREQ(4, 7) || AO_CLANG_PREREQ(3, 6)
# pragma GCC diagnostic push
/* Suppress warning about __int128 type. */
# if defined(__clang__) || AO_GNUC_PREREQ(6, 4)
# pragma GCC diagnostic ignored "-Wpedantic"
# else
/* GCC before ~4.8 does not accept "-Wpedantic" quietly. */
# pragma GCC diagnostic ignored "-pedantic"
# endif
typedef unsigned __int128 double_ptr_storage;
# pragma GCC diagnostic pop
# else /* pragma diagnostic is not supported */
typedef unsigned __int128 double_ptr_storage;
# endif
#elif ((defined(__x86_64__) && AO_GNUC_PREREQ(4, 0)) || defined(_WIN64)) \
&& !defined(__ILP32__)
/* x86-64 (except for x32): __m128 serves as a placeholder which also */
/* requires the compiler to align it on 16-byte boundary (as required */
/* by cmpxchg16b). */
/* Similar things could be done for PPC 64-bit using a VMX data type. */
# include <xmmintrin.h>
typedef __m128 double_ptr_storage;
#elif defined(_WIN32) && !defined(__GNUC__)
typedef unsigned __int64 double_ptr_storage;
#elif defined(__i386__) && defined(__GNUC__)
typedef unsigned long long double_ptr_storage
__attribute__((__aligned__(8)));
#else
typedef unsigned long long double_ptr_storage;
#endif
# define AO_HAVE_DOUBLE_PTR_STORAGE
typedef union {
struct { AO_t AO_v1; AO_t AO_v2; } AO_parts;
/* Note that AO_v1 corresponds to the low or the high part of */
/* AO_whole depending on the machine endianness. */
double_ptr_storage AO_whole;
/* AO_whole is now (starting from v7.3alpha3) the 2nd element */
/* of this union to make AO_DOUBLE_T_INITIALIZER portable */
/* (because __m128 definition could vary from a primitive type */
/* to a structure or array/vector). */
} AO_double_t;
#define AO_HAVE_double_t
/* Note: AO_double_t volatile variables are not intended to be local */
/* ones (at least those which are passed to AO double-wide primitives */
/* as the first argument), otherwise it is the client's responsibility to */
/* ensure they have double-word alignment. */
/* Dummy declaration as a compile-time assertion for AO_double_t size. */
struct AO_double_t_size_static_assert {
char dummy[sizeof(AO_double_t) == 2 * sizeof(AO_t) ? 1 : -1];
};
#define AO_DOUBLE_T_INITIALIZER { { (AO_t)0, (AO_t)0 } }
#define AO_val1 AO_parts.AO_v1
#define AO_val2 AO_parts.AO_v2
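A usage sketch for the union above (assuming a target that defines AO_HAVE_compare_double_and_swap_double_full; the values are illustrative):

#include "atomic_ops.h"

static volatile AO_double_t w = AO_DOUBLE_T_INITIALIZER; /* both halves zero */

int try_update(void)
{
    /* AO_val1/AO_val2 expand to the AO_parts members defined above. */
    return AO_compare_double_and_swap_double_full(&w,
               (AO_t)0, (AO_t)0,  /* expected old pair */
               (AO_t)1, (AO_t)2); /* new pair */
}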

@ -0,0 +1,5 @@
.seg "text"
.globl AO_test_and_set_full
AO_test_and_set_full:
retl
ldstub [%o0],%o0

@ -0,0 +1,44 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "../all_atomic_load_store.h"
/* Real SPARC code uses TSO: */
#include "../ordered_except_wr.h"
/* Test_and_set location is just a byte. */
#include "../test_and_set_t_is_char.h"
#ifdef __cplusplus
extern "C" {
#endif
extern AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr);
/* Implemented in separate .S file, for now. */
#define AO_HAVE_test_and_set_full
/* TODO: Like the gcc version, extend this for V8 and V9. */
#ifdef __cplusplus
} /* extern "C" */
#endif

@ -0,0 +1,240 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
* Copyright (c) 2009-2016 Ivan Maidanski
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
* Some of the machine specific code was borrowed from our GC distribution.
*/
/* The following really assume we have a 486 or better. */
#include "../all_aligned_atomic_load_store.h"
#include "../test_and_set_t_is_char.h"
#if !defined(AO_USE_PENTIUM4_INSTRS) && !defined(__i386)
/* "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
# define AO_USE_PENTIUM4_INSTRS
#endif
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__ ("mfence" : : : "memory");
}
# define AO_HAVE_nop_full
#else
/* We could use the cpuid instruction. But that seems to be slower */
/* than the default implementation based on test_and_set_full. Thus */
/* we omit that bit of misinformation here. */
#endif /* !AO_USE_PENTIUM4_INSTRS */
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
AO_t result;
__asm__ __volatile__ ("lock; xadd %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
unsigned char result;
__asm__ __volatile__ ("lock; xaddb %0, %1"
: "=q" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
unsigned short result;
__asm__ __volatile__ ("lock; xaddw %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; and %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_and_full
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; or %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_or_full
AO_INLINE void
AO_xor_full (volatile AO_t *p, AO_t value)
{
__asm__ __volatile__ ("lock; xor %1, %0"
: "+m" (*p)
: "r" (value)
: "memory");
}
# define AO_HAVE_xor_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full (volatile AO_TS_t *addr)
{
AO_TS_t oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
__asm__ __volatile__ ("xchg %b0, %1"
: "=q" (oldval), "+m" (*addr)
: "0" (0xff)
: "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
char result;
__asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1"
: "+m" (*addr), "=a" (result)
: "r" (new_val), "a" (old)
: "memory");
return (int) result;
}
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
AO_t fetched_val;
__asm__ __volatile__ ("lock; cmpxchg %2, %0"
: "+m" (*addr), "=a" (fetched_val)
: "r" (new_val), "a" (old_val)
: "memory");
return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_full
#if defined(__i386)
# ifndef AO_NO_CMPXCHG8B
# include "../standard_ao_double_t.h"
/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically (requires at least a Pentium). */
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"
/* Returns nonzero if the comparison succeeded. */
/* Really requires at least a Pentium. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
AO_t dummy; /* an output for clobbered edx */
char result;
__asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
: "+m" (*addr), "=a" (result), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1)
: "memory");
return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
# endif /* !AO_NO_CMPXCHG8B */
# define AO_T_IS_INT
#else /* x64 */
AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
unsigned int result;
__asm__ __volatile__ ("lock; xaddl %0, %1"
: "=r" (result), "+m" (*p)
: "0" (incr)
: "memory");
return result;
}
# define AO_HAVE_int_fetch_and_add_full
# ifdef AO_CMPXCHG16B_AVAILABLE
# include "../standard_ao_double_t.h"
/* Older AMD Opterons are missing this instruction (SIGILL should */
/* be thrown in this case). */
AO_INLINE int
AO_compare_double_and_swap_double_full (volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
AO_t dummy;
char result;
__asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1"
: "+m" (*addr), "=a" (result), "=d" (dummy)
: "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1)
: "memory");
return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
# endif /* AO_CMPXCHG16B_AVAILABLE */
#endif /* x64 */
/* Real X86 implementations, except for some old 32-bit WinChips, */
/* appear to enforce ordering between memory operations, EXCEPT that */
/* a later read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
/* We ignore both the WinChips and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
#include "../ordered_except_wr.h"

@ -0,0 +1,36 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures on which test_and_set
* operates on pointer-sized quantities, the "clear" value contains
* all zeroes, and the "set" value contains only one lowest bit set.
* This can be used if test_and_set is synthesized from compare_and_swap.
*/
typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val;
#define AO_TS_VAL_t AO_TS_val
#define AO_TS_CLEAR AO_TS_clear
#define AO_TS_SET AO_TS_set
#define AO_TS_t AO_t
#define AO_AO_TS_T 1
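The AO_TS_* names above are what client spinlock code is written against. A minimal sketch (AO_TS_INITIALIZER and AO_CLEAR are part of the public libatomic_ops API):

#include "atomic_ops.h"

static AO_TS_t lock = AO_TS_INITIALIZER;

void with_lock(void (*fn)(void))
{
    while (AO_test_and_set_full(&lock) == AO_TS_SET) {
        /* spin: the flag was already set by another thread */
    }
    fn();             /* critical section */
    AO_CLEAR(&lock);  /* release: store AO_TS_CLEAR with release semantics */
}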

@ -0,0 +1,51 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures on which test_and_set
* operates on byte sized quantities, the "clear" value contains
* all zeroes, and the "set" value contains all ones typically.
*/
#ifndef AO_GCC_ATOMIC_TEST_AND_SET
# define AO_TS_SET_TRUEVAL 0xff
#elif defined(__GCC_ATOMIC_TEST_AND_SET_TRUEVAL) \
&& !defined(AO_PREFER_GENERALIZED)
# define AO_TS_SET_TRUEVAL __GCC_ATOMIC_TEST_AND_SET_TRUEVAL
#else
# define AO_TS_SET_TRUEVAL 1 /* true */
#endif
typedef enum {
AO_BYTE_TS_clear = 0,
AO_BYTE_TS_set = AO_TS_SET_TRUEVAL
} AO_BYTE_TS_val;
#define AO_TS_VAL_t AO_BYTE_TS_val
#define AO_TS_CLEAR AO_BYTE_TS_clear
#define AO_TS_SET AO_BYTE_TS_set
#define AO_TS_t unsigned char
#define AO_CHAR_TS_T 1
#undef AO_TS_SET_TRUEVAL
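For comparison, a sketch of the GCC/Clang builtin whose "true" value AO_TS_SET_TRUEVAL mirrors: __atomic_test_and_set stores the implementation-defined "set" byte and reports whether the byte was already set, which is why client code must compare against AO_TS_SET rather than a hard-coded literal:

static unsigned char flag;

int was_already_set(void)
{
    /* Nonzero iff the flag byte already held the "set" value. */
    return __atomic_test_and_set(&flag, __ATOMIC_SEQ_CST);
}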

@ -29,9 +29,8 @@ $if dynamic_boehm ? {
}
}
} $else {
#flag -DGC_BUILTIN_ATOMIC=1
$if macos || linux {
#flag -DGC_BUILTIN_ATOMIC=1
#flag -I @VEXEROOT/thirdparty/libgc/include
$if (!macos && prod && !tinyc && !debug) || !(amd64 || arm64 || i386 || arm32) {
// TODO: replace the architecture check with a `!$exists("@VEXEROOT/thirdparty/tcc/lib/libgc.a")` comptime call
@ -43,6 +42,7 @@ $if dynamic_boehm ? {
#flag -lpthread
} $else $if freebsd {
// Tested on FreeBSD 13.0-RELEASE-p3, with clang, gcc and tcc:
#flag -DGC_BUILTIN_ATOMIC=1
#flag -DBUS_PAGE_FAULT=T_PAGEFLT
$if !tinyc {
#flag -I @VEXEROOT/thirdparty/libgc/include
@ -55,6 +55,7 @@ $if dynamic_boehm ? {
}
#flag -lpthread
} $else $if openbsd {
#flag -DGC_BUILTIN_ATOMIC=1
#flag -I/usr/local/include
#flag $first_existing("/usr/local/lib/libgc.a", "/usr/lib/libgc.a")
#flag -lpthread
@ -62,16 +63,27 @@ $if dynamic_boehm ? {
#flag -DGC_NOT_DLL=1
#flag -DGC_WIN32_THREADS=1
$if tinyc {
#flag -DGC_BUILTIN_ATOMIC=1
#flag -I @VEXEROOT/thirdparty/libgc/include
#flag @VEXEROOT/thirdparty/tcc/lib/libgc.a
#flag -luser32
} $else $if msvc {
// Build libatomic_ops
#flag @VEXEROOT/thirdparty/libatomic_ops/atomic_ops.o
#flag -I @VEXEROOT/thirdparty/libatomic_ops
#flag -I @VEXEROOT/thirdparty/libgc/include
#flag @VEXEROOT/thirdparty/libgc/gc.o
} $else {
#flag -DGC_BUILTIN_ATOMIC=1
#flag -I @VEXEROOT/thirdparty/libgc/include
#flag @VEXEROOT/thirdparty/libgc/gc.o
}
} $else $if $pkgconfig('bdw-gc') {
#flag -DGC_BUILTIN_ATOMIC=1
#pkgconfig bdw-gc
} $else {
#flag -DGC_BUILTIN_ATOMIC=1
#flag -lgc
}
}