[ltt-dev] [PATCH 08/11] use uatomic_gcc.h
Paolo Bonzini
pbonzini at redhat.com
Sat Feb 13 12:16:28 EST 2010
And now, really remove the code for PPC/S390/SPARC; just use the GCC
builtins on those architectures.
Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
---
urcu/uatomic_arch_ppc.h | 118 ------------------------------
urcu/uatomic_arch_s390.h | 170 -------------------------------------------
urcu/uatomic_arch_sparc64.h | 84 +---------------------
3 files changed, 1 insertions(+), 371 deletions(-)
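
For context, here is a minimal sketch of the kind of mapping uatomic_gcc.h
can provide on top of the GCC __sync builtins. This is an illustration of
the approach, not the literal contents of the header; the real definitions
may differ:

    /* Full-barrier compare-and-swap; returns the value previously at addr. */
    #define uatomic_cmpxchg(addr, old, _new)	\
    	__sync_val_compare_and_swap((addr), (old), (_new))

    /* Full-barrier add; __sync_add_and_fetch() returns the new value. */
    #define uatomic_add_return(addr, v)		\
    	__sync_add_and_fetch((addr), (v))

    /*
     * __sync_lock_test_and_set() is an atomic exchange but only an
     * acquire barrier, so issue a full barrier first to keep
     * uatomic_xchg() fully ordered.
     */
    #define uatomic_xchg(addr, v)				\
    	({						\
    		__sync_synchronize();			\
    		__sync_lock_test_and_set((addr), (v));	\
    	})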
diff --git a/urcu/uatomic_arch_ppc.h b/urcu/uatomic_arch_ppc.h
index c611c7a..c1eb0f5 100644
--- a/urcu/uatomic_arch_ppc.h
+++ b/urcu/uatomic_arch_ppc.h
@@ -96,124 +96,6 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
#define uatomic_xchg(addr, v) \
((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
sizeof(*(addr))))
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "cmpd %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val)
- : "r"(addr), "r"((unsigned int)_new),
- "r"((unsigned int)old)
- : "memory", "cc");
-
- return old_val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "cmpd %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val),
- : "r"(addr), "r"((unsigned long)_new),
- "r"((unsigned long)old)
- : "memory", "cc");
-
- return old_val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stwcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stdcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
#ifdef __cplusplus
}
#endif
diff --git a/urcu/uatomic_arch_s390.h b/urcu/uatomic_arch_s390.h
index 9d4b74e..cb0269b 100644
--- a/urcu/uatomic_arch_s390.h
+++ b/urcu/uatomic_arch_s390.h
@@ -31,176 +31,6 @@
#include <urcu/compiler.h>
#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define COMPILER_HAVE_SHORT_MEM_OPERAND
-#endif
-
-/*
- * MEMOP assembler operand rules:
- * - op refer to MEMOP_IN operand
- * - MEMOP_IN can expand to more than a single operand. Use it at the end of
- * operand list only.
- */
-
-#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
-
-#define MEMOP_OUT(addr) "=Q" (*(addr))
-#define MEMOP_IN(addr) "Q" (*(addr))
-#define MEMOP_REF(op) #op /* op refer to MEMOP_IN operand */
-
-#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-#define MEMOP_OUT(addr) "=m" (*(addr))
-#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
-#define MEMOP_REF(op) "0(" #op ")" /* op refer to MEMOP_IN operand */
-
-#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- "0: cs %0,%2," MEMOP_REF(%3) "\n"
- " brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- "0: csg %0,%2," MEMOP_REF(%3) "\n"
- " brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr)))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val = (unsigned int)old;
-
- __asm__ __volatile__(
- " cs %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- " csg %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old;
- }
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, _new) \
- (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
- (unsigned long)(old), \
- (unsigned long)(_new), \
- sizeof(*(addr)))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
- } while (oldt != old);
-
- return old + val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
- }
- __builtin_trap();
- return 0;
-}
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-#ifdef __cplusplus
-}
-#endif
-
#include <urcu/uatomic_gcc.h>
#endif /* _URCU_UATOMIC_ARCH_S390_H */
diff --git a/urcu/uatomic_arch_sparc64.h b/urcu/uatomic_arch_sparc64.h
index d21d73c..6169447 100644
--- a/urcu/uatomic_arch_sparc64.h
+++ b/urcu/uatomic_arch_sparc64.h
@@ -33,6 +33,7 @@ extern "C" {
/* cmpxchg */
+/* GCC only places a memory barrier before. */
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
unsigned long _new, int len)
@@ -75,89 +76,6 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
(unsigned long)(_new), \
sizeof(*(addr))))
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, val, 4);
- } while (oldt != old);
-
- return old;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, val, 8);
- } while (oldt != old);
-
- return old;
- }
-#endif
- }
- __builtin_trap();
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
- } while (oldt != old);
-
- return old + val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
- }
- __builtin_trap();
- return 0;
-}
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
#ifdef __cplusplus
}
#endif
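
For reference, callers of the uatomic API are unaffected by this switch;
usage keeps the same shape. Illustrative example only (the include line
below is hypothetical, not taken from the tree):

    #include <urcu/uatomic_arch.h>	/* hypothetical include, for illustration */

    static unsigned long count;

    void demo(void)
    {
    	unsigned long old;

    	/* Atomically add 1; returns the new value. */
    	(void) uatomic_add_return(&count, 1);

    	/* Atomically store 0; returns the previous value. */
    	old = uatomic_xchg(&count, 0);

    	/* Store old + 1 only if count still equals old. */
    	(void) uatomic_cmpxchg(&count, old, old + 1);
    }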
--
1.6.6