[ltt-dev] [PATCH 07/11] add uatomic_gcc.h, use it for default definitions
Paolo Bonzini
pbonzini at redhat.com
Sat Feb 13 12:16:27 EST 2010
On most architectures, we can derive the implementations of xchg and
add_return from cmpxchg:
- if cmpxchg is present, use it to implement xchg and add_return;
- if it is not present, implement all three using the __sync_* builtins.
The hunk in tests/test_uatomic.c is only needed for bisectability
and will be removed later.
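For illustration, here is a minimal sketch of the cmpxchg-loop idea used by
the new header (the name sketch_xchg_u32 exists only for this example; it
assumes GCC's __sync_val_compare_and_swap and a 4-byte operand, whereas the
header dispatches on sizeof and only falls back to the __sync_* builtins when
the architecture defines no cmpxchg of its own):

    /* Sketch only, not part of the patch: exchange *addr for val by
     * retrying cmpxchg until no other CPU modified *addr in between. */
    static inline unsigned int sketch_xchg_u32(unsigned int *addr,
                                               unsigned int val)
    {
            unsigned int old, oldt;

            oldt = *addr;
            do {
                    old = oldt;
                    oldt = __sync_val_compare_and_swap(addr, old, val);
            } while (oldt != old);

            return old;   /* previous value of *addr */
    }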
Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
---
Makefile.am | 2 +-
tests/test_uatomic.c | 2 +
urcu/uatomic_arch_ppc.h | 15 +---
urcu/uatomic_arch_s390.h | 15 +---
urcu/uatomic_arch_sparc64.h | 15 +---
urcu/uatomic_arch_x86.h | 26 ++----
urcu/uatomic_gcc.h | 221 +++++++++++++++++++++++++++++++++++++++++++
7 files changed, 239 insertions(+), 57 deletions(-)
create mode 100644 urcu/uatomic_gcc.h
diff --git a/Makefile.am b/Makefile.am
index 57e9299..d6ee789 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -6,7 +6,7 @@ SUBDIRS = . tests
include_HEADERS = urcu.h $(top_srcdir)/urcu-*.h
nobase_dist_include_HEADERS = urcu/compiler.h urcu/hlist.h urcu/list.h \
- urcu/rculist.h urcu/system.h urcu/urcu-futex.h
+ urcu/rculist.h urcu/system.h urcu/urcu-futex.h urcu/uatomic_gcc.h
nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic_arch.h urcu/config.h
EXTRA_DIST = $(top_srcdir)/urcu/arch_*.h $(top_srcdir)/urcu/uatomic_arch_*.h \
diff --git a/tests/test_uatomic.c b/tests/test_uatomic.c
index 68cb6df..c0f36fe 100644
--- a/tests/test_uatomic.c
+++ b/tests/test_uatomic.c
@@ -1,5 +1,7 @@
#include <stdio.h>
#include <assert.h>
+
+#define UATOMIC_NO_LINK_ERROR
#include <urcu/uatomic_arch.h>
#if (defined(__i386__) || defined(__x86_64__))
diff --git a/urcu/uatomic_arch_ppc.h b/urcu/uatomic_arch_ppc.h
index 7106b99..c611c7a 100644
--- a/urcu/uatomic_arch_ppc.h
+++ b/urcu/uatomic_arch_ppc.h
@@ -39,9 +39,6 @@ extern "C" {
#define ILLEGAL_INSTR ".long 0xd00d00"
-#define uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
-#define uatomic_read(addr) LOAD_SHARED(*(addr))
-
/*
* Using a isync as second barrier for exchange to provide acquire semantic.
* According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
@@ -217,18 +214,10 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
(unsigned long)(v), \
sizeof(*(addr))))
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v) (void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr) uatomic_add((addr), 1)
-#define uatomic_dec(addr) uatomic_add((addr), -1)
-
#ifdef __cplusplus
}
#endif
+#include <urcu/uatomic_gcc.h>
+
#endif /* _URCU_ARCH_UATOMIC_PPC_H */
diff --git a/urcu/uatomic_arch_s390.h b/urcu/uatomic_arch_s390.h
index 6247601..9d4b74e 100644
--- a/urcu/uatomic_arch_s390.h
+++ b/urcu/uatomic_arch_s390.h
@@ -70,9 +70,6 @@ struct __uatomic_dummy {
};
#define __hp(x) ((struct __uatomic_dummy *)(x))
-#define uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
-#define uatomic_read(addr) LOAD_SHARED(*(addr))
-
/* xchg */
static inline __attribute__((always_inline))
@@ -200,18 +197,10 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
(unsigned long)(v), \
sizeof(*(addr))))
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v) (void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr) uatomic_add((addr), 1)
-#define uatomic_dec(addr) uatomic_add((addr), -1)
-
#ifdef __cplusplus
}
#endif
+#include <urcu/uatomic_gcc.h>
+
#endif /* _URCU_UATOMIC_ARCH_S390_H */
diff --git a/urcu/uatomic_arch_sparc64.h b/urcu/uatomic_arch_sparc64.h
index e984986..d21d73c 100644
--- a/urcu/uatomic_arch_sparc64.h
+++ b/urcu/uatomic_arch_sparc64.h
@@ -31,9 +31,6 @@ extern "C" {
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif
-#define uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
-#define uatomic_read(addr) LOAD_SHARED(*(addr))
-
/* cmpxchg */
static inline __attribute__((always_inline))
@@ -161,18 +158,10 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
(unsigned long)(v), \
sizeof(*(addr))))
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v) (void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr) uatomic_add((addr), 1)
-#define uatomic_dec(addr) uatomic_add((addr), -1)
-
#ifdef __cplusplus
}
#endif
+#include <urcu/uatomic_gcc.h>
+
#endif /* _URCU_ARCH_UATOMIC_PPC_H */
diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index 34f5f87..0aadbd5 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -41,7 +41,11 @@ struct __uatomic_dummy {
#define __hp(x) ((struct __uatomic_dummy *)(x))
#define _uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
+
+#if 0
+/* Read is atomic even in compat mode */
#define _uatomic_read(addr) LOAD_SHARED(*(addr))
+#endif
/* cmpxchg */
@@ -168,7 +172,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
sizeof(*(addr))))
-/* uatomic_add_return, uatomic_sub_return */
+/* uatomic_add_return */
static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
@@ -233,9 +237,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
(unsigned long)(v), \
sizeof(*(addr))))
-#define _uatomic_sub_return(addr, v) _uatomic_add_return((addr), -(v))
-
-/* uatomic_add, uatomic_sub */
+/* uatomic_add */
static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
@@ -289,8 +291,6 @@ void __uatomic_add(void *addr, unsigned long val, int len)
#define _uatomic_add(addr, v) \
(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-#define _uatomic_sub(addr, v) _uatomic_add((addr), -(v))
-
/* uatomic_inc */
@@ -441,24 +441,17 @@ extern unsigned long _compat_uatomic_xchg(void *addr,
(unsigned long)(v), \
sizeof(*(addr))))
-#define compat_uatomic_sub_return(addr, v) \
- compat_uatomic_add_return((addr), -(v))
#define compat_uatomic_add(addr, v) \
((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_sub(addr, v) \
- ((void)compat_uatomic_sub_return((addr), (v)))
#define compat_uatomic_inc(addr) \
(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr) \
- (compat_uatomic_sub((addr), 1))
+ (compat_uatomic_add((addr), -1))
#else
#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif
-/* Read is atomic even in compat mode */
-#define uatomic_read(addr) _uatomic_read(addr)
-
#define uatomic_set(addr, v) \
UATOMIC_COMPAT(set(addr, v))
#define uatomic_cmpxchg(addr, old, _new) \
@@ -467,10 +460,7 @@ extern unsigned long _compat_uatomic_xchg(void *addr,
UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_add_return(addr, v) \
UATOMIC_COMPAT(add_return(addr, v))
-#define uatomic_sub_return(addr, v) \
- UATOMIC_COMPAT(sub_return(addr, v))
#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
-#define uatomic_sub(addr, v) UATOMIC_COMPAT(sub(addr, v))
#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
@@ -478,4 +468,6 @@ extern unsigned long _compat_uatomic_xchg(void *addr,
}
#endif
+#include <urcu/uatomic_gcc.h>
+
#endif /* _URCU_ARCH_UATOMIC_X86_H */
diff --git a/urcu/uatomic_gcc.h b/urcu/uatomic_gcc.h
new file mode 100644
index 0000000..28f6d71
--- /dev/null
+++ b/urcu/uatomic_gcc.h
@@ -0,0 +1,223 @@
+#ifndef _URCU_UATOMIC_GCC_H
+#define _URCU_UATOMIC_GCC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paolo Bonzini
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+
+#ifndef uatomic_set
+#define uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
+#endif
+
+#ifndef uatomic_read
+#define uatomic_read(addr) LOAD_SHARED(*(addr))
+#endif
+
+#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
+static inline __attribute__((always_inline))
+void _uatomic_link_error()
+{
+#ifdef ILLEGAL_INSTR
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__(ILLEGAL_INSTR);
+#else
+ __builtin_trap ();
+#endif
+}
+#else
+extern void _uatomic_link_error ();
+#endif
+
+/* cmpxchg */
+
+#ifndef uatomic_cmpxchg
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 4:
+ return __sync_val_compare_and_swap_4(addr, old, _new);
+#if (BITS_PER_LONG == 64)
+ case 8:
+ return __sync_val_compare_and_swap_8(addr, old, _new);
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+
+/* uatomic_add_return */
+
+#ifndef uatomic_add_return
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+ case 4:
+ return __sync_add_and_fetch_4(addr, val);
+#if (BITS_PER_LONG == 64)
+ case 8:
+ return __sync_add_and_fetch_8(addr, val);
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+
+#define uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif
+
+#else
+
+#ifndef uatomic_add_return
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int old, oldt;
+
+ oldt = uatomic_read((unsigned int *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
+ } while (oldt != old);
+
+ return old + val;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old, oldt;
+
+ oldt = uatomic_read((unsigned long *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
+ } while (oldt != old);
+
+ return old + val;
+ }
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+#define uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif
+
+#endif
+
+#ifndef uatomic_xchg
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int old, oldt;
+
+ oldt = uatomic_read((unsigned int *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, val, 4);
+ } while (oldt != old);
+
+ return old;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old, oldt;
+
+ oldt = uatomic_read((unsigned long *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, val, 8);
+ } while (oldt != old);
+
+ return old;
+ }
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#ifndef uatomic_add
+#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
+#endif
+
+#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
+#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
+
+#ifndef uatomic_inc
+#define uatomic_inc(addr) uatomic_add((addr), 1)
+#endif
+
+#ifndef uatomic_dec
+#define uatomic_dec(addr) uatomic_add((addr), -1)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_UATOMIC_GCC_H */
--
1.6.6