[ltt-dev] [PATCH 06/10] uatomic: add uatomic_and
Paolo Bonzini <pbonzini@redhat.com>
Wed Jun 8 04:59:14 EDT 2011
Add uatomic_and(addr, v), an atomic bitwise AND of v into *addr, as a
companion to the existing uatomic_or: x86 gets lock; and fast paths for
1-, 2-, 4- and (on 64-bit) 8-byte operands, the generic header uses
gcc's __sync_and_and_fetch builtins, and architectures providing only
uatomic_cmpxchg fall back to a cmpxchg retry loop.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
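Usage note: uatomic_and(addr, v) atomically performs *addr &= v on 1-, 2-,
4- and (on 64-bit) 8-byte operands, and returns nothing.  A minimal sketch
of a caller, matching what the new test asserts; the function name and the
<urcu/uatomic_arch.h> include are illustrative, not part of this patch:

	#include <assert.h>
	#include <urcu/uatomic_arch.h>

	static unsigned int flags = 121;	/* 0111 1001 */

	static void keep_masked_bits(void)
	{
		/* atomically clear every bit not set in the mask */
		uatomic_and(&flags, 129);		/* 1000 0001 */
		assert(uatomic_read(&flags) == 1);	/* 0000 0001 */
	}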
 compat_arch_x86.c       |   25 ++++++++++++
 tests/test_uatomic.c    |    2 +
 urcu/uatomic_arch_x86.h |   63 ++++++++++++++++++++++++++++++
 urcu/uatomic_generic.h  |  103 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 193 insertions(+), 0 deletions(-)
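On architectures that only provide uatomic_cmpxchg, the generic fallback
below loops: read the current value, compute old & val, and retry the
cmpxchg until no concurrent writer got in between the read and the swap.
The same pattern written standalone, as a sketch against gcc's
__sync_val_compare_and_swap builtin rather than the patch's internal
_uatomic_cmpxchg helper:

	/* atomically perform *addr &= mask via a cmpxchg retry loop */
	static void atomic_and_uint(unsigned int *addr, unsigned int mask)
	{
		unsigned int old, seen;

		seen = *(volatile unsigned int *)addr;
		do {
			old = seen;
			/* returns the value *addr held just before the swap */
			seen = __sync_val_compare_and_swap(addr, old,
							   old & mask);
		} while (seen != old);	/* lost a race, try again */
	}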
diff --git a/compat_arch_x86.c b/compat_arch_x86.c
index 33bf13d..692417e 100644
--- a/compat_arch_x86.c
+++ b/compat_arch_x86.c
@@ -226,6 +226,31 @@ void _compat_uatomic_or(void *addr, unsigned long v, int len)
 	mutex_lock_signal_restore(&compat_mutex, &mask);
 }
 
+void _compat_uatomic_and(void *addr, unsigned long v, int len)
+{
+	sigset_t mask;
+
+	mutex_lock_signal_save(&compat_mutex, &mask);
+	switch (len) {
+	case 1:
+		*(unsigned char *)addr &= (unsigned char)v;
+		break;
+	case 2:
+		*(unsigned short *)addr &= (unsigned short)v;
+		break;
+	case 4:
+		*(unsigned int *)addr &= (unsigned int)v;
+		break;
+	default:
+		/*
+		 * generate an illegal instruction. Cannot catch this with
+		 * linker tricks when optimizations are disabled.
+		 */
+		__asm__ __volatile__("ud2");
+	}
+	mutex_lock_signal_restore(&compat_mutex, &mask);
+}
+
 unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
 {
 	sigset_t mask;
diff --git a/tests/test_uatomic.c b/tests/test_uatomic.c
index 37f95a6..2c8c232 100644
--- a/tests/test_uatomic.c
+++ b/tests/test_uatomic.c
@@ -41,6 +41,8 @@ do {						\
 	v = uatomic_sub_return(ptr, 1);		\
 	assert(v == 121);			\
 	assert(uatomic_read(ptr) == 121);	\
+	uatomic_and(ptr, 129);			\
+	assert(uatomic_read(ptr) == 1);		\
 } while (0)
 
 int main(int argc, char **argv)
diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index c3b5333..c861208 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -231,6 +231,60 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 						  (unsigned long)(v),	      \
 						  sizeof(*(addr))))
 
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+	case 1:
+	{
+		__asm__ __volatile__(
+			"lock; andb %1, %0"
+			: "=m"(*__hp(addr))
+			: "iq" ((unsigned char)val)
+			: "memory");
+		return;
+	}
+	case 2:
+	{
+		__asm__ __volatile__(
+			"lock; andw %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned short)val)
+			: "memory");
+		return;
+	}
+	case 4:
+	{
+		__asm__ __volatile__(
+			"lock; andl %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned int)val)
+			: "memory");
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		__asm__ __volatile__(
+			"lock; andq %1, %0"
+			: "=m"(*__hp(addr))
+			: "er" ((unsigned long)val)
+			: "memory");
+		return;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__("ud2");
+	return;
+}
+
+#define _uatomic_and(addr, v)					   \
+	(__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
 /* uatomic_or */
 
 static inline __attribute__((always_inline))
@@ -482,6 +536,13 @@ extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
 					(unsigned long)(_new),	       \
 					sizeof(*(addr))))
 
+extern void _compat_uatomic_and(void *addr,
+		unsigned long v, int len);
+#define compat_uatomic_and(addr, v)				       \
+	(_compat_uatomic_and((addr),				       \
+			(unsigned long)(v),			       \
+			sizeof(*(addr))))
+
 extern unsigned long _compat_uatomic_or(void *addr,
 		unsigned long _new, int len);
 #define compat_uatomic_or(addr, v)				       \
@@ -515,6 +576,8 @@ extern unsigned long _compat_uatomic_add_return(void *addr,
 		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
 #define uatomic_xchg(addr, v)			\
 		UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v)			\
+		UATOMIC_COMPAT(and(addr, v))
 #define uatomic_or(addr, v)			\
 		UATOMIC_COMPAT(or(addr, v))
 #define uatomic_add_return(addr, v)		\
diff --git a/urcu/uatomic_generic.h b/urcu/uatomic_generic.h
index 556846f..cef58f3 100644
--- a/urcu/uatomic_generic.h
+++ b/urcu/uatomic_generic.h
@@ -87,6 +87,42 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 				sizeof(*(addr))))
 
+/* uatomic_and */
+
+#ifndef uatomic_and
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val,
+		  int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		__sync_and_and_fetch_1(addr, val);
+		return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		__sync_and_and_fetch_2(addr, val);
+		return;
+#endif
+	case 4:
+		__sync_and_and_fetch_4(addr, val);
+		return;
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		__sync_and_and_fetch_8(addr, val);
+		return;
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)			\
+	(_uatomic_and((addr),			\
+		(unsigned long)(v),		\
+		sizeof(*(addr))))
+#endif
+
 /* uatomic_or */
 
 #ifndef uatomic_or
@@ -219,6 +255,73 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 
 #else /* #ifndef uatomic_cmpxchg */
 
+#ifndef uatomic_and
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old, oldt;
+
+		oldt = uatomic_read((unsigned char *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
+		} while (oldt != old);
+		return;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old, oldt;
+
+		oldt = uatomic_read((unsigned short *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
+		} while (oldt != old);
+		return;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
+		} while (oldt != old);
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
+		} while (oldt != old);
+		return;
+	}
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)			\
+	(_uatomic_and((addr),			\
+		(unsigned long)(v),		\
+		sizeof(*(addr))))
+#endif /* #ifndef uatomic_and */
+
 #ifndef uatomic_or
 /* uatomic_or */
--
1.7.4.4
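P.S.: with a compile-time constant size, the switch in __uatomic_and folds
to a single lock; and{b,w,l,q} instruction and the trailing ud2 is
optimized away; the trap only remains for unsupported sizes.  A standalone
equivalent of the 4-byte case, as a sketch (it uses a "+m" read/write
operand instead of the "=m" on a __hp()-wrapped lvalue used above):

	static inline void atomic_and_u32(unsigned int *addr, unsigned int mask)
	{
		/* lock-prefixed read-modify-write: *addr &= mask, atomically */
		__asm__ __volatile__("lock; andl %1, %0"
				     : "+m" (*addr)
				     : "ir" (mask)
				     : "memory");
	}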