[lttng-dev] [RFC PATCH urcu] Use urcu/tls-compat.h
Mathieu Desnoyers
mathieu.desnoyers at efficios.com
Wed May 16 09:58:02 EDT 2012
Replace direct use of the __thread storage-class keyword with the urcu/tls-compat.h wrappers, providing thread-local storage compatibility for OpenBSD, NetBSD and Darwin.
Suggested-by: Marek Vavruša <marek.vavrusa at nic.cz>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers at efficios.com>
---
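As a quick reference for reviewers, here is a minimal sketch of the conversion pattern this patch applies throughout the tree. The identifiers "counter" and "bump" are made up for illustration; only the include and the DECLARE_URCU_TLS/DEFINE_URCU_TLS/URCU_TLS macro names come from urcu/tls-compat.h as used below.

#include <urcu/tls-compat.h>

/* Declaration in a header; was: extern unsigned long __thread counter; */
extern DECLARE_URCU_TLS(unsigned long, counter);

/* Definition in a single .c file; was: unsigned long __thread counter; */
DEFINE_URCU_TLS(unsigned long, counter);

/* Every access is wrapped; was: counter++; */
static void bump(void)
{
	URCU_TLS(counter)++;
}

Wrapping each access in URCU_TLS() is what lets the compat header substitute a non-__thread implementation (presumably pthread-key based) on platforms lacking compiler TLS, without touching the call sites again.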
diff --git a/tests/test_mutex.c b/tests/test_mutex.c
index 3f84bbf..ba184a4 100644
--- a/tests/test_mutex.c
+++ b/tests/test_mutex.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -155,8 +156,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
@@ -208,12 +209,12 @@ void *thr_reader(void *data)
if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_mutex_unlock(&lock);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
- tot_nr_reads[tidx] = nr_reads;
+ tot_nr_reads[tidx] = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -241,7 +242,7 @@ void *thr_writer(void *data)
if (caa_unlikely(wduration))
loop_sleep(wduration);
pthread_mutex_unlock(&lock);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -250,7 +251,7 @@ void *thr_writer(void *data)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_perthreadlock.c b/tests/test_perthreadlock.c
index fa9c89a..6ff0412 100644
--- a/tests/test_perthreadlock.c
+++ b/tests/test_perthreadlock.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -159,8 +160,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
@@ -212,12 +213,12 @@ void *thr_reader(void *data)
if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_mutex_unlock(&per_thread_lock[tidx].lock);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
- tot_nr_reads[tidx] = nr_reads;
+ tot_nr_reads[tidx] = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -250,7 +251,7 @@ void *thr_writer(void *data)
for (tidx = (long)nr_readers - 1; tidx >= 0; tidx--) {
pthread_mutex_unlock(&per_thread_lock[tidx].lock);
}
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -259,7 +260,7 @@ void *thr_writer(void *data)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_rwlock.c b/tests/test_rwlock.c
index 34d8c07..ccbc9d1 100644
--- a/tests/test_rwlock.c
+++ b/tests/test_rwlock.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -156,8 +157,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
@@ -204,12 +205,12 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_rwlock_unlock(&lock);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -237,7 +238,7 @@ void *thr_writer(void *_count)
if (caa_unlikely(wduration))
loop_sleep(wduration);
pthread_rwlock_unlock(&lock);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -246,7 +247,7 @@ void *thr_writer(void *_count)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu.c b/tests/test_urcu.c
index 870f133..1b1b94b 100644
--- a/tests/test_urcu.c
+++ b/tests/test_urcu.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -154,8 +155,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
@@ -242,7 +243,7 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
@@ -253,7 +254,7 @@ void *thr_reader(void *_count)
rcu_register_thread();
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -287,7 +288,7 @@ void *thr_writer(void *_count)
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -296,7 +297,7 @@ void *thr_writer(void *_count)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu_assign.c b/tests/test_urcu_assign.c
index 42d70c2..31e22e5 100644
--- a/tests/test_urcu_assign.c
+++ b/tests/test_urcu_assign.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -154,8 +155,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
@@ -242,14 +243,14 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -284,7 +285,7 @@ void *thr_writer(void *_count)
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -293,7 +294,7 @@ void *thr_writer(void *_count)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu_bp.c b/tests/test_urcu_bp.c
index 857913f..41caeb0 100644
--- a/tests/test_urcu_bp.c
+++ b/tests/test_urcu_bp.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -154,8 +155,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
@@ -242,14 +243,14 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -283,7 +284,7 @@ void *thr_writer(void *_count)
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -292,7 +293,7 @@ void *thr_writer(void *_count)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu_defer.c b/tests/test_urcu_defer.c
index 1575e9c..cd9780b 100644
--- a/tests/test_urcu_defer.c
+++ b/tests/test_urcu_defer.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -155,8 +156,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
@@ -213,14 +214,14 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -271,7 +272,7 @@ void *thr_writer(void *data)
defer_rcu(test_cb2, (void *)-2L);
defer_rcu(test_cb2, (void *)-4L);
defer_rcu(test_cb2, (void *)-2L);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -282,7 +283,7 @@ void *thr_writer(void *data)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu_gc.c b/tests/test_urcu_gc.c
index 21c5d56..3a42506 100644
--- a/tests/test_urcu_gc.c
+++ b/tests/test_urcu_gc.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -163,8 +164,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
@@ -221,14 +222,14 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -294,7 +295,7 @@ void *thr_writer(void *data)
if (caa_unlikely(wduration))
loop_sleep(wduration);
rcu_gc_reclaim(wtidx, old);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -303,7 +304,7 @@ void *thr_writer(void *data)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu_hash.c b/tests/test_urcu_hash.c
index 2223413..5710de0 100644
--- a/tests/test_urcu_hash.c
+++ b/tests/test_urcu_hash.c
@@ -82,13 +82,13 @@ int (*get_populate_hash_cb(void))(void)
return test_hash_cb[test_choice].populate_hash;
}
-unsigned int __thread rand_lookup;
-unsigned long __thread nr_add;
-unsigned long __thread nr_addexist;
-unsigned long __thread nr_del;
-unsigned long __thread nr_delnoent;
-unsigned long __thread lookup_fail;
-unsigned long __thread lookup_ok;
+DEFINE_URCU_TLS(unsigned int, rand_lookup);
+DEFINE_URCU_TLS(unsigned long, nr_add);
+DEFINE_URCU_TLS(unsigned long, nr_addexist);
+DEFINE_URCU_TLS(unsigned long, nr_del);
+DEFINE_URCU_TLS(unsigned long, nr_delnoent);
+DEFINE_URCU_TLS(unsigned long, lookup_fail);
+DEFINE_URCU_TLS(unsigned long, lookup_ok);
struct cds_lfht *test_ht;
@@ -126,8 +126,8 @@ int use_affinity = 0;
pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
-unsigned long long __thread nr_writes;
-unsigned long long __thread nr_reads;
+DEFINE_URCU_TLS(unsigned long long, nr_writes);
+DEFINE_URCU_TLS(unsigned long long, nr_reads);
unsigned int nr_readers;
unsigned int nr_writers;
diff --git a/tests/test_urcu_hash.h b/tests/test_urcu_hash.h
index 083e71c..fe13c36 100644
--- a/tests/test_urcu_hash.h
+++ b/tests/test_urcu_hash.h
@@ -37,6 +37,8 @@
#include <errno.h>
#include <signal.h>
+#include <urcu/tls-compat.h>
+
#ifdef __linux__
#include <syscall.h>
#endif
@@ -102,13 +104,13 @@ struct wr_count {
unsigned long remove;
};
-extern unsigned int __thread rand_lookup;
-extern unsigned long __thread nr_add;
-extern unsigned long __thread nr_addexist;
-extern unsigned long __thread nr_del;
-extern unsigned long __thread nr_delnoent;
-extern unsigned long __thread lookup_fail;
-extern unsigned long __thread lookup_ok;
+extern DECLARE_URCU_TLS(unsigned int, rand_lookup);
+extern DECLARE_URCU_TLS(unsigned long, nr_add);
+extern DECLARE_URCU_TLS(unsigned long, nr_addexist);
+extern DECLARE_URCU_TLS(unsigned long, nr_del);
+extern DECLARE_URCU_TLS(unsigned long, nr_delnoent);
+extern DECLARE_URCU_TLS(unsigned long, lookup_fail);
+extern DECLARE_URCU_TLS(unsigned long, lookup_ok);
extern struct cds_lfht *test_ht;
@@ -214,8 +216,8 @@ static inline int test_duration_read(void)
return !test_stop;
}
-extern unsigned long long __thread nr_writes;
-extern unsigned long long __thread nr_reads;
+extern DECLARE_URCU_TLS(unsigned long long, nr_writes);
+extern DECLARE_URCU_TLS(unsigned long long, nr_reads);
extern unsigned int nr_readers;
extern unsigned int nr_writers;
diff --git a/tests/test_urcu_hash_rw.c b/tests/test_urcu_hash_rw.c
index 7310344..a789236 100644
--- a/tests/test_urcu_hash_rw.c
+++ b/tests/test_urcu_hash_rw.c
@@ -80,7 +80,7 @@ void *test_hash_rw_thr_reader(void *_count)
for (;;) {
rcu_read_lock();
cds_lfht_test_lookup(test_ht,
- (void *)(((unsigned long) rand_r(&rand_lookup) % lookup_pool_size) + lookup_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % lookup_pool_size) + lookup_pool_offset),
sizeof(void *), &iter);
node = cds_lfht_iter_get_test_node(&iter);
if (node == NULL) {
@@ -88,28 +88,29 @@ void *test_hash_rw_thr_reader(void *_count)
printf("[ERROR] Lookup cannot find initial node.\n");
exit(-1);
}
- lookup_fail++;
+ URCU_TLS(lookup_fail)++;
} else {
- lookup_ok++;
+ URCU_TLS(lookup_ok)++;
}
debug_yield_read();
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
printf_verbose("readid : %lx, lookupfail %lu, lookupok %lu\n",
- pthread_self(), lookup_fail, lookup_ok);
+ pthread_self(), URCU_TLS(lookup_fail),
+ URCU_TLS(lookup_ok));
return ((void*)1);
}
@@ -136,10 +137,10 @@ void *test_hash_rw_thr_writer(void *_count)
for (;;) {
if ((addremove == AR_ADD || add_only)
- || (addremove == AR_RANDOM && rand_r(&rand_lookup) & 1)) {
+ || (addremove == AR_RANDOM && rand_r(&URCU_TLS(rand_lookup)) & 1)) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *));
rcu_read_lock();
if (add_unique) {
@@ -159,36 +160,36 @@ void *test_hash_rw_thr_writer(void *_count)
rcu_read_unlock();
if (add_unique && ret_node != &node->node) {
free(node);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
if (add_replace && ret_node) {
call_rcu(&to_test_node(ret_node)->head,
free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
}
} else {
/* May delete */
rcu_read_lock();
cds_lfht_test_lookup(test_ht,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *), &iter);
ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
rcu_read_unlock();
if (ret == 0) {
node = cds_lfht_iter_get_test_node(&iter);
call_rcu(&node->head, free_node_cb);
- nr_del++;
+ URCU_TLS(nr_del)++;
} else
- nr_delnoent++;
+ URCU_TLS(nr_delnoent)++;
}
#if 0
- //if (nr_writes % 100000 == 0) {
- if (nr_writes % 1000 == 0) {
+ //if (URCU_TLS(nr_writes) % 100000 == 0) {
+ if (URCU_TLS(nr_writes) % 1000 == 0) {
rcu_read_lock();
- if (rand_r(&rand_lookup) & 1) {
+ if (rand_r(&URCU_TLS(rand_lookup)) & 1) {
ht_resize(test_ht, 1);
} else {
ht_resize(test_ht, -1);
@@ -196,12 +197,12 @@ void *test_hash_rw_thr_writer(void *_count)
rcu_read_unlock();
}
#endif //0
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
- if (caa_unlikely((nr_writes & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_writes) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
@@ -210,12 +211,13 @@ void *test_hash_rw_thr_writer(void *_count)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
printf_verbose("info id %lx: nr_add %lu, nr_addexist %lu, nr_del %lu, "
- "nr_delnoent %lu\n", pthread_self(), nr_add,
- nr_addexist, nr_del, nr_delnoent);
- count->update_ops = nr_writes;
- count->add = nr_add;
- count->add_exist = nr_addexist;
- count->remove = nr_del;
+ "nr_delnoent %lu\n", pthread_self(), URCU_TLS(nr_add),
+ URCU_TLS(nr_addexist), URCU_TLS(nr_del),
+ URCU_TLS(nr_delnoent));
+ count->update_ops = URCU_TLS(nr_writes);
+ count->add = URCU_TLS(nr_add);
+ count->add_exist = URCU_TLS(nr_addexist);
+ count->remove = URCU_TLS(nr_del);
return ((void*)2);
}
@@ -235,10 +237,10 @@ int test_hash_rw_populate_hash(void)
"larger random pool (-p option). This may take a while...\n", init_populate, init_pool_size);
}
- while (nr_add < init_populate) {
+ while (URCU_TLS(nr_add) < init_populate) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % init_pool_size) + init_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % init_pool_size) + init_pool_offset),
sizeof(void *));
rcu_read_lock();
if (add_unique) {
@@ -258,16 +260,16 @@ int test_hash_rw_populate_hash(void)
rcu_read_unlock();
if (add_unique && ret_node != &node->node) {
free(node);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
if (add_replace && ret_node) {
call_rcu(&to_test_node(ret_node)->head, free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
}
- nr_writes++;
+ URCU_TLS(nr_writes)++;
}
return 0;
}
diff --git a/tests/test_urcu_hash_unique.c b/tests/test_urcu_hash_unique.c
index c934feb..610f479 100644
--- a/tests/test_urcu_hash_unique.c
+++ b/tests/test_urcu_hash_unique.c
@@ -98,20 +98,21 @@ void *test_hash_unique_thr_reader(void *_count)
debug_yield_read();
if (caa_unlikely(rduration))
loop_sleep(rduration);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
printf_verbose("readid : %lx, lookupfail %lu, lookupok %lu\n",
- pthread_self(), lookup_fail, lookup_ok);
+ pthread_self(), URCU_TLS(lookup_fail),
+ URCU_TLS(lookup_ok));
return ((void*)1);
}
@@ -142,13 +143,13 @@ void *test_hash_unique_thr_writer(void *_count)
* add unique/add replace with new node key from range.
*/
if (1 || (addremove == AR_ADD || add_only)
- || (addremove == AR_RANDOM && rand_r(&rand_lookup) & 1)) {
+ || (addremove == AR_RANDOM && rand_r(&URCU_TLS(rand_lookup)) & 1)) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *));
rcu_read_lock();
- loc_add_unique = rand_r(&rand_lookup) & 1;
+ loc_add_unique = rand_r(&URCU_TLS(rand_lookup)) & 1;
if (loc_add_unique) {
ret_node = cds_lfht_add_unique(test_ht,
test_hash(node->key, node->key_len, TEST_HASH_SEED),
@@ -168,39 +169,39 @@ void *test_hash_unique_thr_writer(void *_count)
if (loc_add_unique) {
if (ret_node != &node->node) {
free(node);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
} else {
if (ret_node) {
call_rcu(&to_test_node(ret_node)->head,
free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
}
} else {
/* May delete */
rcu_read_lock();
cds_lfht_test_lookup(test_ht,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *), &iter);
ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
rcu_read_unlock();
if (ret == 0) {
node = cds_lfht_iter_get_test_node(&iter);
call_rcu(&node->head, free_node_cb);
- nr_del++;
+ URCU_TLS(nr_del)++;
} else
- nr_delnoent++;
+ URCU_TLS(nr_delnoent)++;
}
#if 0
- //if (nr_writes % 100000 == 0) {
- if (nr_writes % 1000 == 0) {
+ //if (URCU_TLS(nr_writes) % 100000 == 0) {
+ if (URCU_TLS(nr_writes) % 1000 == 0) {
rcu_read_lock();
- if (rand_r(&rand_lookup) & 1) {
+ if (rand_r(&URCU_TLS(rand_lookup)) & 1) {
ht_resize(test_ht, 1);
} else {
ht_resize(test_ht, -1);
@@ -208,12 +209,12 @@ void *test_hash_unique_thr_writer(void *_count)
rcu_read_unlock();
}
#endif //0
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
- if (caa_unlikely((nr_writes & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_writes) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
@@ -222,12 +223,13 @@ void *test_hash_unique_thr_writer(void *_count)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
printf_verbose("info id %lx: nr_add %lu, nr_addexist %lu, nr_del %lu, "
- "nr_delnoent %lu\n", pthread_self(), nr_add,
- nr_addexist, nr_del, nr_delnoent);
- count->update_ops = nr_writes;
- count->add = nr_add;
- count->add_exist = nr_addexist;
- count->remove = nr_del;
+ "nr_delnoent %lu\n", pthread_self(), URCU_TLS(nr_add),
+ URCU_TLS(nr_addexist), URCU_TLS(nr_del),
+ URCU_TLS(nr_delnoent));
+ count->update_ops = URCU_TLS(nr_writes);
+ count->add = URCU_TLS(nr_add);
+ count->add_exist = URCU_TLS(nr_addexist);
+ count->remove = URCU_TLS(nr_del);
return ((void*)2);
}
@@ -247,10 +249,10 @@ int test_hash_unique_populate_hash(void)
"larger random pool (-p option). This may take a while...\n", init_populate, init_pool_size);
}
- while (nr_add < init_populate) {
+ while (URCU_TLS(nr_add) < init_populate) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % init_pool_size) + init_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % init_pool_size) + init_pool_offset),
sizeof(void *));
rcu_read_lock();
ret_node = cds_lfht_add_replace(test_ht,
@@ -259,11 +261,11 @@ int test_hash_unique_populate_hash(void)
rcu_read_unlock();
if (ret_node) {
call_rcu(&to_test_node(ret_node)->head, free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
- nr_writes++;
+ URCU_TLS(nr_writes)++;
}
return 0;
}
diff --git a/tests/test_urcu_lfq.c b/tests/test_urcu_lfq.c
index 01a2781..66ddd41 100644
--- a/tests/test_urcu_lfq.c
+++ b/tests/test_urcu_lfq.c
@@ -38,6 +38,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -148,11 +149,11 @@ static int test_duration_enqueue(void)
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
@@ -188,24 +189,24 @@ void *thr_enqueuer(void *_count)
rcu_read_lock();
cds_lfq_enqueue_rcu(&q, &node->list);
rcu_read_unlock();
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
rcu_unregister_thread();
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
@@ -251,10 +252,10 @@ void *thr_dequeuer(void *_count)
if (node) {
call_rcu(&node->rcu, free_node_cb);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
@@ -265,10 +266,10 @@ void *thr_dequeuer(void *_count)
rcu_defer_unregister_thread();
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
diff --git a/tests/test_urcu_lfs.c b/tests/test_urcu_lfs.c
index b48a35f..88bf65d 100644
--- a/tests/test_urcu_lfs.c
+++ b/tests/test_urcu_lfs.c
@@ -38,6 +38,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -148,11 +149,11 @@ static int test_duration_enqueue(void)
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
@@ -187,24 +188,24 @@ void *thr_enqueuer(void *_count)
cds_lfs_node_init_rcu(&node->list);
/* No rcu read-side is needed for push */
cds_lfs_push_rcu(&s, &node->list);
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
rcu_unregister_thread();
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
@@ -249,9 +250,9 @@ void *thr_dequeuer(void *_count)
rcu_read_unlock();
if (node) {
call_rcu(&node->rcu, free_node_cb);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
@@ -263,10 +264,10 @@ void *thr_dequeuer(void *_count)
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
diff --git a/tests/test_urcu_qsbr.c b/tests/test_urcu_qsbr.c
index b986fd8..421fb07 100644
--- a/tests/test_urcu_qsbr.c
+++ b/tests/test_urcu_qsbr.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -153,8 +154,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
@@ -241,9 +242,9 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
/* QS each 1024 reads */
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
if (caa_unlikely(!test_duration_read()))
break;
@@ -255,7 +256,7 @@ void *thr_reader(void *_count)
rcu_register_thread();
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -290,7 +291,7 @@ void *thr_writer(void *_count)
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -299,7 +300,7 @@ void *thr_writer(void *_count)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu_qsbr_gc.c b/tests/test_urcu_qsbr_gc.c
index 9deb0aa..561a475 100644
--- a/tests/test_urcu_qsbr_gc.c
+++ b/tests/test_urcu_qsbr_gc.c
@@ -35,6 +35,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -159,8 +160,8 @@ static int test_duration_read(void)
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
@@ -217,9 +218,9 @@ void *thr_reader(void *_count)
if (caa_unlikely(rduration))
loop_sleep(rduration);
_rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
/* QS each 1024 reads */
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
_rcu_quiescent_state();
if (caa_unlikely(!test_duration_read()))
break;
@@ -227,7 +228,7 @@ void *thr_reader(void *_count)
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
@@ -293,7 +294,7 @@ void *thr_writer(void *data)
if (caa_unlikely(wduration))
loop_sleep(wduration);
rcu_gc_reclaim(wtidx, old);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
@@ -302,7 +303,7 @@ void *thr_writer(void *data)
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
diff --git a/tests/test_urcu_wfq.c b/tests/test_urcu_wfq.c
index 83ec635..75a9275 100644
--- a/tests/test_urcu_wfq.c
+++ b/tests/test_urcu_wfq.c
@@ -38,6 +38,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -147,11 +148,11 @@ static int test_duration_enqueue(void)
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
@@ -178,22 +179,22 @@ void *thr_enqueuer(void *_count)
goto fail;
cds_wfq_node_init(node);
cds_wfq_enqueue(&q, node);
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
@@ -217,10 +218,10 @@ void *thr_dequeuer(void *_count)
if (node) {
free(node);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
@@ -229,10 +230,10 @@ void *thr_dequeuer(void *_count)
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
diff --git a/tests/test_urcu_wfs.c b/tests/test_urcu_wfs.c
index 7746a1d..be09944 100644
--- a/tests/test_urcu_wfs.c
+++ b/tests/test_urcu_wfs.c
@@ -38,6 +38,7 @@
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
@@ -147,11 +148,11 @@ static int test_duration_enqueue(void)
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
@@ -178,22 +179,22 @@ void *thr_enqueuer(void *_count)
goto fail;
cds_wfs_node_init(node);
cds_wfs_push(&s, node);
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
@@ -217,10 +218,10 @@ void *thr_dequeuer(void *_count)
if (node) {
free(node);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
@@ -229,10 +230,10 @@ void *thr_dequeuer(void *_count)
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
diff --git a/urcu-bp.c b/urcu-bp.c
index 08b4b55..bb657d7 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -40,6 +40,7 @@
#include "urcu/map/urcu-bp.h"
#include "urcu/static/urcu-bp.h"
#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
@@ -94,7 +95,7 @@ static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+DEFINE_URCU_TLS(unsigned int, rand_yield);
#endif
/*
@@ -109,7 +110,7 @@ long rcu_gp_ctr = RCU_GP_COUNT;
* Pointer to registry elements. Written to only by each individual reader. Read
* by both the reader and the writers.
*/
-struct rcu_reader __thread *rcu_reader;
+DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);
static CDS_LIST_HEAD(registry);
@@ -322,7 +323,7 @@ static void add_thread(void)
rcu_reader_reg->tid = pthread_self();
assert(rcu_reader_reg->ctr == 0);
cds_list_add(&rcu_reader_reg->node, &registry);
- rcu_reader = rcu_reader_reg;
+ URCU_TLS(rcu_reader) = rcu_reader_reg;
}
/* Called with signals off and mutex locked */
@@ -363,7 +364,7 @@ void rcu_bp_register(void)
/*
* Check if a signal concurrently registered our thread since
* the check in rcu_read_lock(). */
- if (rcu_reader)
+ if (URCU_TLS(rcu_reader))
goto end;
mutex_lock(&rcu_gp_lock);
diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 36e3cf4..8ed2ab3 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -40,6 +40,7 @@
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
+#include "urcu/tls-compat.h"
/* Data structure that identifies a call_rcu thread. */
@@ -62,7 +63,7 @@ CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */
-static __thread struct call_rcu_data *thread_call_rcu_data;
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/* Guard call_rcu thread creation. */
@@ -232,7 +233,7 @@ static void *call_rcu_thread(void *arg)
*/
rcu_register_thread();
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
/* Decrement futex before reading call_rcu list */
@@ -470,8 +471,8 @@ struct call_rcu_data *get_call_rcu_data(void)
{
struct call_rcu_data *crd;
- if (thread_call_rcu_data != NULL)
- return thread_call_rcu_data;
+ if (URCU_TLS(thread_call_rcu_data) != NULL)
+ return URCU_TLS(thread_call_rcu_data);
if (maxcpus > 0) {
crd = get_cpu_call_rcu_data(sched_getcpu());
@@ -488,7 +489,7 @@ struct call_rcu_data *get_call_rcu_data(void)
struct call_rcu_data *get_thread_call_rcu_data(void)
{
- return thread_call_rcu_data;
+ return URCU_TLS(thread_call_rcu_data);
}
/*
@@ -504,7 +505,7 @@ struct call_rcu_data *get_thread_call_rcu_data(void)
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
}
/*
@@ -746,7 +747,7 @@ void call_rcu_after_fork_child(void)
maxcpus_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
- thread_call_rcu_data = NULL;
+ URCU_TLS(thread_call_rcu_data) = NULL;
/* Dispose of all of the rest of the call_rcu_data structures. */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
diff --git a/urcu-defer-impl.h b/urcu-defer-impl.h
index 4d1ca5e..f65e410 100644
--- a/urcu-defer-impl.h
+++ b/urcu-defer-impl.h
@@ -48,6 +48,7 @@
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
+#include <urcu/tls-compat.h>
/*
* Number of entries in the per-thread defer queue. Must be power of 2.
@@ -130,7 +131,7 @@ static int32_t defer_thread_stop;
* Written to only by each individual deferer. Read by both the deferer and
* the reclamation tread.
*/
-static struct defer_queue __thread defer_queue;
+static DEFINE_URCU_TLS(struct defer_queue, defer_queue);
static CDS_LIST_HEAD(registry_defer);
static pthread_t tid_defer;
@@ -245,12 +246,12 @@ static void _rcu_defer_barrier_thread(void)
{
unsigned long head, num_items;
- head = defer_queue.head;
- num_items = head - defer_queue.tail;
+ head = URCU_TLS(defer_queue).head;
+ num_items = head - URCU_TLS(defer_queue).tail;
if (caa_unlikely(!num_items))
return;
synchronize_rcu();
- rcu_defer_barrier_queue(&defer_queue, head);
+ rcu_defer_barrier_queue(&URCU_TLS(defer_queue), head);
}
void rcu_defer_barrier_thread(void)
@@ -311,8 +312,8 @@ void _defer_rcu(void (*fct)(void *p), void *p)
* Head is only modified by ourself. Tail can be modified by reclamation
* thread.
*/
- head = defer_queue.head;
- tail = CMM_LOAD_SHARED(defer_queue.tail);
+ head = URCU_TLS(defer_queue).head;
+ tail = CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail);
/*
* If queue is full, or reached threshold. Empty queue ourself.
@@ -321,7 +322,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
- assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
+ assert(head - CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail) == 0);
}
/*
@@ -340,25 +341,25 @@ void _defer_rcu(void (*fct)(void *p), void *p)
* Decode: see the comments before 'struct defer_queue'
* or the code in rcu_defer_barrier_queue().
*/
- if (caa_unlikely(defer_queue.last_fct_in != fct
+ if (caa_unlikely(URCU_TLS(defer_queue).last_fct_in != fct
|| DQ_IS_FCT_BIT(p)
|| p == DQ_FCT_MARK)) {
- defer_queue.last_fct_in = fct;
+ URCU_TLS(defer_queue).last_fct_in = fct;
if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
DQ_FCT_MARK);
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
fct);
} else {
DQ_SET_FCT_BIT(fct);
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
fct);
}
}
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK], p);
cmm_smp_wmb(); /* Publish new pointer before head */
/* Write q[] before head. */
- CMM_STORE_SHARED(defer_queue.head, head);
+ CMM_STORE_SHARED(URCU_TLS(defer_queue).head, head);
cmm_smp_mb(); /* Write queue head before read futex */
/*
* Wake-up any waiting defer thread.
@@ -422,16 +423,16 @@ int rcu_defer_register_thread(void)
{
int was_empty;
- assert(defer_queue.last_head == 0);
- assert(defer_queue.q == NULL);
- defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
- if (!defer_queue.q)
+ assert(URCU_TLS(defer_queue).last_head == 0);
+ assert(URCU_TLS(defer_queue).q == NULL);
+ URCU_TLS(defer_queue).q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
+ if (!URCU_TLS(defer_queue).q)
return -ENOMEM;
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
was_empty = cds_list_empty(&registry_defer);
- cds_list_add(&defer_queue.list, &registry_defer);
+ cds_list_add(&URCU_TLS(defer_queue).list, &registry_defer);
mutex_unlock(&rcu_defer_mutex);
if (was_empty)
@@ -446,10 +447,10 @@ void rcu_defer_unregister_thread(void)
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
- cds_list_del(&defer_queue.list);
+ cds_list_del(&URCU_TLS(defer_queue).list);
_rcu_defer_barrier_thread();
- free(defer_queue.q);
- defer_queue.q = NULL;
+ free(URCU_TLS(defer_queue).q);
+ URCU_TLS(defer_queue).q = NULL;
is_empty = cds_list_empty(&registry_defer);
mutex_unlock(&rcu_defer_mutex);
diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index 745676e..b20d564 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -40,6 +40,7 @@
#define BUILD_QSBR_LIB
#include "urcu/static/urcu-qsbr.h"
#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
@@ -66,11 +67,11 @@ unsigned long rcu_gp_ctr = RCU_GP_ONLINE;
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-struct rcu_reader __thread rcu_reader;
+DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+DEFINE_URCU_TLS(unsigned int, rand_yield);
#endif
static CDS_LIST_HEAD(registry);
@@ -139,7 +140,7 @@ static void update_counter_and_wait(void)
* quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data
* (no progress). Enforce compiler-order of store to rcu_gp_ctr
- * before load rcu_reader ctr.
+ * before load URCU_TLS(rcu_reader).ctr.
*/
cmm_barrier();
@@ -206,7 +207,7 @@ void synchronize_rcu(void)
{
unsigned long was_online;
- was_online = rcu_reader.ctr;
+ was_online = URCU_TLS(rcu_reader).ctr;
/* All threads should read qparity before accessing data structure
* where new ptr points to. In the "then" case, rcu_thread_offline
@@ -236,7 +237,7 @@ void synchronize_rcu(void)
* committing next rcu_gp_ctr update to memory. Failure to
* do so could result in the writer waiting forever while new
* readers are always accessing data (no progress). Enforce
- * compiler-order of load rcu_reader ctr before store to
+ * compiler-order of load URCU_TLS(rcu_reader).ctr before store to
* rcu_gp_ctr.
*/
cmm_barrier();
@@ -269,7 +270,7 @@ void synchronize_rcu(void)
{
unsigned long was_online;
- was_online = rcu_reader.ctr;
+ was_online = URCU_TLS(rcu_reader).ctr;
/*
* Mark the writer thread offline to make sure we don't wait for
@@ -326,11 +327,11 @@ void rcu_thread_online(void)
void rcu_register_thread(void)
{
- rcu_reader.tid = pthread_self();
- assert(rcu_reader.ctr == 0);
+ URCU_TLS(rcu_reader).tid = pthread_self();
+ assert(URCU_TLS(rcu_reader).ctr == 0);
mutex_lock(&rcu_gp_lock);
- cds_list_add(&rcu_reader.node, &registry);
+ cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
mutex_unlock(&rcu_gp_lock);
_rcu_thread_online();
}
@@ -343,7 +344,7 @@ void rcu_unregister_thread(void)
*/
_rcu_thread_offline();
mutex_lock(&rcu_gp_lock);
- cds_list_del(&rcu_reader.node);
+ cds_list_del(&URCU_TLS(rcu_reader).node);
mutex_unlock(&rcu_gp_lock);
}
diff --git a/urcu.c b/urcu.c
index 3948629..5fb4db8 100644
--- a/urcu.c
+++ b/urcu.c
@@ -40,6 +40,7 @@
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
@@ -94,11 +95,11 @@ unsigned long rcu_gp_ctr = RCU_GP_COUNT;
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-struct rcu_reader __thread rcu_reader;
+DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+DEFINE_URCU_TLS(unsigned int, rand_yield);
#endif
static CDS_LIST_HEAD(registry);
@@ -120,9 +121,9 @@ static void mutex_lock(pthread_mutex_t *mutex)
perror("Error in pthread mutex lock");
exit(-1);
}
- if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
+ if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
cmm_smp_mb();
}
poll(NULL,0,10);
@@ -245,7 +246,7 @@ void update_counter_and_wait(void)
cmm_smp_mb();
/*
- * Wait for each thread rcu_reader.ctr count to become 0.
+ * Wait for each thread URCU_TLS(rcu_reader).ctr count to become 0.
*/
for (;;) {
wait_loops++;
@@ -277,7 +278,8 @@ void update_counter_and_wait(void)
#else /* #ifndef HAS_INCOHERENT_CACHES */
/*
* BUSY-LOOP. Force the reader thread to commit its
- * rcu_reader.ctr update to memory if we wait for too long.
+ * URCU_TLS(rcu_reader).ctr update to memory if we wait
+ * for too long.
*/
if (cds_list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
@@ -328,7 +330,7 @@ void synchronize_rcu(void)
* committing next rcu_gp_ctr update to memory. Failure to do so could
* result in the writer waiting forever while new readers are always
* accessing data (no progress). Enforce compiler-order of load
- * rcu_reader ctr before store to rcu_gp_ctr.
+ * URCU_TLS(rcu_reader).ctr before store to rcu_gp_ctr.
*/
cmm_barrier();
@@ -368,20 +370,20 @@ void rcu_read_unlock(void)
void rcu_register_thread(void)
{
- rcu_reader.tid = pthread_self();
- assert(rcu_reader.need_mb == 0);
- assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));
+ URCU_TLS(rcu_reader).tid = pthread_self();
+ assert(URCU_TLS(rcu_reader).need_mb == 0);
+ assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));
mutex_lock(&rcu_gp_lock);
rcu_init(); /* In case gcc does not support constructor attribute */
- cds_list_add(&rcu_reader.node, &registry);
+ cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
mutex_unlock(&rcu_gp_lock);
}
void rcu_unregister_thread(void)
{
mutex_lock(&rcu_gp_lock);
- cds_list_del(&rcu_reader.node);
+ cds_list_del(&URCU_TLS(rcu_reader).node);
mutex_unlock(&rcu_gp_lock);
}
@@ -405,7 +407,7 @@ static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
* executed on.
*/
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
cmm_smp_mb();
}
diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h
index 8d22163..e7b2eda 100644
--- a/urcu/static/urcu-bp.h
+++ b/urcu/static/urcu-bp.h
@@ -38,6 +38,7 @@
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
+#include <urcu/tls-compat.h>
/*
* This code section can only be included in LGPL 2.1 compatible source code.
@@ -74,25 +75,25 @@ extern "C" {
#define MAX_SLEEP 50
extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
+extern DECLARE_URCU_TLS(unsigned int, rand_yield);
static inline void debug_yield_read(void)
{
if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
- rand_yield = time(NULL) ^ pthread_self();
+ URCU_TLS(rand_yield) = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
@@ -144,7 +145,7 @@ struct rcu_reader {
* Adds a pointer dereference on the read-side, but won't require to unregister
* the reader thread.
*/
-extern struct rcu_reader __thread *rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader *, rcu_reader);
static inline int rcu_old_gp_ongoing(long *value)
{
@@ -166,24 +167,24 @@ static inline void _rcu_read_lock(void)
long tmp;
/* Check if registered */
- if (caa_unlikely(!rcu_reader))
+ if (caa_unlikely(!URCU_TLS(rcu_reader)))
rcu_bp_register();
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- tmp = rcu_reader->ctr;
+ tmp = URCU_TLS(rcu_reader)->ctr;
/*
* rcu_gp_ctr is
* RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
*/
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* accessing the pointer.
*/
cmm_smp_mb();
} else {
- _CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
}
}
@@ -193,7 +194,7 @@ static inline void _rcu_read_unlock(void)
* Finish using rcu before decrementing the pointer.
*/
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, URCU_TLS(rcu_reader)->ctr - RCU_GP_COUNT);
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
diff --git a/urcu/static/urcu-qsbr.h b/urcu/static/urcu-qsbr.h
index 68bfc31..22908a4 100644
--- a/urcu/static/urcu-qsbr.h
+++ b/urcu/static/urcu-qsbr.h
@@ -42,6 +42,7 @@
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>
+#include <urcu/tls-compat.h>
#ifdef __cplusplus
extern "C" {
@@ -74,25 +75,25 @@ extern "C" {
#define MAX_SLEEP 50
extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
+extern DECLARE_URCU_TLS(unsigned int, rand_yield);
static inline void debug_yield_read(void)
{
if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
- rand_yield = time(NULL) ^ pthread_self();
+ URCU_TLS(rand_yield) = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
@@ -128,7 +129,7 @@ struct rcu_reader {
pthread_t tid;
};
-extern struct rcu_reader __thread rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
extern int32_t gp_futex;
@@ -137,8 +138,8 @@ extern int32_t gp_futex;
*/
static inline void wake_up_gp(void)
{
- if (caa_unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
- _CMM_STORE_SHARED(rcu_reader.waiting, 0);
+ if (caa_unlikely(_CMM_LOAD_SHARED(URCU_TLS(rcu_reader).waiting))) {
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).waiting, 0);
cmm_smp_mb();
if (uatomic_read(&gp_futex) != -1)
return;
@@ -158,7 +159,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
static inline void _rcu_read_lock(void)
{
- rcu_assert(rcu_reader.ctr);
+ rcu_assert(URCU_TLS(rcu_reader).ctr);
}
static inline void _rcu_read_unlock(void)
@@ -168,8 +169,8 @@ static inline void _rcu_read_unlock(void)
static inline void _rcu_quiescent_state(void)
{
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
- cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb(); /* write URCU_TLS(rcu_reader).ctr before read futex */
wake_up_gp();
cmm_smp_mb();
}
@@ -177,8 +178,8 @@ static inline void _rcu_quiescent_state(void)
static inline void _rcu_thread_offline(void)
{
cmm_smp_mb();
- CMM_STORE_SHARED(rcu_reader.ctr, 0);
- cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
+ CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, 0);
+ cmm_smp_mb(); /* write URCU_TLS(rcu_reader).ctr before read futex */
wake_up_gp();
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
@@ -186,7 +187,7 @@ static inline void _rcu_thread_offline(void)
static inline void _rcu_thread_online(void)
{
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
cmm_smp_mb();
}
diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index 7ae0185..f27f8b6 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -40,6 +40,7 @@
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>
+#include <urcu/tls-compat.h>
#ifdef __cplusplus
extern "C" {
@@ -121,25 +122,25 @@ extern "C" {
#endif
extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
+extern DECLARE_URCU_TLS(unsigned int, rand_yield);
static inline void debug_yield_read(void)
{
if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
- rand_yield = time(NULL) ^ (unsigned long) pthread_self();
+ URCU_TLS(rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
}
#else
static inline void debug_yield_read(void)
@@ -222,7 +223,7 @@ struct rcu_reader {
pthread_t tid;
};
-extern struct rcu_reader __thread rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
extern int32_t gp_futex;
@@ -256,20 +257,20 @@ static inline void _rcu_read_lock(void)
unsigned long tmp;
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- tmp = rcu_reader.ctr;
+ tmp = URCU_TLS(rcu_reader).ctr;
/*
* rcu_gp_ctr is
* RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
*/
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* accessing the pointer. See smp_mb_master().
*/
smp_mb_slave(RCU_MB_GROUP);
} else {
- _CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
}
}
@@ -277,19 +278,19 @@ static inline void _rcu_read_unlock(void)
{
unsigned long tmp;
- tmp = rcu_reader.ctr;
+ tmp = URCU_TLS(rcu_reader).ctr;
/*
* Finish using rcu before decrementing the pointer.
* See smp_mb_master().
*/
if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
smp_mb_slave(RCU_MB_GROUP);
- _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
- /* write rcu_reader.ctr before read futex */
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
+ /* write URCU_TLS(rcu_reader).ctr before read futex */
smp_mb_slave(RCU_MB_GROUP);
wake_up_gp();
} else {
- _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
}
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com