[ltt-dev] [PATCH 2/5] rculfhash: use hash for index if !HAVE_SCHED_GETCPU

Lai Jiangshan laijs at cn.fujitsu.com
Thu Oct 27 23:57:25 EDT 2011

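ht_count_add()/ht_count_del() currently rely on sched_getcpu() to pick a
split-counter index: the counters are compiled out entirely when
HAVE_SCHED_GETCPU is undefined, and an update is silently skipped when
sched_getcpu() fails at runtime. Pass the node's hash down to these
helpers and fall back on "hash & split_count_mask" as the index in both
cases, so the split counters only depend on HAVE_SYSCONF. In
cds_lfht_del(), the hash is recovered from the node's stored
reverse_hash via bit_reverse_ulong().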

Signed-off-by: Lai Jiangshan <laijs at cn.fujitsu.com>
---
 rculfhash.c |   50 ++++++++++++++++++++++++++++----------------------
 1 file changed, 28 insertions(+), 22 deletions(-)
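
Note to reviewers (not part of the patch): a consolidated sketch of the
index selection this change produces. The function name and mask value
below are hypothetical; in rculfhash.c, split_count_mask is derived from
the detected CPU count at counter-allocation time.

#define _GNU_SOURCE
#include <sched.h>	/* sched_getcpu() (glibc) */
#include <stdio.h>

static long split_count_mask = 64 - 1;	/* hypothetical: 64 counters */

static int pick_split_count_index(unsigned long hash)
{
#ifdef HAVE_SCHED_GETCPU
	int cpu = sched_getcpu();

	if (cpu >= 0)
		return cpu & split_count_mask;
	/* sched_getcpu() failed: fall back to the node's hash. */
#endif
	return hash & split_count_mask;
}

int main(void)
{
	printf("index for hash 0x2a: %d\n", pick_split_count_index(0x2a));
	return 0;
}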

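A second sketch: cds_lfht_del() can recover the forward hash from the
node's stored reverse_hash because bit reversal is an involution, i.e.
applying it twice returns the original value. The helper below is
hypothetical and merely stands in for rculfhash's bit_reverse_ulong():

#include <assert.h>
#include <limits.h>

static unsigned long sketch_bit_reverse(unsigned long v)
{
	unsigned long r = 0;
	unsigned int i;

	for (i = 0; i < sizeof(v) * CHAR_BIT; i++) {
		r = (r << 1) | (v & 1);
		v >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned long hash = 0xdeadbeefUL;

	/* reverse(reverse(h)) == h, so reverse_hash preserves the hash. */
	assert(sketch_bit_reverse(sketch_bit_reverse(hash)) == hash);
	return 0;
}
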
diff --git a/rculfhash.c b/rculfhash.c
index d1bec03..ea6df05 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -506,7 +506,7 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
  * In the unfortunate event the number of CPUs reported would be
  * inaccurate, we use modulo arithmetic on the number of CPUs we got.
  */
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#if defined(HAVE_SYSCONF)
 
 static
 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
@@ -554,30 +554,36 @@ void free_split_items_count(struct ht_items_count *count)
 	poison_free(count);
 }
 
+#if defined(HAVE_SCHED_GETCPU)
 static
-int ht_get_split_count_index(void)
+int ht_get_split_count_index(unsigned long hash)
 {
 	int cpu;
 
 	assert(split_count_mask >= 0);
 	cpu = sched_getcpu();
 	if (unlikely(cpu < 0))
-		return cpu;
+		return hash & split_count_mask;
 	else
 		return cpu & split_count_mask;
 }
+#else /* #if defined(HAVE_SCHED_GETCPU) */
+static
+int ht_get_split_count_index(unsigned long hash)
+{
+	return hash & split_count_mask;
+}
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
 
 static
-void ht_count_add(struct cds_lfht *ht, unsigned long size)
+void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
 	unsigned long split_count;
 	int index;
 
 	if (unlikely(!ht->split_count))
 		return;
-	index = ht_get_split_count_index();
-	if (unlikely(index < 0))
-		return;
+	index = ht_get_split_count_index(hash);
 	split_count = uatomic_add_return(&ht->split_count[index].add, 1);
 	if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
 		long count;
@@ -597,16 +603,14 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
 }
 
 static
-void ht_count_del(struct cds_lfht *ht, unsigned long size)
+void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
 	unsigned long split_count;
 	int index;
 
 	if (unlikely(!ht->split_count))
 		return;
-	index = ht_get_split_count_index();
-	if (unlikely(index < 0))
-		return;
+	index = ht_get_split_count_index(hash);
 	split_count = uatomic_add_return(&ht->split_count[index].del, 1);
 	if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
 		long count;
@@ -631,7 +635,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 	}
 }
 
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#else /* #if defined(HAVE_SYSCONF) */
 
 static const long nr_cpus_mask = -2;
 static const long split_count_mask = -2;
@@ -648,16 +652,16 @@ void free_split_items_count(struct ht_items_count *count)
 }
 
 static
-void ht_count_add(struct cds_lfht *ht, unsigned long size)
+void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
 }
 
 static
-void ht_count_del(struct cds_lfht *ht, unsigned long size)
+void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
 }
 
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#endif /* #else #if defined(HAVE_SYSCONF) */
 
 
 static
@@ -1504,7 +1508,7 @@ void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 
 	size = rcu_dereference(ht->t.size);
 	_cds_lfht_add(ht, size, node, NULL, 0);
-	ht_count_add(ht, size);
+	ht_count_add(ht, size, hash);
 }
 
 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
@@ -1519,7 +1523,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 	size = rcu_dereference(ht->t.size);
 	_cds_lfht_add(ht, size, node, &iter, 0);
 	if (iter.node == node)
-		ht_count_add(ht, size);
+		ht_count_add(ht, size, hash);
 	return iter.node;
 }
 
@@ -1536,7 +1540,7 @@ struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
 	for (;;) {
 		_cds_lfht_add(ht, size, node, &iter, 0);
 		if (iter.node == node) {
-			ht_count_add(ht, size);
+			ht_count_add(ht, size, hash);
 			return NULL;
 		}
 
@@ -1557,13 +1561,15 @@ int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
 
 int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
-	unsigned long size;
+	unsigned long size, hash;
 	int ret;
 
 	size = rcu_dereference(ht->t.size);
 	ret = _cds_lfht_del(ht, size, iter->node, 0);
-	if (!ret)
-		ht_count_del(ht, size);
+	if (!ret) {
+		hash = bit_reverse_ulong(iter->node->p.reverse_hash);
+		ht_count_del(ht, size, hash);
+	}
 	return ret;
 }
 
@@ -1809,7 +1815,7 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
 	}
 }
 
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#if defined(HAVE_SYSCONF)
 
 static
 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
-- 
1.7.4.4
