[ltt-dev] [PATCH 2/6] rculfhash: Fix ht lazy grow logic.
Lai Jiangshan
laijs at cn.fujitsu.com
Tue Nov 1 21:42:00 EDT 2011
On 11/02/2011 01:00 AM, Mathieu Desnoyers wrote:
> * Lai Jiangshan (laijs at cn.fujitsu.com) wrote:
>> "size < target_size" in cds_lfht_resize_lazy() which is always true
>> confuses me.
>
> I think it can be false sometimes if we have multiple concurrent calls
> to cds_lfht_resize_lazy. If the first call succeeds, the resize happens,
> and then a second call gets to try the resize again, but the table
> already has grown. In this case, the resize work does not need to be
> executed.
size is a stack-local variable, and target_size is (size << growth) or
larger (with concurrent calls to cds_lfht_resize_lazy()), so
"size < target_size" is always true.
>
> The resize_initiated flag is just a hint telling if a resize is in
> progress (so we don't trigger extra resize work when unnecessary), but
> it is not sampled before the RCU reads are performed -- only the "size"
> is read with rcu_dereference, which ensures that we can use this size
> information to confirm that the chain length that we read _after_ the
> rcu_dereference actually applies to a size we really want to update. The
> resize_initiated flag does not provide this guarantee.
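
Roughly, the ordering described above looks like the following sketch;
this is a simplified illustration with approximate field names, not a
verbatim excerpt from rculfhash.c:

	size = rcu_dereference(ht->t.size);	/* sample the size under RCU */
	/* ... walk the bucket chain, computing chain_len for that size ... */
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy(ht, size,	/* pass the sampled size */
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));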
>
>>
>> I think we should grow the ht only when we succeed in increasing
>> the resize_target.
>
> I agree on this second change.
>
> Can you respin this patch leaving the "size < target_size" check in place?
See above.
Thanks,
Lai
>
> You can add my comments above as documentation of cds_lfht_resize_lazy()
> if you want.
>
> Thanks,
>
> Mathieu
>
>>
>> Signed-off-by: Lai Jiangshan <laijs at cn.fujitsu.com>
>> ---
>> rculfhash.c | 25 +++++++++++++------------
>> 1 files changed, 13 insertions(+), 12 deletions(-)
>>
>> diff --git a/rculfhash.c b/rculfhash.c
>> index da37e97..e97d854 100644
>> --- a/rculfhash.c
>> +++ b/rculfhash.c
>> @@ -498,7 +498,7 @@ int get_count_order_ulong(unsigned long x)
>> #endif
>>
>> static
>> -void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
>> +void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
>>
>> static
>> void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
>> @@ -659,7 +659,7 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
>> dbg_printf("WARNING: large chain length: %u.\n",
>> chain_len);
>> if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
>> - cds_lfht_resize_lazy(ht, size,
>> + cds_lfht_resize_lazy_grow(ht, size,
>> get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
>> }
>>
>> @@ -706,7 +706,8 @@ int is_end(struct cds_lfht_node *node)
>> }
>>
>> static
>> -unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
>> +unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
>> + unsigned long v)
>> {
>> unsigned long old1, old2;
>>
>> @@ -716,7 +717,7 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
>> if (old2 >= v)
>> return old2;
>> } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
>> - return v;
>> + return old2;
>> }
>>
>> static
>> @@ -1716,11 +1717,9 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
>> }
>>
>> static
>> -unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
>> - int growth_order)
>> +unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
>> {
>> - return _uatomic_max(&ht->t.resize_target,
>> - size << growth_order);
>> + return _uatomic_xchg_monotonic_increase(&ht->t.resize_target, new_size);
>> }
>>
>> static
>> @@ -1760,15 +1759,17 @@ void do_resize_cb(struct rcu_head *head)
>> }
>>
>> static
>> -void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
>> +void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
>> {
>> struct rcu_resize_work *work;
>> - unsigned long target_size;
>> + unsigned long target_size = size << growth;
>> +
>> + if (resize_target_grow(ht, target_size) >= target_size)
>> + return;
>>
>> - target_size = resize_target_update(ht, size, growth);
>> /* Store resize_target before read resize_initiated */
>> cmm_smp_mb();
>> - if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
>> + if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
>> uatomic_inc(&ht->in_progress_resize);
>> cmm_smp_mb(); /* increment resize count before load destroy */
>> if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
>> --
>> 1.7.4.4
>>
>
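
Condensed, the logic proposed by this patch looks roughly like the sketch
below. It is a paraphrase of the hunks quoted above; queue_resize_work()
is a hypothetical placeholder for the in_progress_resize / call_rcu
sequence, not a function that exists in rculfhash.c:

	static
	void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size,
			int growth)
	{
		unsigned long target_size = size << growth;

		/* Only a caller that actually raises resize_target proceeds. */
		if (resize_target_grow(ht, target_size) >= target_size)
			return;

		/* Store resize_target before reading resize_initiated. */
		cmm_smp_mb();
		if (!CMM_LOAD_SHARED(ht->t.resize_initiated))
			queue_resize_work(ht);	/* hypothetical placeholder */
	}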