[ltt-dev] [PATCH 07/10 round10] add LFHT_MEMORY_RESERVED low level memory management
Mathieu Desnoyers
mathieu.desnoyers at efficios.com
Tue Nov 22 05:25:35 EST 2011
I _really_ like this approach. Although I'm doubtful about the merits of
the "chunk" memory allocation scheme, this mmap-based scheme looks
really promising!
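(For readers following the thread, here is a minimal standalone sketch of the
reserve/populate/discard pattern this patch relies on through its
memory_reserve()/memory_populate()/memory_discard() helpers. The names and
sizes below are made up purely for illustration, they are not taken from the
patch:)

#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Standalone illustration only; sizes are arbitrary. */
	size_t reserve_len = 1UL << 20;		/* reserve 1 MiB of address space */
	size_t page_len = getpagesize();
	char *base;
	void *ret;
	int err;

	/* Reserve address space only: inaccessible, no committed backing memory. */
	base = mmap(NULL, reserve_len, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(base != MAP_FAILED);

	/* Populate the first page in place: readable/writable, zero-filled. */
	ret = mmap(base, page_len, PROT_READ | PROT_WRITE,
		   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(ret == base);
	memset(base, 0xab, page_len);

	/* Discard its contents but keep the reservation (back to PROT_NONE). */
	ret = mmap(base, page_len, PROT_NONE,
		   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(ret == base);

	/* Release the whole reservation. */
	err = munmap(base, reserve_len);
	assert(err == 0);
	return 0;
}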
Please resubmit with the next round.
Comments below,
* Lai Jiangshan (laijs at cn.fujitsu.com) wrote:
> Signed-off-by: Lai Jiangshan <laijs at cn.fujitsu.com>
> ---
> rculfhash.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
> 1 files changed, 118 insertions(+), 1 deletions(-)
>
> diff --git a/rculfhash.c b/rculfhash.c
> index ae1177f..b764dc1 100644
> --- a/rculfhash.c
> +++ b/rculfhash.c
> @@ -155,6 +155,7 @@
> #include <stdio.h>
> #include <stdint.h>
> #include <string.h>
> +#include <sys/mman.h>
>
> #include "config.h"
> #include <urcu.h>
> @@ -172,7 +173,7 @@
> #define dbg_printf(fmt, args...)
> #endif
>
> -#if !defined(LFHT_MEMORY_ORDER) && !defined(LFHT_MEMORY_CHUNK)
> +#if !defined(LFHT_MEMORY_ORDER) && !defined(LFHT_MEMORY_CHUNK) && !defined(LFHT_MEMORY_RESERVED)
> #define LFHT_MEMORY_ORDER
> #endif
>
> @@ -262,6 +263,13 @@ struct rcu_table {
> * levels to improve cache locality for small index orders.
> */
> struct cds_lfht_node *tbl[MAX_TABLE_ORDER];
> +#elif defined(LFHT_MEMORY_RESERVED)
> + /*
> + * Reserved address space for all buckets; no memory is allocated up front.
> + * Each per-order-index-level bucket node table is populated within this
> + * reservation when needed, which keeps bucket indexing extremely simple.
> + */
> + struct cds_lfht_node *tbl;
> #endif
> };
>
> @@ -840,6 +848,115 @@ struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
> offset = index & (ht->min_alloc_size - 1);
> return &ht->tbl[chunk][offset];
> }
> +#elif defined(LFHT_MEMORY_RESERVED)
> +static
> +struct cds_lfht *_cds_lfht_alloc(unsigned long min_alloc_size,
> + unsigned long max_size)
> +{
> + struct cds_lfht *ht;
> + unsigned long page_bucket_size = getpagesize() / sizeof(*ht->t.tbl);
> +
> + if (max_size <= page_bucket_size) /* small table */
> + min_alloc_size = max_size;
> + else /* large table */
> + min_alloc_size = max(min_alloc_size, page_bucket_size);
> +
> + ht = calloc(1, sizeof(struct cds_lfht));
> + assert(ht);
> +
> + ht->min_alloc_size = min_alloc_size;
> + ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
> + ht->max_size = max_size;
> +
> + return ht;
> +}
> +
> +/* reserve an inaccessible memory range without allocating any memory */
> +static void *memory_reserve(size_t length)
reserve -> map ?
> +{
> + void *ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
> +
> + assert(ret != MAP_FAILED);
> + return ret;
> +}
> +
> +static void memory_dereserve(void *ptr, size_t length)
dereserve -> unmap ?
Thanks,
Mathieu
> +{
> + int ret = munmap(ptr, length);
> +
> + assert(ret == 0);
> +}
> +
> +static void memory_populate(void *ptr, size_t length)
> +{
> + void *ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
> + MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
> +
> + assert(ret == ptr);
> +}
> +
> +/*
> + * Discard the now-garbage memory so the system does not bother saving it
> + * if it tries to swap it out. The range stays reserved and inaccessible.
> + */
> +static void memory_discard(void *ptr, size_t length)
> +{
> + void *ret = mmap(ptr, length, PROT_NONE,
> + MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
> +
> + assert(ret == ptr);
> +}
> +
> +static
> +void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
> +{
> + if (order == 0) {
> + if (ht->min_alloc_size == ht->max_size) { /* small table */
> + ht->t.tbl = calloc(ht->max_size, sizeof(*ht->t.tbl));
> + assert(ht->t.tbl);
> + return;
> + }
> + /* large table */
> + ht->t.tbl = memory_reserve(ht->max_size * sizeof(*ht->t.tbl));
> + memory_populate(ht->t.tbl, ht->min_alloc_size * sizeof(*ht->t.tbl));
> + } else if (order > ht->min_alloc_order) { /* large table */
> + unsigned long len = 1UL << (order - 1);
> +
> + assert(ht->min_alloc_size < ht->max_size);
> + memory_populate(ht->t.tbl + len, len * sizeof(*ht->t.tbl));
> + }
> + /* Nothing to do for 0 < order && order <= ht->min_alloc_order */
> +}
> +
> +/*
> + * cds_lfht_free_bucket_table() should be called with decreasing order.
> + * When cds_lfht_free_bucket_table(0) is called, it means the whole
> + * lfht is destroyed.
> + */
> +static
> +void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
> +{
> + if (order == 0) {
> + if (ht->min_alloc_size == ht->max_size) { /* small table */
> + poison_free(ht->t.tbl);
> + return;
> + }
> + /* large table */
> + memory_dereserve(ht->t.tbl, ht->max_size * sizeof(*ht->t.tbl));
> + } else if (order > ht->min_alloc_order) { /* large table */
> + unsigned long len = 1UL << (order - 1);
> +
> + assert(ht->min_alloc_size < ht->max_size);
> + memory_discard(ht->t.tbl + len, len * sizeof(*ht->t.tbl));
> + }
> + /* Nothing to do for 0 < order && order <= ht->min_alloc_order */
> +}
> +
> +static inline
> +struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
> +{
> + return &ht->t.tbl[index];
> +}
> #else
> /* LFHT_MEMORY_ORDER */
> static
> --
> 1.7.4.4
>
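As a side note, here is the quick sketch I used to convince myself of the
populate pattern over the reserved area (the min_alloc_order/max_order values
are hypothetical, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long min_alloc_order = 8;	/* hypothetical: min_alloc_size = 256 buckets */
	unsigned long max_order = 12;		/* hypothetical: max_size = 4096 buckets */
	unsigned long order;

	/* Order 0 populates bucket slots [0, min_alloc_size) of the reservation. */
	printf("order 0: populate [0, %lu)\n", 1UL << min_alloc_order);

	for (order = 1; order <= max_order; order++) {
		if (order <= min_alloc_order) {
			/* Already covered by order 0: nothing to populate. */
			continue;
		}
		/* Each larger order populates the upper half: [2^(order-1), 2^order). */
		printf("order %lu: populate [%lu, %lu)\n",
		       order, 1UL << (order - 1), 1UL << order);
	}
	return 0;
}

The printed ranges line up with the memory_populate(ht->t.tbl + len,
len * sizeof(*ht->t.tbl)) call above, so the whole reservation ends up
committed exactly once, contiguously.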
--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com