[ltt-dev] [PATCH 03/11] add urcu/arch_defaults.h
Mathieu Desnoyers
compudj at krystal.dyndns.org
Sun Feb 14 09:34:59 EST 2010
* Paolo Bonzini (pbonzini at redhat.com) wrote:
> Most of the memory barrier definitions are shared between all
> architectures, especially smp_* and mc/rmc/wmc. Put them in
> a common file.
>
> Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
> ---
> urcu/arch_defaults.h | 109 ++++++++++++++++++++++++++++++++++++++++++++++++++
> urcu/arch_ppc.h | 50 +---------------------
> urcu/arch_s390.h | 38 +----------------
> urcu/arch_sparc64.h | 51 +----------------------
> urcu/arch_x86.h | 64 +++--------------------------
> 5 files changed, 122 insertions(+), 190 deletions(-)
> create mode 100644 urcu/arch_defaults.h
>
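For context: with this header in place, a port to a new architecture only
needs to provide its strongest barrier (and, optionally, its cache line
size); everything else comes from the defaults. A rough sketch of what such
a port could look like (the arch name and the barrier instruction below are
made up for illustration; they are not part of this patch):

#ifndef _URCU_ARCH_FOO_H
#define _URCU_ARCH_FOO_H

#include <urcu/compiler.h>
#include <urcu/config.h>

/* Optional: arch_defaults.h falls back to 64 if this is not defined. */
#define CACHE_LINE_SIZE 64

/*
 * Strongest barrier for this (hypothetical) architecture. rmb() and
 * wmb() default to mb(), and the smp_*, *mc(), cpu_relax() and
 * sync_core() definitions all come from arch_defaults.h.
 */
#define mb() __asm__ __volatile__("fake-barrier-insn" : : : "memory")

#include <urcu/arch_defaults.h>

#endif /* _URCU_ARCH_FOO_H */
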
> diff --git a/urcu/arch_defaults.h b/urcu/arch_defaults.h
> new file mode 100644
> index 0000000..0cc659e
> --- /dev/null
> +++ b/urcu/arch_defaults.h
> @@ -0,0 +1,121 @@
> +#ifndef _URCU_ARCH_DEFAULTS_H
> +#define _URCU_ARCH_DEFAULTS_H
> +
> +/*
> + * arch_defaults.h: common definitions for multiple architectures.
> + *
> + * Copyright (c) 2010 Paolo Bonzini <pbonzini at redhat.com>
> + *
> + * This library is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * This library is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with this library; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include <urcu/compiler.h>
> +#include <urcu/config.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#ifndef CACHE_LINE_SIZE
> +#define CACHE_LINE_SIZE 64
> +#endif
> +
> +#if !defined mc && !defined rmc && !defined wmc
+#if !defined(mc) && !defined(rmc) && !defined(wmc)
instead?
> +#define CONFIG_HAVE_MEM_COHERENCY
> +
> +#ifndef mb
> +#define mb() __sync_synchronize()
> +#endif
> +
> +#ifndef rmb
> +#define rmb() mb()
> +#endif
> +
> +#ifndef wmb
> +#define wmb() mb()
> +#endif
> +
> +#define mc() barrier()
> +#define rmc() barrier()
> +#define wmc() barrier()
> +#else
> +/*
> + * Architectures without cache coherency need something like the following:
> + *
> + * #define mb() mc()
> + * #define rmb() rmc()
> + * #define wmb() wmc()
> + * #define mc() arch_cache_flush() // mandatory
The // comment style should be avoided (following the Linux kernel coding
style guidelines).
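For example, since that line already sits inside a block comment (a nested
/* mandatory */ would terminate the comment early), a plain parenthetical
would do:

 * #define mc() arch_cache_flush()      (mandatory)
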
The rest of this patch looks good.
Thanks,
Mathieu
> + * #define rmc() arch_cache_flush_read()
> + * #define wmc() arch_cache_flush_write()
> + */
> +
> +#ifndef mb
> +#define mb() mc()
> +#endif
> +
> +#ifndef rmb
> +#define rmb() rmc()
> +#endif
> +
> +#ifndef wmb
> +#define wmb() wmc()
> +#endif
> +
> +#ifndef rmc
> +#define rmc() mc()
> +#endif
> +
> +#ifndef wmc
> +#define wmc() mc()
> +#endif
> +#endif
> +
> +/* Nop everywhere except on alpha. */
> +#ifndef read_barrier_depends
> +#define read_barrier_depends()
> +#endif
> +
> +#ifdef CONFIG_RCU_SMP
> +#define smp_mb() mb()
> +#define smp_rmb() rmb()
> +#define smp_wmb() wmb()
> +#define smp_mc() mc()
> +#define smp_rmc() rmc()
> +#define smp_wmc() wmc()
> +#define smp_read_barrier_depends() read_barrier_depends()
> +#else
> +#define smp_mb() barrier()
> +#define smp_rmb() barrier()
> +#define smp_wmb() barrier()
> +#define smp_mc() barrier()
> +#define smp_rmc() barrier()
> +#define smp_wmc() barrier()
> +#define smp_read_barrier_depends()
> +#endif
> +
> +#ifndef cpu_relax
> +#define cpu_relax() barrier()
> +#endif
> +
> +#ifndef sync_core
> +#define sync_core() mb()
> +#endif
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _URCU_ARCH_DEFAULTS_H */
> diff --git a/urcu/arch_ppc.h b/urcu/arch_ppc.h
> index c1762ae..f925d07 100644
> --- a/urcu/arch_ppc.h
> +++ b/urcu/arch_ppc.h
> @@ -29,8 +29,6 @@
> extern "C" {
> #endif
>
> -#define CONFIG_HAVE_MEM_COHERENCY
> -
> /* Include size of POWER5+ L3 cache lines: 256 bytes */
> #define CACHE_LINE_SIZE 256
>
> @@ -39,55 +37,11 @@ extern "C" {
> #endif
>
> #define mb() asm volatile("sync":::"memory")
> -#define rmb() asm volatile("sync":::"memory")
> -#define wmb() asm volatile("sync"::: "memory")
> -
> -/*
> - * Architectures without cache coherency need something like the following:
> - *
> - * #define mb() mc()
> - * #define rmb() rmc()
> - * #define wmb() wmc()
> - * #define mc() arch_cache_flush()
> - * #define rmc() arch_cache_flush_read()
> - * #define wmc() arch_cache_flush_write()
> - */
> -
> -#define mc() barrier()
> -#define rmc() barrier()
> -#define wmc() barrier()
> -
> -#ifdef CONFIG_RCU_SMP
> -#define smp_mb() mb()
> -#define smp_rmb() rmb()
> -#define smp_wmb() wmb()
> -#define smp_mc() mc()
> -#define smp_rmc() rmc()
> -#define smp_wmc() wmc()
> -#else
> -#define smp_mb() barrier()
> -#define smp_rmb() barrier()
> -#define smp_wmb() barrier()
> -#define smp_mc() barrier()
> -#define smp_rmc() barrier()
> -#define smp_wmc() barrier()
> -#endif
> -
> -/* Nop everywhere except on alpha. */
> -#define smp_read_barrier_depends()
> -
> -static inline void cpu_relax(void)
> -{
> - barrier();
> -}
>
> /*
> * Serialize core instruction execution. Also acts as a compiler barrier.
> */
> -static inline void sync_core()
> -{
> - asm volatile("isync" : : : "memory");
> -}
> +#define sync_core() asm volatile("isync" : : : "memory")
>
> #define mftbl() \
> ({ \
> @@ -123,4 +77,6 @@ static inline cycles_t get_cycles (void)
> }
> #endif
>
> +#include <urcu/arch_defaults.h>
> +
> #endif /* _URCU_ARCH_PPC_H */
> diff --git a/urcu/arch_s390.h b/urcu/arch_s390.h
> index 22a1853..0982112 100644
> --- a/urcu/arch_s390.h
> +++ b/urcu/arch_s390.h
> @@ -35,8 +35,6 @@
> extern "C" {
> #endif
>
> -#define CONFIG_HAVE_MEM_COHERENCY
> -
> #define CACHE_LINE_SIZE 128
>
> #ifndef __SIZEOF_LONG__
> @@ -52,40 +50,6 @@ extern "C" {
> #endif
>
> #define mb() __asm__ __volatile__("bcr 15,0" : : : "memory")
> -#define rmb() __asm__ __volatile__("bcr 15,0" : : : "memory")
> -#define wmb() __asm__ __volatile__("bcr 15,0" : : : "memory")
> -#define mc() barrier()
> -#define rmc() barrier()
> -#define wmc() barrier()
> -
> -#ifdef CONFIG_RCU_SMP
> -#define smp_mb() mb()
> -#define smp_rmb() rmb()
> -#define smp_wmb() wmb()
> -#define smp_mc() mc()
> -#define smp_rmc() rmc()
> -#define smp_wmc() wmc()
> -#else
> -#define smp_mb() barrier()
> -#define smp_rmb() barrier()
> -#define smp_wmb() barrier()
> -#define smp_mc() barrier()
> -#define smp_rmc() barrier()
> -#define smp_wmc() barrier()
> -#endif
> -
> -/* Nop everywhere except on alpha. */
> -#define smp_read_barrier_depends()
> -
> -static inline void cpu_relax(void)
> -{
> - barrier();
> -}
> -
> -static inline void sync_core()
> -{
> - __asm__ __volatile__("bcr 15,0" : : : "memory");
> -}
>
> typedef unsigned long long cycles_t;
>
> @@ -102,4 +66,6 @@ static inline cycles_t get_cycles (void)
> }
> #endif
>
> +#include <urcu/arch_defaults.h>
> +
> #endif /* _URCU_ARCH_S390_H */
> diff --git a/urcu/arch_sparc64.h b/urcu/arch_sparc64.h
> index 54c4c3c..c906168 100644
> --- a/urcu/arch_sparc64.h
> +++ b/urcu/arch_sparc64.h
> @@ -29,8 +29,6 @@
> extern "C" {
> #endif
>
> -#define CONFIG_HAVE_MEM_COHERENCY
> -
> #define CACHE_LINE_SIZE 256
>
> #ifndef BITS_PER_LONG
> @@ -50,53 +48,6 @@ __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
> #define rmb() membar_safe("#LoadLoad")
> #define wmb() membar_safe("#StoreStore")
>
> -/*
> - * Architectures without cache coherency need something like the following:
> - *
> - * #define mb() mc()
> - * #define rmb() rmc()
> - * #define wmb() wmc()
> - * #define mc() arch_cache_flush()
> - * #define rmc() arch_cache_flush_read()
> - * #define wmc() arch_cache_flush_write()
> - */
> -
> -#define mc() barrier()
> -#define rmc() barrier()
> -#define wmc() barrier()
> -
> -#ifdef CONFIG_RCU_SMP
> -#define smp_mb() mb()
> -#define smp_rmb() rmb()
> -#define smp_wmb() wmb()
> -#define smp_mc() mc()
> -#define smp_rmc() rmc()
> -#define smp_wmc() wmc()
> -#else
> -#define smp_mb() barrier()
> -#define smp_rmb() barrier()
> -#define smp_wmb() barrier()
> -#define smp_mc() barrier()
> -#define smp_rmc() barrier()
> -#define smp_wmc() barrier()
> -#endif
> -
> -/* Nop everywhere except on alpha. */
> -#define smp_read_barrier_depends()
> -
> -static inline void cpu_relax(void)
> -{
> - barrier();
> -}
> -
> -/*
> - * Serialize core instruction execution. Also acts as a compiler barrier.
> - */
> -static inline void sync_core()
> -{
> - mb();
> -}
> -
> typedef unsigned long long cycles_t;
>
> static inline cycles_t get_cycles (void)
> @@ -108,4 +59,6 @@ static inline cycles_t get_cycles (void)
> }
> #endif
>
> +#include <urcu/arch_defaults.h>
> +
> #endif /* _URCU_ARCH_SPARC64_H */
> diff --git a/urcu/arch_x86.h b/urcu/arch_x86.h
> index 4abac2b..bc03379 100644
> --- a/urcu/arch_x86.h
> +++ b/urcu/arch_x86.h
> @@ -29,8 +29,6 @@
> extern "C" {
> #endif
>
> -#define CONFIG_HAVE_MEM_COHERENCY
> -
> #define CACHE_LINE_SIZE 128
>
> #ifdef CONFIG_RCU_HAVE_FENCE
> @@ -47,68 +45,16 @@ extern "C" {
> #define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
> #endif
>
> -/*
> - * Architectures without cache coherency need something like the following:
> - *
> - * #define mb() mc()
> - * #define rmb() rmc()
> - * #define wmb() wmc()
> - * #define mc() arch_cache_flush()
> - * #define rmc() arch_cache_flush_read()
> - * #define wmc() arch_cache_flush_write()
> - */
> -
> -#define mc() barrier()
> -#define rmc() barrier()
> -#define wmc() barrier()
> -
> -#ifdef CONFIG_RCU_SMP
> -#define smp_mb() mb()
> -#define smp_rmb() rmb()
> -#define smp_wmb() wmb()
> -#define smp_mc() mc()
> -#define smp_rmc() rmc()
> -#define smp_wmc() wmc()
> -#else
> -#define smp_mb() barrier()
> -#define smp_rmb() barrier()
> -#define smp_wmb() barrier()
> -#define smp_mc() barrier()
> -#define smp_rmc() barrier()
> -#define smp_wmc() barrier()
> -#endif
> -
> -/* Nop everywhere except on alpha. */
> -#define smp_read_barrier_depends()
> -
> -static inline void rep_nop(void)
> -{
> - asm volatile("rep; nop" : : : "memory");
> -}
> -
> -static inline void cpu_relax(void)
> -{
> - rep_nop();
> -}
> +#define cpu_relax() asm volatile("rep; nop" : : : "memory");
>
> /*
> * Serialize core instruction execution. Also acts as a compiler barrier.
> - */
> -#ifdef __PIC__
> -/*
> - * Cannot use cpuid because it clobbers the ebx register and clashes
> - * with -fPIC :
> + * Cannot use cpuid on PIC because it clobbers the ebx register;
> * error: PIC register 'ebx' clobbered in 'asm'
> */
> -static inline void sync_core(void)
> -{
> - mb();
> -}
> -#else
> -static inline void sync_core(void)
> -{
> +#ifndef __PIC__
> +#define sync_core() \
> asm volatile("cpuid" : : : "memory", "eax", "ebx", "ecx", "edx");
> -}
> #endif
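Just noting for future readers: with __PIC__ defined, sync_core() is now
simply left undefined here, and since arch_x86.h includes
<urcu/arch_defaults.h> at the end, the generic fallback from that header
applies, i.e. roughly:

#ifndef sync_core
#define sync_core() mb()   /* same effect as the old PIC-only branch */
#endif

so the behaviour on PIC builds should be unchanged.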
>
> #define rdtscll(val) \
> @@ -133,4 +79,6 @@ static inline cycles_t get_cycles(void)
> }
> #endif
>
> +#include <urcu/arch_defaults.h>
> +
> #endif /* _URCU_ARCH_X86_H */
> --
> 1.6.6
>
--
Mathieu Desnoyers
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F BA06 3F25 A8FE 3BAE 9A68