From 5a1b26d7c629915446222ebe77d16567c98426ff Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Thu, 31 Dec 2015 12:09:13 +0200 Subject: locking/barriers, arch: Use smp barriers in smp_store_release() With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()") it was made clear that the context of this call (and thus set_mb) is strictly for CPU ordering, as opposed to IO. As such all archs should use the smp variant of mb(), respecting the semantics and saving a mandatory barrier on UP. Signed-off-by: Davidlohr Bueso Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Benjamin Herrenschmidt Cc: Heiko Carstens Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: dave@stgolabs.net Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-dave@stgolabs.net Signed-off-by: Ingo Molnar Reviewed-by: Paul E. McKenney --- arch/ia64/include/asm/barrier.h | 2 +- arch/powerpc/include/asm/barrier.h | 2 +- arch/s390/include/asm/barrier.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index df896a1c41d3..209c4b817c95 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -77,7 +77,7 @@ do { \ ___p1; \ }) -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) /* * The group barrier in front of the rsm & ssm are necessary to ensure diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 0eca6efc0631..a7af5fb7b914 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -34,7 +34,7 @@ #define rmb() __asm__ __volatile__ ("sync" : : : "memory") #define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) #ifdef __SUBARCH_HAS_LWSYNC # define SMPWMB LWSYNC diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index d68e11e0df5e..7ffd0b19135c 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -36,7 +36,7 @@ #define smp_mb__before_atomic() smp_mb() #define smp_mb__after_atomic() smp_mb() -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) #define smp_store_release(p, v) \ do { \ -- cgit v1.2.3 From 9505ec0825a09ea97426d026f2524d1cefa83a84 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 28 Dec 2015 13:58:06 +0200 Subject: ia64: rename nop->iosapic_nop asm-generic/barrier.h defines a nop() macro. To be able to use this header on ia64, we shouldn't call local functions/variables nop(). There's one instance where this breaks on ia64: rename the function to iosapic_nop to avoid the conflict. Signed-off-by: Michael S. 
Tsirkin Acked-by: Tony Luck Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/ia64/kernel/iosapic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index d2fae054d988..90fde5b8669d 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -256,7 +256,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask) } static void -nop (struct irq_data *data) +iosapic_nop (struct irq_data *data) { /* do nothing... */ } @@ -415,7 +415,7 @@ iosapic_unmask_level_irq (struct irq_data *data) #define iosapic_shutdown_level_irq mask_irq #define iosapic_enable_level_irq unmask_irq #define iosapic_disable_level_irq mask_irq -#define iosapic_ack_level_irq nop +#define iosapic_ack_level_irq iosapic_nop static struct irq_chip irq_type_iosapic_level = { .name = "IO-SAPIC-level", @@ -453,7 +453,7 @@ iosapic_ack_edge_irq (struct irq_data *data) } #define iosapic_enable_edge_irq unmask_irq -#define iosapic_disable_edge_irq nop +#define iosapic_disable_edge_irq iosapic_nop static struct irq_chip irq_type_iosapic_edge = { .name = "IO-SAPIC-edge", -- cgit v1.2.3 From 53a05ac15ee04b56ce02f0f831556e2fcdcce93f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: ia64: reuse asm-generic/barrier.h On ia64 smp_rmb, smp_wmb, read_barrier_depends, smp_read_barrier_depends and smp_store_mb() match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. Tsirkin Acked-by: Tony Luck Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/ia64/include/asm/barrier.h | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 209c4b817c95..2f933480a764 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -48,12 +48,6 @@ # define smp_mb() barrier() #endif -#define smp_rmb() smp_mb() -#define smp_wmb() smp_mb() - -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() @@ -77,12 +71,12 @@ do { \ ___p1; \ }) -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) - /* * The group barrier in front of the rsm & ssm are necessary to ensure * that none of the previous instructions in the same group are * affected by the rsm/ssm. */ +#include + #endif /* _ASM_IA64_BARRIER_H */ -- cgit v1.2.3 From fbd7ec02363cee4264aca2cc46692c9322fd6b42 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: powerpc: reuse asm-generic/barrier.h On powerpc read_barrier_depends, smp_read_barrier_depends smp_store_mb(), smp_mb__before_atomic and smp_mb__after_atomic match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) Reviewed-by: Paul E. 
McKenney --- arch/powerpc/include/asm/barrier.h | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index a7af5fb7b914..980ad0cbdccf 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -34,8 +34,6 @@ #define rmb() __asm__ __volatile__ ("sync" : : : "memory") #define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) - #ifdef __SUBARCH_HAS_LWSYNC # define SMPWMB LWSYNC #else @@ -60,9 +58,6 @@ #define smp_wmb() barrier() #endif /* CONFIG_SMP */ -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - /* * This is a barrier which prevents following instructions from being * started until the value of the argument x is known. For example, if @@ -87,8 +82,8 @@ do { \ ___p1; \ }) -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() #define smp_mb__before_spinlock() smp_mb() +#include + #endif /* _ASM_POWERPC_BARRIER_H */ -- cgit v1.2.3 From 21535aaed9e33b4cc485ab2245dd2958816ee916 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: s390: reuse asm-generic/barrier.h On s390 read_barrier_depends, smp_read_barrier_depends smp_store_mb(), smp_mb__before_atomic and smp_mb__after_atomic match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/s390/include/asm/barrier.h | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 7ffd0b19135c..c358c31a7f07 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -30,14 +30,6 @@ #define smp_rmb() rmb() #define smp_wmb() wmb() -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() - -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) - #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ @@ -53,4 +45,6 @@ do { \ ___p1; \ }) +#include + #endif /* __ASM_BARRIER_H */ -- cgit v1.2.3 From 519be0438e6963b8efd2430e9d0595f5aeee915e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: sparc: reuse asm-generic/barrier.h On sparc 64 bit dma_rmb, dma_wmb, smp_store_mb, smp_mb, smp_rmb, smp_wmb, read_barrier_depends and smp_read_barrier_depends match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. nop uses __asm__ __volatile but is otherwise identical to the generic version, drop that as well. This is in preparation to refactoring this code area. Note: nop() was in processor.h and not in barrier.h as on other architectures. Nothing seems to depend on it being there though. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: David S. 
Miller Acked-by: Peter Zijlstra (Intel) --- arch/sparc/include/asm/barrier_32.h | 1 - arch/sparc/include/asm/barrier_64.h | 21 ++------------------- arch/sparc/include/asm/processor.h | 3 --- 3 files changed, 2 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h index ae69eda288f4..8059130a6cee 100644 --- a/arch/sparc/include/asm/barrier_32.h +++ b/arch/sparc/include/asm/barrier_32.h @@ -1,7 +1,6 @@ #ifndef __SPARC_BARRIER_H #define __SPARC_BARRIER_H -#include /* for nop() */ #include #endif /* !(__SPARC_BARRIER_H) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 14a928601657..26c3f7247f2d 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -37,25 +37,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") -#define dma_rmb() rmb() -#define dma_wmb() wmb() - -#define smp_store_mb(__var, __value) \ - do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#else -#define smp_mb() __asm__ __volatile__("":::"memory") -#define smp_rmb() __asm__ __volatile__("":::"memory") -#define smp_wmb() __asm__ __volatile__("":::"memory") -#endif - -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ @@ -74,4 +55,6 @@ do { \ #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() +#include + #endif /* !(__SPARC64_BARRIER_H) */ diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h index 2fe99e66e760..9da9646bf6c6 100644 --- a/arch/sparc/include/asm/processor.h +++ b/arch/sparc/include/asm/processor.h @@ -5,7 +5,4 @@ #else #include #endif - -#define nop() __asm__ __volatile__ ("nop") - #endif -- cgit v1.2.3 From 335390d6096f647311980f50312b304b377e616f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: arm: reuse asm-generic/barrier.h On arm smp_store_mb, read_barrier_depends, smp_read_barrier_depends, smp_store_release, smp_load_acquire, smp_mb__before_atomic and smp_mb__after_atomic match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Russell King Acked-by: Peter Zijlstra (Intel) --- arch/arm/include/asm/barrier.h | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 3ff5642d9788..31152e8c7501 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -70,28 +70,7 @@ extern void arm_heavy_mb(void); #define smp_wmb() dmb(ishst) #endif -#define smp_store_release(p, v) \ -do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - WRITE_ONCE(*p, v); \ -} while (0) - -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - ___p1; \ -}) - -#define read_barrier_depends() do { } while(0) -#define smp_read_barrier_depends() do { } while(0) - -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) - -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() +#include #endif /* !__ASSEMBLY__ */ #endif /* __ASM_BARRIER_H */ -- cgit v1.2.3 From 90ff6a17d0e07d689886cba4244674bfd41e7a2d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: arm64: reuse asm-generic/barrier.h On arm64 nop, read_barrier_depends, smp_read_barrier_depends smp_store_mb(), smp_mb__before_atomic and smp_mb__after_atomic match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/arm64/include/asm/barrier.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 9622eb48f894..91a43f48914d 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -91,14 +91,7 @@ do { \ __u.__val; \ }) -#define read_barrier_depends() do { } while(0) -#define smp_read_barrier_depends() do { } while(0) - -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) -#define nop() asm volatile("nop"); - -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() +#include #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From abe114d9f0a80f27bc5040cd2287dca80423d13e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: metag: reuse asm-generic/barrier.h On metag dma_rmb, dma_wmb, smp_store_mb, read_barrier_depends, smp_read_barrier_depends, smp_store_release and smp_load_acquire match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: James Hogan Acked-by: Peter Zijlstra (Intel) --- arch/metag/include/asm/barrier.h | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index 172b7e5efc53..b5b778bb0b33 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -44,9 +44,6 @@ static inline void wr_fence(void) #define rmb() barrier() #define wmb() mb() -#define dma_rmb() rmb() -#define dma_wmb() wmb() - #ifndef CONFIG_SMP #define fence() do { } while (0) #define smp_mb() barrier() @@ -81,27 +78,9 @@ static inline void fence(void) #endif #endif -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) - -#define smp_store_release(p, v) \ -do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - WRITE_ONCE(*p, v); \ -} while (0) - -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - ___p1; \ -}) - #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() +#include + #endif /* _ASM_METAG_BARRIER_H */ -- cgit v1.2.3 From fa083e28f89a78b95ba8b7da86db40c13c60e95d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: mips: reuse asm-generic/barrier.h On mips dma_rmb, dma_wmb, smp_store_mb, read_barrier_depends, smp_read_barrier_depends, smp_store_release and smp_load_acquire match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/mips/include/asm/barrier.h | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index 752e0b86c171..3eac4b909355 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -10,9 +10,6 @@ #include -#define read_barrier_depends() do { } while(0) -#define smp_read_barrier_depends() do { } while(0) - #ifdef CONFIG_CPU_HAS_SYNC #define __sync() \ __asm__ __volatile__( \ @@ -87,8 +84,6 @@ #define wmb() fast_wmb() #define rmb() fast_rmb() -#define dma_wmb() fast_wmb() -#define dma_rmb() fast_rmb() #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) # ifdef CONFIG_CPU_CAVIUM_OCTEON @@ -112,9 +107,6 @@ #define __WEAK_LLSC_MB " \n" #endif -#define smp_store_mb(var, value) \ - do { WRITE_ONCE(var, value); smp_mb(); } while (0) - #define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") #ifdef CONFIG_CPU_CAVIUM_OCTEON @@ -129,22 +121,9 @@ #define nudge_writes() mb() #endif -#define smp_store_release(p, v) \ -do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - WRITE_ONCE(*p, v); \ -} while (0) - -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ - ___p1; \ -}) - #define smp_mb__before_atomic() smp_mb__before_llsc() #define smp_mb__after_atomic() smp_llsc_mb() +#include + #endif /* __ASM_BARRIER_H */ -- cgit v1.2.3 From 577f183acc88645eae116326cc2203dc88ea730c Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: x86/um: reuse asm-generic/barrier.h On x86/um CONFIG_SMP is never defined. As a result, several macros match the asm-generic variant exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Richard Weinberger Acked-by: Peter Zijlstra (Intel) --- arch/x86/um/asm/barrier.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index 755481f14d90..174781a404ff 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h @@ -36,13 +36,6 @@ #endif /* CONFIG_X86_PPRO_FENCE */ #define dma_wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) - -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) +#include #endif -- cgit v1.2.3 From 300b06d4555305dc227748674f75970f2f84c224 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: x86: reuse asm-generic/barrier.h As on most architectures, on x86 read_barrier_depends and smp_read_barrier_depends are empty. Drop the local definitions and pull the generic ones from asm-generic/barrier.h instead: they are identical. This is in preparation to refactoring this code area. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner --- arch/x86/include/asm/barrier.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 0681d2532527..cc4c2a77bd01 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -43,9 +43,6 @@ #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) #endif /* SMP */ -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - #if defined(CONFIG_X86_PPRO_FENCE) /* @@ -91,4 +88,6 @@ do { \ #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() +#include + #endif /* _ASM_X86_BARRIER_H */ -- cgit v1.2.3 From 003472a93ad019bfd054a5cbb30c6eec7d0395a3 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: powerpc: define __smp_xxx This defines __smp_xxx barriers for powerpc for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h This reduces the amount of arch-specific boiler-plate code. Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Boqun Feng Acked-by: Peter Zijlstra (Intel) --- arch/powerpc/include/asm/barrier.h | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 980ad0cbdccf..c0deafc212b8 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -44,19 +44,11 @@ #define dma_rmb() __lwsync() #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") -#ifdef CONFIG_SMP -#define smp_lwsync() __lwsync() +#define __smp_lwsync() __lwsync() -#define smp_mb() mb() -#define smp_rmb() __lwsync() -#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") -#else -#define smp_lwsync() barrier() - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#endif /* CONFIG_SMP */ +#define __smp_mb() mb() +#define __smp_rmb() __lwsync() +#define __smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") /* * This is a barrier which prevents following instructions from being @@ -67,18 +59,18 @@ #define data_barrier(x) \ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - smp_lwsync(); \ + __smp_lwsync(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - smp_lwsync(); \ + __smp_lwsync(); \ ___p1; \ }) -- cgit v1.2.3 From fd072df850e536bf46e9981be4be95961ce5eef3 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: arm64: define __smp_xxx This defines __smp_xxx barriers for arm64, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: arm64 does not support !SMP config, so smp_xxx and __smp_xxx are always equivalent. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/arm64/include/asm/barrier.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 91a43f48914d..dae5c49618db 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -35,11 +35,11 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) -#define smp_mb() dmb(ish) -#define smp_rmb() dmb(ishld) -#define smp_wmb() dmb(ishst) +#define __smp_mb() dmb(ish) +#define __smp_rmb() dmb(ishld) +#define __smp_wmb() dmb(ishst) -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ switch (sizeof(*p)) { \ @@ -62,7 +62,7 @@ do { \ } \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ union { typeof(*p) __val; char __c[1]; } __u; \ compiletime_assert_atomic_type(*p); \ -- cgit v1.2.3 From 2b1f3de10267dd1034d24f9e77dd5e8f07793925 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: arm: define __smp_xxx This defines __smp_xxx barriers for arm, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h This reduces the amount of arch-specific boiler-plate code. Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Russell King Acked-by: Peter Zijlstra (Intel) --- arch/arm/include/asm/barrier.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 31152e8c7501..112cc1a5d47f 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -60,15 +60,9 @@ extern void arm_heavy_mb(void); #define dma_wmb() barrier() #endif -#ifndef CONFIG_SMP -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#else -#define smp_mb() dmb(ish) -#define smp_rmb() smp_mb() -#define smp_wmb() dmb(ishst) -#endif +#define __smp_mb() dmb(ish) +#define __smp_rmb() __smp_mb() +#define __smp_wmb() dmb(ishst) #include -- cgit v1.2.3 From 27f6cabc0ebf9e452c3251bf0511c41cf2c75dde Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: blackfin: define __smp_xxx This defines __smp_xxx barriers for blackfin, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/blackfin/include/asm/barrier.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index dfb66fe88b34..7cca51cae5ff 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h @@ -78,8 +78,8 @@ #endif /* !CONFIG_SMP */ -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.2.3 From eebd1b927822f13429ec09d0a48fe92716b22840 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: ia64: define __smp_xxx This defines __smp_xxx barriers for ia64, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h This reduces the amount of arch-specific boiler-plate code. Signed-off-by: Michael S. Tsirkin Acked-by: Tony Luck Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/ia64/include/asm/barrier.h | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 2f933480a764..588f1614cafc 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -42,28 +42,24 @@ #define dma_rmb() mb() #define dma_wmb() mb() -#ifdef CONFIG_SMP -# define smp_mb() mb() -#else -# define smp_mb() barrier() -#endif +# define __smp_mb() mb() -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() /* * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no * need for asm trickery! */ -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ -- cgit v1.2.3 From afc22de0c0ca8b4697a8aec2bbb35d4cc385e7e0 Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: metag: define __smp_xxx This defines __smp_xxx barriers for metag, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: as __smp_XX macros should not depend on CONFIG_SMP, they can not use the existing fence() macro since that is defined differently between SMP and !SMP. For this reason, this patch introduces a wrapper metag_fence() that doesn't depend on CONFIG_SMP. fence() is then defined using that, depending on CONFIG_SMP. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: James Hogan Acked-by: Peter Zijlstra (Intel) --- arch/metag/include/asm/barrier.h | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index b5b778bb0b33..5418517aa5eb 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -44,13 +44,6 @@ static inline void wr_fence(void) #define rmb() barrier() #define wmb() mb() -#ifndef CONFIG_SMP -#define fence() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#else - #ifdef CONFIG_METAG_SMP_WRITE_REORDERING /* * Write to the atomic memory unlock system event register (command 0). This is @@ -60,26 +53,31 @@ static inline void wr_fence(void) * incoherence). It is therefore ineffective if used after and on the same * thread as a write. */ -static inline void fence(void) +static inline void metag_fence(void) { volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK; barrier(); *flushptr = 0; barrier(); } -#define smp_mb() fence() -#define smp_rmb() fence() -#define smp_wmb() barrier() +#define __smp_mb() metag_fence() +#define __smp_rmb() metag_fence() +#define __smp_wmb() barrier() #else -#define fence() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() +#define metag_fence() do { } while (0) +#define __smp_mb() barrier() +#define __smp_rmb() barrier() +#define __smp_wmb() barrier() #endif + +#ifdef CONFIG_SMP +#define fence() metag_fence() +#else +#define fence() do { } while (0) #endif -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.2.3 From a60514bae72ee41b506b8702dfdd6eeeffe58556 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: mips: define __smp_xxx This defines __smp_xxx barriers for mips, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: the only exception is smp_mb__before_llsc which is mips-specific. We define both the __smp_mb__before_llsc variant (for use in asm/barriers.h) and smp_mb__before_llsc (for use elsewhere on this architecture). Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/mips/include/asm/barrier.h | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index 3eac4b909355..d296633d890e 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -85,20 +85,20 @@ #define wmb() fast_wmb() #define rmb() fast_rmb() -#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) +#if defined(CONFIG_WEAK_ORDERING) # ifdef CONFIG_CPU_CAVIUM_OCTEON -# define smp_mb() __sync() -# define smp_rmb() barrier() -# define smp_wmb() __syncw() +# define __smp_mb() __sync() +# define __smp_rmb() barrier() +# define __smp_wmb() __syncw() # else -# define smp_mb() __asm__ __volatile__("sync" : : :"memory") -# define smp_rmb() __asm__ __volatile__("sync" : : :"memory") -# define smp_wmb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_mb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_rmb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_wmb() __asm__ __volatile__("sync" : : :"memory") # endif #else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() +#define __smp_mb() barrier() +#define __smp_rmb() barrier() +#define __smp_wmb() barrier() #endif #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP) @@ -111,6 +111,7 @@ #ifdef CONFIG_CPU_CAVIUM_OCTEON #define smp_mb__before_llsc() smp_wmb() +#define __smp_mb__before_llsc() __smp_wmb() /* Cause previous writes to become visible on all CPUs as soon as possible */ #define nudge_writes() __asm__ __volatile__(".set push\n\t" \ ".set arch=octeon\n\t" \ @@ -118,11 +119,12 @@ ".set pop" : : : "memory") #else #define smp_mb__before_llsc() smp_llsc_mb() +#define __smp_mb__before_llsc() smp_llsc_mb() #define nudge_writes() mb() #endif -#define smp_mb__before_atomic() smp_mb__before_llsc() -#define smp_mb__after_atomic() smp_llsc_mb() +#define __smp_mb__before_atomic() __smp_mb__before_llsc() +#define __smp_mb__after_atomic() smp_llsc_mb() #include -- cgit v1.2.3 From 82b44496abd91781ed4120c3b0c1a3d111c3e28e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: s390: define __smp_xxx This defines __smp_xxx barriers for s390, for use by virtualization. Some smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: smp_mb, smp_rmb and smp_wmb are defined as full barriers unconditionally on this architecture. Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Martin Schwidefsky Acked-by: Peter Zijlstra (Intel) --- arch/s390/include/asm/barrier.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index c358c31a7f07..fbd25b299c8b 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -26,18 +26,21 @@ #define wmb() barrier() #define dma_rmb() mb() #define dma_wmb() mb() -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() - -#define smp_store_release(p, v) \ +#define __smp_mb() mb() +#define __smp_rmb() rmb() +#define __smp_wmb() wmb() +#define smp_mb() __smp_mb() +#define smp_rmb() __smp_rmb() +#define smp_wmb() __smp_wmb() + +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ -- cgit v1.2.3 From 90a3ccb0be538a914e6a5c51ae919762261563ad Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: sh: define __smp_xxx, fix smp_store_mb for !SMP sh variant of smp_store_mb() calls xchg() on !SMP which is stronger than implied by both the name and the documentation. define __smp_store_mb instead: code in asm-generic/barrier.h will then define smp_store_mb correctly depending on CONFIG_SMP. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/sh/include/asm/barrier.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index bf91037db4e0..f887c6465a82 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h @@ -32,7 +32,8 @@ #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") #endif -#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) +#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) +#define smp_store_mb(var, value) __smp_store_mb(var, value) #include -- cgit v1.2.3 From 45d9b859411cb6d4dccc4e488336160acf9a6df5 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: sparc: define __smp_xxx This defines __smp_xxx barriers for sparc, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: David S. 
Miller Acked-by: Peter Zijlstra (Intel) --- arch/sparc/include/asm/barrier_64.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 26c3f7247f2d..c9f6ee64f41d 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -37,14 +37,14 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ @@ -52,8 +52,8 @@ do { \ ___p1; \ }) -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.2.3 From d39886a7d31e0182f8b7e3b0e953e636b0af96fd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: tile: define __smp_xxx This defines __smp_xxx barriers for tile, for use by virtualization. Some smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: for 32 bit, keep smp_mb__after_atomic around since it's faster than the generic implementation. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/tile/include/asm/barrier.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index 96a42ae79f4d..d55222806c2f 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h @@ -79,11 +79,12 @@ mb_incoherent(void) * But after the word is updated, the routine issues an "mf" before returning, * and since it's a function call, we don't even need a compiler barrier. */ -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() do { } while (0) +#define __smp_mb__before_atomic() __smp_mb() +#define __smp_mb__after_atomic() do { } while (0) +#define smp_mb__after_atomic() __smp_mb__after_atomic() #else /* 64 bit */ -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() +#define __smp_mb__before_atomic() __smp_mb() +#define __smp_mb__after_atomic() __smp_mb() #endif #include -- cgit v1.2.3 From 1ce790913b6ce67cd85235083aeb67898bad1a1f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: xtensa: define __smp_xxx This defines __smp_xxx barriers for xtensa, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/xtensa/include/asm/barrier.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index 5b88774c75ab..956596e4d437 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h @@ -13,8 +13,8 @@ #define rmb() barrier() #define wmb() mb() -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.2.3 From 1638fb72070f8faf2ac0787fafbb839d0c859d5b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: x86: define __smp_xxx This defines __smp_xxx barriers for x86, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner --- arch/x86/include/asm/barrier.h | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index cc4c2a77bd01..a584e1c50918 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -31,17 +31,10 @@ #endif #define dma_wmb() barrier() -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() dma_rmb() -#define smp_wmb() barrier() -#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) -#else /* !SMP */ -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) -#endif /* SMP */ +#define __smp_mb() mb() +#define __smp_rmb() dma_rmb() +#define __smp_wmb() barrier() +#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) #if defined(CONFIG_X86_PPRO_FENCE) @@ -50,31 +43,31 @@ * model and we should fall back to full barriers. */ -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + __smp_mb(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + __smp_mb(); \ ___p1; \ }) #else /* regular x86 TSO memory ordering */ -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ @@ -85,8 +78,8 @@ do { \ #endif /* Atomic operations are already serializing on x86 */ -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.2.3 From 3226aad81aa670015a59e51458a0deb2d3bcb600 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 7 Jan 2016 17:54:54 +0200 Subject: sh: support 1 and 2 byte xchg This completes the xchg implementation for sh architecture. 
Note: The llsc variant is tricky since this only supports 4 byte atomics, the existing implementation of 1 byte xchg is wrong: we need to do a 4 byte cmpxchg and retry if any bytes changed meanwhile. Write this in C for clarity. Suggested-by: Rich Felker Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- arch/sh/include/asm/cmpxchg-grb.h | 22 +++++++++++++++ arch/sh/include/asm/cmpxchg-irq.h | 11 ++++++++ arch/sh/include/asm/cmpxchg-llsc.h | 58 +++++++++++++++++++++++--------------- arch/sh/include/asm/cmpxchg.h | 3 ++ 4 files changed, 72 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h index f848dec9e483..2ed557b31bd9 100644 --- a/arch/sh/include/asm/cmpxchg-grb.h +++ b/arch/sh/include/asm/cmpxchg-grb.h @@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) return retval; } +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + unsigned long retval; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN */ + " mov.w @%1, %0 \n\t" /* load old value */ + " extu.w %0, %0 \n\t" /* extend as unsigned */ + " mov.w %2, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (retval), + "+r" (m), + "+r" (val) /* inhibit r15 overloading */ + : + : "memory" , "r0", "r1"); + + return retval; +} + static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) { unsigned long retval; diff --git a/arch/sh/include/asm/cmpxchg-irq.h b/arch/sh/include/asm/cmpxchg-irq.h index bd11f630414a..f88877257171 100644 --- a/arch/sh/include/asm/cmpxchg-irq.h +++ b/arch/sh/include/asm/cmpxchg-irq.h @@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) return retval; } +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + unsigned long flags, retval; + + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); + return retval; +} + static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) { unsigned long flags, retval; diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h index 47136661a203..e754794e282f 100644 --- a/arch/sh/include/asm/cmpxchg-llsc.h +++ b/arch/sh/include/asm/cmpxchg-llsc.h @@ -1,6 +1,9 @@ #ifndef __ASM_SH_CMPXCHG_LLSC_H #define __ASM_SH_CMPXCHG_LLSC_H +#include +#include + static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) { unsigned long retval; @@ -22,29 +25,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) return retval; } -static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) -{ - unsigned long retval; - unsigned long tmp; - - __asm__ __volatile__ ( - "1: \n\t" - "movli.l @%2, %0 ! 
xchg_u8 \n\t" - "mov %0, %1 \n\t" - "mov %3, %0 \n\t" - "movco.l %0, @%2 \n\t" - "bf 1b \n\t" - "synco \n\t" - : "=&z"(tmp), "=&r" (retval) - : "r" (m), "r" (val & 0xff) - : "t", "memory" - ); - - return retval; -} - static inline unsigned long -__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) +__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new) { unsigned long retval; unsigned long tmp; @@ -68,4 +50,36 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) return retval; } +static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size) +{ + int off = (unsigned long)ptr % sizeof(u32); + volatile u32 *p = ptr - off; +#ifdef __BIG_ENDIAN + int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE; +#else + int bitoff = off * BITS_PER_BYTE; +#endif + u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; + u32 oldv, newv; + u32 ret; + + do { + oldv = READ_ONCE(*p); + ret = (oldv & bitmask) >> bitoff; + newv = (oldv & ~bitmask) | (x << bitoff); + } while (__cmpxchg_u32(p, oldv, newv) != oldv); + + return ret; +} + +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + +static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + #endif /* __ASM_SH_CMPXCHG_LLSC_H */ diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h index 85c97b188d71..5225916c1057 100644 --- a/arch/sh/include/asm/cmpxchg.h +++ b/arch/sh/include/asm/cmpxchg.h @@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void); case 4: \ __xchg__res = xchg_u32(__xchg_ptr, x); \ break; \ + case 2: \ + __xchg__res = xchg_u16(__xchg_ptr, x); \ + break; \ case 1: \ __xchg__res = xchg_u8(__xchg_ptr, x); \ break; \ -- cgit v1.2.3 From 9e3f84ce416663c84a191cb3ead300fc1a4adadc Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 8 Jan 2016 09:23:58 +0200 Subject: sh: move xchg_cmpxchg to a header by itself Looks like future sh variants will support a 4-byte cas which will be used to implement 1 and 2 byte xchg. This is exactly what we do for llsc now, move the portable part of the code into a separate header so it's easy to reuse. Suggested-by: Rich Felker Signed-off-by: Michael S. 
Tsirkin Acked-by: Peter Zijlstra (Intel) --- arch/sh/include/asm/cmpxchg-llsc.h | 35 +------------------------- arch/sh/include/asm/cmpxchg-xchg.h | 51 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 34 deletions(-) create mode 100644 arch/sh/include/asm/cmpxchg-xchg.h (limited to 'arch') diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h index e754794e282f..fcfd32271bff 100644 --- a/arch/sh/include/asm/cmpxchg-llsc.h +++ b/arch/sh/include/asm/cmpxchg-llsc.h @@ -1,9 +1,6 @@ #ifndef __ASM_SH_CMPXCHG_LLSC_H #define __ASM_SH_CMPXCHG_LLSC_H -#include -#include - static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) { unsigned long retval; @@ -50,36 +47,6 @@ __cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new) return retval; } -static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size) -{ - int off = (unsigned long)ptr % sizeof(u32); - volatile u32 *p = ptr - off; -#ifdef __BIG_ENDIAN - int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE; -#else - int bitoff = off * BITS_PER_BYTE; -#endif - u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; - u32 oldv, newv; - u32 ret; - - do { - oldv = READ_ONCE(*p); - ret = (oldv & bitmask) >> bitoff; - newv = (oldv & ~bitmask) | (x << bitoff); - } while (__cmpxchg_u32(p, oldv, newv) != oldv); - - return ret; -} - -static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) -{ - return __xchg_cmpxchg(m, val, sizeof *m); -} - -static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) -{ - return __xchg_cmpxchg(m, val, sizeof *m); -} +#include #endif /* __ASM_SH_CMPXCHG_LLSC_H */ diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h new file mode 100644 index 000000000000..7219719c23a3 --- /dev/null +++ b/arch/sh/include/asm/cmpxchg-xchg.h @@ -0,0 +1,51 @@ +#ifndef __ASM_SH_CMPXCHG_XCHG_H +#define __ASM_SH_CMPXCHG_XCHG_H + +/* + * Copyright (C) 2016 Red Hat, Inc. + * Author: Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * file "COPYING" in the main directory of this archive for more details. + */ +#include +#include + +/* + * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg. + * Note: this header isn't self-contained: before including it, __cmpxchg_u32 + * must be defined first. + */ +static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size) +{ + int off = (unsigned long)ptr % sizeof(u32); + volatile u32 *p = ptr - off; +#ifdef __BIG_ENDIAN + int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE; +#else + int bitoff = off * BITS_PER_BYTE; +#endif + u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; + u32 oldv, newv; + u32 ret; + + do { + oldv = READ_ONCE(*p); + ret = (oldv & bitmask) >> bitoff; + newv = (oldv & ~bitmask) | (x << bitoff); + } while (__cmpxchg_u32(p, oldv, newv) != oldv); + + return ret; +} + +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + +static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + +#endif /* __ASM_SH_CMPXCHG_XCHG_H */ -- cgit v1.2.3 From a677f4869576eb177570ffee68598d2202de030f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 6 Jan 2016 09:13:14 +0200 Subject: s390: use generic memory barriers The s390 kernel is SMP to 99.99%, we just didn't bother with a non-smp variant for the memory-barriers. 
If the generic header is used we'd get the non-smp version for free. It will save a small amount of text space for CONFIG_SMP=n. Suggested-by: Martin Schwidefsky Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- arch/s390/include/asm/barrier.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index fbd25b299c8b..4d26fa43ec90 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -29,9 +29,6 @@ #define __smp_mb() mb() #define __smp_rmb() rmb() #define __smp_wmb() wmb() -#define smp_mb() __smp_mb() -#define smp_rmb() __smp_rmb() -#define smp_wmb() __smp_wmb() #define __smp_store_release(p, v) \ do { \ -- cgit v1.2.3 From 779a6a36961b50cd154da5705d9e6508f819cc4e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 10 Jan 2016 13:19:38 +0200 Subject: s390: more efficient smp barriers As per: lkml.kernel.org/r/20150921112252.3c2937e1@mschwide atomics imply a barrier on s390, so s390 should change smp_mb__before_atomic and smp_mb__after_atomic to barrier() instead of smp_mb() and hence should not use the generic versions. Suggested-by: Peter Zijlstra Suggested-by: Martin Schwidefsky Signed-off-by: Michael S. Tsirkin Acked-by: Martin Schwidefsky Acked-by: Peter Zijlstra (Intel) --- arch/s390/include/asm/barrier.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 4d26fa43ec90..5c8db3ce61c8 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -45,6 +45,9 @@ do { \ ___p1; \ }) +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + #include #endif /* __ASM_BARRIER_H */ -- cgit v1.2.3
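Every patch in this series funnels the architectures into include/asm-generic/barrier.h, but the generic header itself never appears in the log above. The following is a condensed, illustrative sketch of the pattern it implements (not the verbatim upstream header): an architecture supplies only the primitives it actually needs (mb(), __smp_mb(), __smp_store_mb(), ...) and the generic header fills in the rest, deriving the public smp_*() API from the __smp_*() primitives on CONFIG_SMP builds while collapsing it to compiler barriers on UP. That is what allows the per-arch smp_*() definitions to be deleted, and why the __smp_*() names are suitable for virtualization code that always needs SMP-strength ordering regardless of CONFIG_SMP.

/*
 * Condensed sketch of the asm-generic/barrier.h pattern (illustrative,
 * not the verbatim header).  Assumes barrier() and WRITE_ONCE() from
 * <linux/compiler.h>.
 */

/* Fallback for architectures that do not define the mandatory barrier. */
#ifndef mb
#define mb()	barrier()
#endif

/* SMP primitives default to the mandatory barrier... */
#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)	do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

/*
 * ...and the public smp_*() API is derived from the __smp_*() primitives
 * on SMP builds, but collapses to a compiler barrier on UP, which is what
 * lets the per-arch smp_*() definitions removed in the patches above be
 * dropped without changing behaviour.
 */
#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_store_mb
#define smp_store_mb(var, value)	__smp_store_mb(var, value)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_store_mb
#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#endif	/* CONFIG_SMP */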