From f4c6b6bc5a4fc8d607f2d89369008c85a3a12a8b Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 17 Sep 2009 02:25:05 +0200
Subject: MIPS: Consolidate all CONFIG_CPU_HAS_LLSC use in a single C file.

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/system.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'arch/mips/include/asm/system.h')

diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index cd30f83235bb..a2e9239b45aa 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -63,11 +63,23 @@ do { \
 #define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
 #endif

+#ifdef CONFIG_CPU_HAS_LLSC
+#define __clear_software_ll_bit() do { } while (0)
+#else
+extern unsigned long ll_bit;
+
+#define __clear_software_ll_bit() \
+do { \
+        ll_bit = 0; \
+} while (0)
+#endif
+
 #define switch_to(prev, next, last) \
 do { \
         __mips_mt_fpaff_switch_to(prev); \
         if (cpu_has_dsp) \
                 __save_dsp(prev); \
+        __clear_software_ll_bit(); \
         (last) = resume(prev, next, task_thread_info(next)); \
 } while (0)

-- cgit v1.2.3

From f1e39a4a616cd9981a9decfd5332fd07a01abb8b Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 17 Sep 2009 02:25:05 +0200
Subject: MIPS: Rewrite sysmips(MIPS_ATOMIC_SET, ...) in C with inline assembler

This way it doesn't have to use CONFIG_CPU_HAS_LLSC anymore.

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/system.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/mips/include/asm/system.h')

diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index a2e9239b45aa..23f68b40d4bb 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -32,6 +32,9 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);

 struct task_struct;

+extern unsigned int ll_bit;
+extern struct task_struct *ll_task;
+
 #ifdef CONFIG_MIPS_MT_FPAFF

 /*
-- cgit v1.2.3

From 43e6ae6d9f08304682294c14c6b7f2b2441668e7 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 17 Sep 2009 02:25:05 +0200
Subject: MIPS: Rewrite clearing of ll_bit on context switch in C

This also means there is now only one implementation left instead of three.

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/system.h | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

(limited to 'arch/mips/include/asm/system.h')

diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index 23f68b40d4bb..cc7262ff0765 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -66,16 +66,11 @@ do { \
 #define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
 #endif

-#ifdef CONFIG_CPU_HAS_LLSC
-#define __clear_software_ll_bit() do { } while (0)
-#else
-extern unsigned long ll_bit;
-
 #define __clear_software_ll_bit() \
 do { \
-        ll_bit = 0; \
+        if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
+                ll_bit = 0; \
 } while (0)
-#endif

 #define switch_to(prev, next, last) \
 do { \
-- cgit v1.2.3

From b791d1193af9772040e592d5aa161790f800b762 Mon Sep 17 00:00:00 2001
From: David Daney
Date: Mon, 13 Jul 2009 11:15:19 -0700
Subject: MIPS: Allow kernel use of LL/SC to be separate from the presence of LL/SC.

On some CPUs, it is more efficient to disable and enable interrupts in the
kernel rather than use ll/sc for atomic operations.  But if we were to set
cpu_has_llsc to false, we would break the userspace futex interface (in
asm/futex.h).

We separate the two concepts with a new predicate, kernel_uses_llsc, that
lets us disable the kernel's use of ll/sc while still allowing the futex
code to use it.

There were also a couple of cases in bitops.h where we were using ll/sc
unconditionally even when cpu_has_llsc was false.

Signed-off-by: David Daney
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/system.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/mips/include/asm/system.h')

diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index cc7262ff0765..fcf5f98d90cc 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -94,7 +94,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
         __u32 retval;

-        if (cpu_has_llsc && R10000_LLSC_WAR) {
+        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                 unsigned long dummy;

                 __asm__ __volatile__(
@@ -109,7 +109,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                 : "R" (*m), "Jr" (val)
                 : "memory");
-        } else if (cpu_has_llsc) {
+        } else if (kernel_uses_llsc) {
                 unsigned long dummy;

                 __asm__ __volatile__(
@@ -146,7 +146,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 {
         __u64 retval;

-        if (cpu_has_llsc && R10000_LLSC_WAR) {
+        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                 unsigned long dummy;

                 __asm__ __volatile__(
@@ -159,7 +159,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                 : "R" (*m), "Jr" (val)
                 : "memory");
-        } else if (cpu_has_llsc) {
+        } else if (kernel_uses_llsc) {
                 unsigned long dummy;

                 __asm__ __volatile__(
-- cgit v1.2.3
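
The ll/sc loops that the last patch touches implement an atomic exchange: a
load-linked (ll) read of the word, followed by a store-conditional (sc) that
only succeeds if no other CPU wrote the word in between, retried until the sc
succeeds.  Below is a rough, self-contained C11 sketch of the same semantics,
not the kernel's implementation (which uses the MIPS assembler shown above, or
interrupt disabling on CPUs without ll/sc); the helper name is made up for
illustration.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Sketch only: atomically replace *m with val and return the previous
 * value.  compare_exchange_weak may fail spuriously, much as sc fails
 * when the reservation set up by ll has been lost, so retry in a loop.
 */
static unsigned int xchg_u32_sketch(_Atomic unsigned int *m, unsigned int val)
{
        unsigned int old = atomic_load_explicit(m, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(m, &old, val,
                                                      memory_order_acq_rel,
                                                      memory_order_relaxed))
                ;       /* 'old' now holds the freshly observed value */

        return old;
}

int main(void)
{
        _Atomic unsigned int word = 42;
        unsigned int prev = xchg_u32_sketch(&word, 7);

        printf("previous value %u, new value %u\n", prev, atomic_load(&word));
        return 0;
}

C11 also provides atomic_exchange_explicit(), which does this in a single
call; the loop is written out only to mirror the ll/sc retry structure.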
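
The 43e6ae6d change above leans on GCC's __builtin_constant_p(): on
configurations where cpu_has_llsc is a compile-time constant 1, the whole
if-statement in __clear_software_ll_bit() folds away, while platforms that
probe for ll/sc at run time keep the test and the store to ll_bit.  The
following is a minimal stand-alone sketch of that pattern; HAVE_FEATURE,
probe_feature() and sw_ll_bit are made-up stand-ins for cpu_has_llsc and
ll_bit, not names from the patches.

#include <stdio.h>

static unsigned int sw_ll_bit;                  /* stand-in for ll_bit */

#ifdef FEATURE_KNOWN_AT_BUILD
#define HAVE_FEATURE 1                          /* compile-time constant */
#else
static int probe_feature(void) { return 0; }    /* pretend run-time probe */
#define HAVE_FEATURE probe_feature()
#endif

/*
 * If HAVE_FEATURE is a constant known to be non-zero, the condition is
 * false at compile time and the store is optimized out entirely; if it
 * is a run-time expression, __builtin_constant_p() returns 0 and the
 * run-time test decides.  (GCC/clang extension.)
 */
#define clear_sw_ll_bit() \
do { \
        if (!__builtin_constant_p(HAVE_FEATURE) || !HAVE_FEATURE) \
                sw_ll_bit = 0; \
} while (0)

int main(void)
{
        sw_ll_bit = 1;
        clear_sw_ll_bit();
        printf("sw_ll_bit = %u\n", sw_ll_bit);
        return 0;
}

Built with -DFEATURE_KNOWN_AT_BUILD the store disappears and the program
prints 1; built without it, the run-time path clears the flag and prints 0,
mirroring how only CPUs lacking hardware ll/sc need to clear the software
ll_bit on every context switch.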