From 547efeadd42a3c75e41e33c0637cba100fc18289 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:19 +0200
Subject: x86/mtrr: Remove get_online_cpus() from mtrr_save_state()

mtrr_save_state() is invoked from native_cpu_up(), which is in the
context of a CPU hotplug operation, and therefore calling
get_online_cpus() is pointless.

While this works in the current get_online_cpus() implementation, it
prevents converting the hotplug locking to percpu rwsems.

Remove it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Tested-by: Paul E. McKenney
Acked-by: Ingo Molnar
Cc: Peter Zijlstra
Cc: Steven Rostedt
Link: http://lkml.kernel.org/r/20170524081547.651378834@linutronix.de
---
 arch/x86/kernel/cpu/mtrr/main.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 2bce84d91c2b..c5bb63be4ba1 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -807,10 +807,8 @@ void mtrr_save_state(void)
 	if (!mtrr_enabled())
 		return;
 
-	get_online_cpus();
 	first_cpu = cpumask_first(cpu_online_mask);
 	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
-	put_online_cpus();
 }
 
 void set_mtrr_aps_delayed_init(void)
--
cgit v1.2.3

From 419af25fa4d0974fd758a668c08c369c19392a47 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:21 +0200
Subject: KVM/PPC/Book3S HV: Use cpuhp_setup_state_nocalls_cpuslocked()

kvmppc_alloc_host_rm_ops() holds get_online_cpus() while invoking
cpuhp_setup_state_nocalls(). cpuhp_setup_state_nocalls() invokes
get_online_cpus() as well. This is correct, but prevents the
conversion of the hotplug locking to a percpu rwsem.

Use cpuhp_setup_state_nocalls_cpuslocked() to avoid the nested call.
Convert *_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Paul E. McKenney
Cc: kvm@vger.kernel.org
Cc: Peter Zijlstra
Cc: Benjamin Herrenschmidt
Cc: Steven Rostedt
Cc: kvm-ppc@vger.kernel.org
Cc: Michael Ellerman
Cc: linuxppc-dev@lists.ozlabs.org
Cc: Alexander Graf
Link: http://lkml.kernel.org/r/20170524081547.809616236@linutronix.de
---
 arch/powerpc/kvm/book3s_hv.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4fd57d9..48a6bd160011 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3317,7 +3317,7 @@ void kvmppc_alloc_host_rm_ops(void)
 		return;
 	}
 
-	get_online_cpus();
+	cpus_read_lock();
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
 		if (!cpu_online(cpu))
@@ -3339,17 +3339,17 @@ void kvmppc_alloc_host_rm_ops(void)
 	l_ops = (unsigned long) ops;
 
 	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
-		put_online_cpus();
+		cpus_read_unlock();
 		kfree(ops->rm_core);
 		kfree(ops);
 		return;
 	}
 
-	cpuhp_setup_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE,
-				  "ppc/kvm_book3s:prepare",
-				  kvmppc_set_host_core,
-				  kvmppc_clear_host_core);
-	put_online_cpus();
+	cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
+					     "ppc/kvm_book3s:prepare",
+					     kvmppc_set_host_core,
+					     kvmppc_clear_host_core);
+	cpus_read_unlock();
 }
 
 void kvmppc_free_host_rm_ops(void)
--
cgit v1.2.3
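[ Editor's note: the patches in this series all apply one convention: an
  API that takes the CPU hotplug lock itself grows a *_cpuslocked()
  variant that expects the caller to hold the lock already. The userspace
  C sketch below is a hypothetical model of that convention, not kernel
  code: register_state() and register_state_cpuslocked() are invented
  names, pthread_rwlock_t merely stands in for the kernel's percpu rwsem,
  and lockdep's held-lock tracking is reduced to a single flag. ]

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-in for the kernel's cpu_hotplug_lock. After this series it is a
 * percpu rwsem, whose read side must not nest inside itself. */
static pthread_rwlock_t cpu_hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cpus_read_held;			/* toy lockdep state */

static void cpus_read_lock(void)
{
	pthread_rwlock_rdlock(&cpu_hotplug_lock);
	cpus_read_held = 1;
}

static void cpus_read_unlock(void)
{
	cpus_read_held = 0;
	pthread_rwlock_unlock(&cpu_hotplug_lock);
}

/* Models lockdep_assert_cpus_held(). */
static void lockdep_assert_cpus_held(void)
{
	assert(cpus_read_held);
}

/* Hypothetical _cpuslocked variant: does the work, takes no lock. */
static void register_state_cpuslocked(void)
{
	lockdep_assert_cpus_held();
	printf("state registered with hotplug lock held\n");
}

/* Hypothetical plain variant: wraps the locked one. This is the split
 * the series applies to cpuhp_setup_state_nocalls() and friends. */
static void register_state(void)
{
	cpus_read_lock();
	register_state_cpuslocked();
	cpus_read_unlock();
}

int main(void)
{
	register_state();		/* caller does not hold the lock */

	cpus_read_lock();		/* caller already holds it ...   */
	register_state_cpuslocked();	/* ... so it must not re-take it */
	cpus_read_unlock();
	return 0;
}

[ Why the nesting matters: with a rwsem, a re-acquiring reader can block
  behind a queued writer, so a nested cpus_read_lock() can deadlock
  against a concurrent CPU hotplug operation once the lock becomes a
  percpu rwsem. ]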
From 04b247c2ebdd6ba1c46c7c22546229a89760b43a Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:24 +0200
Subject: perf/x86/intel/cqm: Use cpuhp_setup_state_cpuslocked()

intel_cqm_init() holds get_online_cpus() while registering the hotplug
callbacks. cpuhp_setup_state() invokes get_online_cpus() as well. This
is correct, but prevents the conversion of the hotplug locking to a
percpu rwsem.

Use cpuhp_setup_state_cpuslocked() to avoid the nested call. Convert
*_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Paul E. McKenney
Cc: Fenghua Yu
Cc: Peter Zijlstra
Cc: Steven Rostedt
Link: http://lkml.kernel.org/r/20170524081548.075604046@linutronix.de
---
 arch/x86/events/intel/cqm.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8c00dc09a5d2..2521f771f2f5 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1682,7 +1682,7 @@ static int __init intel_cqm_init(void)
 	 *
 	 * Also, check that the scales match on all cpus.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct cpuinfo_x86 *c = &cpu_data(cpu);
 
@@ -1746,14 +1746,14 @@ static int __init intel_cqm_init(void)
 	 * Setup the hot cpu notifier once we are sure cqm
 	 * is enabled to avoid notifier leak.
 	 */
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
-			  "perf/x86/cqm:starting",
-			  intel_cqm_cpu_starting, NULL);
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "perf/x86/cqm:online",
-			  NULL, intel_cqm_cpu_exit);
-
+	cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_STARTING,
+				     "perf/x86/cqm:starting",
+				     intel_cqm_cpu_starting, NULL);
+	cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_ONLINE,
+				     "perf/x86/cqm:online",
+				     NULL, intel_cqm_cpu_exit);
 out:
-	put_online_cpus();
+	cpus_read_unlock();
 
 	if (ret) {
 		kfree(str);
--
cgit v1.2.3
From fe2a5cd8aa038e2b02fda983afc2083e94c04b4f Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:25 +0200
Subject: ARM/hw_breakpoint: Use cpuhp_setup_state_cpuslocked()

arch_hw_breakpoint_init() holds get_online_cpus() while registering the
hotplug callbacks. cpuhp_setup_state() invokes get_online_cpus() as
well. This is correct, but prevents the conversion of the hotplug
locking to a percpu rwsem.

Use cpuhp_setup_state_cpuslocked() to avoid the nested call. Convert
*_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Mark Rutland
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Will Deacon
Cc: Steven Rostedt
Cc: Russell King
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081548.170940729@linutronix.de
---
 arch/arm/kernel/hw_breakpoint.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index be3b3fbd382f..63cb4c7c6593 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1090,7 +1090,7 @@ static int __init arch_hw_breakpoint_init(void)
 	 * driven low on this core and there isn't an architected way to
 	 * determine that.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	register_undef_hook(&debug_reg_hook);
 
 	/*
@@ -1098,15 +1098,16 @@ static int __init arch_hw_breakpoint_init(void)
 	 * assume that a halting debugger will leave the world in a nice state
 	 * for us.
 	 */
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
-				dbg_reset_online, NULL);
+	ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
+					   "arm/hw_breakpoint:online",
+					   dbg_reset_online, NULL);
 	unregister_undef_hook(&debug_reg_hook);
 	if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
 		core_num_brps = 0;
 		core_num_wrps = 0;
 		if (ret > 0)
 			cpuhp_remove_state_nocalls(ret);
-		put_online_cpus();
+		cpus_read_unlock();
 		return 0;
 	}
 
@@ -1124,7 +1125,7 @@ static int __init arch_hw_breakpoint_init(void)
 			TRAP_HWBKPT, "watchpoint debug exception");
 	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
 			TRAP_HWBKPT, "breakpoint debug exception");
-	put_online_cpus();
+	cpus_read_unlock();
 
 	/* Register PM notifiers. */
 	pm_init();
--
cgit v1.2.3

From 2337e879e8805a630b418f3e73a98084d4724b83 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:26 +0200
Subject: s390/kernel: Use stop_machine_cpuslocked()

stp_work_fn() holds get_online_cpus() while invoking stop_machine().
stop_machine() invokes get_online_cpus() as well. This is correct, but
prevents the conversion of the hotplug locking to a percpu rwsem.

Use stop_machine_cpuslocked() to avoid the nested call. Convert
*_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Heiko Carstens
Cc: Paul E. McKenney
Cc: linux-s390@vger.kernel.org
Cc: Peter Zijlstra
Cc: Steven Rostedt
Cc: David Hildenbrand
Cc: Martin Schwidefsky
Link: http://lkml.kernel.org/r/20170524081548.250203087@linutronix.de
---
 arch/s390/kernel/time.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c3a52f9a69a0..192efdfac918 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -636,10 +636,10 @@ static void stp_work_fn(struct work_struct *work)
 		goto out_unlock;
 
 	memset(&stp_sync, 0, sizeof(stp_sync));
-	get_online_cpus();
+	cpus_read_lock();
 	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
-	stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
-	put_online_cpus();
+	stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
+	cpus_read_unlock();
 
 	if (!check_sync_clock())
 		/*
--
cgit v1.2.3
From f9a69931c3959940538884d5962b770c3db75df5 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:27 +0200
Subject: powerpc/powernv: Use stop_machine_cpuslocked()

set_subcores_per_core() holds get_online_cpus() while invoking
stop_machine(). stop_machine() invokes get_online_cpus() as well. This
is correct, but prevents the conversion of the hotplug locking to a
percpu rwsem.

Use stop_machine_cpuslocked() to avoid the nested call. Convert
*_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Benjamin Herrenschmidt
Cc: Steven Rostedt
Cc: Michael Ellerman
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20170524081548.331016542@linutronix.de
---
 arch/powerpc/platforms/powernv/subcore.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef11136f..e6230f104dd9 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -348,7 +348,7 @@ static int set_subcores_per_core(int new_mode)
 		state->master = 0;
 	}
 
-	get_online_cpus();
+	cpus_read_lock();
 
 	/* This cpu will update the globals before exiting stop machine */
 	this_cpu_ptr(&split_state)->master = 1;
@@ -356,9 +356,10 @@ static int set_subcores_per_core(int new_mode)
 	/* Ensure state is consistent before we call the other cpus */
 	mb();
 
-	stop_machine(cpu_update_split_mode, &new_mode, cpu_online_mask);
+	stop_machine_cpuslocked(cpu_update_split_mode, &new_mode,
+				cpu_online_mask);
 
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return 0;
 }
--
cgit v1.2.3

From 27d3b157fee0bad264eb745d5c547e2e0676f1a2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 May 2017 10:15:29 +0200
Subject: x86/perf: Drop EXPORT of perf_check_microcode

The only caller is the microcode update, which cannot be modular.

Drop the export.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Borislav Petkov
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Steven Rostedt
Cc: Borislav Petkov
Link: http://lkml.kernel.org/r/20170524081548.515204988@linutronix.de
---
 arch/x86/events/core.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 580b60f5ac83..ac650d57ebf7 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2224,7 +2224,6 @@ void perf_check_microcode(void)
 	if (x86_pmu.check_microcode)
 		x86_pmu.check_microcode();
 }
-EXPORT_SYMBOL_GPL(perf_check_microcode);
 
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
--
cgit v1.2.3
From 1ba143a5216fb148211160a0ecc1f8d3f92f06bb Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 24 May 2017 10:15:30 +0200
Subject: perf/x86/intel: Drop get_online_cpus() in intel_snb_check_microcode()

If intel_snb_check_microcode() is invoked via

  microcode_init -> perf_check_microcode -> intel_snb_check_microcode

then get_online_cpus() is invoked nested. This works with the current
implementation of get_online_cpus() but prevents converting it to a
percpu rwsem.

intel_snb_check_microcode() is also invoked from
intel_sandybridge_quirk() unprotected.

Drop get_online_cpus() from intel_snb_check_microcode() and add it to
intel_sandybridge_quirk() so both call sites are protected.

Convert *_online_cpus() to the new interfaces while at it.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Borislav Petkov
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Steven Rostedt
Cc: Borislav Petkov
Link: http://lkml.kernel.org/r/20170524081548.594862191@linutronix.de
---
 arch/x86/events/intel/core.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a6d91d4e37a1..b9174aacf42f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3410,12 +3410,10 @@ static void intel_snb_check_microcode(void)
 	int pebs_broken = 0;
 	int cpu;
 
-	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
 			break;
 	}
-	put_online_cpus();
 
 	if (pebs_broken == x86_pmu.pebs_broken)
 		return;
@@ -3488,7 +3486,9 @@ static bool check_msr(unsigned long msr, u64 mask)
 static __init void intel_sandybridge_quirk(void)
 {
 	x86_pmu.check_microcode = intel_snb_check_microcode;
+	cpus_read_lock();
 	intel_snb_check_microcode();
+	cpus_read_unlock();
 }
 
 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
@@ -4112,13 +4112,12 @@ static __init int fixup_ht_bug(void)
 
 	lockup_detector_resume();
 
-	get_online_cpus();
+	cpus_read_lock();
 
-	for_each_online_cpu(c) {
+	for_each_online_cpu(c)
 		free_excl_cntrs(c);
-	}
 
-	put_online_cpus();
+	cpus_read_unlock();
 
 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
 	return 0;
 }
--
cgit v1.2.3
From f2545b2d4ce13e068897ef60ae64dffe215f4152 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 May 2017 10:15:35 +0200
Subject: jump_label: Reorder hotplug lock and jump_label_lock

The conversion of the hotplug locking to a percpu rwsem unearthed lock
ordering issues all over the place.

The jump_label code has two issues:

 1) Nested get_online_cpus() invocations

 2) Ordering problems vs. the cpus rwsem and the jump_label_mutex

To cure these, the following lock order has been established:

  cpus_rwsem -> jump_label_lock -> text_mutex

Even if not all architectures need protection against CPU hotplug,
taking cpus_rwsem before jump_label_lock is now mandatory in code paths
which actually modify code and therefore need text_mutex protection.

Move the get_online_cpus() invocations into the core jump label code
and establish the proper lock order where required.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: "David S. Miller"
Cc: Paul E. McKenney
Cc: Chris Metcalf
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Steven Rostedt
Cc: Jason Baron
Cc: Ralf Baechle
Link: http://lkml.kernel.org/r/20170524081549.025830817@linutronix.de
---
 arch/mips/kernel/jump_label.c  | 2 --
 arch/sparc/kernel/jump_label.c | 2 --
 arch/tile/kernel/jump_label.c  | 2 --
 arch/x86/kernel/jump_label.c   | 2 --
 4 files changed, 8 deletions(-)

(limited to 'arch')

diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 3e586daa3a32..32e3168316cd 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
 		insn.word = 0; /* nop */
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
 		insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
 			   (unsigned long)insn_p + sizeof(*insn_p));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif /* HAVE_JUMP_LABEL */
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 07933b9e9ce0..93adde1ac166 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
 		val = 0x01000000;
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	*insn = val;
 	flushi(insn);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
index 07802d586988..93931a46625b 100644
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
 void arch_jump_label_transform(struct jump_entry *e,
 				enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 
 	__jump_label_transform(e, type);
 	flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index c37bd0f39c70..ab4f491da2a9 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	__jump_label_transform(entry, type, NULL, 0);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 static enum {
--
cgit v1.2.3
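[ Editor's note: the lock order established above deserves a minimal
  illustration. The sketch below is hypothetical userspace C, not kernel
  code: three toy locks named after cpus_rwsem, jump_label_lock and
  text_mutex, acquired in the one fixed order by every path that patches
  code. The fixed acquisition order is the point, not the lock types. ]

#include <pthread.h>
#include <stdio.h>

/* Toy models of the three locks in the established order. */
static pthread_rwlock_t cpus_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t jump_label_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t text_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical patching path obeying
 * cpus_rwsem -> jump_label_lock -> text_mutex. A path that took
 * jump_label_lock first, while a hotplug writer held cpus_rwsem and
 * waited on jump_label_lock, would be the classic ABBA deadlock this
 * ordering rule forbids. */
static void transform_jump_label(void)
{
	pthread_rwlock_rdlock(&cpus_rwsem);	/* 1: hotplug read side */
	pthread_mutex_lock(&jump_label_lock);	/* 2: subsystem lock    */
	pthread_mutex_lock(&text_mutex);	/* 3: innermost lock    */

	printf("code patched under the full lock chain\n");

	pthread_mutex_unlock(&text_mutex);
	pthread_mutex_unlock(&jump_label_lock);
	pthread_rwlock_unlock(&cpus_rwsem);
}

int main(void)
{
	transform_jump_label();
	return 0;
}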
From c23a465625e287c4deba0fdf5e8adc59cfd2a0b7 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 May 2017 10:15:37 +0200
Subject: arm64: Prevent cpu hotplug rwsem recursion

The text patching functions which are invoked from jump_label and
kprobes code are protected against cpu hotplug at the call sites.

Use stop_machine_cpuslocked() to avoid recursion on the cpu hotplug
rwsem. stop_machine_cpuslocked() contains a lockdep assertion to catch
any unprotected callers.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Paul E. McKenney
Cc: Mark Rutland
Cc: Peter Zijlstra
Cc: Catalin Marinas
Cc: Sebastian Siewior
Cc: Will Deacon
Cc: Steven Rostedt
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081549.197070135@linutronix.de
---
 arch/arm64/include/asm/insn.h | 1 -
 arch/arm64/kernel/insn.c      | 5 +++--
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 29cb2ca756f6..4214c38d016b 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -433,7 +433,6 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
-int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
 
 s32 aarch64_insn_adrp_get_offset(u32 insn);
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index b884a926a632..cd872133e88e 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -255,6 +255,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
 	return ret;
 }
 
+static
 int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
 {
 	struct aarch64_insn_patch patch = {
@@ -267,8 +268,8 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
 	if (cnt <= 0)
 		return -EINVAL;
 
-	return stop_machine(aarch64_insn_patch_text_cb, &patch,
-			    cpu_online_mask);
+	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+				       cpu_online_mask);
 }
 
 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
--
cgit v1.2.3
From 9489cc8f370be811f7e741a772bcce88b712272d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 May 2017 10:15:38 +0200
Subject: arm: Prevent hotplug rwsem recursion

The text patching functions which are invoked from jump_label and
kprobes code are protected against cpu hotplug at the call sites.

Use stop_machine_cpuslocked() to avoid recursion on the cpu hotplug
rwsem. stop_machine_cpuslocked() contains a lockdep assertion to catch
any unprotected callers.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Steven Rostedt
Cc: Russell King
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170524081549.275871311@linutronix.de
---
 arch/arm/kernel/patch.c        | 2 +-
 arch/arm/probes/kprobes/core.c | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 020560b2dcb7..a1a34722c655 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -124,5 +124,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
 		.insn = insn,
 	};
 
-	stop_machine(patch_text_stop_machine, &patch, NULL);
+	stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
 }
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index ad1f4e6a9e33..52d1cd14fda4 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -182,7 +182,8 @@ void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
 		.addr = addr,
 		.insn = insn,
 	};
-	stop_machine(__kprobes_remove_breakpoint, &p, cpu_online_mask);
+	stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
+				cpu_online_mask);
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
--
cgit v1.2.3
From 5d5dbc4ef27e72104dea6102e4d1a1bf5a8ed971 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 May 2017 10:15:39 +0200
Subject: s390: Prevent hotplug rwsem recursion

The text patching functions which are invoked from jump_label and
kprobes code are protected against cpu hotplug at the call sites.

Use stop_machine_cpuslocked() to avoid recursion on the cpu hotplug
rwsem. stop_machine_cpuslocked() contains a lockdep assertion to catch
any unprotected callers.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Heiko Carstens
Cc: Paul E. McKenney
Cc: linux-s390@vger.kernel.org
Cc: Peter Zijlstra
Cc: Sebastian Siewior
Cc: Steven Rostedt
Cc: Martin Schwidefsky
Link: http://lkml.kernel.org/r/20170524081549.354513406@linutronix.de
---
 arch/s390/kernel/jump_label.c | 2 +-
 arch/s390/kernel/kprobes.c    | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 6aa630a8d24f..262506cee4c3 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -93,7 +93,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	args.entry = entry;
 	args.type = type;
 
-	stop_machine(__sm_arch_jump_label_transform, &args, NULL);
+	stop_machine_cpuslocked(__sm_arch_jump_label_transform, &args, NULL);
 }
 
 void arch_jump_label_transform_static(struct jump_entry *entry,
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3d6a99746454..6842e4501e2e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -196,7 +196,7 @@ void arch_arm_kprobe(struct kprobe *p)
 {
 	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
 
-	stop_machine(swap_instruction, &args, NULL);
+	stop_machine_cpuslocked(swap_instruction, &args, NULL);
 }
 NOKPROBE_SYMBOL(arch_arm_kprobe);
 
@@ -204,7 +204,7 @@ void arch_disarm_kprobe(struct kprobe *p)
 {
 	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
 
-	stop_machine(swap_instruction, &args, NULL);
+	stop_machine_cpuslocked(swap_instruction, &args, NULL);
 }
 NOKPROBE_SYMBOL(arch_disarm_kprobe);
--
cgit v1.2.3

From 1b3b22507e0d45dedc6a54b26d56e0b8c4d36875 Mon Sep 17 00:00:00 2001
From: Tony Lindgren
Date: Fri, 16 Jun 2017 01:22:38 -0700
Subject: ARM/hw_breakpoint: Fix possible recursive locking for arch_hw_breakpoint_init

The recent change to use cpuhp_setup_state_cpuslocked() in commit
fe2a5cd8aa03 ("ARM/hw_breakpoint: Use cpuhp_setup_state_cpuslocked()")
missed converting the paired cpuhp_remove_state_nocalls() on the error
path to cpuhp_remove_state_nocalls_cpuslocked(). Now if
arch_hw_breakpoint_init() fails, we get "WARNING: possible recursive
locking detected" on the exit path.

Fixes: fe2a5cd8aa03 ("ARM/hw_breakpoint: Use cpuhp_setup_state_cpuslocked()")
Signed-off-by: Tony Lindgren
Acked-by: Sebastian Andrzej Siewior
Cc: Mark Rutland
Cc: linux-omap@vger.kernel.org
Cc: Peter Zijlstra
Cc: Will Deacon
Cc: Steven Rostedt
Cc: Russell King
Cc: "Paul E. McKenney"
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170616082238.15553-1-tony@atomide.com
Signed-off-by: Thomas Gleixner
---
 arch/arm/kernel/hw_breakpoint.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 63cb4c7c6593..af2a7f1e3103 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1106,7 +1106,7 @@ static int __init arch_hw_breakpoint_init(void)
 		core_num_brps = 0;
 		core_num_wrps = 0;
 		if (ret > 0)
-			cpuhp_remove_state_nocalls(ret);
+			cpuhp_remove_state_nocalls_cpuslocked(ret);
 		cpus_read_unlock();
 		return 0;
 	}
--
cgit v1.2.3
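[ Editor's note: the fix above shows the sharp edge of these conversions:
  every call reached while the hotplug lock is held, including error
  paths, must use the _cpuslocked variant. A hypothetical userspace C
  sketch of the rule, with setup/remove stand-ins for the cpuhp calls: ]

#include <stdio.h>

static void cpus_read_lock(void)   { puts("hotplug lock taken"); }
static void cpus_read_unlock(void) { puts("hotplug lock dropped"); }

/* Hypothetical stand-ins for cpuhp_setup_state_cpuslocked() and
 * cpuhp_remove_state_nocalls_cpuslocked(). */
static int setup_state_cpuslocked(void)
{
	return -1;			/* simulate a setup failure */
}

static void remove_state_cpuslocked(int state)
{
	printf("state %d removed, lock still held\n", state);
}

static int init(void)
{
	int ret;

	cpus_read_lock();
	ret = setup_state_cpuslocked();
	if (ret < 0)
		/* The error path runs under the same lock, so it must also
		 * use the _cpuslocked variant; calling the plain remove
		 * helper here would re-take the lock, which is exactly the
		 * recursive locking the fix above cures. */
		remove_state_cpuslocked(ret);
	cpus_read_unlock();
	return ret;
}

int main(void)
{
	return init() < 0 ? 1 : 0;
}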
From 3e401f7a2e5199151f735aee6a5c6b4776e6a35e Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann
Date: Tue, 20 Jun 2017 19:08:30 -0300
Subject: powerpc: Only obtain cpu_hotplug_lock if called by rtasd

Calling arch_update_cpu_topology from a CPU hotplug state machine
callback hits a deadlock because the function tries to get a read lock
on cpu_hotplug_lock while the state machine still holds a write lock on
it.

Since all callers of arch_update_cpu_topology except rtasd already hold
cpu_hotplug_lock, this patch changes the function to use
stop_machine_cpuslocked and creates a separate function for rtasd which
still tries to obtain the lock.

Michael Bringmann investigated the bug and provided a detailed analysis
of the deadlock on this previous RFC for an alternate solution (see the
patchwork link below).

Signed-off-by: Thiago Jung Bauermann
Signed-off-by: Thomas Gleixner
Acked-by: Michael Ellerman
Cc: John Allen
Cc: Michael Bringmann
Cc: Nathan Fontenot
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1497996510-4032-1-git-send-email-bauerman@linux.vnet.ibm.com
Link: https://patchwork.ozlabs.org/patch/771293/
---
 arch/powerpc/include/asm/topology.h |  6 ++++++
 arch/powerpc/kernel/rtasd.c         |  2 +-
 arch/powerpc/mm/numa.c              | 22 +++++++++++++++++++---
 3 files changed, 26 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..a2d36b7703ae 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -43,6 +43,7 @@ extern void __init dump_numa_cpu_topology(void);
 
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+extern int numa_update_cpu_topology(bool cpus_locked);
 
 #else
 
@@ -57,6 +58,11 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
 						 int nid)
 {
 }
+
+static inline int numa_update_cpu_topology(bool cpus_locked)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 3650732639ed..0f0b1b2f3b60 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -283,7 +283,7 @@ static void prrn_work_fn(struct work_struct *work)
 	 * the RTAS event.
 	 */
 	pseries_devicetree_update(-prrn_update_scope);
-	arch_update_cpu_topology();
+	numa_update_cpu_topology(false);
 }
 
 static DECLARE_WORK(prrn_work, prrn_work_fn);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 371792e4418f..b95c584ce19d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1311,8 +1311,10 @@ static int update_lookup_table(void *data)
 /*
  * Update the node maps and sysfs entries for each cpu whose home node
  * has changed. Returns 1 when the topology has changed, and 0 otherwise.
+ *
+ * cpus_locked says whether we already hold cpu_hotplug_lock.
  */
-int arch_update_cpu_topology(void)
+int numa_update_cpu_topology(bool cpus_locked)
 {
 	unsigned int cpu, sibling, changed = 0;
 	struct topology_update_data *updates, *ud;
@@ -1400,15 +1402,23 @@ int arch_update_cpu_topology(void)
 	if (!cpumask_weight(&updated_cpus))
 		goto out;
 
-	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+	if (cpus_locked)
+		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
+					&updated_cpus);
+	else
+		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
 
 	/*
 	 * Update the numa-cpu lookup table with the new mappings, even for
 	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
 	 */
-	stop_machine(update_lookup_table, &updates[0],
+	if (cpus_locked)
+		stop_machine_cpuslocked(update_lookup_table, &updates[0],
 					cpumask_of(raw_smp_processor_id()));
+	else
+		stop_machine(update_lookup_table, &updates[0],
+			     cpumask_of(raw_smp_processor_id()));
 
 	for (ud = &updates[0]; ud; ud = ud->next) {
 		unregister_cpu_under_node(ud->cpu, ud->old_nid);
@@ -1426,6 +1436,12 @@ out:
 	return changed;
 }
 
+int arch_update_cpu_topology(void)
+{
+	lockdep_assert_cpus_held();
+	return numa_update_cpu_topology(true);
+}
+
 static void topology_work_fn(struct work_struct *work)
 {
 	rebuild_sched_domains();
--
cgit v1.2.3