Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/armksyms.c         |  4
-rw-r--r--  arch/arm/kernel/calls.S            |  2
-rw-r--r--  arch/arm/kernel/devtree.c          |  4
-rw-r--r--  arch/arm/kernel/entry-armv.S       |  2
-rw-r--r--  arch/arm/kernel/entry-v7m.S        |  2
-rw-r--r--  arch/arm/kernel/etm.c              |  6
-rw-r--r--  arch/arm/kernel/head-nommu.S       |  4
-rw-r--r--  arch/arm/kernel/head.S             |  5
-rw-r--r--  arch/arm/kernel/io.c               | 35
-rw-r--r--  arch/arm/kernel/machine_kexec.c    | 17
-rw-r--r--  arch/arm/kernel/perf_event.c       |  4
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c   |  2
-rw-r--r--  arch/arm/kernel/process.c          |  7
-rw-r--r--  arch/arm/kernel/relocate_kernel.S  |  8
-rw-r--r--  arch/arm/kernel/setup.c            | 62
-rw-r--r--  arch/arm/kernel/sigreturn_codes.S  | 40
-rw-r--r--  arch/arm/kernel/smp.c              |  6
-rw-r--r--  arch/arm/kernel/stacktrace.c       |  2
-rw-r--r--  arch/arm/kernel/tcm.c              |  4
-rw-r--r--  arch/arm/kernel/topology.c         | 12
-rw-r--r--  arch/arm/kernel/traps.c            | 18
21 files changed, 166 insertions(+), 80 deletions(-)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 1f031ddd0667..85e664b6a5f1 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -35,6 +35,8 @@ extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __do_div64(void);
+extern void __bswapsi2(void);
+extern void __bswapdi2(void);
extern void __aeabi_idiv(void);
extern void __aeabi_idivmod(void);
@@ -114,6 +116,8 @@ EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__do_div64);
+EXPORT_SYMBOL(__bswapsi2);
+EXPORT_SYMBOL(__bswapdi2);
#ifdef CONFIG_AEABI
EXPORT_SYMBOL(__aeabi_idiv);
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index c6ca7e376773..166e945de832 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -389,6 +389,8 @@
CALL(sys_process_vm_writev)
CALL(sys_kcmp)
CALL(sys_finit_module)
+/* 380 */ CALL(sys_sched_setattr)
+ CALL(sys_sched_getattr)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 739c3dfc1da2..f751714d52c1 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -33,7 +33,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
- return alloc_bootmem_align(size, align);
+ return memblock_virt_alloc(size, align);
}
void __init arm_dt_memblock_reserve(void)
@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+ return phys_id == cpu_logical_map(cpu);
}
static const void * __init arch_get_next_mach(const char *const **match)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index b3fb8c9e1ff2..1879e8dd2acc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -451,9 +451,11 @@ __und_usr_thumb:
.arch armv6t2
#endif
2: ldrht r5, [r4]
+ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_fault_16 @ 16bit undefined instruction
3: ldrht r0, [r2]
+ARM_BE8(rev16 r0, r0) @ little endian instruction
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 52b26432c9a9..2260f1855820 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -14,8 +14,6 @@
#include <asm/thread_notify.h>
#include <asm/v7m.h>
-#include <mach/entry-macro.S>
-
#include "entry-header.S"
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 8ff0ecdc637f..131a6ab5f355 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -385,7 +385,6 @@ out:
return ret;
out_unmap:
- amba_set_drvdata(dev, NULL);
iounmap(t->etb_regs);
out_release:
@@ -398,8 +397,6 @@ static int etb_remove(struct amba_device *dev)
{
struct tracectx *t = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
iounmap(t->etb_regs);
t->etb_regs = NULL;
@@ -588,7 +585,6 @@ out:
return ret;
out_unmap:
- amba_set_drvdata(dev, NULL);
iounmap(t->etm_regs);
out_release:
@@ -601,8 +597,6 @@ static int etm_remove(struct amba_device *dev)
{
struct tracectx *t = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
iounmap(t->etm_regs);
t->etm_regs = NULL;
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba64a90..716249cc2ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
#ifdef CONFIG_ARM_MPU
/* Calculate the size of a region covering just the kernel */
- ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET
+ ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
set_region_nr r0, #MPU_RAM_REGION
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
- ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET
+ ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 11d59b32fb8d..914616e0bdcd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -52,7 +52,8 @@
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
.macro pgtbl, rd, phys
- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
+ add \rd, \phys, #TEXT_OFFSET
+ sub \rd, \rd, #PG_DIR_SIZE
.endm
/*
@@ -110,7 +111,7 @@ ENTRY(stext)
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
- ldr r8, =PHYS_OFFSET @ always constant in this case
+ ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
/*
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
index dcd5b4d86143..9203cf883330 100644
--- a/arch/arm/kernel/io.c
+++ b/arch/arm/kernel/io.c
@@ -1,6 +1,41 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RAW_SPINLOCK(__io_lock);
+
+/*
+ * Generic atomic MMIO modify.
+ *
+ * Allows thread-safe access to registers shared by unrelated subsystems.
+ * The access is protected by a single MMIO-wide lock.
+ */
+void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&__io_lock, flags);
+ value = readl_relaxed(reg) & ~mask;
+ value |= (set & mask);
+ writel_relaxed(value, reg);
+ raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify_relaxed);
+
+void atomic_io_modify(void __iomem *reg, u32 mask, u32 set)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&__io_lock, flags);
+ value = readl_relaxed(reg) & ~mask;
+ value |= (set & mask);
+ writel(value, reg);
+ raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify);
/*
* Copy data from IO memory space to "real" memory space.
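A minimal usage sketch for the atomic_io_modify() helpers added in the hunk above (illustrative only; the register pointer and bit value below are hypothetical and not part of this patch):

/* Hypothetical driver sharing one MMIO register with another subsystem. */
#include <linux/io.h>

#define MY_CLK_ENABLE	0x00000001	/* hypothetical bit in the shared register */

static void my_enable_clock(void __iomem *shared_reg)
{
	/* Read-modify-write only the enable bit, serialised by the global __io_lock. */
	atomic_io_modify(shared_reg, MY_CLK_ENABLE, MY_CLK_ENABLE);
}

static void my_disable_clock(void __iomem *shared_reg)
{
	/* Clear the same bit; the other fields in the register are left untouched. */
	atomic_io_modify(shared_reg, MY_CLK_ENABLE, 0);
}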
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 57221e349a7c..f0d180d8b29f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,11 +14,12 @@
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>
-extern const unsigned char relocate_new_kernel[];
+extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
extern unsigned long kexec_start_address;
@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image)
{
unsigned long page_list;
unsigned long reboot_code_buffer_phys;
+ unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
+ unsigned long reboot_entry_phys;
void *reboot_code_buffer;
/*
@@ -168,16 +171,16 @@ void machine_kexec(struct kimage *image)
/* copy our kernel relocation code to the control code page */
- memcpy(reboot_code_buffer,
- relocate_new_kernel, relocate_new_kernel_size);
+ reboot_entry = fncpy(reboot_code_buffer,
+ reboot_entry,
+ relocate_new_kernel_size);
+ reboot_entry_phys = (unsigned long)reboot_entry +
+ (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
-
- flush_icache_range((unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
if (kexec_reinit)
kexec_reinit();
- soft_restart(reboot_code_buffer_phys);
+ soft_restart(reboot_entry_phys);
}
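For context, the fncpy() pattern adopted above can be sketched in a caller roughly as follows (assumptions: the stub name and size symbol are hypothetical; fncpy() is understood to copy the code, perform the cache maintenance itself, and return a pointer that preserves the ARM/Thumb state bit, which is why the raw memcpy() and the explicit flush_icache_range() are dropped; both source and destination need 8-byte alignment, hence the ".align 3" added to relocate_kernel.S):

#include <asm/fncpy.h>

typedef void (*reloc_fn_t)(void);

/* Hypothetical position-independent stub and its size symbol. */
extern void my_pic_stub(void);
extern const unsigned int my_pic_stub_size;

static reloc_fn_t copy_stub(void *dest_buf)	/* dest_buf must be 8-byte aligned */
{
	/*
	 * fncpy() copies the stub into dest_buf, flushes the caches for the
	 * copy, and returns a callable pointer with the Thumb bit preserved.
	 */
	return fncpy(dest_buf, &my_pic_stub, my_pic_stub_size);
}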
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index bc3f2efa0d86..789d846a9184 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -99,10 +99,6 @@ int armpmu_event_set_period(struct perf_event *event)
s64 period = hwc->sample_period;
int ret = 0;
- /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
- if (unlikely(period != hwc->last_period))
- left = period - (hwc->last_period - left);
-
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index d85055cd24ba..20d553c9f5e2 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
- int (*init_fn)(struct arm_pmu *);
+ const int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
int ret = -ENODEV;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05f9e24..92f7b15dd221 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.sp = thread_saved_sp(p);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame) < 0)
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864d..95858966d84e 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -2,10 +2,12 @@
* relocate_kernel.S - put the kernel image in place to boot
*/
+#include <linux/linkage.h>
#include <asm/kexec.h>
- .globl relocate_new_kernel
-relocate_new_kernel:
+ .align 3 /* not needed for this code, but keeps fncpy() happy */
+
+ENTRY(relocate_new_kernel)
ldr r0,kexec_indirection_page
ldr r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
kexec_boot_atags:
.long 0x0
+ENDPROC(relocate_new_kernel)
+
relocate_new_kernel_end:
.globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a81b1ae..b0df9761de6d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -334,7 +334,7 @@ static void __init cacheid_init(void)
cacheid = CACHEID_VIVT;
}
- printk("CPU: %s data cache, %s instruction cache\n",
+ pr_info("CPU: %s data cache, %s instruction cache\n",
cache_is_vivt() ? "VIVT" :
cache_is_vipt_aliasing() ? "VIPT aliasing" :
cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
@@ -416,7 +416,7 @@ void notrace cpu_init(void)
struct stack *stk = &stacks[cpu];
if (cpu >= NR_CPUS) {
- printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+ pr_crit("CPU%u: bad primary CPU number\n", cpu);
BUG();
}
@@ -484,7 +484,7 @@ void __init smp_setup_processor_id(void)
*/
set_my_cpu_offset(0);
- printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
+ pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
struct mpidr_hash mpidr_hash;
@@ -564,8 +564,8 @@ static void __init setup_processor(void)
*/
list = lookup_processor_type(read_cpuid_id());
if (!list) {
- printk("CPU configuration botched (ID %08x), unable "
- "to continue.\n", read_cpuid_id());
+ pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
+ read_cpuid_id());
while (1);
}
@@ -585,9 +585,9 @@ static void __init setup_processor(void)
cpu_cache = *list->cache;
#endif
- printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
- proc_arch[cpu_architecture()], cr_alignment);
+ pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+ cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+ proc_arch[cpu_architecture()], cr_alignment);
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
list->arch_name, ENDIANNESS);
@@ -629,8 +629,8 @@ int __init arm_add_memory(u64 start, u64 size)
u64 aligned_start;
if (meminfo.nr_banks >= NR_BANKS) {
- printk(KERN_CRIT "NR_BANKS too low, "
- "ignoring memory at 0x%08llx\n", (long long)start);
+ pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
+ (long long)start);
return -EINVAL;
}
@@ -643,14 +643,14 @@ int __init arm_add_memory(u64 start, u64 size)
#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
if (aligned_start > ULONG_MAX) {
- printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
- "32-bit physical address space\n", (long long)start);
+ pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
+ (long long)start);
return -EINVAL;
}
if (aligned_start + size > ULONG_MAX) {
- printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
- "32-bit physical address space\n", (long long)start);
+ pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
+ (long long)start);
/*
* To ensure bank->start + bank->size is representable in
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
@@ -660,6 +660,20 @@ int __init arm_add_memory(u64 start, u64 size)
}
#endif
+ if (aligned_start < PHYS_OFFSET) {
+ if (aligned_start + size <= PHYS_OFFSET) {
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, aligned_start + size);
+ return -EINVAL;
+ }
+
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, (u64)PHYS_OFFSET);
+
+ size -= PHYS_OFFSET - aligned_start;
+ aligned_start = PHYS_OFFSET;
+ }
+
bank->start = aligned_start;
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
@@ -717,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
kernel_data.end = virt_to_phys(_end - 1);
for_each_memblock(memory, region) {
- res = alloc_bootmem_low(sizeof(*res));
+ res = memblock_virt_alloc_low(sizeof(*res), 0);
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
@@ -817,18 +831,17 @@ static void __init reserve_crashkernel(void)
if (ret)
return;
- ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
+ ret = memblock_reserve(crash_base, crash_size);
if (ret < 0) {
- printk(KERN_WARNING "crashkernel reservation failed - "
- "memory is in use (0x%lx)\n", (unsigned long)crash_base);
+ pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
+ (unsigned long)crash_base);
return;
}
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(total_mem >> 20));
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crash_base >> 20),
+ (unsigned long)(total_mem >> 20));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
@@ -873,8 +886,6 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
- setup_dma_zone(mdesc);
-
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
@@ -892,6 +903,7 @@ void __init setup_arch(char **cmdline_p)
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+ setup_dma_zone(mdesc);
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index 3c5d0f2170fd..b84d0cb13682 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -30,6 +30,27 @@
* snippets.
*/
+/*
+ * In CPU_THUMBONLY case kernel arm opcodes are not allowed.
+ * Note in this case codes skips those instructions but it uses .org
+ * directive to keep correct layout of sigreturn_codes array.
+ */
+#ifndef CONFIG_CPU_THUMBONLY
+#define ARM_OK(code...) code
+#else
+#define ARM_OK(code...)
+#endif
+
+ .macro arm_slot n
+ .org sigreturn_codes + 12 * (\n)
+ARM_OK( .arm )
+ .endm
+
+ .macro thumb_slot n
+ .org sigreturn_codes + 12 * (\n) + 8
+ .thumb
+ .endm
+
#if __LINUX_ARM_ARCH__ <= 4
/*
* Note we manually set minimally required arch that supports
@@ -45,26 +66,27 @@
.global sigreturn_codes
.type sigreturn_codes, #object
- .arm
+ .align
sigreturn_codes:
/* ARM sigreturn syscall code snippet */
- mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
- swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+ arm_slot 0
+ARM_OK( mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) )
+ARM_OK( swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
/* Thumb sigreturn syscall code snippet */
- .thumb
+ thumb_slot 0
movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
swi #0
/* ARM sigreturn_rt syscall code snippet */
- .arm
- mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
- swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+ arm_slot 1
+ARM_OK( mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) )
+ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
/* Thumb sigreturn_rt syscall code snippet */
- .thumb
+ thumb_slot 1
movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
swi #0
@@ -74,7 +96,7 @@ sigreturn_codes:
* it is thumb case or not, so we need additional
* word after real last entry.
*/
- .arm
+ arm_slot 2
.space 4
.size sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index dc894ab3622b..b7b4c86e338b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -105,8 +105,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
secondary_data.pgdir = get_arch_pgd(idmap_pgd);
secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
- __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
- outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
+ sync_cache_w(&secondary_data);
/*
* Now bring the CPU into our world.
@@ -294,6 +293,9 @@ void __ref cpu_die(void)
if (smp_ops.cpu_die)
smp_ops.cpu_die(cpu);
+ pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
+ cpu);
+
/*
* Do not return to the idle loop - jump back to the secondary
* cpu initialisation. There's some initialisation which needs
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..af4e8c8a5422 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
- if (fp < (low + 12) || fp + 4 >= high)
+ if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index f50f19e5c138..7a3be1d4d0b1 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -52,7 +52,7 @@ static struct map_desc dtcm_iomap[] __initdata = {
.virtual = DTCM_OFFSET,
.pfn = __phys_to_pfn(DTCM_OFFSET),
.length = 0,
- .type = MT_MEMORY_DTCM
+ .type = MT_MEMORY_RW_DTCM
}
};
@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
.virtual = ITCM_OFFSET,
.pfn = __phys_to_pfn(ITCM_OFFSET),
.length = 0,
- .type = MT_MEMORY_ITCM
+ .type = MT_MEMORY_RWX_ITCM,
}
};
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 85a87370f144..0bc94b1fd1ae 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -68,16 +68,16 @@ struct cpu_efficiency {
* Processors that are not defined in the table,
* use the default SCHED_POWER_SCALE value for cpu_scale.
*/
-struct cpu_efficiency table_efficiency[] = {
+static const struct cpu_efficiency table_efficiency[] = {
{"arm,cortex-a15", 3891},
{"arm,cortex-a7", 2048},
{NULL, },
};
-unsigned long *__cpu_capacity;
+static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu) __cpu_capacity[cpu]
-unsigned long middle_capacity = 1;
+static unsigned long middle_capacity = 1;
/*
* Iterate all CPUs' descriptor in DT and compute the efficiency
@@ -89,7 +89,7 @@ unsigned long middle_capacity = 1;
*/
static void __init parse_dt_topology(void)
{
- struct cpu_efficiency *cpu_eff;
+ const struct cpu_efficiency *cpu_eff;
struct device_node *cn = NULL;
unsigned long min_capacity = (unsigned long)(-1);
unsigned long max_capacity = 0;
@@ -158,7 +158,7 @@ static void __init parse_dt_topology(void)
* boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
* function returns directly for SMP system.
*/
-void update_cpu_power(unsigned int cpu)
+static void update_cpu_power(unsigned int cpu)
{
if (!cpu_capacity(cpu))
return;
@@ -185,7 +185,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return &cpu_topology[cpu].core_sibling;
}
-void update_siblings_masks(unsigned int cpuid)
+static void update_siblings_masks(unsigned int cpuid)
{
struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
int cpu;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index dbf0923e8d76..172ee18ff124 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -36,7 +36,13 @@
#include <asm/system_misc.h>
#include <asm/opcodes.h>
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+ "prefetch abort",
+ "data abort",
+ "address exception",
+ "interrupt",
+ "undefined instruction",
+};
void *vectors_page;
@@ -56,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
@@ -425,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
instr2 = __mem_to_opcode_thumb16(instr2);
instr = __opcode_thumb32_compose(instr, instr2);
}
- } else if (get_user(instr, (u32 __user *)pc)) {
+ } else {
+ if (get_user(instr, (u32 __user *)pc))
+ goto die_sig;
instr = __mem_to_opcode_arm(instr);
- goto die_sig;
}
if (call_undef_hook(regs, instr) == 0)
@@ -509,9 +516,10 @@ static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
int ret;
- unsigned long chunk = PAGE_SIZE;
do {
+ unsigned long chunk = min(PAGE_SIZE, end - start);
+
if (signal_pending(current)) {
struct thread_info *ti = current_thread_info();