Diffstat (limited to 'arch/xtensa/kernel')
-rw-r--r--  arch/xtensa/kernel/Makefile        |    1
-rw-r--r--  arch/xtensa/kernel/head.S          |  181
-rw-r--r--  arch/xtensa/kernel/irq.c           |  207
-rw-r--r--  arch/xtensa/kernel/mxhead.S        |   85
-rw-r--r--  arch/xtensa/kernel/setup.c         |  131
-rw-r--r--  arch/xtensa/kernel/smp.c           |  592
-rw-r--r--  arch/xtensa/kernel/time.c          |   61
-rw-r--r--  arch/xtensa/kernel/traps.c         |   56
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S   |   26
9 files changed, 1100 insertions, 240 deletions
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f90265ec1ccc..18d962a8c0c2 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_SMP) += smp.o mxhead.o
AFLAGS_head.o += -mtext-section-literals
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 7d740ebbe198..aeeb3cc8a410 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -19,6 +19,7 @@
#include <asm/page.h>
#include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
#include <linux/init.h>
#include <linux/linkage.h>
@@ -54,7 +55,7 @@ ENTRY(_start)
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
wsr a2, excsave1
- _j _SetupMMU
+ _j _SetupOCD
.align 4
.literal_position
@@ -62,6 +63,23 @@ ENTRY(_start)
.word _startup
.align 4
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
.global _SetupMMU
_SetupMMU:
Offset = _SetupMMU - _start
@@ -85,24 +103,11 @@ _SetupMMU:
ENDPROC(_start)
- __INIT
+ __REF
.literal_position
ENTRY(_startup)
- /* Disable interrupts and exceptions. */
-
- movi a0, LOCKLEVEL
- wsr a0, ps
-
- /* Start with a fresh windowbase and windowstart. */
-
- movi a1, 1
- movi a0, 0
- wsr a1, windowstart
- wsr a0, windowbase
- rsync
-
/* Set a0 to 0 for the remaining initialization. */
movi a0, 0
@@ -154,17 +159,6 @@ ENTRY(_startup)
wsr a0, cpenable
#endif
- /* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0
- *
- * Note: PS.EXCM must be cleared before using any loop
- * instructions; otherwise, they are silently disabled, and
- * at most one iteration of the loop is executed.
- */
-
- movi a1, LOCKLEVEL
- wsr a1, ps
- rsync
-
/* Initialize the caches.
* a2, a3 are just working registers (clobbered).
*/
@@ -182,6 +176,37 @@ ENTRY(_startup)
isync
+#ifdef CONFIG_HAVE_SMP
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 1
+ wer a3, a2
+#endif
+
+ /* Setup stack and enable window exceptions (keep irqs disabled) */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+
+ movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+ # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
+ wsr a2, ps # (enable reg-windows; progmode stack)
+ rsync
+
+ /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
+
+ movi a2, debug_exception
+ wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+
+#ifdef CONFIG_SMP
+ /*
+ * Notice that we assume with SMP that cores have PRID
+ * supported by the cores.
+ */
+ rsr a2, prid
+ bnez a2, .Lboot_secondary
+
+#endif /* CONFIG_SMP */
+
/* Unpack data sections
*
* The linker script used to build the Linux kernel image
@@ -234,24 +259,7 @@ ENTRY(_startup)
___invalidate_icache_all a2 a3
isync
- /* Setup stack and enable window exceptions (keep irqs disabled) */
-
- movi a1, init_thread_union
- addi a1, a1, KERNEL_STACK_SIZE
-
- movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
- # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
- wsr a2, ps # (enable reg-windows; progmode stack)
- rsync
-
- /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
-
- movi a2, debug_exception
- wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
-
- /* Set up EXCSAVE[1] to point to the exc_table. */
-
- movi a6, exc_table
+ movi a6, 0
xsr a6, excsave1
/* init_arch kick-starts the linux kernel */
@@ -265,8 +273,93 @@ ENTRY(_startup)
should_never_return:
j should_never_return
+#ifdef CONFIG_SMP
+.Lboot_secondary:
+
+ movi a2, cpu_start_ccount
+1:
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
+1:
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ wsr a3, ccount
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
+
+ movi a6, 0
+ wsr a6, excsave1
+
+ movi a4, secondary_start_kernel
+ callx4 a4
+ j should_never_return
+
+#endif /* CONFIG_SMP */
+
ENDPROC(_startup)
+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+ ___flush_invalidate_dcache_all a2 a3
+#else
+ ___invalidate_dcache_all a2 a3
+#endif
+ memw
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 0
+ wer a3, a2
+ extw
+
+ rsr a0, prid
+ neg a2, a0
+ movi a3, cpu_start_id
+ s32i a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+ dhwbi a3, 0
+#endif
+1:
+ l32i a2, a3, 0
+ dhi a3, 0
+ bne a2, a0, 1b
+
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ j _startup
+
+ENDPROC(cpu_restart)
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * DATA section
+ */
+
+ .section ".data.init.refok"
+ .align 4
+ENTRY(start_info)
+ .long init_thread_union + KERNEL_STACK_SIZE
+
/*
* BSS section
*/
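
The .Lboot_secondary loop added above is one half of a two-round handshake with boot_secondary() in smp.c (later in this diff): in each round the boot CPU publishes its current CCOUNT through cpu_start_ccount and spins until the secondary clears it, and on the second round the secondary also copies the published value into its own CCOUNT so the cycle counters start roughly in sync. A rough C paraphrase of the secondary side, where wsr_ccount() is only an illustrative stand-in for the "wsr a3, ccount" instruction:

/* C sketch of .Lboot_secondary; wsr_ccount() is illustrative only. */
extern unsigned long cpu_start_ccount;

static void secondary_wait_for_release(void)
{
	volatile unsigned long *flag = &cpu_start_ccount;
	unsigned long ccount;
	int round;

	for (round = 0; round < 2; ++round) {
		while (!(ccount = *flag))
			;			/* wait for CPU 0 to publish a value */
		if (round == 1)
			wsr_ccount(ccount);	/* adopt CPU 0's CCOUNT */
		*flag = 0;			/* acknowledge ("s32i ...; memw") */
	}
}
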
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 6f4f9749cff7..482868a2de6e 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -4,7 +4,7 @@
* Xtensa built-in interrupt controller and some generic functions copied
* from i386.
*
- * Copyright (C) 2002 - 2006 Tensilica, Inc.
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
*
@@ -18,36 +18,27 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
+#include <linux/irqchip/xtensa-pic.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
+#include <asm/mxregs.h>
#include <asm/uaccess.h>
#include <asm/platform.h>
-static unsigned int cached_irq_mask;
-
atomic_t irq_err_count;
-static struct irq_domain *root_domain;
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-
asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
- int irq = irq_find_mapping(root_domain, hwirq);
+ int irq = irq_find_mapping(NULL, hwirq);
if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, hwirq);
}
- irq_enter();
-
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
@@ -62,95 +53,69 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
}
#endif
generic_handle_irq(irq);
-
- irq_exit();
- set_irq_regs(old_regs);
}
int arch_show_interrupts(struct seq_file *p, int prec)
{
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
+#endif
seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
-static void xtensa_irq_mask(struct irq_data *d)
-{
- cached_irq_mask &= ~(1 << d->hwirq);
- set_sr (cached_irq_mask, intenable);
-}
-
-static void xtensa_irq_unmask(struct irq_data *d)
-{
- cached_irq_mask |= 1 << d->hwirq;
- set_sr (cached_irq_mask, intenable);
-}
-
-static void xtensa_irq_enable(struct irq_data *d)
-{
- variant_irq_enable(d->hwirq);
- xtensa_irq_unmask(d);
-}
-
-static void xtensa_irq_disable(struct irq_data *d)
-{
- xtensa_irq_mask(d);
- variant_irq_disable(d->hwirq);
-}
-
-static void xtensa_irq_ack(struct irq_data *d)
-{
- set_sr(1 << d->hwirq, intclear);
-}
-
-static int xtensa_irq_retrigger(struct irq_data *d)
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type)
{
- set_sr(1 << d->hwirq, intset);
- return 1;
+ if (WARN_ON(intsize < 1 || intsize > 2))
+ return -EINVAL;
+ if (intsize == 2 && intspec[1] == 1) {
+ int_irq = xtensa_map_ext_irq(ext_irq);
+ if (int_irq < XCHAL_NUM_INTERRUPTS)
+ *out_hwirq = int_irq;
+ else
+ return -EINVAL;
+ } else {
+ *out_hwirq = int_irq;
+ }
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
}
-static struct irq_chip xtensa_irq_chip = {
- .name = "xtensa",
- .irq_enable = xtensa_irq_enable,
- .irq_disable = xtensa_irq_disable,
- .irq_mask = xtensa_irq_mask,
- .irq_unmask = xtensa_irq_unmask,
- .irq_ack = xtensa_irq_ack,
- .irq_retrigger = xtensa_irq_retrigger,
-};
-
-static int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
+ struct irq_chip *irq_chip = d->host_data;
u32 mask = 1 << hw;
if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
- irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ irq_set_chip_and_handler_name(irq, irq_chip,
handle_simple_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
- irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ irq_set_chip_and_handler_name(irq, irq_chip,
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
- irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ irq_set_chip_and_handler_name(irq, irq_chip,
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
} else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
- irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
- handle_edge_irq, "edge");
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "timer");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
/* XCHAL_INTTYPE_MASK_NMI */
-
- irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ irq_set_chip_and_handler_name(irq, irq_chip,
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
return 0;
}
-static unsigned map_ext_irq(unsigned ext_irq)
+unsigned xtensa_map_ext_irq(unsigned ext_irq)
{
unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
@@ -163,55 +128,77 @@ static unsigned map_ext_irq(unsigned ext_irq)
return XCHAL_NUM_INTERRUPTS;
}
-/*
- * Device Tree IRQ specifier translation function which works with one or
- * two cell bindings. First cell value maps directly to the hwirq number.
- * Second cell if present specifies whether hwirq number is external (1) or
- * internal (0).
- */
-int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type)
+unsigned xtensa_get_ext_irq_no(unsigned irq)
{
- if (WARN_ON(intsize < 1 || intsize > 2))
- return -EINVAL;
- if (intsize == 2 && intspec[1] == 1) {
- unsigned int_irq = map_ext_irq(intspec[0]);
- if (int_irq < XCHAL_NUM_INTERRUPTS)
- *out_hwirq = int_irq;
- else
- return -EINVAL;
- } else {
- *out_hwirq = intspec[0];
- }
- *out_type = IRQ_TYPE_NONE;
- return 0;
+ unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
+ ((1u << irq) - 1);
+ return hweight32(mask);
}
-static const struct irq_domain_ops xtensa_irq_domain_ops = {
- .xlate = xtensa_irq_domain_xlate,
- .map = xtensa_irq_map,
-};
-
void __init init_IRQ(void)
{
- struct device_node *intc = NULL;
-
- cached_irq_mask = 0;
- set_sr(~0, intclear);
-
#ifdef CONFIG_OF
- /* The interrupt controller device node is mandatory */
- intc = of_find_compatible_node(NULL, NULL, "xtensa,pic");
- BUG_ON(!intc);
-
- root_domain = irq_domain_add_linear(intc, NR_IRQS,
- &xtensa_irq_domain_ops, NULL);
+ irqchip_init();
+#else
+#ifdef CONFIG_HAVE_SMP
+ xtensa_mx_init_legacy(NULL);
#else
- root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
- &xtensa_irq_domain_ops, NULL);
+ xtensa_pic_init_legacy(NULL);
+#endif
#endif
- irq_set_default_host(root_domain);
+#ifdef CONFIG_SMP
+ ipi_init();
+#endif
variant_init_irq();
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (chip->irq_set_affinity)
+ chip->irq_set_affinity(data, cpumask_of(cpu), false);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i, cpu = smp_processor_id();
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ unsigned int newcpu;
+
+ if (irqd_is_per_cpu(data))
+ continue;
+
+ if (!cpumask_test_cpu(cpu, data->affinity))
+ continue;
+
+ newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
+
+ if (newcpu >= nr_cpu_ids) {
+ pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, cpu);
+
+ cpumask_setall(data->affinity);
+ newcpu = cpumask_any_and(data->affinity,
+ cpu_online_mask);
+ }
+
+ route_irq(data, i, newcpu);
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
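
Note that the per-irq chip callbacks and the root irq domain deleted above now live in the separate xtensa-mx and xtensa-pic irqchip drivers (not part of this diff), selected through irqchip_init() or the *_init_legacy() calls, while xtensa_irq_map(), xtensa_irq_domain_xlate() and xtensa_map_ext_irq() are kept here for them to share. The new xtensa_get_ext_irq_no() derives an interrupt's external number by counting how many external (edge or level) interrupts sit below it in the core's interrupt map; a small standalone illustration of the same bit-counting idea, using a made-up mask instead of the real XCHAL_INTTYPE_MASK_* constants:

#include <stdio.h>

/* Illustrative only: pretend external interrupts occupy hwirq bits 1, 3 and 4. */
#define FAKE_EXTERN_MASK ((1u << 1) | (1u << 3) | (1u << 4))

static unsigned ext_irq_no(unsigned irq)
{
	unsigned mask = FAKE_EXTERN_MASK & ((1u << irq) - 1);

	return __builtin_popcount(mask);	/* hweight32() in the kernel */
}

int main(void)
{
	/* hwirq 3 is the second external interrupt, so its external number is 1. */
	printf("%u\n", ext_irq_no(3));
	return 0;
}
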
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
new file mode 100644
index 000000000000..77a161a112c5
--- /dev/null
+++ b/arch/xtensa/kernel/mxhead.S
@@ -0,0 +1,85 @@
+/*
+ * Xtensa Secondary Processors startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+#include <asm/regs.h>
+
+
+ .section .SecondaryResetVector.text, "ax"
+
+
+ENTRY(_SecondaryResetVector)
+ _j _SetupOCD
+
+ .begin no-absolute-literals
+ .literal_position
+
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+_SetupMMU:
+ Offset = _SetupMMU - _SecondaryResetVector
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ /*
+ * Start Secondary Processors with NULL pointer to boot params.
+ */
+ movi a2, 0 # a2 == NULL
+ movi a3, _startup
+ jx a3
+
+ .end no-absolute-literals
+
+
+ .section .SecondaryResetVector.remapped_text, "ax"
+ .global _RemappedSecondaryResetVector
+
+ .org 0 # Need to do org before literals
+
+_RemappedSecondaryResetVector:
+ .begin no-absolute-literals
+ .literal_position
+
+ _j _RemappedSetupMMU
+ . = _RemappedSecondaryResetVector + Offset
+
+_RemappedSetupMMU:
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 6e2b6638122d..7d12af1317f1 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -21,6 +21,8 @@
#include <linux/screen_info.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
@@ -37,6 +39,7 @@
#endif
#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/timex.h>
@@ -45,6 +48,7 @@
#include <asm/setup.h>
#include <asm/param.h>
#include <asm/traps.h>
+#include <asm/smp.h>
#include <platform/hardware.h>
@@ -85,12 +89,6 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
sysmem_info_t __initdata sysmem;
-#ifdef CONFIG_MMU
-extern void init_mmu(void);
-#else
-static inline void init_mmu(void) { }
-#endif
-
extern int mem_reserve(unsigned long, unsigned long, int);
extern void bootmem_init(void);
extern void zones_init(void);
@@ -214,6 +212,42 @@ static int __init parse_bootparam(const bp_tag_t* tag)
#ifdef CONFIG_OF
bool __initdata dt_memory_scan = false;
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+EXPORT_SYMBOL(xtensa_kio_paddr);
+
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ const __be32 *ranges;
+ unsigned long len;
+
+ if (depth > 1)
+ return 0;
+
+ if (!of_flat_dt_is_compatible(node, "simple-bus"))
+ return 0;
+
+ ranges = of_get_flat_dt_prop(node, "ranges", &len);
+ if (!ranges)
+ return 1;
+ if (len == 0)
+ return 1;
+
+ xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
+ /* round down to nearest 256MB boundary */
+ xtensa_kio_paddr &= 0xf0000000;
+
+ return 1;
+}
+#else
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ return 1;
+}
+#endif
+
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
if (!dt_memory_scan)
@@ -234,6 +268,7 @@ void __init early_init_devtree(void *params)
dt_memory_scan = true;
early_init_dt_scan(params);
+ of_scan_flat_dt(xtensa_dt_io_area, NULL);
if (!command_line[0])
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
@@ -241,7 +276,7 @@ void __init early_init_devtree(void *params)
static int __init xtensa_device_probe(void)
{
- of_platform_populate(NULL, NULL, NULL, NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
@@ -354,7 +389,8 @@ static inline int probed_compare_swap(int *v, int cmp, int set)
/* Handle probed exception */
-void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause)
+static void __init do_probed_exception(struct pt_regs *regs,
+ unsigned long exccause)
{
if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
regs->pc += 3; /* skip the s32c1i instruction */
@@ -366,7 +402,7 @@ void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause)
/* Simple test of S32C1I (soc bringup assist) */
-void __init check_s32c1i(void)
+static int __init check_s32c1i(void)
{
int n, cause1, cause2;
void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
@@ -421,24 +457,21 @@ void __init check_s32c1i(void)
trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+ return 0;
}
#else /* XCHAL_HAVE_S32C1I */
/* This condition should not occur with a commercially deployed processor.
Display reminder for early engr test or demo chips / FPGA bitstreams */
-void __init check_s32c1i(void)
+static int __init check_s32c1i(void)
{
pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+ return 0;
}
#endif /* XCHAL_HAVE_S32C1I */
-#else /* CONFIG_S32C1I_SELFTEST */
-
-void __init check_s32c1i(void)
-{
-}
-
+early_initcall(check_s32c1i);
#endif /* CONFIG_S32C1I_SELFTEST */
@@ -447,8 +480,6 @@ void __init setup_arch(char **cmdline_p)
strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
- check_s32c1i();
-
/* Reserve some memory regions */
#ifdef CONFIG_BLK_DEV_INITRD
@@ -505,6 +536,10 @@ void __init setup_arch(char **cmdline_p)
platform_setup(cmdline_p);
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
paging_init();
zones_init();
@@ -521,6 +556,22 @@ void __init setup_arch(char **cmdline_p)
#endif
}
+static DEFINE_PER_CPU(struct cpu, cpu_data);
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cpu *cpu = &per_cpu(cpu_data, i);
+ cpu->hotpluggable = !!i;
+ register_cpu(cpu, i);
+ }
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
void machine_restart(char * cmd)
{
platform_restart();
@@ -546,21 +597,27 @@ void machine_power_off(void)
static int
c_show(struct seq_file *f, void *slot)
{
+ char buf[NR_CPUS * 5];
+
+ cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
/* high-level stuff */
- seq_printf(f,"processor\t: 0\n"
- "vendor_id\t: Tensilica\n"
- "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
- "core ID\t\t: " XCHAL_CORE_ID "\n"
- "build ID\t: 0x%x\n"
- "byte order\t: %s\n"
- "cpu MHz\t\t: %lu.%02lu\n"
- "bogomips\t: %lu.%02lu\n",
- XCHAL_BUILD_UNIQUE_ID,
- XCHAL_HAVE_BE ? "big" : "little",
- ccount_freq/1000000,
- (ccount_freq/10000) % 100,
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100);
+ seq_printf(f, "CPU count\t: %u\n"
+ "CPU list\t: %s\n"
+ "vendor_id\t: Tensilica\n"
+ "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
+ "core ID\t\t: " XCHAL_CORE_ID "\n"
+ "build ID\t: 0x%x\n"
+ "byte order\t: %s\n"
+ "cpu MHz\t\t: %lu.%02lu\n"
+ "bogomips\t: %lu.%02lu\n",
+ num_online_cpus(),
+ buf,
+ XCHAL_BUILD_UNIQUE_ID,
+ XCHAL_HAVE_BE ? "big" : "little",
+ ccount_freq/1000000,
+ (ccount_freq/10000) % 100,
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI
@@ -672,7 +729,7 @@ c_show(struct seq_file *f, void *slot)
static void *
c_start(struct seq_file *f, loff_t *pos)
{
- return (void *) ((*pos == 0) ? (void *)1 : NULL);
+ return (*pos == 0) ? (void *)1 : NULL;
}
static void *
@@ -688,10 +745,10 @@ c_stop(struct seq_file *f, void *v)
const struct seq_operations cpuinfo_op =
{
- start: c_start,
- next: c_next,
- stop: c_stop,
- show: c_show
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show,
};
#endif /* CONFIG_PROC_FS */
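
Two small additions above are easy to miss: topology_init() registers one struct cpu per possible CPU (marking everything except CPU 0 as hotpluggable), and xtensa_dt_io_area() takes the first address cell of a top-level simple-bus "ranges" property and rounds it down to a 256MB boundary before using it as xtensa_kio_paddr. The rounding is just a mask; a tiny example, assuming a hypothetical bus address of 0xfd050000:

#include <stdio.h>

int main(void)
{
	/* Hypothetical bus address read from a "ranges" property. */
	unsigned long addr = 0xfd050000UL;
	unsigned long kio  = addr & 0xf0000000UL;	/* nearest 256MB boundary below */

	printf("0x%08lx -> 0x%08lx\n", addr, kio);	/* prints 0xfd050000 -> 0xf0000000 */
	return 0;
}
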
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
new file mode 100644
index 000000000000..aa8bd8717927
--- /dev/null
+++ b/arch/xtensa/kernel/smp.c
@@ -0,0 +1,592 @@
+/*
+ * Xtensa SMP support functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/mxregs.h>
+#include <asm/platform.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#ifdef CONFIG_SMP
+# if XCHAL_HAVE_S32C1I == 0
+# error "The S32C1I option is required for SMP."
+# endif
+#endif
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+
+/* IPI (Inter Process Interrupt) */
+
+#define IPI_IRQ 0
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id);
+static struct irqaction ipi_irqaction = {
+ .handler = ipi_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "ipi",
+};
+
+void ipi_init(void)
+{
+ unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
+ setup_irq(irq, &ipi_irqaction);
+}
+
+static inline unsigned int get_core_count(void)
+{
+ /* Bits 18..21 of SYSCFGID contain the core count minus 1. */
+ unsigned int syscfgid = get_er(SYSCFGID);
+ return ((syscfgid >> 18) & 0xf) + 1;
+}
+
+static inline int get_core_id(void)
+{
+ /* Bits 0...18 of SYSCFGID contain the core id */
+ unsigned int core_id = get_er(SYSCFGID);
+ return core_id & 0x3fff;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned i;
+
+ for (i = 0; i < max_cpus; ++i)
+ set_cpu_present(i, true);
+}
+
+void __init smp_init_cpus(void)
+{
+ unsigned i;
+ unsigned int ncpus = get_core_count();
+ unsigned int core_id = get_core_id();
+
+ pr_info("%s: Core Count = %d\n", __func__, ncpus);
+ pr_info("%s: Core Id = %d\n", __func__, core_id);
+
+ for (i = 0; i < ncpus; ++i)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+ BUG_ON(cpu != 0);
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
+static DECLARE_COMPLETION(cpu_running);
+
+void secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ init_mmu();
+
+#ifdef CONFIG_DEBUG_KERNEL
+ if (boot_secondary_processors == 0) {
+ pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+ for (;;)
+ __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
+ }
+
+ pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+#endif
+ /* Init EXCSAVE1 */
+
+ secondary_trap_init();
+
+ /* All kernel threads share the same mm context. */
+
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ enter_lazy_tlb(mm, current);
+
+ preempt_disable();
+ trace_hardirqs_off();
+
+ calibrate_delay();
+
+ notify_cpu_starting(cpu);
+
+ secondary_init_irq();
+ local_timer_setup(cpu);
+
+ set_cpu_online(cpu, true);
+
+ local_irq_enable();
+
+ complete(&cpu_running);
+
+ cpu_startup_entry(CPUHP_ONLINE);
+}
+
+static void mx_cpu_start(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+static void mx_cpu_stop(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask | (1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
+unsigned long cpu_start_ccount;
+
+static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned long ccount;
+ int i;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ cpu_start_id = cpu;
+ system_flush_invalidate_dcache_range(
+ (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+#endif
+ smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
+ for (i = 0; i < 2; ++i) {
+ do
+ ccount = get_ccount();
+ while (!ccount);
+
+ cpu_start_ccount = ccount;
+
+ while (time_before(jiffies, timeout)) {
+ mb();
+ if (!cpu_start_ccount)
+ break;
+ }
+
+ if (cpu_start_ccount) {
+ smp_call_function_single(0, mx_cpu_stop,
+ (void *)cpu, 1);
+ cpu_start_ccount = 0;
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int ret = 0;
+
+ if (cpu_asid_cache(cpu) == 0)
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+
+ start_info.stack = (unsigned long)task_pt_regs(idle);
+ wmb();
+
+ pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+ __func__, cpu, idle, start_info.stack);
+
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
+ if (!cpu_online(cpu))
+ ret = -EIO;
+ }
+
+ if (ret)
+ pr_err("CPU %u failed to boot\n", cpu);
+
+ return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ invalidate_page_directory();
+
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+ smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
+ if (cpu_start_id == -cpu) {
+ platform_cpu_kill(cpu);
+ return;
+ }
+ }
+ pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+ idle_task_exit();
+ local_irq_disable();
+ __asm__ __volatile__(
+ " movi a2, cpu_restart\n"
+ " jx a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE = 0,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_MAX
+};
+
+static const struct {
+ const char *short_text;
+ const char *long_text;
+} ipi_text[] = {
+ { .short_text = "RES", .long_text = "Rescheduling interrupts" },
+ { .short_text = "CAL", .long_text = "Function call interrupts" },
+ { .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
+};
+
+struct ipi_data {
+ unsigned long ipi_count[IPI_MAX];
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void send_ipi_message(const struct cpumask *callmask,
+ enum ipi_msg_type msg_id)
+{
+ int index;
+ unsigned long mask = 0;
+
+ for_each_cpu(index, callmask)
+ if (index != smp_processor_id())
+ mask |= 1 << index;
+
+ set_er(mask, MIPISET(msg_id));
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ send_ipi_message(&targets, IPI_CPU_STOP);
+}
+
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ set_cpu_online(cpu, false);
+ machine_halt();
+}
+
+irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned int msg;
+ unsigned i;
+
+ msg = get_er(MIPICAUSE(cpu));
+ for (i = 0; i < IPI_MAX; i++)
+ if (msg & (1 << i)) {
+ set_er(1 << i, MIPICAUSE(cpu));
+ ++ipi->ipi_count[i];
+ }
+
+ if (msg & (1 << IPI_RESCHEDULE))
+ scheduler_ipi();
+ if (msg & (1 << IPI_CALL_FUNC))
+ generic_smp_call_function_interrupt();
+ if (msg & (1 << IPI_CPU_STOP))
+ ipi_cpu_stop(cpu);
+
+ return IRQ_HANDLED;
+}
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu;
+ unsigned i;
+
+ for (i = 0; i < IPI_MAX; ++i) {
+ seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu",
+ per_cpu(ipi_data, cpu).ipi_count[i]);
+ seq_printf(p, " %s\n", ipi_text[i].long_text);
+ }
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ pr_debug("setup_profiling_timer %d\n", multiplier);
+ return 0;
+}
+
+/* TLB flush functions */
+
+struct flush_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void ipi_flush_tlb_all(void *arg)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+static void ipi_flush_tlb_mm(void *arg)
+{
+ local_flush_tlb_mm(arg);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu(ipi_flush_tlb_mm, mm, 1);
+}
+
+static void ipi_flush_tlb_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = addr,
+ };
+ on_each_cpu(ipi_flush_tlb_page, &fd, 1);
+}
+
+static void ipi_flush_tlb_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_range, &fd, 1);
+}
+
+/* Cache flush functions */
+
+static void ipi_flush_cache_all(void *arg)
+{
+ local_flush_cache_all();
+}
+
+void flush_cache_all(void)
+{
+ on_each_cpu(ipi_flush_cache_all, NULL, 1);
+}
+
+static void ipi_flush_cache_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = address,
+ .addr2 = pfn,
+ };
+ on_each_cpu(ipi_flush_cache_page, &fd, 1);
+}
+
+static void ipi_flush_cache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_cache_range, &fd, 1);
+}
+
+static void ipi_flush_icache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_icache_range(fd->addr1, fd->addr2);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_icache_range, &fd, 1);
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void ipi_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}
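
A quick sketch of the IPI plumbing above: send_ipi_message() builds a destination bitmask with one bit per target core (never the sender) and writes it to the MX register selected by the message type via MIPISET(msg_id); each receiver then reads and clears its own MIPICAUSE(cpu) bits in ipi_interrupt(). The mask construction on its own, as a standalone example with a made-up online set:

#include <stdio.h>

int main(void)
{
	/* Illustrative only: CPUs 0..3 online, CPU 1 is the sender. */
	unsigned online = 0xf, self = 1, target, mask = 0;

	for (target = 0; target < 4; ++target)
		if ((online & (1u << target)) && target != self)
			mask |= 1u << target;

	printf("IPI destination mask: 0x%x\n", mask);	/* 0xd = CPUs 0, 2 and 3 */
	return 0;
}
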
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 9af3dd88ad7e..08b769d3b3a1 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -36,7 +36,7 @@ static cycle_t ccount_read(struct clocksource *cs)
return (cycle_t)get_ccount();
}
-static u32 notrace ccount_sched_clock_read(void)
+static u64 notrace ccount_sched_clock_read(void)
{
return get_ccount();
}
@@ -46,24 +46,19 @@ static struct clocksource ccount_clocksource = {
.rating = 200,
.read = ccount_read,
.mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev);
static void ccount_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt);
-static struct ccount_timer_t {
+struct ccount_timer {
struct clock_event_device evt;
int irq_enabled;
-} ccount_timer = {
- .evt = {
- .name = "ccount_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 300,
- .set_next_event = ccount_timer_set_next_event,
- .set_mode = ccount_timer_set_mode,
- },
+ char name[24];
};
+static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);
static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev)
@@ -84,8 +79,8 @@ static int ccount_timer_set_next_event(unsigned long delta,
static void ccount_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- struct ccount_timer_t *timer =
- container_of(evt, struct ccount_timer_t, evt);
+ struct ccount_timer *timer =
+ container_of(evt, struct ccount_timer, evt);
/*
* There is no way to disable the timer interrupt at the device level,
@@ -117,9 +112,28 @@ static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_TIMER,
.name = "timer",
- .dev_id = &ccount_timer,
};
+void local_timer_setup(unsigned cpu)
+{
+ struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
+ struct clock_event_device *clockevent = &timer->evt;
+
+ timer->irq_enabled = 1;
+ clockevent->name = timer->name;
+ snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
+ clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent->rating = 300;
+ clockevent->set_next_event = ccount_timer_set_next_event;
+ clockevent->set_mode = ccount_timer_set_mode;
+ clockevent->cpumask = cpumask_of(cpu);
+ clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
+ if (WARN(!clockevent->irq, "error: can't map timer irq"))
+ return;
+ clockevents_config_and_register(clockevent, ccount_freq,
+ 0xf, 0xffffffff);
+}
+
void __init time_init(void)
{
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
@@ -131,28 +145,21 @@ void __init time_init(void)
ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
clocksource_register_hz(&ccount_clocksource, ccount_freq);
-
- ccount_timer.evt.cpumask = cpumask_of(0);
- ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
- if (WARN(!ccount_timer.evt.irq, "error: can't map timer irq"))
- return;
- clockevents_config_and_register(&ccount_timer.evt, ccount_freq, 0xf,
- 0xffffffff);
- setup_irq(ccount_timer.evt.irq, &timer_irqaction);
- ccount_timer.irq_enabled = 1;
-
- setup_sched_clock(ccount_sched_clock_read, 32, ccount_freq);
+ local_timer_setup(0);
+ setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
+ sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
+ clocksource_of_init();
}
/*
* The timer interrupt is called HZ times per second.
*/
-irqreturn_t timer_interrupt (int irq, void *dev_id)
+irqreturn_t timer_interrupt(int irq, void *dev_id)
{
- struct ccount_timer_t *timer = dev_id;
- struct clock_event_device *evt = &timer->evt;
+ struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
+ set_linux_timer(get_linux_timer());
evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */
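
The set_linux_timer(get_linux_timer()) line added to timer_interrupt() does not re-arm anything; it acknowledges the interrupt, since on Xtensa writing a CCOMPARE register clears the matching timer interrupt. A minimal sketch of that step, with read_ccompare()/write_ccompare() as hypothetical stand-ins for the real get_linux_timer()/set_linux_timer() helpers:

/* Acknowledge the CCOMPARE (Linux timer) interrupt without moving the
 * programmed deadline; the helpers below are illustrative only.
 */
static inline void ack_linux_timer(void)
{
	write_ccompare(read_ccompare());
}
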
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 3e8a05c874cd..eebbfd8c26fc 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -157,7 +157,7 @@ COPROCESSOR(7),
* 2. it is a temporary memory buffer for the exception handlers.
*/
-unsigned long exc_table[EXC_TABLE_SIZE/4];
+DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);
void die(const char*, struct pt_regs*, long);
@@ -212,6 +212,9 @@ void do_interrupt(struct pt_regs *regs)
XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK,
};
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
for (;;) {
unsigned intread = get_sr(interrupt);
@@ -227,21 +230,13 @@ void do_interrupt(struct pt_regs *regs)
}
if (level == 0)
- return;
-
- /*
- * Clear the interrupt before processing, in case it's
- * edge-triggered or software-generated
- */
- while (int_at_level) {
- unsigned i = __ffs(int_at_level);
- unsigned mask = 1 << i;
-
- int_at_level ^= mask;
- set_sr(mask, intclear);
- do_IRQ(i, regs);
- }
+ break;
+
+ do_IRQ(__ffs(int_at_level), regs);
}
+
+ irq_exit();
+ set_irq_regs(old_regs);
}
/*
@@ -318,17 +313,31 @@ do_debug(struct pt_regs *regs)
}
+static void set_handler(int idx, void *handler)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
+}
+
/* Set exception C handler - for temporary use when probing exceptions */
void * __init trap_set_handler(int cause, void *handler)
{
- unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause];
- void *previous = (void *)*entry;
- *entry = (unsigned long)handler;
+ void *previous = (void *)per_cpu(exc_table, 0)[
+ EXC_TABLE_DEFAULT / 4 + cause];
+ set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
return previous;
}
+static void trap_init_excsave(void)
+{
+ unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
+}
+
/*
* Initialize dispatch tables.
*
@@ -342,8 +351,6 @@ void * __init trap_set_handler(int cause, void *handler)
* See vectors.S for more details.
*/
-#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
-
void __init trap_init(void)
{
int i;
@@ -373,10 +380,15 @@ void __init trap_init(void)
}
/* Initialize EXCSAVE_1 to hold the address of the exception table. */
+ trap_init_excsave();
+}
- i = (unsigned long)exc_table;
- __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i));
+#ifdef CONFIG_SMP
+void secondary_trap_init(void)
+{
+ trap_init_excsave();
}
+#endif
/*
* This function dumps the current valid window frame and other base registers.
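
Since exc_table is now a per-CPU array, trap_set_handler() installs the new C handler on every possible CPU through set_handler() and hands back whatever CPU 0 had, which is exactly what the S32C1I self-test in setup.c relies on when it temporarily swaps probe handlers in and out. A hedged sketch of that save/probe/restore pattern; do_probe() and probe_exception_handler() are hypothetical, while trap_set_handler() and EXCCAUSE_LOAD_STORE_ERROR come from the patch:

/* Temporarily reroute a load/store error exception while probing, then
 * restore the previous handler (mirrors check_s32c1i() in setup.c).
 */
static void __init probe_with_temp_handler(void)
{
	void *old = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
				     probe_exception_handler);

	do_probe();		/* may fault; probe_exception_handler() runs */

	trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, old);
}
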
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 21acd11b5df2..ee32c0085dff 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -165,6 +165,13 @@ SECTIONS
.DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text);
+#if defined(CONFIG_SMP)
+ RELOCATE_ENTRY(_SecondaryResetVector_literal,
+ .SecondaryResetVector.literal);
+ RELOCATE_ENTRY(_SecondaryResetVector_text,
+ .SecondaryResetVector.text);
+#endif
+
__boot_reloc_table_end = ABSOLUTE(.) ;
@@ -272,6 +279,25 @@ SECTIONS
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+
+#if defined(CONFIG_SMP)
+
+ SECTION_VECTOR (_SecondaryResetVector_literal,
+ .SecondaryResetVector.literal,
+ RESET_VECTOR1_VADDR - 4,
+ SIZEOF(.DoubleExceptionVector.text),
+ .DoubleExceptionVector.text)
+
+ SECTION_VECTOR (_SecondaryResetVector_text,
+ .SecondaryResetVector.text,
+ RESET_VECTOR1_VADDR,
+ 4,
+ .SecondaryResetVector.literal)
+
+ . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
+
+#endif
+
. = ALIGN(PAGE_SIZE);
__init_end = .;