summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJon Callan <Jon.Callan@arm.com>2009-03-10 10:24:56 +0000
committerCatalin Marinas <catalin.marinas@arm.com>2009-03-10 10:24:56 +0000
commitd47913c8fb9ff68ab16dfedcba0c48db8d7800c7 (patch)
treee39ae61fc4a76735b632fe00a45aa1dd92166b06
parent77582cfa8a38fc71d1c46b3296a9f7ba4ad80275 (diff)
ARM Ltd's reworked oprofile support
Signed-off-by: Jon Callan <Jon.Callan@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h25
-rw-r--r--arch/arm/kernel/setup.c1
-rw-r--r--arch/arm/mach-realview/include/mach/board-pbx.h10
-rw-r--r--arch/arm/oprofile/Makefile8
-rw-r--r--arch/arm/oprofile/common.c43
-rw-r--r--arch/arm/oprofile/op_arm11.c164
-rw-r--r--arch/arm/oprofile/op_arm11.h44
-rw-r--r--arch/arm/oprofile/op_arm_model.h89
-rw-r--r--arch/arm/oprofile/op_counter.h27
-rw-r--r--arch/arm/oprofile/op_l2x0.c243
-rw-r--r--arch/arm/oprofile/op_l2x0.h15
-rw-r--r--arch/arm/oprofile/op_model_arm11_core.c162
-rw-r--r--arch/arm/oprofile/op_model_arm11_core.h45
-rw-r--r--arch/arm/oprofile/op_model_mpcore.c303
-rw-r--r--arch/arm/oprofile/op_model_mpcore.h61
-rw-r--r--arch/arm/oprofile/op_model_v6-7.c243
-rw-r--r--arch/arm/oprofile/op_model_v6.c67
-rw-r--r--arch/arm/oprofile/op_model_v7.c411
-rw-r--r--arch/arm/oprofile/op_model_v7.h103
-rw-r--r--arch/arm/oprofile/op_scu.c175
-rw-r--r--arch/arm/oprofile/op_scu.h23
-rw-r--r--arch/arm/oprofile/op_v7.c248
-rw-r--r--arch/arm/oprofile/op_v7.h36
-rw-r--r--kernel/irq/manage.c1
24 files changed, 1368 insertions, 1179 deletions
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index cdb9022716fd..3c655cee1712 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -51,6 +51,31 @@
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
+/* Interrupt bits */
+#define L2X0_INTR_ECNTR 0x01
+
+/* Aux Control bits */
+#define L2X0_AUX_CTRL_EMBUS (0x01<<20)
+
+/* Event Counter Control bits */
+#define L2X0_EVENT_CONTROL_ENABLE 0x1
+#define L2X0_EVENT_CONTROL_RESET_ALL 0x6
+
+/* Event Counter Config bits */
+#define L2X0_EVENT_CONFIG_DISABLED 0x0
+#define L2X0_EVENT_CONFIG_CO (0x1<<2)
+#define L2X0_EVENT_CONFIG_DRHIT (0x2<<2)
+#define L2X0_EVENT_CONFIG_DRREQ (0x3<<2)
+#define L2X0_EVENT_CONFIG_DWHIT (0x4<<2)
+#define L2X0_EVENT_CONFIG_DWREQ (0x5<<2)
+#define L2X0_EVENT_CONFIG_DWTREQ (0x6<<2)
+#define L2X0_EVENT_CONFIG_IRHIT (0x7<<2)
+#define L2X0_EVENT_CONFIG_IRREQ (0x8<<2)
+#define L2X0_EVENT_CONFIG_WA (0x9<<2)
+#define L2X0_EVENT_INTERRUPT_ON_INC 0x1
+#define L2X0_EVENT_INTERRUPT_ON_OVF 0x2
+#define L2X0_EVENT_INTERRUPT_DISABLED 0x3
+
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 57f894e6dde1..5b07a6e2006e 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -230,6 +230,7 @@ int cpu_architecture(void)
return cpu_arch;
}
+EXPORT_SYMBOL(cpu_architecture);
static void __init cacheid_init(void)
{
diff --git a/arch/arm/mach-realview/include/mach/board-pbx.h b/arch/arm/mach-realview/include/mach/board-pbx.h
index c26509388828..733953a30f8a 100644
--- a/arch/arm/mach-realview/include/mach/board-pbx.h
+++ b/arch/arm/mach-realview/include/mach/board-pbx.h
@@ -139,11 +139,11 @@
#define IRQ_PBX_WATCHDOG1 (IRQ_PBX_GIC_START + 40) /* Watchdog1 timer */
#define IRQ_PBX_TIMER4_5 (IRQ_PBX_GIC_START + 41) /* Timer 0/1 (default timer) */
#define IRQ_PBX_TIMER6_7 (IRQ_PBX_GIC_START + 42) /* Timer 2/3 */
-/* ... */
-#define IRQ_PBX_PMU_CPU3 (IRQ_PBX_GIC_START + 44) /* CPU PMU Interrupts */
-#define IRQ_PBX_PMU_CPU2 (IRQ_PBX_GIC_START + 45)
-#define IRQ_PBX_PMU_CPU1 (IRQ_PBX_GIC_START + 46)
-#define IRQ_PBX_PMU_CPU0 (IRQ_PBX_GIC_START + 47)
+#define IRQ_PBX_L220_EVENT (IRQ_PBX_GIC_START + 43)
+#define IRQ_PBX_PMU_CPU0 (IRQ_PBX_GIC_START + 44) /* CPU PMU Interrupts */
+#define IRQ_PBX_PMU_CPU1 (IRQ_PBX_GIC_START + 45)
+#define IRQ_PBX_PMU_CPU2 (IRQ_PBX_GIC_START + 46)
+#define IRQ_PBX_PMU_CPU3 (IRQ_PBX_GIC_START + 47)
/* ... */
#define IRQ_PBX_PCI0 (IRQ_PBX_GIC_START + 50)
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index 88e31f549f50..1593e3753549 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,9 +6,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
+oprofile-y := $(DRIVER_OBJS) common.o backtrace.o op_arm11.o op_v7.o op_model_v6-7.o
+oprofile-$(CONFIG_CACHE_L2X0) += op_l2x0.o
+oprofile-$(CONFIG_SMP) += op_scu.o
oprofile-$(CONFIG_CPU_XSCALE) += op_model_xscale.o
-oprofile-$(CONFIG_OPROFILE_ARM11_CORE) += op_model_arm11_core.o
-oprofile-$(CONFIG_OPROFILE_ARMV6) += op_model_v6.o
-oprofile-$(CONFIG_OPROFILE_MPCORE) += op_model_mpcore.o
-oprofile-$(CONFIG_OPROFILE_ARMV7) += op_model_v7.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 3fcd752d6146..2680f4d4b2f3 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -14,6 +14,10 @@
#include <linux/sysdev.h>
#include <linux/mutex.h>
+#include <asm/system.h>
+#include <asm/cputype.h>
+#include <asm/io.h>
+
#include "op_counter.h"
#include "op_arm_model.h"
@@ -130,24 +134,37 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
{
struct op_arm_model_spec *spec = NULL;
int ret = -ENODEV;
+ int cpu_arch = cpu_architecture();
ops->backtrace = arm_backtrace;
-#ifdef CONFIG_CPU_XSCALE
+ if (cpu_is_xscale())
spec = &op_xscale_spec;
-#endif
-
-#ifdef CONFIG_OPROFILE_ARMV6
+ else if (cpu_is_11mpcore() || cpu_arch == CPU_ARCH_ARMv6) {
+ /* cpu_architecture returns V7 for MPCore! */
spec = &op_armv6_spec;
-#endif
-
-#ifdef CONFIG_OPROFILE_MPCORE
- spec = &op_mpcore_spec;
-#endif
-
-#ifdef CONFIG_OPROFILE_ARMV7
+ if (cpu_is_11mpcore())
+ spec->name = "arm/11mpcore";
+ }
+ else if (cpu_arch == CPU_ARCH_ARMv7) {
spec = &op_armv7_spec;
-#endif
+ /*
+ * V7 CPUs all have the same kind of PMUs, but have a variable
+ * number of them. So the kernel side of Oprofile only needs
+ * to know whether we have the L2x0, and whether we're SMP.
+ * The user side needs more information, to decide which
+ * events file to use because, for example, some A8 event
+ * numbers differ from A9 event numbers.
+ */
+ if (cpu_is_a9()) {
+ if (is_smp())
+ spec->name = "arm/a9mpcore";
+ else
+ spec->name = "arm/a9";
+ }
+ else
+ spec->name = "arm/a8";
+ }
if (spec) {
ret = spec->init();
@@ -167,7 +184,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->start = op_arm_start;
ops->stop = op_arm_stop;
ops->cpu_type = op_arm_model->name;
- printk(KERN_INFO "oprofile: using %s\n", spec->name);
+ printk(KERN_INFO "oprofile: cpu_architecture() returns 0x%x, using %s model\n", cpu_arch, spec->name);
}
return ret;
diff --git a/arch/arm/oprofile/op_arm11.c b/arch/arm/oprofile/op_arm11.c
new file mode 100644
index 000000000000..4daa0084df15
--- /dev/null
+++ b/arch/arm/oprofile/op_arm11.c
@@ -0,0 +1,164 @@
+/**
+ * @file op_arm11.c
+ * ARM11 CP15 Performance Monitor Unit Driver
+ *
+ * @remark Copyright 2004-7 ARM SMP Development Team
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+
+#include "op_counter.h"
+#include "op_arm_model.h"
+#include "op_arm11.h"
+
+static inline void arm11_write_pmnc(u32 val)
+{
+ /* upper 4bits and 7, 11 are write-as-0 */
+ val &= 0x0ffff77f;
+ asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
+}
+
+static inline u32 arm11_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
+ return val;
+}
+
+static void arm11_reset_counter(unsigned int cnt)
+{
+ u32 val = -(u32)counter_config[COUNTER_CPUn_PMNm(smp_processor_id(), cnt)].count;
+ switch (cnt) {
+ case CCNT:
+ asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
+ break;
+
+ case PMN0:
+ asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
+ break;
+
+ case PMN1:
+ asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
+ break;
+ }
+}
+
+int arm11_setup_pmu(void)
+{
+ unsigned long event;
+ unsigned cpu;
+ u32 pmnc;
+
+ cpu = smp_processor_id();
+ if (arm11_read_pmnc() & PMCR_E) {
+ printk(KERN_ERR "oprofile: CPU%u PMU still enabled when setup new event counter.\n", cpu);
+ return -EBUSY;
+ }
+
+ /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
+ arm11_write_pmnc(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
+ PMCR_C | PMCR_P);
+
+ pmnc = 0;
+
+ if (counter_config[COUNTER_CPUn_PMNm(cpu, PMN0)].enabled) {
+ event = counter_config[COUNTER_CPUn_PMNm(cpu, PMN0)].event & 255;
+ pmnc |= event << 20;
+ pmnc |= PMCR_IEN_PMN0;
+ arm11_reset_counter(PMN0);
+ }
+ if (counter_config[COUNTER_CPUn_PMNm(cpu, PMN1)].enabled) {
+ event = counter_config[COUNTER_CPUn_PMNm(cpu, PMN1)].event & 255;
+ pmnc |= event << 12;
+ pmnc |= PMCR_IEN_PMN1;
+ arm11_reset_counter(PMN1);
+ }
+ if (counter_config[COUNTER_CPUn_CCNT(cpu)].enabled) {
+ pmnc |= PMCR_IEN_CCNT;
+ arm11_reset_counter(CCNT);
+ }
+
+ arm11_write_pmnc(pmnc);
+ return 0;
+}
+
+int arm11_start_pmu(void)
+{
+ arm11_write_pmnc(arm11_read_pmnc() | PMCR_E);
+ return 0;
+}
+
+int arm11_stop_pmu(void)
+{
+ unsigned int cnt;
+
+ arm11_write_pmnc(arm11_read_pmnc() & ~PMCR_E);
+
+ for (cnt = PMN0; cnt <= CCNT; cnt++)
+ arm11_reset_counter(cnt);
+
+ return 0;
+}
+
+/*
+ * CPU counters' IRQ handler (one IRQ per CPU)
+ */
+static irqreturn_t arm11_pmu_interrupt(int irq, void *arg)
+{
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int cnt;
+ u32 pmnc;
+
+ pmnc = arm11_read_pmnc();
+
+ /* First check if the two event counters have overflowed */
+ for (cnt = PMN0; cnt <= PMN1; ++cnt) {
+ if ((pmnc & (PMCR_OFL_PMN0 << cnt)) && (pmnc & (PMCR_IEN_PMN0 << cnt))) {
+ arm11_reset_counter(cnt);
+ oprofile_add_sample(regs, COUNTER_CPUn_PMNm(smp_processor_id(), cnt));
+ }
+ }
+
+ /* Now check if the cycle counter has overflowed */
+ if ((pmnc & PMCR_OFL_CCNT) && (pmnc & PMCR_IEN_CCNT)) {
+ arm11_reset_counter(CCNT);
+ oprofile_add_sample(regs, COUNTER_CPUn_CCNT(smp_processor_id()));
+ }
+
+ /* Clear counter flag(s) */
+ arm11_write_pmnc(pmnc);
+ return IRQ_HANDLED;
+}
+
+int arm11_request_interrupts(int *irqs, int nr)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for(i = 0; i < nr; i++) {
+ ret = request_irq(irqs[i], arm11_pmu_interrupt, IRQF_DISABLED, "CP15 PMU", NULL);
+ if (ret != 0) {
+ printk(KERN_ERR "oprofile: unable to request IRQ%u for CP15 PMU\n",
+ irqs[i]);
+ break;
+ }
+ }
+
+ if (i != nr)
+ while (i-- != 0)
+ free_irq(irqs[i], NULL);
+
+ return ret;
+}
+
+void arm11_release_interrupts(int *irqs, int nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr; i++)
+ free_irq(irqs[i], NULL);
+}
diff --git a/arch/arm/oprofile/op_arm11.h b/arch/arm/oprofile/op_arm11.h
new file mode 100644
index 000000000000..6047ec209104
--- /dev/null
+++ b/arch/arm/oprofile/op_arm11.h
@@ -0,0 +1,44 @@
+/**
+ * @file op_arm11.h
+ * ARM11 CP15 Performance Monitor Unit Driver
+ * @remark Copyright 2004-7 ARM SMP Development Team
+ * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
+ * @remark Copyright 2000-2004 MontaVista Software Inc
+ * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
+ * @remark Copyright 2004 Intel Corporation
+ * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
+ * @remark Copyright 2004 Oprofile Authors
+ *
+ * @remark Read the file COPYING
+ *
+ * @author Zwane Mwaikambo
+ */
+#ifndef OP_ARM11_H
+#define OP_ARM11_H
+
+/*
+ * Per-CPU PMCR
+ */
+#define PMCR_E (1 << 0) /* Enable */
+#define PMCR_P (1 << 1) /* Count reset */
+#define PMCR_C (1 << 2) /* Cycle counter reset */
+#define PMCR_D (1 << 3) /* Cycle counter counts every 64th cpu cycle */
+#define PMCR_IEN_PMN0 (1 << 4) /* Interrupt enable count reg 0 */
+#define PMCR_IEN_PMN1 (1 << 5) /* Interrupt enable count reg 1 */
+#define PMCR_IEN_CCNT (1 << 6) /* Interrupt enable cycle counter */
+#define PMCR_OFL_PMN0 (1 << 8) /* Count reg 0 overflow */
+#define PMCR_OFL_PMN1 (1 << 9) /* Count reg 1 overflow */
+#define PMCR_OFL_CCNT (1 << 10) /* Cycle counter overflow */
+
+#define PMN0 0
+#define PMN1 1
+
+#define CPU_COUNTER(cpu, counter) ((cpu) * 3 + (counter))
+
+int arm11_setup_pmu(void);
+int arm11_start_pmu(void);
+int arm11_stop_pmu(void);
+int arm11_request_interrupts(int *, int);
+void arm11_release_interrupts(int *, int);
+
+#endif
diff --git a/arch/arm/oprofile/op_arm_model.h b/arch/arm/oprofile/op_arm_model.h
index 8c4e4f6a1de3..32288caa9511 100644
--- a/arch/arm/oprofile/op_arm_model.h
+++ b/arch/arm/oprofile/op_arm_model.h
@@ -20,16 +20,97 @@ struct op_arm_model_spec {
char *name;
};
-#ifdef CONFIG_CPU_XSCALE
extern struct op_arm_model_spec op_xscale_spec;
-#endif
-
extern struct op_arm_model_spec op_armv6_spec;
-extern struct op_arm_model_spec op_mpcore_spec;
extern struct op_arm_model_spec op_armv7_spec;
extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
extern int __init op_arm_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec);
extern void op_arm_exit(void);
+
+/*
+ * The macros need to be reimplemented as things we can call at runtime,
+ * along with cpu_is_xscale in system.h
+ */
+#ifdef CONFIG_CACHE_L2X0
+#define have_l2x0() 1
+#else
+#define have_l2x0() 0
+#endif
+#ifdef CONFIG_SMP
+#define is_smp() 1
+#else
+#define is_smp() 0
+#endif
+#ifdef CONFIG_REALVIEW_EB_A9MP
+#define cpu_is_a9() 1
+#else
+#define cpu_is_a9() 0
+#endif
+#if defined(CONFIG_REALVIEW_EB_ARM11MP) || defined(CONFIG_MACH_REALVIEW_PB11MP)
+#define cpu_is_11mpcore() 1
+#else
+#define cpu_is_11mpcore() 0
+#endif
+#ifdef CONFIG_MACH_REALVIEW_PBX
+#include <mach/hardware.h>
+#include <mach/io.h>
+#include <mach/board-pbx.h>
+#undef cpu_is_11mpcore
+#define cpu_is_11mpcore() core_tile_pbx11mp()
+#undef cpu_is_a9
+#define cpu_is_a9() core_tile_pbxa9mp()
+#endif
+
+/*
+ * ARM11MPCore SCU event monitor support
+ */
+#ifdef CONFIG_SMP
+#if defined(CONFIG_MACH_REALVIEW_EB)
+#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)
+#elif defined(CONFIG_MACH_REALVIEW_PB11MP)
+#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_TC11MP_SCU_BASE + 0x10)
+#elif defined(CONFIG_MACH_REALVIEW_PBX)
+#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_PBX_TILE_SCU_BASE + 0x10)
+#else
+#error Cannot determine the base address of the SCU
+#endif
+#endif
+
+/*
+ * IRQ numbers for PMUs (A9MPCore and 11MPCore) and SCU (11MPCore only)
+ */
+#if defined(CONFIG_MACH_REALVIEW_EB) || defined(CONFIG_MACH_REALVIEW_PB11MP)
+#define IRQ_PMU_CPU0 IRQ_TC11MP_PMU_CPU0
+#define IRQ_PMU_CPU1 IRQ_TC11MP_PMU_CPU1
+#define IRQ_PMU_CPU2 IRQ_TC11MP_PMU_CPU2
+#define IRQ_PMU_CPU3 IRQ_TC11MP_PMU_CPU3
+#define IRQ_PMU_SCU0 IRQ_TC11MP_PMU_SCU0
+#define IRQ_PMU_SCU1 IRQ_TC11MP_PMU_SCU1
+#define IRQ_PMU_SCU2 IRQ_TC11MP_PMU_SCU2
+#define IRQ_PMU_SCU3 IRQ_TC11MP_PMU_SCU3
+#define IRQ_PMU_SCU4 IRQ_TC11MP_PMU_SCU4
+#define IRQ_PMU_SCU5 IRQ_TC11MP_PMU_SCU5
+#define IRQ_PMU_SCU6 IRQ_TC11MP_PMU_SCU6
+#define IRQ_PMU_SCU7 IRQ_TC11MP_PMU_SCU7
+#elif defined(CONFIG_MACH_REALVIEW_PBX)
+#define IRQ_PMU_CPU0 IRQ_PBX_PMU_CPU0
+#define IRQ_PMU_CPU1 IRQ_PBX_PMU_CPU1
+#define IRQ_PMU_CPU2 IRQ_PBX_PMU_CPU2
+#define IRQ_PMU_CPU3 IRQ_PBX_PMU_CPU3
+#define IRQ_PMU_SCU0 IRQ_PBX_PMU_SCU0
+#define IRQ_PMU_SCU1 IRQ_PBX_PMU_SCU1
+#define IRQ_PMU_SCU2 IRQ_PBX_PMU_SCU2
+#define IRQ_PMU_SCU3 IRQ_PBX_PMU_SCU3
+#define IRQ_PMU_SCU4 IRQ_PBX_PMU_SCU4
+#define IRQ_PMU_SCU5 IRQ_PBX_PMU_SCU5
+#define IRQ_PMU_SCU6 IRQ_PBX_PMU_SCU6
+#define IRQ_PMU_SCU7 IRQ_PBX_PMU_SCU7
+#elif defined(CONFIG_ARCH_OMAP2)
+#define IRQ_PMU_CPU0 3
+#else
+#error PMU/SCU Event Counter interrupt sources not defined!
+#endif
+
#endif /* OP_ARM_MODEL_H */
diff --git a/arch/arm/oprofile/op_counter.h b/arch/arm/oprofile/op_counter.h
index ca942a63b52f..3d92fb59b0a1 100644
--- a/arch/arm/oprofile/op_counter.h
+++ b/arch/arm/oprofile/op_counter.h
@@ -24,4 +24,31 @@ struct op_counter_config {
extern struct op_counter_config *counter_config;
+
+/*
+ * List of userspace counter numbers: we use the same layout for both
+ * the V6 and V7 oprofile models.
+ * 0- 7 CPU0 event counters and cycle counter
+ * 8-15 CPU1 event counters and cycle counter
+ * 16-23 CPU2 event counters and cycle counter
+ * 24-31 CPU3 event counters and cycle counter
+ * 32-39 SCU counters
+ * 40-41 L2X0 counters
+ */
+
+#define PMU_COUNTERS_PER_CPU 8 /* 7 event counters, 1 cycle counter */
+#define CCNT (PMU_COUNTERS_PER_CPU - 1)
+#define MAX_CPUS 4
+
+#define COUNTER_CPUn_PMNm(N,M) ((N) * PMU_COUNTERS_PER_CPU + (M))
+#define COUNTER_CPUn_CCNT(N) (((N) + 1) * PMU_COUNTERS_PER_CPU - 1)
+
+#define COUNTER_SCU_MN(N) (PMU_COUNTERS_PER_CPU * MAX_CPUS + (N))
+#define NUM_SCU_COUNTERS 8
+
+#define COUNTER_L2X0_EC(N) (COUNTER_SCU_MN(NUM_SCU_COUNTERS) + (N))
+#define L2X0_NUM_COUNTERS 2
+
+#define NUM_COUNTERS COUNTER_L2X0_EC(L2X0_NUM_COUNTERS)
+
#endif /* OP_COUNTER_H */
diff --git a/arch/arm/oprofile/op_l2x0.c b/arch/arm/oprofile/op_l2x0.c
new file mode 100644
index 000000000000..fa1f1b5dac4f
--- /dev/null
+++ b/arch/arm/oprofile/op_l2x0.c
@@ -0,0 +1,243 @@
+/**
+ * @file op_l2x0.c
+ * ARM L220/L230 Level 2 Cache Controller Event Counter Driver
+ * @remark Copyright 2004-7 ARM SMP Development Team
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+
+#include <asm/io.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <mach/platform.h>
+#include <mach/irqs.h>
+
+#include "op_counter.h"
+#include "op_arm_model.h"
+#include "op_l2x0.h"
+
+static unsigned l2x0_base, l2x0_irq;
+
+/*
+ * Determine L2X0 base address and event counter IRQ
+ */
+void l2x0_ec_setup(void)
+{
+#if defined(CONFIG_MACH_REALVIEW_EB)
+ l2x0_base = IO_ADDRESS(REALVIEW_EB11MP_L220_BASE);
+ l2x0_irq = IRQ_EB11MP_L220_EVENT;
+#elif defined(CONFIG_MACH_REALVIEW_PB11MP)
+ l2x0_base = IO_ADDRESS(REALVIEW_TC11MP_L220_BASE); /* was IRQ_TC11MP_L220_EVENT: an IRQ number is not a device base address */
+ l2x0_irq = IRQ_TC11MP_L220_EVENT; /* PB11MP IRQ macros use the TC11MP prefix (see op_arm_model.h); IRQ_PB11MP_L220_EVENT is undefined */
+#elif defined(CONFIG_MACH_REALVIEW_PBX)
+ l2x0_base = IO_ADDRESS(REALVIEW_PBX_TILE_L220_BASE);
+ l2x0_irq = IRQ_PBX_L220_EVENT;
+#else
+#error l2x0_base and l2x0_irq not set!
+#endif
+}
+
+
+
+/*
+ * Read the configuration of an event counter
+ */
+static inline u32 l2x0_ec_read_config(unsigned int cnt)
+{
+ return readl(l2x0_base + L2X0_EVENT_CNT0_CFG - cnt * 4);
+}
+
+/*
+ * Change the configuration of an event counter
+ */
+static inline void l2x0_ec_write_config(unsigned int cnt, u32 config)
+{
+ writel(config, l2x0_base + L2X0_EVENT_CNT0_CFG - cnt * 4);
+}
+
+/*
+ * Reset a counter to its initial value
+ */
+static inline void l2x0_ec_reset(unsigned int cnt)
+{
+ u32 val, temp;
+
+ /*
+ * We can only write to the counter value when the counter is disabled
+ */
+ temp = l2x0_ec_read_config(cnt);
+ l2x0_ec_write_config(cnt, L2X0_EVENT_CONFIG_DISABLED);
+
+ /*
+ * Ok, set the counter value
+ */
+ val = -(u32)counter_config[COUNTER_L2X0_EC(cnt)].count;
+ writel(val, l2x0_base + L2X0_EVENT_CNT0_VAL - cnt * 4);
+
+ /*
+ * Now put the counter config back to what it was before
+ */
+ l2x0_ec_write_config(cnt, temp);
+}
+
+/*
+ * Read the current value of an event counter
+ */
+static inline u32 l2x0_ec_read_value(unsigned int cnt)
+{
+ return readl(l2x0_base + L2X0_EVENT_CNT0_VAL - cnt * 4);
+}
+
+/*
+ * Enable/disable L220/L230 event counting system
+ * We assume the Event Monitoring Bus is already enabled
+ * (that is, bit 20 is set in the L2X0 Aux control register)
+ * because it can't be set while the L2X0 is enabled.
+ */
+static inline void l2x0_ec_system_setup(unsigned enable)
+{
+ u32 val;
+ unsigned cnt;
+
+ /*
+ * Enable/disable and Reset all the counters
+ */
+ val = L2X0_EVENT_CONTROL_RESET_ALL;
+ if (enable)
+ val |= L2X0_EVENT_CONTROL_ENABLE;
+ writel(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
+
+ /*
+ * Set the individual counters to disabled (for now at least)
+ */
+ for (cnt = 0; cnt < L2X0_NUM_COUNTERS; ++cnt)
+ l2x0_ec_write_config(cnt, L2X0_EVENT_CONFIG_DISABLED);
+
+ /*
+ * Clear any stray EC interrupt, and set the mask appropriately
+ */
+ writel(L2X0_INTR_ECNTR, l2x0_base + L2X0_INTR_CLEAR);
+ val = readl(l2x0_base + L2X0_INTR_MASK);
+ if (enable)
+ val |= L2X0_INTR_ECNTR;
+ else
+ val &= ~L2X0_INTR_ECNTR; /* bitwise clear of the EC bit; '!' would wipe the whole mask */
+ writel(val, l2x0_base + L2X0_INTR_MASK);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Rotate L220/L230 EC interrupts around all the online CPUs in an SMP system.
+ * We do this because we can't know which CPU caused an L220/L230 event,
+ * and this gives us a sensible statistical picture of what was running.
+ * This function is always called in interrupt context.
+ */
+static inline void l2x0_ec_rotate_irq(int irq)
+{
+ static unsigned cpu = 0;
+ cpumask_t mask;
+
+ if (is_smp()) {
+ cpu = next_cpu(cpu, cpu_online_map);
+ if (cpu >= NR_CPUS)
+ cpu = first_cpu(cpu_online_map);
+ mask = cpumask_of_cpu(cpu);
+ irq_set_affinity(irq, mask);
+ }
+}
+#endif
+
+/*
+ * L220/L230 event counter IRQ handler
+ */
+static irqreturn_t l2x0_ec_interrupt(int irq, void *arg)
+{
+ u32 interrupt_status;
+ unsigned int cnt;
+
+ /* If it's an L2X0 EC interrupt, process it */
+ interrupt_status = readl(l2x0_base + L2X0_MASKED_INTR_STAT);
+
+ if (interrupt_status & L2X0_INTR_ECNTR) {
+ /*
+ * A counter that has overflowed reads 0xffffffff
+ * This is not actually documented anywhere...
+ */
+ for (cnt = 0; cnt < L2X0_NUM_COUNTERS; ++cnt) {
+ if (l2x0_ec_read_value(cnt) == 0xffffffff) {
+ oprofile_add_sample(get_irq_regs(),
+ COUNTER_L2X0_EC(cnt));
+ l2x0_ec_reset(cnt);
+ }
+ }
+ /*
+ * Clear the interrupt, and move it onto the next CPU.
+ */
+ writel(L2X0_INTR_ECNTR, l2x0_base + L2X0_INTR_CLEAR);
+#ifdef CONFIG_SMP
+ l2x0_ec_rotate_irq(irq);
+#endif
+ return IRQ_HANDLED;
+ }
+ else {
+ return IRQ_NONE;
+ }
+}
+
+int l2x0_ec_start(void)
+{
+ int ret = 0;
+ unsigned cnt;
+ u32 cfg;
+
+ /*
+ * Install handler for the L220/L230 event counter interrupt
+ */
+ ret = request_irq(l2x0_irq, l2x0_ec_interrupt, IRQF_DISABLED,
+ "L2X0 EC", NULL);
+ if (ret) {
+ printk(KERN_ERR "oprofile: unable to request IRQ%u "
+ "for L2X0 Event Counter\n", l2x0_irq);
+ return ret;
+ }
+
+ /*
+ * Enable the event counter system
+ */
+ l2x0_ec_system_setup(1);
+
+ /*
+ * Configure the events we're interested in, and reset the counters
+ */
+ for (cnt = 0; cnt < L2X0_NUM_COUNTERS; ++cnt) {
+ if (counter_config[COUNTER_L2X0_EC(cnt)].enabled) {
+ cfg = counter_config[COUNTER_L2X0_EC(cnt)].event & 0xFF;
+ cfg <<= 2;
+ cfg |= L2X0_EVENT_INTERRUPT_ON_OVF;
+ l2x0_ec_write_config(cnt, cfg);
+ l2x0_ec_reset(cnt);
+ }
+ else
+ l2x0_ec_write_config(cnt, L2X0_EVENT_CONFIG_DISABLED);
+ }
+
+ return 0;
+}
+
+void l2x0_ec_stop(void)
+{
+ unsigned cnt;
+
+ /* Disable individual L220/L230 event counters */
+ for (cnt = 0; cnt < L2X0_NUM_COUNTERS; ++cnt)
+ l2x0_ec_write_config(cnt, L2X0_EVENT_CONFIG_DISABLED);
+
+ /* Disable L220/L230 event counter system */
+ l2x0_ec_system_setup(0);
+
+ /* Remove L220/L230 event counter interrupt handler */
+ free_irq(l2x0_irq, NULL);
+}
diff --git a/arch/arm/oprofile/op_l2x0.h b/arch/arm/oprofile/op_l2x0.h
new file mode 100644
index 000000000000..8ec2df7b183d
--- /dev/null
+++ b/arch/arm/oprofile/op_l2x0.h
@@ -0,0 +1,15 @@
+/**
+ * @file op_l2x0.h
+ * ARM L220/L230 Level 2 Cache Controller Event Counter Driver
+ * @remark Copyright 2007 ARM SMP Development Team
+ *
+ * @remark Read the file COPYING
+ */
+#ifndef OP_MODEL_L2X0_H
+#define OP_MODEL_L2X0_H
+
+void l2x0_ec_setup(void);
+int l2x0_ec_start(void);
+void l2x0_ec_stop(void);
+
+#endif
diff --git a/arch/arm/oprofile/op_model_arm11_core.c b/arch/arm/oprofile/op_model_arm11_core.c
index ad80752cb9fb..e69de29bb2d1 100644
--- a/arch/arm/oprofile/op_model_arm11_core.c
+++ b/arch/arm/oprofile/op_model_arm11_core.c
@@ -1,162 +0,0 @@
-/**
- * @file op_model_arm11_core.c
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-/*
- * ARM11 PMU support
- */
-static inline void arm11_write_pmnc(u32 val)
-{
- /* upper 4bits and 7, 11 are write-as-0 */
- val &= 0x0ffff77f;
- asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
-}
-
-static inline u32 arm11_read_pmnc(void)
-{
- u32 val;
- asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
- return val;
-}
-
-static void arm11_reset_counter(unsigned int cnt)
-{
- u32 val = -(u32)counter_config[CPU_COUNTER(smp_processor_id(), cnt)].count;
- switch (cnt) {
- case CCNT:
- asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
- break;
-
- case PMN0:
- asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
- break;
-
- case PMN1:
- asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
- break;
- }
-}
-
-int arm11_setup_pmu(void)
-{
- unsigned int cnt;
- u32 pmnc;
-
- if (arm11_read_pmnc() & PMCR_E) {
- printk(KERN_ERR "oprofile: CPU%u PMU still enabled when setup new event counter.\n", smp_processor_id());
- return -EBUSY;
- }
-
- /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
- arm11_write_pmnc(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
- PMCR_C | PMCR_P);
-
- for (pmnc = 0, cnt = PMN0; cnt <= CCNT; cnt++) {
- unsigned long event;
-
- if (!counter_config[CPU_COUNTER(smp_processor_id(), cnt)].enabled)
- continue;
-
- event = counter_config[CPU_COUNTER(smp_processor_id(), cnt)].event & 255;
-
- /*
- * Set event (if destined for PMNx counters)
- */
- if (cnt == PMN0) {
- pmnc |= event << 20;
- } else if (cnt == PMN1) {
- pmnc |= event << 12;
- }
-
- /*
- * We don't need to set the event if it's a cycle count
- * Enable interrupt for this counter
- */
- pmnc |= PMCR_IEN_PMN0 << cnt;
- arm11_reset_counter(cnt);
- }
- arm11_write_pmnc(pmnc);
-
- return 0;
-}
-
-int arm11_start_pmu(void)
-{
- arm11_write_pmnc(arm11_read_pmnc() | PMCR_E);
- return 0;
-}
-
-int arm11_stop_pmu(void)
-{
- unsigned int cnt;
-
- arm11_write_pmnc(arm11_read_pmnc() & ~PMCR_E);
-
- for (cnt = PMN0; cnt <= CCNT; cnt++)
- arm11_reset_counter(cnt);
-
- return 0;
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t arm11_pmu_interrupt(int irq, void *arg)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned int cnt;
- u32 pmnc;
-
- pmnc = arm11_read_pmnc();
-
- for (cnt = PMN0; cnt <= CCNT; cnt++) {
- if ((pmnc & (PMCR_OFL_PMN0 << cnt)) && (pmnc & (PMCR_IEN_PMN0 << cnt))) {
- arm11_reset_counter(cnt);
- oprofile_add_sample(regs, CPU_COUNTER(smp_processor_id(), cnt));
- }
- }
- /* Clear counter flag(s) */
- arm11_write_pmnc(pmnc);
- return IRQ_HANDLED;
-}
-
-int arm11_request_interrupts(int *irqs, int nr)
-{
- unsigned int i;
- int ret = 0;
-
- for(i = 0; i < nr; i++) {
- ret = request_irq(irqs[i], arm11_pmu_interrupt, IRQF_DISABLED, "CP15 PMU", NULL);
- if (ret != 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u for MPCORE-EM\n",
- irqs[i]);
- break;
- }
- }
-
- if (i != nr)
- while (i-- != 0)
- free_irq(irqs[i], NULL);
-
- return ret;
-}
-
-void arm11_release_interrupts(int *irqs, int nr)
-{
- unsigned int i;
-
- for (i = 0; i < nr; i++)
- free_irq(irqs[i], NULL);
-}
diff --git a/arch/arm/oprofile/op_model_arm11_core.h b/arch/arm/oprofile/op_model_arm11_core.h
index 6f8538e5a960..e69de29bb2d1 100644
--- a/arch/arm/oprofile/op_model_arm11_core.h
+++ b/arch/arm/oprofile/op_model_arm11_core.h
@@ -1,45 +0,0 @@
-/**
- * @file op_model_arm11_core.h
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_ARM11_CORE_H
-#define OP_MODEL_ARM11_CORE_H
-
-/*
- * Per-CPU PMCR
- */
-#define PMCR_E (1 << 0) /* Enable */
-#define PMCR_P (1 << 1) /* Count reset */
-#define PMCR_C (1 << 2) /* Cycle counter reset */
-#define PMCR_D (1 << 3) /* Cycle counter counts every 64th cpu cycle */
-#define PMCR_IEN_PMN0 (1 << 4) /* Interrupt enable count reg 0 */
-#define PMCR_IEN_PMN1 (1 << 5) /* Interrupt enable count reg 1 */
-#define PMCR_IEN_CCNT (1 << 6) /* Interrupt enable cycle counter */
-#define PMCR_OFL_PMN0 (1 << 8) /* Count reg 0 overflow */
-#define PMCR_OFL_PMN1 (1 << 9) /* Count reg 1 overflow */
-#define PMCR_OFL_CCNT (1 << 10) /* Cycle counter overflow */
-
-#define PMN0 0
-#define PMN1 1
-#define CCNT 2
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * 3 + (counter))
-
-int arm11_setup_pmu(void);
-int arm11_start_pmu(void);
-int arm11_stop_pmu(void);
-int arm11_request_interrupts(int *, int);
-void arm11_release_interrupts(int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 4de366e8b4c5..e69de29bb2d1 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -1,303 +0,0 @@
-/**
- * @file op_model_mpcore.c
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- *
- * Counters:
- * 0: PMN0 on CPU0, per-cpu configurable event counter
- * 1: PMN1 on CPU0, per-cpu configurable event counter
- * 2: CCNT on CPU0
- * 3: PMN0 on CPU1
- * 4: PMN1 on CPU1
- * 5: CCNT on CPU1
- * 6: PMN0 on CPU1
- * 7: PMN1 on CPU1
- * 8: CCNT on CPU1
- * 9: PMN0 on CPU1
- * 10: PMN1 on CPU1
- * 11: CCNT on CPU1
- * 12-19: configurable SCU event counters
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <asm/system.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-#include "op_model_mpcore.h"
-
-/*
- * MPCore SCU event monitor support
- */
-#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)
-
-/*
- * Bitmask of used SCU counters
- */
-static unsigned int scu_em_used;
-
-/*
- * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
- */
-static inline void scu_reset_counter(struct eventmonitor __iomem *emc, unsigned int n)
-{
- writel(-(u32)counter_config[SCU_COUNTER(n)].count, &emc->MC[n]);
-}
-
-static inline void scu_set_event(struct eventmonitor __iomem *emc, unsigned int n, u32 event)
-{
- event &= 0xff;
- writeb(event, &emc->MCEB[n]);
-}
-
-/*
- * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
- */
-static irqreturn_t scu_em_interrupt(int irq, void *arg)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int cnt;
-
- cnt = irq - IRQ_EB11MP_PMU_SCU0;
- oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
- scu_reset_counter(emc, cnt);
-
- /* Clear overflow flag for this counter */
- writel(1 << (cnt + 16), &emc->PMCR);
-
- return IRQ_HANDLED;
-}
-
-/* Configure just the SCU counters that the user has requested */
-static void scu_setup(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int i;
-
- scu_em_used = 0;
-
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (counter_config[SCU_COUNTER(i)].enabled &&
- counter_config[SCU_COUNTER(i)].event) {
- scu_set_event(emc, i, 0); /* disable counter for now */
- scu_em_used |= 1 << i;
- }
- }
-}
-
-static int scu_start(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int temp, i;
- unsigned long event;
- int ret = 0;
-
- /*
- * request the SCU counter interrupts that we need
- */
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- ret = request_irq(IRQ_EB11MP_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
- if (ret) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
- IRQ_EB11MP_PMU_SCU0 + i);
- goto err_free_scu;
- }
- }
- }
-
- /*
- * clear overflow and enable interrupt for all used counters
- */
- temp = readl(&emc->PMCR);
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- scu_reset_counter(emc, i);
- event = counter_config[SCU_COUNTER(i)].event;
- scu_set_event(emc, i, event);
-
- /* clear overflow/interrupt */
- temp |= 1 << (i + 16);
- /* enable interrupt*/
- temp |= 1 << (i + 8);
- }
- }
-
- /* Enable all 8 counters */
- temp |= PMCR_E;
- writel(temp, &emc->PMCR);
-
- return 0;
-
- err_free_scu:
- while (i--)
- free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
- return ret;
-}
-
-static void scu_stop(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int temp, i;
-
- /* Disable counter interrupts */
- /* Don't disable all 8 counters (with the E bit) as they may be in use */
- temp = readl(&emc->PMCR);
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i))
- temp &= ~(1 << (i + 8));
- }
- writel(temp, &emc->PMCR);
-
- /* Free counter interrupts and reset counters */
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- scu_reset_counter(emc, i);
- free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
- }
- }
-}
-
-struct em_function_data {
- int (*fn)(void);
- int ret;
-};
-
-static void em_func(void *data)
-{
- struct em_function_data *d = data;
- int ret = d->fn();
- if (ret)
- d->ret = ret;
-}
-
-static int em_call_function(int (*fn)(void))
-{
- struct em_function_data data;
-
- data.fn = fn;
- data.ret = 0;
-
- preempt_disable();
- smp_call_function(em_func, &data, 1);
- em_func(&data);
- preempt_enable();
-
- return data.ret;
-}
-
-/*
- * Glue to stick the individual ARM11 PMUs and the SCU
- * into the oprofile framework.
- */
-static int em_setup_ctrs(void)
-{
- int ret;
-
- /* Configure CPU counters by cross-calling to the other CPUs */
- ret = em_call_function(arm11_setup_pmu);
- if (ret == 0)
- scu_setup();
-
- return 0;
-}
-
-static int arm11_irqs[] = {
- [0] = IRQ_EB11MP_PMU_CPU0,
- [1] = IRQ_EB11MP_PMU_CPU1,
- [2] = IRQ_EB11MP_PMU_CPU2,
- [3] = IRQ_EB11MP_PMU_CPU3
-};
-
-static int em_start(void)
-{
- int ret;
-
- ret = arm11_request_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
- if (ret == 0) {
- em_call_function(arm11_start_pmu);
-
- ret = scu_start();
- if (ret)
- arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
- }
- return ret;
-}
-
-static void em_stop(void)
-{
- em_call_function(arm11_stop_pmu);
- arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
- scu_stop();
-}
-
-/*
- * Why isn't there a function to route an IRQ to a specific CPU in
- * genirq?
- */
-static void em_route_irq(int irq, unsigned int cpu)
-{
- struct irq_desc *desc = irq_desc + irq;
- cpumask_t mask = cpumask_of_cpu(cpu);
-
- spin_lock_irq(&desc->lock);
- desc->affinity = mask;
- desc->chip->set_affinity(irq, mask);
- spin_unlock_irq(&desc->lock);
-}
-
-static int em_setup(void)
-{
- /*
- * Send SCU PMU interrupts to the "owner" CPU.
- */
- em_route_irq(IRQ_EB11MP_PMU_SCU0, 0);
- em_route_irq(IRQ_EB11MP_PMU_SCU1, 0);
- em_route_irq(IRQ_EB11MP_PMU_SCU2, 1);
- em_route_irq(IRQ_EB11MP_PMU_SCU3, 1);
- em_route_irq(IRQ_EB11MP_PMU_SCU4, 2);
- em_route_irq(IRQ_EB11MP_PMU_SCU5, 2);
- em_route_irq(IRQ_EB11MP_PMU_SCU6, 3);
- em_route_irq(IRQ_EB11MP_PMU_SCU7, 3);
-
- /*
- * Send CP15 PMU interrupts to the owner CPU.
- */
- em_route_irq(IRQ_EB11MP_PMU_CPU0, 0);
- em_route_irq(IRQ_EB11MP_PMU_CPU1, 1);
- em_route_irq(IRQ_EB11MP_PMU_CPU2, 2);
- em_route_irq(IRQ_EB11MP_PMU_CPU3, 3);
-
- return 0;
-}
-
-struct op_arm_model_spec op_mpcore_spec = {
- .init = em_setup,
- .num_counters = MPCORE_NUM_COUNTERS,
- .setup_ctrs = em_setup_ctrs,
- .start = em_start,
- .stop = em_stop,
- .name = "arm/mpcore",
-};
diff --git a/arch/arm/oprofile/op_model_mpcore.h b/arch/arm/oprofile/op_model_mpcore.h
index 73d811023688..e69de29bb2d1 100644
--- a/arch/arm/oprofile/op_model_mpcore.h
+++ b/arch/arm/oprofile/op_model_mpcore.h
@@ -1,61 +0,0 @@
-/**
- * @file op_model_mpcore.c
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_MPCORE_H
-#define OP_MODEL_MPCORE_H
-
-struct eventmonitor {
- unsigned long PMCR;
- unsigned char MCEB[8];
- unsigned long MC[8];
-};
-
-/*
- * List of userspace counter numbers: note that the structure is important.
- * The code relies on CPUn's counters being CPU0's counters + 3n
- * and on CPU0's counters starting at 0
- */
-
-#define COUNTER_CPU0_PMN0 0
-#define COUNTER_CPU0_PMN1 1
-#define COUNTER_CPU0_CCNT 2
-
-#define COUNTER_CPU1_PMN0 3
-#define COUNTER_CPU1_PMN1 4
-#define COUNTER_CPU1_CCNT 5
-
-#define COUNTER_CPU2_PMN0 6
-#define COUNTER_CPU2_PMN1 7
-#define COUNTER_CPU2_CCNT 8
-
-#define COUNTER_CPU3_PMN0 9
-#define COUNTER_CPU3_PMN1 10
-#define COUNTER_CPU3_CCNT 11
-
-#define COUNTER_SCU_MN0 12
-#define COUNTER_SCU_MN1 13
-#define COUNTER_SCU_MN2 14
-#define COUNTER_SCU_MN3 15
-#define COUNTER_SCU_MN4 16
-#define COUNTER_SCU_MN5 17
-#define COUNTER_SCU_MN6 18
-#define COUNTER_SCU_MN7 19
-#define NUM_SCU_COUNTERS 8
-
-#define SCU_COUNTER(number) ((number) + COUNTER_SCU_MN0)
-
-#define MPCORE_NUM_COUNTERS SCU_COUNTER(NUM_SCU_COUNTERS)
-
-#endif
diff --git a/arch/arm/oprofile/op_model_v6-7.c b/arch/arm/oprofile/op_model_v6-7.c
new file mode 100644
index 000000000000..6108a91331f7
--- /dev/null
+++ b/arch/arm/oprofile/op_model_v6-7.c
@@ -0,0 +1,243 @@
+/**
+ * @file op_model_v6-7.c
+ * ARM V6 and V7 Performance Monitor models
+ *
+ * Based on op_model_xscale.c
+ *
+ * @remark Copyright 2007 ARM SMP Development Team
+ * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
+ * @remark Copyright 2000-2004 MontaVista Software Inc
+ * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
+ * @remark Copyright 2004 Intel Corporation
+ * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
+ * @remark Copyright 2004 OProfile Authors
+ *
+ * @remark Read the file COPYING
+ *
+ * @author Tony Lindgren <tony@atomide.com>
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+
+#include <linux/smp.h>
+
+#include "op_counter.h"
+#include "op_arm_model.h"
+#include "op_arm11.h"
+#include "op_v7.h"
+#include "op_scu.h"
+#include "op_l2x0.h"
+
+static int arm11_irqs[] = {
+ [0] = IRQ_PMU_CPU0,
+#ifdef CONFIG_SMP
+ [1] = IRQ_PMU_CPU1,
+ [2] = IRQ_PMU_CPU2,
+ [3] = IRQ_PMU_CPU3
+#endif
+};
+
+static int v7_irqs[] = {
+ [0] = IRQ_PMU_CPU0,
+#ifdef CONFIG_SMP
+ [1] = IRQ_PMU_CPU1,
+ [2] = IRQ_PMU_CPU2,
+ [3] = IRQ_PMU_CPU3
+#endif
+};
+
+
+/*
+ * Functions and struct to enable calling a function on all CPUs in an SMP
+ * system. This works on a non-SMP system too (i.e. just calls the function!)
+ */
+struct em_function_data {
+ int (*fn)(void);
+ int ret;
+};
+
+static void em_func(void *data)
+{
+ struct em_function_data *d = data;
+ int ret = d->fn();
+ if (ret)
+ d->ret = ret;
+}
+
+static int em_call_function(int (*fn)(void))
+{
+ struct em_function_data data;
+
+ data.fn = fn;
+ data.ret = 0;
+
+ get_cpu();
+ if (is_smp())
+ smp_call_function(em_func, &data, 1);
+ em_func(&data);
+ put_cpu();
+
+ return data.ret;
+}
+
+
+/*
+ * Why isn't there a function to route an IRQ to a specific CPU in
+ * genirq?
+ */
+#ifdef CONFIG_SMP
+void em_route_irq(int irq, unsigned int cpu)
+{
+ irq_set_affinity(irq, *(get_cpu_mask(cpu)));
+}
+#endif
+
+/*
+ * ARM V6 Oprofile callbacks
+ */
+static void v6_stop(void)
+{
+ em_call_function(arm11_stop_pmu);
+ arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
+ if (is_smp())
+ scu_stop();
+ if (have_l2x0())
+ l2x0_ec_stop();
+}
+
+static int v6_start(void)
+{
+ int ret;
+#ifdef CONFIG_SMP
+ unsigned i;
+
+ if (is_smp()) {
+ /*
+ * Send SCU and CP15 PMU interrupts to the "owner" CPU.
+ */
+ for (i=0; i<CONFIG_NR_CPUS; ++i) {
+ em_route_irq(IRQ_PMU_SCU0 + 2 * i, i);
+ em_route_irq(IRQ_PMU_SCU1 + 2 * i, i);
+ em_route_irq(IRQ_PMU_CPU0 + i, i);
+ }
+ }
+#endif
+
+ ret = arm11_request_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
+ if (ret == 0) {
+ em_call_function(arm11_start_pmu);
+
+ if (is_smp())
+ ret = scu_start();
+
+ if (!ret && have_l2x0())
+ ret = l2x0_ec_start();
+
+ if (ret)
+ arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
+ }
+ return ret;
+}
+
+static int v6_init(void)
+{
+ return 0;
+}
+
+static int v6_setup_ctrs(void)
+{
+ int ret;
+ ret = em_call_function(arm11_setup_pmu);
+
+ if (ret == 0 && is_smp())
+ scu_setup();
+
+ if (ret == 0 && have_l2x0())
+ l2x0_ec_setup();
+
+ return ret;
+}
+
+/*
+ * ARM V7 Oprofile callbacks
+ */
+static int v7_init(void)
+{
+ return 0;
+}
+
+
+static int v7_setup_ctrs(void)
+{
+ int ret;
+
+ ret = em_call_function(v7_setup_pmu);
+
+ if (ret == 0 && have_l2x0())
+ l2x0_ec_setup();
+
+ return ret;
+}
+
+static int v7_start(void)
+{
+ int ret;
+#ifdef CONFIG_SMP
+ unsigned i;
+
+ if (is_smp()) {
+ /*
+ * Send CP15 PMU interrupts to the owner CPU.
+ */
+ for (i=0; i<CONFIG_NR_CPUS; ++i) {
+ em_route_irq(IRQ_PMU_CPU0 + i, i);
+ }
+ }
+#endif
+
+ ret = v7_request_interrupts(v7_irqs, ARRAY_SIZE(v7_irqs));
+ if (ret == 0) {
+ em_call_function(v7_start_pmu);
+
+ if (have_l2x0())
+ ret = l2x0_ec_start();
+
+ if (ret)
+ v7_release_interrupts(v7_irqs, ARRAY_SIZE(v7_irqs));
+ }
+ return ret;
+}
+
+static void v7_stop(void)
+{
+ em_call_function(v7_stop_pmu);
+ v7_release_interrupts(v7_irqs, ARRAY_SIZE(v7_irqs));
+ if (have_l2x0())
+ l2x0_ec_stop();
+}
+
+
+struct op_arm_model_spec op_armv6_spec = {
+ .init = v6_init,
+ .num_counters = NUM_COUNTERS,
+ .setup_ctrs = v6_setup_ctrs,
+ .start = v6_start,
+ .stop = v6_stop,
+ .name = "arm/v6", /* This may get overwritten in common.c */
+};
+
+struct op_arm_model_spec op_armv7_spec = {
+ .init = v7_init,
+ .num_counters = NUM_COUNTERS,
+ .setup_ctrs = v7_setup_ctrs,
+ .start = v7_start,
+ .stop = v7_stop,
+ .name = "arm/v7", /* This gets overwritten in common.c */
+};
diff --git a/arch/arm/oprofile/op_model_v6.c b/arch/arm/oprofile/op_model_v6.c
index fe581383d3e2..e69de29bb2d1 100644
--- a/arch/arm/oprofile/op_model_v6.c
+++ b/arch/arm/oprofile/op_model_v6.c
@@ -1,67 +0,0 @@
-/**
- * @file op_model_v6.c
- * ARM11 Performance Monitor Driver
- *
- * Based on op_model_xscale.c
- *
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 OProfile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Tony Lindgren <tony@atomide.com>
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-static int irqs[] = {
-#ifdef CONFIG_ARCH_OMAP2
- 3,
-#endif
-};
-
-static void armv6_pmu_stop(void)
-{
- arm11_stop_pmu();
- arm11_release_interrupts(irqs, ARRAY_SIZE(irqs));
-}
-
-static int armv6_pmu_start(void)
-{
- int ret;
-
- ret = arm11_request_interrupts(irqs, ARRAY_SIZE(irqs));
- if (ret >= 0)
- ret = arm11_start_pmu();
-
- return ret;
-}
-
-static int armv6_detect_pmu(void)
-{
- return 0;
-}
-
-struct op_arm_model_spec op_armv6_spec = {
- .init = armv6_detect_pmu,
- .num_counters = 3,
- .setup_ctrs = arm11_setup_pmu,
- .start = armv6_pmu_start,
- .stop = armv6_pmu_stop,
- .name = "arm/armv6",
-};
diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c
index f20295f14adb..e69de29bb2d1 100644
--- a/arch/arm/oprofile/op_model_v7.c
+++ b/arch/arm/oprofile/op_model_v7.c
@@ -1,411 +0,0 @@
-/**
- * op_model_v7.c
- * ARM V7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_v7.h"
-
-/* #define DEBUG */
-
-
-/*
- * ARM V7 PMNC support
- */
-
-static u32 cnt_en[CNTMAX];
-
-static inline void armv7_pmnc_write(u32 val)
-{
- val &= PMNC_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
-}
-
-static inline u32 armv7_pmnc_read(void)
-{
- u32 val;
-
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- return val;
-}
-
-static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = CNTENS_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= CNTENS_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u disabling wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = CNTENC_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= CNTENC_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_enable_intens(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
- " interrupt enable %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = INTENS_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= INTENS_MASK;
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_getreset_flags(void)
-{
- u32 val;
-
- /* Read */
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-
- /* Write to clear flags */
- val &= FLAG_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
-
- return val;
-}
-
-static inline int armv7_pmnc_select_counter(unsigned int cnt)
-{
- u32 val;
-
- if ((cnt == CCNT) || (cnt >= CNTMAX)) {
- printk(KERN_ERR "oprofile: CPU%u selecting wrong PMNC counteri"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- val = (cnt - CNT0) & SELECT_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
-
- return cnt;
-}
-
-static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
-{
- if (armv7_pmnc_select_counter(cnt) == cnt) {
- val &= EVTSEL_MASK;
- asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
- }
-}
-
-static void armv7_pmnc_reset_counter(unsigned int cnt)
-{
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
- u32 val = -(u32)counter_config[cpu_cnt].count;
-
- switch (cnt) {
- case CCNT:
- armv7_pmnc_disable_counter(cnt);
-
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
-
- if (cnt_en[cnt] != 0)
- armv7_pmnc_enable_counter(cnt);
-
- break;
-
- case CNT0:
- case CNT1:
- case CNT2:
- case CNT3:
- armv7_pmnc_disable_counter(cnt);
-
- if (armv7_pmnc_select_counter(cnt) == cnt)
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
-
- if (cnt_en[cnt] != 0)
- armv7_pmnc_enable_counter(cnt);
-
- break;
-
- default:
- printk(KERN_ERR "oprofile: CPU%u resetting wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- break;
- }
-}
-
-int armv7_setup_pmnc(void)
-{
- unsigned int cnt;
-
- if (armv7_pmnc_read() & PMNC_E) {
- printk(KERN_ERR "oprofile: CPU%u PMNC still enabled when setup"
- " new event counter.\n", smp_processor_id());
- return -EBUSY;
- }
-
- /*
- * Initialize & Reset PMNC: C bit, D bit and P bit.
- * Note: Using a slower count for CCNT (D bit: divide by 64) results
- * in a more stable system
- */
- armv7_pmnc_write(PMNC_P | PMNC_C | PMNC_D);
-
-
- for (cnt = CCNT; cnt < CNTMAX; cnt++) {
- unsigned long event;
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(cnt);
- cnt_en[cnt] = 0;
-
- if (!counter_config[cpu_cnt].enabled)
- continue;
-
- event = counter_config[cpu_cnt].event & 255;
-
- /*
- * Set event (if destined for PMNx counters)
- * We don't need to set the event if it's a cycle count
- */
- if (cnt != CCNT)
- armv7_pmnc_write_evtsel(cnt, event);
-
- /*
- * Enable interrupt for this counter
- */
- armv7_pmnc_enable_intens(cnt);
-
- /*
- * Reset counter
- */
- armv7_pmnc_reset_counter(cnt);
-
- /*
- * Enable counter
- */
- armv7_pmnc_enable_counter(cnt);
- cnt_en[cnt] = 1;
- }
-
- return 0;
-}
-
-static inline void armv7_start_pmnc(void)
-{
- armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
-}
-
-static inline void armv7_stop_pmnc(void)
-{
- armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned int cnt;
- u32 flags;
-
-
- /*
- * Stop IRQ generation
- */
- armv7_stop_pmnc();
-
- /*
- * Get and reset overflow status flags
- */
- flags = armv7_pmnc_getreset_flags();
-
- /*
- * Cycle counter
- */
- if (flags & FLAG_C) {
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), CCNT);
- armv7_pmnc_reset_counter(CCNT);
- oprofile_add_sample(regs, cpu_cnt);
- }
-
- /*
- * PMNC counters 0:3
- */
- for (cnt = CNT0; cnt < CNTMAX; cnt++) {
- if (flags & (1 << (cnt - CNT0))) {
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
- armv7_pmnc_reset_counter(cnt);
- oprofile_add_sample(regs, cpu_cnt);
- }
- }
-
- /*
- * Allow IRQ generation
- */
- armv7_start_pmnc();
-
- return IRQ_HANDLED;
-}
-
-int armv7_request_interrupts(int *irqs, int nr)
-{
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < nr; i++) {
- ret = request_irq(irqs[i], armv7_pmnc_interrupt,
- IRQF_DISABLED, "CP15 PMNC", NULL);
- if (ret != 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u"
- " for ARMv7\n",
- irqs[i]);
- break;
- }
- }
-
- if (i != nr)
- while (i-- != 0)
- free_irq(irqs[i], NULL);
-
- return ret;
-}
-
-void armv7_release_interrupts(int *irqs, int nr)
-{
- unsigned int i;
-
- for (i = 0; i < nr; i++)
- free_irq(irqs[i], NULL);
-}
-
-#ifdef DEBUG
-static void armv7_pmnc_dump_regs(void)
-{
- u32 val;
- unsigned int cnt;
-
- printk(KERN_INFO "PMNC registers dump:\n");
-
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- printk(KERN_INFO "PMNC =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
- printk(KERN_INFO "CNTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
- printk(KERN_INFO "INTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- printk(KERN_INFO "FLAGS =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
- printk(KERN_INFO "SELECT=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
- printk(KERN_INFO "CCNT =0x%08x\n", val);
-
- for (cnt = CNT0; cnt < CNTMAX; cnt++) {
- armv7_pmnc_select_counter(cnt);
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-CNT0, val);
- asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-CNT0, val);
- }
-}
-#endif
-
-
-static int irqs[] = {
-#ifdef CONFIG_ARCH_OMAP3
- INT_34XX_BENCH_MPU_EMUL,
-#endif
-};
-
-static void armv7_pmnc_stop(void)
-{
-#ifdef DEBUG
- armv7_pmnc_dump_regs();
-#endif
- armv7_stop_pmnc();
- armv7_release_interrupts(irqs, ARRAY_SIZE(irqs));
-}
-
-static int armv7_pmnc_start(void)
-{
- int ret;
-
-#ifdef DEBUG
- armv7_pmnc_dump_regs();
-#endif
- ret = armv7_request_interrupts(irqs, ARRAY_SIZE(irqs));
- if (ret >= 0)
- armv7_start_pmnc();
-
- return ret;
-}
-
-static int armv7_detect_pmnc(void)
-{
- return 0;
-}
-
-struct op_arm_model_spec op_armv7_spec = {
- .init = armv7_detect_pmnc,
- .num_counters = 5,
- .setup_ctrs = armv7_setup_pmnc,
- .start = armv7_pmnc_start,
- .stop = armv7_pmnc_stop,
- .name = "arm/armv7",
-};
diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h
index 0e19bcc2e100..e69de29bb2d1 100644
--- a/arch/arm/oprofile/op_model_v7.h
+++ b/arch/arm/oprofile/op_model_v7.h
@@ -1,103 +0,0 @@
-/**
- * op_model_v7.h
- * ARM v7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- * Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * Copyright 2000-2004 MontaVista Software Inc
- * Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * Copyright 2004 Intel Corporation
- * Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * Copyright 2004 Oprofile Authors
- *
- * Read the file COPYING
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef OP_MODEL_V7_H
-#define OP_MODEL_V7_H
-
-/*
- * Per-CPU PMNC: config reg
- */
-#define PMNC_E (1 << 0) /* Enable all counters */
-#define PMNC_P (1 << 1) /* Reset all counters */
-#define PMNC_C (1 << 2) /* Cycle counter reset */
-#define PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define PMNC_X (1 << 4) /* Export to ETM */
-#define PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define PMNC_MASK 0x3f /* Mask for writable bits */
-
-/*
- * Available counters
- */
-#define CCNT 0
-#define CNT0 1
-#define CNT1 2
-#define CNT2 3
-#define CNT3 4
-#define CNTMAX 5
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * CNTMAX + (counter))
-
-/*
- * CNTENS: counters enable reg
- */
-#define CNTENS_P0 (1 << 0)
-#define CNTENS_P1 (1 << 1)
-#define CNTENS_P2 (1 << 2)
-#define CNTENS_P3 (1 << 3)
-#define CNTENS_C (1 << 31)
-#define CNTENS_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * CNTENC: counters disable reg
- */
-#define CNTENC_P0 (1 << 0)
-#define CNTENC_P1 (1 << 1)
-#define CNTENC_P2 (1 << 2)
-#define CNTENC_P3 (1 << 3)
-#define CNTENC_C (1 << 31)
-#define CNTENC_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define INTENS_P0 (1 << 0)
-#define INTENS_P1 (1 << 1)
-#define INTENS_P2 (1 << 2)
-#define INTENS_P3 (1 << 3)
-#define INTENS_C (1 << 31)
-#define INTENS_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * EVTSEL: Event selection reg
- */
-#define EVTSEL_MASK 0x7f /* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define SELECT_MASK 0x1f /* Mask for writable bits */
-
-/*
- * FLAG: counters overflow flag status reg
- */
-#define FLAG_P0 (1 << 0)
-#define FLAG_P1 (1 << 1)
-#define FLAG_P2 (1 << 2)
-#define FLAG_P3 (1 << 3)
-#define FLAG_C (1 << 31)
-#define FLAG_MASK 0x8000000f /* Mask for writable bits */
-
-
-int armv7_setup_pmu(void);
-int armv7_start_pmu(void);
-int armv7_stop_pmu(void);
-int armv7_request_interrupts(int *, int);
-void armv7_release_interrupts(int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_scu.c b/arch/arm/oprofile/op_scu.c
new file mode 100644
index 000000000000..62186f0acc82
--- /dev/null
+++ b/arch/arm/oprofile/op_scu.c
@@ -0,0 +1,175 @@
+/**
+ * @file op_scu.c
+ * MPCORE Snoop Control Unit Event Monitor Driver
+ * @remark Copyright 2004-7 ARM SMP Development Team
+ * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
+ * @remark Copyright 2000-2004 MontaVista Software Inc
+ * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
+ * @remark Copyright 2004 Intel Corporation
+ * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
+ * @remark Copyright 2004 Oprofile Authors
+ *
+ * @remark Read the file COPYING
+ *
+ * @author Zwane Mwaikambo
+ *
+ */
+
+/* #define DEBUG */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include <asm/system.h>
+#include <mach/hardware.h>
+
+#include "op_counter.h"
+#include "op_arm_model.h"
+#include "op_scu.h"
+
+struct eventmonitor {
+ unsigned long PMCR;
+ unsigned char MCEB[8];
+ unsigned long MC[8];
+};
+
+#define PMCR_E 1
+
+/*
+ * Bitmask of used SCU counters
+ */
+static unsigned int scu_em_used;
+
+/*
+ * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
+ */
+static inline void scu_reset_counter(struct eventmonitor __iomem *emc, unsigned int n)
+{
+ writel(-(u32)counter_config[COUNTER_SCU_MN(n)].count, &emc->MC[n]);
+}
+
+static inline void scu_set_event(struct eventmonitor __iomem *emc, unsigned int n, u32 event)
+{
+ event &= 0xff;
+ writeb(event, &emc->MCEB[n]);
+}
+
+/*
+ * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
+ */
+static irqreturn_t scu_em_interrupt(int irq, void *arg)
+{
+ struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
+ unsigned int cnt, tmp;
+
+ cnt = irq - IRQ_PMU_SCU0;
+ oprofile_add_sample(get_irq_regs(), COUNTER_SCU_MN(cnt));
+ scu_reset_counter(emc, cnt);
+
+ /* Clear overflow flag for this counter */
+ tmp = readl(&emc->PMCR);
+ tmp &= 0xff00ffff; /* mask out any other overflow flags */
+ tmp |= 1 << (cnt + 16);
+ writel(tmp, &emc->PMCR);
+
+ return IRQ_HANDLED;
+}
+
+/* Configure just the SCU counters that the user has requested */
+void scu_setup(void)
+{
+ struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
+ unsigned int i;
+
+ scu_em_used = 0;
+
+ for (i = 0; i < NUM_SCU_COUNTERS; i++) {
+ if (counter_config[COUNTER_SCU_MN(i)].enabled &&
+ counter_config[COUNTER_SCU_MN(i)].event) {
+ scu_set_event(emc, i, 0); /* disable counter for now */
+ scu_em_used |= 1 << i;
+ }
+ }
+}
+
+int scu_start(void)
+{
+ struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
+ unsigned int temp, i;
+ unsigned long event;
+ int ret = 0;
+
+ /*
+ * request the SCU counter interrupts that we need
+ */
+ for (i = 0; i < NUM_SCU_COUNTERS; i++) {
+ if (scu_em_used & (1 << i)) {
+ ret = request_irq(IRQ_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED,
+ "SCU PMU", NULL);
+ if (ret) {
+ printk(KERN_ERR
+ "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
+ IRQ_PMU_SCU0 + i);
+ goto err_free_scu;
+ }
+ }
+ }
+
+ /*
+ * clear overflow and enable interrupt for all used counters
+ */
+ temp = readl(&emc->PMCR);
+ for (i = 0; i < NUM_SCU_COUNTERS; i++) {
+ if (scu_em_used & (1 << i)) {
+ scu_reset_counter(emc, i);
+ event = counter_config[COUNTER_SCU_MN(i)].event;
+ scu_set_event(emc, i, event);
+
+ /* clear overflow/interrupt */
+ temp |= 1 << (i + 16);
+ /* enable interrupt*/
+ temp |= 1 << (i + 8);
+ }
+ }
+
+ /* Enable all 8 counters */
+ temp |= PMCR_E;
+ writel(temp, &emc->PMCR);
+
+ return 0;
+
+ err_free_scu:
+ while (i--)
+ free_irq(IRQ_PMU_SCU0 + i, NULL);
+ return ret;
+}
+
+void scu_stop(void)
+{
+ struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
+ unsigned int temp, i;
+
+ /* Disable counter interrupts */
+ /* Don't disable all 8 counters (with the E bit) as they may be in use */
+ temp = readl(&emc->PMCR);
+ for (i = 0; i < NUM_SCU_COUNTERS; i++) {
+ if (scu_em_used & (1 << i))
+ temp &= ~(1 << (i + 8));
+ }
+ writel(temp, &emc->PMCR);
+
+ /* Free counter interrupts and reset counters */
+ for (i = 0; i < NUM_SCU_COUNTERS; i++) {
+ if (scu_em_used & (1 << i)) {
+ scu_reset_counter(emc, i);
+ free_irq(IRQ_PMU_SCU0 + i, NULL);
+ }
+ }
+}
+
diff --git a/arch/arm/oprofile/op_scu.h b/arch/arm/oprofile/op_scu.h
new file mode 100644
index 000000000000..572bd7df8264
--- /dev/null
+++ b/arch/arm/oprofile/op_scu.h
@@ -0,0 +1,23 @@
+/**
+ * @file op_scu.h
+ * MPCORE Snoop Control Unit Event Monitor Driver
+ * @remark Copyright 2004 ARM SMP Development Team
+ * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
+ * @remark Copyright 2000-2004 MontaVista Software Inc
+ * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
+ * @remark Copyright 2004 Intel Corporation
+ * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
+ * @remark Copyright 2004 Oprofile Authors
+ *
+ * @remark Read the file COPYING
+ *
+ * @author Zwane Mwaikambo
+ */
+#ifndef OP_SCU_H
+#define OP_SCU_H
+
+void scu_setup(void); /* record which SCU counters are enabled (scu_em_used mask) */
+int scu_start(void); /* request IRQs, program events, enable counting; 0 on success */
+void scu_stop(void); /* disable counter interrupts, reset counters, free IRQs */
+
+#endif
diff --git a/arch/arm/oprofile/op_v7.c b/arch/arm/oprofile/op_v7.c
new file mode 100644
index 000000000000..a71fcccc441e
--- /dev/null
+++ b/arch/arm/oprofile/op_v7.c
@@ -0,0 +1,248 @@
+/**
+ * @file op_v7.c
+ * ARM V7 Performance Monitor Unit Driver
+ *
+ * @remark Copyright 2007 ARM SMP Development Team
+ *
+ * @remark Read the file COPYING
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+
+#include "op_counter.h"
+#include "op_arm_model.h"
+#include "op_v7.h"
+
+/*
+ * We assume each CPU has PMU_COUNTERS_PER_CPU counters, where the
+ * last one is a cycle counter, the rest are event counters.
+ * The oprofile event files in userland should ensure that we will not
+ * access counters that aren't physically present, but we also check
+ * the counter numbers here.
+ */
+
+#define PMCR_N_MASK 0xf800 /* PMCR.N field: number of event counters, bits [15:11] */
+#define PMCR_N_SHIFT 11
+
+static unsigned event_counters_per_cpu; /* read from PMCR.N by v7_setup_pmu() */
+
+/*
+ * ARM V7 PMU support
+ */
+static inline void v7_write_pmcr(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val)); /* write PMCR (CP15 c9,c12,0) */
+}
+
+static inline u32 v7_read_pmcr(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); /* read PMCR (CP15 c9,c12,0) */
+ return val;
+}
+
+static inline void v7_reset_counter(unsigned int cpu, unsigned int cnt)
+{
+ u32 val = -(u32)counter_config[COUNTER_CPUn_PMNm(cpu, cnt)].count; /* counter overflows after 'count' more events */
+
+ if (cnt == CCNT)
+ /* Set cycle count in PMCCNTR */
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
+ else {
+ /* Select correct counter using PMSELR */
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (cnt));
+ /* Set the count value (selected counter's event count register) */
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
+ }
+}
+
+static inline void v7_set_event(unsigned int cnt, u32 val)
+{
+ /* Select correct counter using PMSELR */
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (cnt));
+ /* Set event type in PMXEVTYPER */
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+}
+
+static inline void v7_clear_overflow_status(u32 status)
+{
+ /* Clear the given overflow bits in PMOVSR (write the bits to clear them) */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (status));
+}
+
+static inline u32 v7_read_overflow_status(void)
+{
+ u32 status;
+
+ /* Read counter overflow bits from PMOVSR */
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (status));
+
+ return status;
+}
+
+static inline void v7_enable_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if (cnt == CCNT)
+ val = PMCNTEN_CCNT; /* cycle counter lives in bit 31 */
+ else
+ val = PMCNTEN_PMN0 << cnt;
+
+ /* Set bit in PMCNTEN (set register: writing 1 enables, 0 is ignored) */
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+}
+
+static inline void v7_disable_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if (cnt == CCNT)
+ val = PMCNTEN_CCNT; /* cycle counter lives in bit 31 */
+ else
+ val = PMCNTEN_PMN0 << cnt;
+
+ /* Clear bit in PMCNTEN (clear register: writing 1 disables, 0 is ignored) */
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+}
+
+static inline void v7_set_interrupts(u32 interrupts)
+{
+ /* First disable all counter interrupts via PMINTENCLR */
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (0xFFFFFFFFU));
+
+ /* Then enable exactly the requested set via PMINTENSET */
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (interrupts));
+}
+
+int v7_setup_pmu(void)
+{
+ unsigned int cnt, cpu;
+ u32 pmcr, interrupts;
+
+ /*
+ * No need for get_cpu/put_cpu, because we were either called
+ * from em_call_function(), which itself uses get_cpu/put_cpu,
+ * or smp_call_function(), which means we are in IRQ context.
+ */
+ pmcr = v7_read_pmcr();
+ cpu = smp_processor_id();
+
+ if (pmcr & PMCR_E) {
+ printk(KERN_ERR "oprofile: CPU%u PMU still enabled when setup new event counter.\n", cpu);
+ return -EBUSY; /* refuse to reprogram a PMU that is still counting */
+ }
+
+ /* Discover the number of event counters from the PMCR.N field */
+ event_counters_per_cpu = (pmcr & PMCR_N_MASK) >> PMCR_N_SHIFT;
+
+ /* Reset event counters (P) and cycle counter (C); DP,X,D,E end up clear */
+ v7_write_pmcr(PMCR_C | PMCR_P);
+ for (cnt = 0; cnt < event_counters_per_cpu; cnt++)
+ v7_disable_counter(cnt);
+ v7_disable_counter(CCNT);
+
+ interrupts = 0;
+
+ /* Set up each enabled event counter and collect its interrupt-enable bit */
+ for (cnt = 0; cnt < event_counters_per_cpu; cnt++) {
+ unsigned long event;
+
+ if (!counter_config[COUNTER_CPUn_PMNm(cpu, cnt)].enabled)
+ continue;
+
+ event = counter_config[COUNTER_CPUn_PMNm(cpu, cnt)].event & 255; /* keep only the low 8 event-number bits */
+
+ v7_set_event(cnt, event);
+ v7_reset_counter(cpu, cnt);
+ v7_enable_counter(cnt);
+ interrupts |= PMINTEN_PMN0 << cnt;
+ }
+
+ /* Now set up the cycle counter */
+ if (counter_config[COUNTER_CPUn_CCNT(cpu)].enabled) {
+ v7_reset_counter(cpu, CCNT);
+ v7_enable_counter(CCNT);
+ interrupts |= PMINTEN_CCNT;
+ }
+
+ /* Enable the required interrupts */
+ v7_set_interrupts(interrupts);
+
+ return 0;
+}
+
+int v7_start_pmu(void)
+{
+ v7_write_pmcr(v7_read_pmcr() | PMCR_E); /* set the global enable bit */
+ return 0;
+}
+
+int v7_stop_pmu(void)
+{
+ v7_write_pmcr(v7_read_pmcr() & ~PMCR_E); /* clear the global enable bit */
+ return 0;
+}
+
+/*
+ * CPU counters' IRQ handler (one IRQ per CPU)
+ */
+static irqreturn_t v7_pmu_interrupt(int irq, void *arg)
+{
+ struct pt_regs *regs = get_irq_regs(); /* interrupted context, attributed to the sample */
+ unsigned int cnt, cpu;
+ u32 overflowed;
+
+ overflowed = v7_read_overflow_status();
+ cpu = smp_processor_id();
+
+ /* NOTE(review): iterates 0..CCNT-1, not event_counters_per_cpu; relies on absent counters never setting PMOVSR bits */
+ for (cnt = 0; cnt < CCNT; cnt++) {
+ if (overflowed & (PMOVSR_PMN0 << cnt)) {
+ v7_reset_counter(cpu, cnt); /* re-arm: restart at -count */
+ oprofile_add_sample(regs, COUNTER_CPUn_PMNm(cpu, cnt));
+ }
+ }
+
+ /* Check the cycle counter */
+ if (overflowed & PMOVSR_CCNT) {
+ v7_reset_counter(cpu, CCNT);
+ oprofile_add_sample(regs, COUNTER_CPUn_CCNT(cpu));
+ }
+
+ v7_clear_overflow_status(overflowed); /* ack only the overflows we observed */
+ return IRQ_HANDLED; /* NOTE(review): claims the IRQ even when overflowed == 0 */
+}
+
+int v7_request_interrupts(int *irqs, int nr)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for(i = 0; i < nr; i++) { /* one PMU IRQ per CPU */
+ ret = request_irq(irqs[i], v7_pmu_interrupt, IRQF_DISABLED, "CP15 PMU", NULL);
+ if (ret != 0) {
+ printk(KERN_ERR "oprofile: unable to request IRQ%u for CP15 PMU\n",
+ irqs[i]);
+ break;
+ }
+ }
+
+ if (i != nr) /* partial failure: release the IRQs already obtained */
+ while (i-- != 0)
+ free_irq(irqs[i], NULL);
+
+ return ret;
+}
+
+void v7_release_interrupts(int *irqs, int nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr; i++) /* free every per-CPU PMU IRQ requested earlier */
+ free_irq(irqs[i], NULL);
+}
diff --git a/arch/arm/oprofile/op_v7.h b/arch/arm/oprofile/op_v7.h
new file mode 100644
index 000000000000..03ca7dfeecf5
--- /dev/null
+++ b/arch/arm/oprofile/op_v7.h
@@ -0,0 +1,36 @@
+/**
+ * @file op_v7.h
+ * ARM V7 Performance Monitor Unit Driver
+ *
+ * @remark Copyright 2007 ARM SMP Development Team
+ *
+ * @remark Read the file COPYING
+ */
+#ifndef OP_V7_H
+#define OP_V7_H
+
+/*
+ * V7 CP15 PMU register bits (PMCR, PMINTENSET/CLR, PMOVSR, PMCNTENSET/CLR)
+ */
+#define PMCR_E (1 << 0) /* Enable all counters */
+#define PMCR_P (1 << 1) /* Event counter reset */
+#define PMCR_C (1 << 2) /* Cycle counter reset */
+#define PMCR_D (1 << 3) /* Cycle counter counts every 64th cpu cycle */
+
+#define PMINTEN_PMN0 (1 << 0) /* overflow interrupt enable, event counter 0 */
+#define PMINTEN_CCNT (1 << 31) /* overflow interrupt enable, cycle counter */
+
+#define PMOVSR_PMN0 (1 << 0) /* overflow flag, event counter 0 */
+#define PMOVSR_CCNT (1 << 31) /* overflow flag, cycle counter */
+
+#define PMCNTEN_PMN0 (1 << 0) /* count enable, event counter 0 */
+#define PMCNTEN_CCNT (1 << 31) /* count enable, cycle counter */
+
+
+int v7_setup_pmu(void); /* program events and interrupts on the calling CPU */
+int v7_start_pmu(void); /* set PMCR.E */
+int v7_stop_pmu(void); /* clear PMCR.E */
+int v7_request_interrupts(int *, int); /* request per-CPU PMU IRQs */
+void v7_release_interrupts(int *, int); /* free per-CPU PMU IRQs */
+
+#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c43..275444179294 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -105,6 +105,7 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
+EXPORT_SYMBOL(irq_set_affinity);
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*