Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 54
-rw-r--r--  arch/x86/Kconfig.cpu | 4
-rw-r--r--  arch/x86/Kconfig.debug | 2
-rw-r--r--  arch/x86/Makefile | 5
-rw-r--r--  arch/x86/include/asm/alternative-asm.h | 4
-rw-r--r--  arch/x86/include/asm/alternative.h | 20
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 34
-rw-r--r--  arch/x86/include/asm/arch_hweight.h | 61
-rw-r--r--  arch/x86/include/asm/atomic.h | 25
-rw-r--r--  arch/x86/include/asm/atomic64_32.h | 278
-rw-r--r--  arch/x86/include/asm/atomic64_64.h | 25
-rw-r--r--  arch/x86/include/asm/bitops.h | 4
-rw-r--r--  arch/x86/include/asm/boot.h | 2
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 44
-rw-r--r--  arch/x86/include/asm/cmpxchg_32.h | 3
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 58
-rw-r--r--  arch/x86/include/asm/dwarf2.h | 12
-rw-r--r--  arch/x86/include/asm/e820.h | 7
-rw-r--r--  arch/x86/include/asm/hardirq.h | 2
-rw-r--r--  arch/x86/include/asm/hyperv.h | 11
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 27
-rw-r--r--  arch/x86/include/asm/i387.h | 129
-rw-r--r--  arch/x86/include/asm/i8253.h | 2
-rw-r--r--  arch/x86/include/asm/io_apic.h | 13
-rw-r--r--  arch/x86/include/asm/k8.h | 5
-rw-r--r--  arch/x86/include/asm/mpspec.h | 10
-rw-r--r--  arch/x86/include/asm/mshyperv.h | 14
-rw-r--r--  arch/x86/include/asm/percpu.h | 24
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h | 3
-rw-r--r--  arch/x86/include/asm/processor.h | 10
-rw-r--r--  arch/x86/include/asm/thread_info.h | 1
-rw-r--r--  arch/x86/include/asm/traps.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 247
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 528
-rw-r--r--  arch/x86/include/asm/vmware.h | 27
-rw-r--r--  arch/x86/include/asm/xsave.h | 7
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 133
-rw-r--r--  arch/x86/kernel/alternative.c | 47
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 197
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 6
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 19
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 99
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 3
-rw-r--r--  arch/x86/kernel/apm_32.c | 4
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 14
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 5
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Makefile | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 44
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/mperf.c | 51
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/mperf.h | 9
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 169
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 2
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c | 52
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 6
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 181
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c | 55
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 41
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 38
-rw-r--r--  arch/x86/kernel/entry_32.S | 19
-rw-r--r--  arch/x86/kernel/i387.c | 107
-rw-r--r--  arch/x86/kernel/i8253.c | 14
-rw-r--r--  arch/x86/kernel/irqinit.c | 2
-rw-r--r--  arch/x86/kernel/kprobes.c | 27
-rw-r--r--  arch/x86/kernel/microcode_core.c | 4
-rw-r--r--  arch/x86/kernel/microcode_intel.c | 22
-rw-r--r--  arch/x86/kernel/mpparse.c | 25
-rw-r--r--  arch/x86/kernel/mrst.c | 5
-rw-r--r--  arch/x86/kernel/process.c | 32
-rw-r--r--  arch/x86/kernel/process_32.c | 2
-rw-r--r--  arch/x86/kernel/process_64.c | 2
-rw-r--r--  arch/x86/kernel/sfi.c | 4
-rw-r--r--  arch/x86/kernel/tboot.c | 20
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 1280
-rw-r--r--  arch/x86/kernel/traps.c | 173
-rw-r--r--  arch/x86/kernel/uv_irq.c | 12
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 1
-rw-r--r--  arch/x86/kernel/xsave.c | 8
-rw-r--r--  arch/x86/kvm/svm.c | 8
-rw-r--r--  arch/x86/kvm/vmx.c | 3
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/lib/Makefile | 3
-rw-r--r--  arch/x86/lib/atomic64_32.c | 273
-rw-r--r--  arch/x86/lib/atomic64_386_32.S | 174
-rw-r--r--  arch/x86/lib/atomic64_cx8_32.S | 224
-rw-r--r--  arch/x86/math-emu/fpu_aux.c | 6
-rw-r--r--  arch/x86/math-emu/fpu_entry.c | 4
-rw-r--r--  arch/x86/math-emu/fpu_system.h | 2
-rw-r--r--  arch/x86/mm/Makefile | 1
-rw-r--r--  arch/x86/mm/pat.c | 239
-rw-r--r--  arch/x86/mm/pat_internal.h | 46
-rw-r--r--  arch/x86/mm/pat_rbtree.c | 273
-rw-r--r--  arch/x86/mm/srat_64.c | 51
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 199
-rw-r--r--  arch/x86/oprofile/op_model_amd.c | 280
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 52
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 77
-rw-r--r--  arch/x86/oprofile/op_x86_model.h | 4
-rw-r--r--  arch/x86/pci/mrst.c | 6
102 files changed, 4029 insertions(+), 2552 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 01177dcbe261..a2d3a5fbeeda 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -201,20 +201,17 @@ config HAVE_INTEL_TXT
# Use the generic interrupt handling code in kernel/irq/:
config GENERIC_HARDIRQS
- bool
- default y
+ def_bool y
config GENERIC_HARDIRQS_NO__DO_IRQ
def_bool y
config GENERIC_IRQ_PROBE
- bool
- default y
+ def_bool y
config GENERIC_PENDING_IRQ
- bool
+ def_bool y
depends on GENERIC_HARDIRQS && SMP
- default y
config USE_GENERIC_SMP_HELPERS
def_bool y
@@ -229,19 +226,22 @@ config X86_64_SMP
depends on X86_64 && SMP
config X86_HT
- bool
+ def_bool y
depends on SMP
- default y
config X86_TRAMPOLINE
- bool
+ def_bool y
depends on SMP || (64BIT && ACPI_SLEEP)
- default y
config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR
+config ARCH_HWEIGHT_CFLAGS
+ string
+ default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
+ default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
+
config KTIME_SCALAR
def_bool X86_32
source "init/Kconfig"
@@ -451,7 +451,7 @@ config X86_NUMAQ
firmware with - send email to <Martin.Bligh@us.ibm.com>.
config X86_SUPPORTS_MEMORY_FAILURE
- bool
+ def_bool y
# MCE code calls memory_failure():
depends on X86_MCE
# On 32-bit this adds too big of NODES_SHIFT and we run out of page flags:
@@ -459,7 +459,6 @@ config X86_SUPPORTS_MEMORY_FAILURE
# On 32-bit SPARSEMEM adds too big of SECTIONS_WIDTH:
depends on X86_64 || !SPARSEMEM
select ARCH_SUPPORTS_MEMORY_FAILURE
- default y
config X86_VISWS
bool "SGI 320/540 (Visual Workstation)"
@@ -574,7 +573,6 @@ config PARAVIRT_SPINLOCKS
config PARAVIRT_CLOCK
bool
- default n
endif
@@ -753,7 +751,6 @@ config MAXSMP
bool "Configure Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
select CPUMASK_OFFSTACK
- default n
---help---
Configure maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
@@ -833,7 +830,6 @@ config X86_VISWS_APIC
config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
bool "Reroute for broken boot IRQs"
- default n
depends on X86_IO_APIC
---help---
This option enables a workaround that fixes a source of
@@ -880,9 +876,8 @@ config X86_MCE_AMD
the DRAM Error Threshold.
config X86_ANCIENT_MCE
- def_bool n
+ bool "Support for old Pentium 5 / WinChip machine checks"
depends on X86_32 && X86_MCE
- prompt "Support for old Pentium 5 / WinChip machine checks"
---help---
Include support for machine check handling on old Pentium 5 or WinChip
systems. These typically need to be enabled explicitely on the command
@@ -890,8 +885,7 @@ config X86_ANCIENT_MCE
config X86_MCE_THRESHOLD
depends on X86_MCE_AMD || X86_MCE_INTEL
- bool
- default y
+ def_bool y
config X86_MCE_INJECT
depends on X86_MCE
@@ -1030,8 +1024,8 @@ config X86_CPUID
choice
prompt "High Memory Support"
- default HIGHMEM4G if !X86_NUMAQ
default HIGHMEM64G if X86_NUMAQ
+ default HIGHMEM4G
depends on X86_32
config NOHIGHMEM
@@ -1289,7 +1283,7 @@ source "mm/Kconfig"
config HIGHPTE
bool "Allocate 3rd-level pagetables from highmem"
- depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
+ depends on HIGHMEM
---help---
The VM uses one page table entry for each page of physical memory.
For systems with a lot of RAM, this can be wasteful of precious
@@ -1373,8 +1367,7 @@ config MATH_EMULATION
kernel, it won't hurt.
config MTRR
- bool
- default y
+ def_bool y
prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
---help---
On Intel P6 family processors (Pentium Pro, Pentium II and later)
@@ -1440,8 +1433,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
mtrr_spare_reg_nr=N on the kernel command line.
config X86_PAT
- bool
- default y
+ def_bool y
prompt "x86 PAT support" if EMBEDDED
depends on MTRR
---help---
@@ -1609,8 +1601,7 @@ config X86_NEED_RELOCS
depends on X86_32 && RELOCATABLE
config PHYSICAL_ALIGN
- hex
- prompt "Alignment value to which kernel should be aligned" if X86_32
+ hex "Alignment value to which kernel should be aligned" if X86_32
default "0x1000000"
range 0x2000 0x1000000
---help---
@@ -1657,7 +1648,6 @@ config COMPAT_VDSO
config CMDLINE_BOOL
bool "Built-in kernel command line"
- default n
---help---
Allow for specifying boot arguments to the kernel at
build time. On some systems (e.g. embedded ones), it is
@@ -1691,7 +1681,6 @@ config CMDLINE
config CMDLINE_OVERRIDE
bool "Built-in command line overrides boot loader arguments"
- default n
depends on CMDLINE_BOOL
---help---
Set this option to 'Y' to have the kernel ignore the boot loader
@@ -1727,8 +1716,7 @@ source "drivers/acpi/Kconfig"
source "drivers/sfi/Kconfig"
config X86_APM_BOOT
- bool
- default y
+ def_bool y
depends on APM || APM_MODULE
menuconfig APM
@@ -1957,8 +1945,7 @@ config DMAR_DEFAULT_ON
experimental.
config DMAR_BROKEN_GFX_WA
- def_bool n
- prompt "Workaround broken graphics drivers (going away soon)"
+ bool "Workaround broken graphics drivers (going away soon)"
depends on DMAR && BROKEN
---help---
Current Graphics drivers tend to use physical address
@@ -2056,7 +2043,6 @@ config SCx200HR_TIMER
config OLPC
bool "One Laptop Per Child support"
select GPIOLIB
- default n
---help---
Add support for detecting the unique features of the OLPC
XO hardware.
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 918fbb1855cc..2ac9069890cd 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -338,6 +338,10 @@ config X86_F00F_BUG
def_bool y
depends on M586MMX || M586TSC || M586 || M486 || M386
+config X86_INVD_BUG
+ def_bool y
+ depends on M486 || M386
+
config X86_WP_WORKS_OK
def_bool y
depends on !M386
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index bd58c8abbfbd..75085080b63e 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -45,7 +45,6 @@ config EARLY_PRINTK
config EARLY_PRINTK_DBGP
bool "Early printk via EHCI debug port"
- default n
depends on EARLY_PRINTK && PCI
---help---
Write kernel log output directly into the EHCI debug port.
@@ -76,7 +75,6 @@ config DEBUG_PER_CPU_MAPS
bool "Debug access to per_cpu maps"
depends on DEBUG_KERNEL
depends on SMP
- default n
---help---
Say Y to verify that the per_cpu map being accessed has
been setup. Adds a fair amount of code to kernel memory
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 0a43dc515e4c..8aa1b59b9074 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -95,8 +95,9 @@ sp-$(CONFIG_X86_64) := rsp
cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
# is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe)
+cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
LDFLAGS := -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index b97f786a48d5..a63a68be1cce 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -6,8 +6,8 @@
.macro LOCK_PREFIX
1: lock
.section .smp_locks,"a"
- _ASM_ALIGN
- _ASM_PTR 1b
+ .balign 4
+ .long 1b - .
.previous
.endm
#else
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index b09ec55650b3..03b6bb5394a0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -28,20 +28,20 @@
*/
#ifdef CONFIG_SMP
-#define LOCK_PREFIX \
+#define LOCK_PREFIX_HERE \
".section .smp_locks,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR "661f\n" /* address */ \
+ ".balign 4\n" \
+ ".long 671f - .\n" /* offset */ \
".previous\n" \
- "661:\n\tlock; "
+ "671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#else /* ! CONFIG_SMP */
+#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
@@ -96,6 +96,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
".previous"
/*
+ * This must be included *after* the definition of ALTERNATIVE due to
+ * <asm/arch_hweight.h>
+ */
+#include <asm/cpufeature.h>
+
+/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows to use optimized instructions even on generic binary
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 86a0ff0aeac7..7014e88bc779 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -174,6 +174,40 @@
(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
+/*
+ * Returns the page table level to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_LEVEL(pagesize) \
+ ((__ffs(pagesize) - 12) / 9)
+/*
+ * Returns the number of ptes to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_PTE_COUNT(pagesize) \
+ (1ULL << ((__ffs(pagesize) - 12) % 9))
+
+/*
+ * Aligns a given io-virtual address to a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_ALIGN(address, pagesize) \
+ ((address) & ~((pagesize) - 1))
+/*
+ * Creates an IOMMU PTE for an address an a given pagesize
+ * The PTE has no permission bits set
+ * Pagesize is expected to be a power-of-two larger than 4096
+ */
+#define PAGE_SIZE_PTE(address, pagesize) \
+ (((address) | ((pagesize) - 1)) & \
+ (~(pagesize >> 1)) & PM_ADDR_MASK)
+
+/*
+ * Takes a PTE value with mode=0x07 and returns the page size it maps
+ */
+#define PTE_PAGE_SIZE(pte) \
+ (1ULL << (1 + ffz(((pte) | 0xfffULL))))
+
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..9686c3d9ff73
--- /dev/null
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#ifdef CONFIG_64BIT
+/* popcnt %edi, %eax -- redundant REX prefix for alignment */
+#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %rdi, %rax */
+#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+#define REG_IN "D"
+#define REG_OUT "a"
+#else
+/* popcnt %eax, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
+#define REG_IN "a"
+#define REG_OUT "a"
+#endif
+
+/*
+ * __sw_hweightXX are called from within the alternatives below
+ * and callee-clobbered registers need to be taken care of. See
+ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+ * compiler switches.
+ */
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ unsigned int res = 0;
+
+ asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+
+ return res;
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __arch_hweight32(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ unsigned long res = 0;
+
+#ifdef CONFIG_X86_32
+ return __arch_hweight32((u32)w) +
+ __arch_hweight32((u32)(w >> 32));
+#else
+ asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+#endif /* CONFIG_X86_32 */
+
+ return res;
+}
+
+#endif
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 8f8217b9bdac..952a826ac4e5 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -22,7 +22,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return v->counter;
+ return (*(volatile int *)&(v)->counter);
}
/**
@@ -246,6 +246,29 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+/*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline int atomic_dec_if_positive(atomic_t *v)
+{
+ int c, old, dec;
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type int
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 03027bf28de5..2a934aa19a43 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,109 +14,193 @@ typedef struct {
#define ATOMIC64_INIT(val) { (val) }
-extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+#ifdef CONFIG_X86_CMPXCHG64
+#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#else
+#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#endif
+
+#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+
+/**
+ * atomic64_cmpxchg - cmpxchg atomic64 variable
+ * @p: pointer to type atomic64_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+ return cmpxchg64(&v->counter, o, n);
+}
/**
* atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
*
- * Atomically xchgs the value of @ptr to @new_val and returns
+ * Atomically xchgs the value of @v to @n and returns
* the old value.
*/
-extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
+{
+ long long o;
+ unsigned high = (unsigned)(n >> 32);
+ unsigned low = (unsigned)n;
+ asm volatile(ATOMIC64_ALTERNATIVE(xchg)
+ : "=A" (o), "+b" (low), "+c" (high)
+ : "S" (v)
+ : "memory"
+ );
+ return o;
+}
/**
* atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
*
- * Atomically sets the value of @ptr to @new_val.
+ * Atomically sets the value of @v to @n.
*/
-extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ unsigned high = (unsigned)(i >> 32);
+ unsigned low = (unsigned)i;
+ asm volatile(ATOMIC64_ALTERNATIVE(set)
+ : "+b" (low), "+c" (high)
+ : "S" (v)
+ : "eax", "edx", "memory"
+ );
+}
/**
* atomic64_read - read atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically reads the value of @ptr and returns it.
+ * Atomically reads the value of @v and returns it.
*/
-static inline u64 atomic64_read(atomic64_t *ptr)
+static inline long long atomic64_read(atomic64_t *v)
{
- u64 res;
-
- /*
- * Note, we inline this atomic64_t primitive because
- * it only clobbers EAX/EDX and leaves the others
- * untouched. We also (somewhat subtly) rely on the
- * fact that cmpxchg8b returns the current 64-bit value
- * of the memory location we are touching:
- */
- asm volatile(
- "mov %%ebx, %%eax\n\t"
- "mov %%ecx, %%edx\n\t"
- LOCK_PREFIX "cmpxchg8b %1\n"
- : "=&A" (res)
- : "m" (*ptr)
- );
-
- return res;
-}
-
-extern u64 atomic64_read(atomic64_t *ptr);
+ long long r;
+ asm volatile(ATOMIC64_ALTERNATIVE(read)
+ : "=A" (r), "+c" (v)
+ : : "memory"
+ );
+ return r;
+ }
/**
* atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ * Atomically adds @i to @v and returns @i + *@v
*/
-extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE(add_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/*
* Other variants with different arithmetic operators:
*/
-extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
-extern u64 atomic64_inc_return(atomic64_t *ptr);
-extern u64 atomic64_dec_return(atomic64_t *ptr);
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
+
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+ long long a;
+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
+ : "=A" (a)
+ : "S" (v)
+ : "memory", "ecx"
+ );
+ return a;
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+ long long a;
+ asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
+ : "=A" (a)
+ : "S" (v)
+ : "memory", "ecx"
+ );
+ return a;
+}
/**
* atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr.
+ * Atomically adds @i to @v.
*/
-extern void atomic64_add(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/**
* atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
*
- * Atomically subtracts @delta from @ptr.
+ * Atomically subtracts @i from @v.
*/
-extern void atomic64_sub(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_sub(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/**
* atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr and returns
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
-extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
/**
* atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically increments @ptr by 1.
+ * Atomically increments @v by 1.
*/
-extern void atomic64_inc(atomic64_t *ptr);
+static inline void atomic64_inc(atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
+ : : "S" (v)
+ : "memory", "eax", "ecx", "edx"
+ );
+}
/**
* atomic64_dec - decrement atomic64 variable
@@ -124,37 +208,97 @@ extern void atomic64_inc(atomic64_t *ptr);
*
* Atomically decrements @ptr by 1.
*/
-extern void atomic64_dec(atomic64_t *ptr);
+static inline void atomic64_dec(atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
+ : : "S" (v)
+ : "memory", "eax", "ecx", "edx"
+ );
+}
/**
* atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically decrements @ptr by 1 and
+ * Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
-extern int atomic64_dec_and_test(atomic64_t *ptr);
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+ return atomic64_dec_return(v) == 0;
+}
/**
* atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically increments @ptr by 1
+ * Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
-extern int atomic64_inc_and_test(atomic64_t *ptr);
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+ return atomic64_inc_return(v) == 0;
+}
/**
* atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr and returns true
+ * Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
+static inline int atomic64_add_negative(long long i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+ unsigned low = (unsigned)u;
+ unsigned high = (unsigned)(u >> 32);
+ asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
+ : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
+ : : "memory");
+ return (int)a;
+}
+
+
+static inline int atomic64_inc_not_zero(atomic64_t *v)
+{
+ int r;
+ asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
+ : "=a" (r)
+ : "S" (v)
+ : "ecx", "edx", "memory"
+ );
+ return r;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long r;
+ asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
+ : "=A" (r)
+ : "S" (v)
+ : "ecx", "memory"
+ );
+ return r;
+}
+
+#undef ATOMIC64_ALTERNATIVE
+#undef ATOMIC64_ALTERNATIVE_
#endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 51c5b4056929..49fd1ea22951 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
*/
static inline long atomic64_read(const atomic64_t *v)
{
- return v->counter;
+ return (*(volatile long *)&(v)->counter);
}
/**
@@ -221,4 +221,27 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long c, old, dec;
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 02b47a603fc8..545776efeb16 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -444,7 +444,9 @@ static inline int fls(int x)
#define ARCH_HAS_FAST_MULTIPLIER 1
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
#endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 7a1065958ba9..3b62ab56c7a0 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -24,7 +24,7 @@
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
- (CONFIG_PHYSICAL_ALIGN < (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2))
+ (CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
#error "Invalid value for CONFIG_PHYSICAL_ALIGN"
#endif
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 634c40a739a6..c70068d05f70 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -44,9 +44,6 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
memcpy(dst, src, len);
}
-#define PG_WC PG_arch_1
-PAGEFLAG(WC, WC)
-
#ifdef CONFIG_X86_PAT
/*
* X86 PAT uses page flags WC and Uncached together to keep track of
@@ -55,16 +52,24 @@ PAGEFLAG(WC, WC)
* _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
* been changed from its default (value of -1 used to denote this).
* Note we do not support _PAGE_CACHE_UC here.
- *
- * Caller must hold memtype_lock for atomicity.
*/
+
+#define _PGMT_DEFAULT 0
+#define _PGMT_WC (1UL << PG_arch_1)
+#define _PGMT_UC_MINUS (1UL << PG_uncached)
+#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
+
static inline unsigned long get_page_memtype(struct page *pg)
{
- if (!PageUncached(pg) && !PageWC(pg))
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
return -1;
- else if (!PageUncached(pg) && PageWC(pg))
+ else if (pg_flags == _PGMT_WC)
return _PAGE_CACHE_WC;
- else if (PageUncached(pg) && !PageWC(pg))
+ else if (pg_flags == _PGMT_UC_MINUS)
return _PAGE_CACHE_UC_MINUS;
else
return _PAGE_CACHE_WB;
@@ -72,25 +77,26 @@ static inline unsigned long get_page_memtype(struct page *pg)
static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
+ unsigned long memtype_flags = _PGMT_DEFAULT;
+ unsigned long old_flags;
+ unsigned long new_flags;
+
switch (memtype) {
case _PAGE_CACHE_WC:
- ClearPageUncached(pg);
- SetPageWC(pg);
+ memtype_flags = _PGMT_WC;
break;
case _PAGE_CACHE_UC_MINUS:
- SetPageUncached(pg);
- ClearPageWC(pg);
+ memtype_flags = _PGMT_UC_MINUS;
break;
case _PAGE_CACHE_WB:
- SetPageUncached(pg);
- SetPageWC(pg);
- break;
- default:
- case -1:
- ClearPageUncached(pg);
- ClearPageWC(pg);
+ memtype_flags = _PGMT_WB;
break;
}
+
+ do {
+ old_flags = pg->flags;
+ new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+ } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ffb9bb6b6c37..8859e12dd3cf 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -271,7 +271,8 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
- alternative_io("call cmpxchg8b_emu", \
+ alternative_io(LOCK_PREFIX_HERE \
+ "call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0cd82d068613..dca9c545f44e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -161,6 +161,7 @@
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */
+#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */
/* Virtualization flags: Linux defined */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -175,6 +176,7 @@
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#include <asm/asm.h>
#include <linux/bitops.h>
extern const char * const x86_cap_flags[NCAPINTS*32];
@@ -283,6 +285,62 @@ extern const char * const x86_power_flags[32];
#endif /* CONFIG_X86_64 */
+/*
+ * Static testing of CPU features. Used the same as boot_cpu_has().
+ * These are only valid after alternatives have run, but will statically
+ * patch the target code for additional performance.
+ *
+ */
+static __always_inline __pure bool __static_cpu_has(u8 bit)
+{
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+ asm goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ _ASM_ALIGN "\n"
+ _ASM_PTR "1b\n"
+ _ASM_PTR "0\n" /* no replacement */
+ " .byte %P0\n" /* feature bit */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 0\n" /* replacement len */
+ " .byte 0xff + 0 - (2b-1b)\n" /* padding */
+ ".previous\n"
+ : : "i" (bit) : : t_no);
+ return true;
+ t_no:
+ return false;
+#else
+ u8 flag;
+ /* Open-coded due to __stringify() in ALTERNATIVE() */
+ asm volatile("1: movb $0,%0\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ _ASM_ALIGN "\n"
+ _ASM_PTR "1b\n"
+ _ASM_PTR "3f\n"
+ " .byte %P1\n" /* feature bit */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 4f - 3f\n" /* replacement len */
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+ : "=qm" (flag) : "i" (bit));
+ return flag;
+#endif
+}
+
+#define static_cpu_has(bit) \
+( \
+ __builtin_constant_p(boot_cpu_has(bit)) ? \
+ boot_cpu_has(bit) : \
+ (__builtin_constant_p(bit) && !((bit) & ~0xff)) ? \
+ __static_cpu_has(bit) : \
+ boot_cpu_has(bit) \
+)
+
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index ae6253ab9029..733f7e91e7a9 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -34,6 +34,18 @@
#define CFI_SIGNAL_FRAME
#endif
+#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+ /*
+ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+ * The latter we currently just discard since we don't do DWARF
+ * unwinding at runtime. So only the offline DWARF information is
+ * useful to anyone. Note we should not use this directive if this
+ * file is used in the vDSO assembly, or if vmlinux.lds.S gets
+ * changed so it doesn't discard .eh_frame.
+ */
+ .cfi_sections .debug_frame
+#endif
+
#else
/*
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 0e22296790d3..ec8a52d14ab1 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -45,7 +45,12 @@
#define E820_NVS 4
#define E820_UNUSABLE 5
-/* reserved RAM used by kernel itself */
+/*
+ * reserved RAM used by kernel itself
+ * if CONFIG_INTEL_TXT is enabled, memory of this type will be
+ * included in the S3 integrity calculation and so should not include
+ * any memory that BIOS might alter over the S3 transition
+ */
#define E820_RESERVED_KERN 128
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f8576427cfe..aeab29aee617 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
-#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
+#define inc_irq_stat(member) percpu_inc(irq_stat.member)
#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
index e153a2b3889a..5df477ac3af7 100644
--- a/arch/x86/include/asm/hyperv.h
+++ b/arch/x86/include/asm/hyperv.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_KVM_HYPERV_H
-#define _ASM_X86_KVM_HYPERV_H
+#ifndef _ASM_X86_HYPERV_H
+#define _ASM_X86_HYPERV_H
#include <linux/types.h>
@@ -14,6 +14,10 @@
#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
+#define HYPERV_CPUID_MIN 0x40000005
+#define HYPERV_CPUID_MAX 0x4000ffff
+
/*
* Feature identification. EAX indicates which features are available
* to the partition based upon the current partition privileges.
@@ -129,6 +133,9 @@
/* MSR used to provide vcpu index */
#define HV_X64_MSR_VP_INDEX 0x40000002
+/* MSR used to read the per-partition time reference counter */
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+
/* Define the virtual APIC registers */
#define HV_X64_MSR_EOI 0x40000070
#define HV_X64_MSR_ICR 0x40000071
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b78c0941e422..70abda7058c8 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -17,10 +17,33 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
-#ifndef ASM_X86__HYPERVISOR_H
-#define ASM_X86__HYPERVISOR_H
+#ifndef _ASM_X86_HYPERVISOR_H
+#define _ASM_X86_HYPERVISOR_H
extern void init_hypervisor(struct cpuinfo_x86 *c);
extern void init_hypervisor_platform(void);
+/*
+ * x86 hypervisor information
+ */
+struct hypervisor_x86 {
+ /* Hypervisor name */
+ const char *name;
+
+ /* Detection routine */
+ bool (*detect)(void);
+
+ /* Adjust CPU feature bits (run once per CPU) */
+ void (*set_cpu_features)(struct cpuinfo_x86 *);
+
+ /* Platform setup (run once per boot) */
+ void (*init_platform)(void);
+};
+
+extern const struct hypervisor_x86 *x86_hyper;
+
+/* Recognized hypervisors */
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+
#endif
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index da2930924501..c991b3a7b904 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -16,7 +16,9 @@
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
+#include <linux/slab.h>
#include <asm/asm.h>
+#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
@@ -56,6 +58,11 @@ extern int restore_i387_xstate_ia32(void __user *buf);
#define X87_FSW_ES (1 << 7) /* Exception Summary */
+static __always_inline __pure bool use_xsave(void)
+{
+ return static_cpu_has(X86_FEATURE_XSAVE);
+}
+
#ifdef CONFIG_X86_64
/* Ignore delayed exceptions from user space */
@@ -91,15 +98,15 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
values. The kernel data segment can be sometimes 0 and sometimes
new user value. Both should be ok.
Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
{
- struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
/*
* xsave header may indicate the init state of the FP.
*/
- if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+ if (use_xsave() &&
!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
return;
@@ -111,6 +118,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
X86_FEATURE_FXSAVE_LEAK);
}
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+ fpu_clear(&tsk->thread.fpu);
+}
+
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
int err;
@@ -135,7 +147,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
return err;
}
-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
{
/* Using "rex64; fxsave %0" is broken because, if the memory operand
uses any extended registers for addressing, a second REX prefix
@@ -145,42 +157,45 @@ static inline void fxsave(struct task_struct *tsk)
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
__asm__ __volatile__("rex64/fxsave %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__("rex64/fxsave (%1)"
- : "=m" (tsk->thread.xstate->fxsave)
- : "cdaSDb" (&tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave)
+ : "cdaSDb" (&fpu->state->fxsave));
#endif
}
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE)
- xsave(tsk);
+ if (use_xsave())
+ fpu_xsave(fpu);
else
- fxsave(tsk);
+ fpu_fxsave(fpu);
+
+ fpu_clear(fpu);
+}
- clear_fpu_state(tsk);
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_MATH_EMULATION
-extern void finit_task(struct task_struct *tsk);
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
-static inline void finit_task(struct task_struct *tsk)
-{
-}
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
static inline void tolerant_fwait(void)
@@ -216,13 +231,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
/*
* These must be called with preempt disabled
*/
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE) {
- struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ if (use_xsave()) {
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
- xsave(tsk);
+ fpu_xsave(fpu);
/*
* xsave header may indicate the init state of the FP.
@@ -246,8 +261,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
"fxsave %[fx]\n"
"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
X86_FEATURE_FXSR,
- [fx] "m" (tsk->thread.xstate->fxsave),
- [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+ [fx] "m" (fpu->state->fxsave),
+ [fsw] "m" (fpu->state->fxsave.swd) : "memory");
clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
@@ -259,17 +274,34 @@ clear_state:
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));
end:
+ ;
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
+
#endif /* CONFIG_X86_64 */
-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE)
- return xrstor_checking(&tsk->thread.xstate->xsave);
+ return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+ if (use_xsave())
+ return fpu_xrstor_checking(fpu);
else
- return fxrstor_checking(&tsk->thread.xstate->fxsave);
+ return fpu_fxrstor_checking(fpu);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+ return fpu_restore_checking(&tsk->thread.fpu);
}
/*
@@ -397,30 +429,59 @@ static inline void clear_fpu(struct task_struct *tsk)
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.xstate->fxsave.cwd;
+ return tsk->thread.fpu.state->fxsave.cwd;
} else {
- return (unsigned short)tsk->thread.xstate->fsave.cwd;
+ return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.xstate->fxsave.swd;
+ return tsk->thread.fpu.state->fxsave.swd;
} else {
- return (unsigned short)tsk->thread.xstate->fsave.swd;
+ return (unsigned short)tsk->thread.fpu.state->fsave.swd;
}
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
- return tsk->thread.xstate->fxsave.mxcsr;
+ return tsk->thread.fpu.state->fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
}
+static bool fpu_allocated(struct fpu *fpu)
+{
+ return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+ if (fpu_allocated(fpu))
+ return 0;
+ fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+ if (!fpu->state)
+ return -ENOMEM;
+ WARN_ON((unsigned long)fpu->state & 15);
+ return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+ if (fpu->state) {
+ kmem_cache_free(task_xstate_cachep, fpu->state);
+ fpu->state = NULL;
+ }
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+ memcpy(dst->state, src->state, xstate_size);
+}
+
#endif /* __ASSEMBLY__ */
#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
index 1edbf89680fd..fc1f579fb965 100644
--- a/arch/x86/include/asm/i8253.h
+++ b/arch/x86/include/asm/i8253.h
@@ -6,7 +6,7 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-extern spinlock_t i8253_lock;
+extern raw_spinlock_t i8253_lock;
extern struct clock_event_device *global_clock_event;
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 35832a03a515..63cb4096c3dc 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -159,7 +159,6 @@ struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
void setup_IO_APIC_irq_extra(u32 gsi);
-extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
@@ -180,12 +179,13 @@ extern void ioapic_write_entry(int apic, int pin,
extern void setup_ioapic_ids_from_mpc(void);
struct mp_ioapic_gsi{
- int gsi_base;
- int gsi_end;
+ u32 gsi_base;
+ u32 gsi_end;
};
extern struct mp_ioapic_gsi mp_gsi_routing[];
-int mp_find_ioapic(int gsi);
-int mp_find_ioapic_pin(int ioapic, int gsi);
+extern u32 gsi_end;
+int mp_find_ioapic(u32 gsi);
+int mp_find_ioapic_pin(int ioapic, u32 gsi);
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void __init pre_init_apic_IRQ0(void);
@@ -197,7 +197,8 @@ static const int timer_through_8259 = 0;
static inline void ioapic_init_mappings(void) { }
static inline void ioapic_insert_resources(void) { }
static inline void probe_nr_irqs_gsi(void) { }
-static inline int mp_find_ioapic(int gsi) { return 0; }
+#define gsi_end (NR_IRQS_LEGACY - 1)
+static inline int mp_find_ioapic(u32 gsi) { return 0; }
struct io_apic_irq_attr;
static inline int io_apic_set_pci_routing(struct device *dev, int irq,
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
index f70e60071fe8..af00bd1d2089 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int k8_scan_nodes(void);
#ifdef CONFIG_K8_NB
+extern int num_k8_northbridges;
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
}
+
#else
+#define num_k8_northbridges 0
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return NULL;
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index d8bf23a88d05..c82868e9f905 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -105,16 +105,6 @@ extern void mp_config_acpi_legacy_irqs(void);
struct device;
extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
int active_high_low);
-extern int acpi_probe_gsi(void);
-#ifdef CONFIG_X86_IO_APIC
-extern int mp_find_ioapic(int gsi);
-extern int mp_find_ioapic_pin(int ioapic, int gsi);
-#endif
-#else /* !CONFIG_ACPI: */
-static inline int acpi_probe_gsi(void)
-{
- return 0;
-}
#endif /* CONFIG_ACPI */
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
new file mode 100644
index 000000000000..79ce5685ab64
--- /dev/null
+++ b/arch/x86/include/asm/mshyperv.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_MSHYPER_H
+#define _ASM_X86_MSHYPER_H
+
+#include <linux/types.h>
+#include <asm/hyperv.h>
+
+struct ms_hyperv_info {
+ u32 features;
+ u32 hints;
+};
+
+extern struct ms_hyperv_info ms_hyperv;
+
+#endif
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 66a272dfd8b8..0ec6d12d84e6 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -190,6 +190,29 @@ do { \
pfo_ret__; \
})
+#define percpu_unary_op(op, var) \
+({ \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+})
+
/*
* percpu_read() makes gcc load the percpu variable every time it is
* accessed while percpu_read_stable() allows the value to be cached.
@@ -207,6 +230,7 @@ do { \
#define percpu_and(var, val) percpu_to_op("and", var, val)
#define percpu_or(var, val) percpu_to_op("or", var, val)
#define percpu_xor(var, val) percpu_to_op("xor", var, val)
+#define percpu_inc(var) percpu_unary_op("inc", var)
#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index b05400a542ff..64a8ebff06fc 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -89,7 +89,8 @@
P4_CCCR_ENABLE)
/* HT mask */
-#define P4_CCCR_MASK_HT (P4_CCCR_MASK | P4_CCCR_THREAD_ANY)
+#define P4_CCCR_MASK_HT \
+ (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
#define P4_GEN_ESCR_EMASK(class, name, bit) \
class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 32428b410b55..5a51379dcbe4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -113,7 +113,6 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
#endif
- unsigned int x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
@@ -127,9 +126,6 @@ struct cpuinfo_x86 {
#define X86_VENDOR_UNKNOWN 0xff
-#define X86_HYPER_VENDOR_NONE 0
-#define X86_HYPER_VENDOR_VMWARE 1
-
/*
* capabilities of CPUs
*/
@@ -380,6 +376,10 @@ union thread_xstate {
struct xsave_struct xsave;
};
+struct fpu {
+ union thread_xstate *state;
+};
+
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
@@ -457,7 +457,7 @@ struct thread_struct {
unsigned long trap_no;
unsigned long error_code;
/* floating point and extended processor state */
- union thread_xstate *xstate;
+ struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index d017ed5502e2..d4092fac226b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -242,7 +242,6 @@ static inline struct thread_info *current_thread_info(void)
#define TS_POLLING 0x0004 /* true if in idle loop
and not sleeping */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-#define TS_XSAVE 0x0010 /* Use xsave/xrstor */
#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4da91ad69e0d..f66cda56781d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
-void math_error(void __user *);
+void math_error(struct pt_regs *, int, int);
void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index b414d2b401f6..aa558ac0306e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -27,13 +27,14 @@
* set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
*
* We will use 31 sets, one for sending BAU messages from each of the 32
- * cpu's on the node.
+ * cpu's on the uvhub.
*
* TLB shootdown will use the first of the 8 descriptors of each set.
* Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
*/
#define UV_ITEMS_PER_DESCRIPTOR 8
+#define MAX_BAU_CONCURRENT 3
#define UV_CPUS_PER_ACT_STATUS 32
#define UV_ACT_STATUS_MASK 0x3
#define UV_ACT_STATUS_SIZE 2
@@ -45,6 +46,9 @@
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
/*
* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
@@ -55,15 +59,29 @@
#define DESC_STATUS_SOURCE_TIMEOUT 3
/*
- * source side thresholds at which message retries print a warning
+ * source side threshholds at which message retries print a warning
*/
#define SOURCE_TIMEOUT_LIMIT 20
#define DESTINATION_TIMEOUT_LIMIT 20
/*
+ * misc. delays, in microseconds
+ */
+#define THROTTLE_DELAY 10
+#define TIMEOUT_DELAY 10
+#define BIOS_TO 1000
+/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
+
+/*
+ * threshholds at which to use IPI to free resources
+ */
+#define PLUGSB4RESET 100
+#define TIMEOUTSB4RESET 100
+
+/*
* number of entries in the destination side payload queue
*/
-#define DEST_Q_SIZE 17
+#define DEST_Q_SIZE 20
/*
* number of destination side software ack resources
*/
@@ -72,9 +90,10 @@
/*
* completion statuses for sending a TLB flush message
*/
-#define FLUSH_RETRY 1
-#define FLUSH_GIVEUP 2
-#define FLUSH_COMPLETE 3
+#define FLUSH_RETRY_PLUGGED 1
+#define FLUSH_RETRY_TIMEOUT 2
+#define FLUSH_GIVEUP 3
+#define FLUSH_COMPLETE 4
/*
* Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -86,14 +105,14 @@
* 'base_dest_nodeid' field of the header corresponds to the
* destination nodeID associated with that specified bit.
*/
-struct bau_target_nodemask {
- unsigned long bits[BITS_TO_LONGS(256)];
+struct bau_target_uvhubmask {
+ unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};
/*
- * mask of cpu's on a node
+ * mask of cpu's on a uvhub
* (during initialization we need to check that unsigned long has
- * enough bits for max. cpu's per node)
+ * enough bits for max. cpu's per uvhub)
*/
struct bau_local_cpumask {
unsigned long bits;
@@ -135,8 +154,8 @@ struct bau_msg_payload {
struct bau_msg_header {
unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
- unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */
- /* bits 20:6 */ /* first bit in node_map */
+ unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+ /* bits 20:6 */ /* first bit in uvhub map */
unsigned int command:8; /* message type */
/* bits 28:21 */
/* 0x38: SN3net EndPoint Message */
@@ -146,26 +165,38 @@ struct bau_msg_header {
unsigned int rsvd_2:9; /* must be zero */
/* bits 40:32 */
/* Suppl_A is 56-41 */
- unsigned int payload_2a:8;/* becomes byte 16 of msg */
- /* bits 48:41 */ /* not currently using */
- unsigned int payload_2b:8;/* becomes byte 17 of msg */
- /* bits 56:49 */ /* not currently using */
+ unsigned int sequence:16;/* message sequence number */
+ /* bits 56:41 */ /* becomes bytes 16-17 of msg */
/* Address field (96:57) is never used as an
address (these are address bits 42:3) */
+
unsigned int rsvd_3:1; /* must be zero */
/* bit 57 */
/* address bits 27:4 are payload */
- /* these 24 bits become bytes 12-14 of msg */
+ /* these next 24 (58-81) bits become bytes 12-14 of msg */
+
+ /* bits 65:58 land in byte 12 */
unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
/* bit 58 */
-
- unsigned int payload_1a:5;/* not currently used */
- /* bits 63:59 */
- unsigned int payload_1b:8;/* not currently used */
- /* bits 71:64 */
- unsigned int payload_1c:8;/* not currently used */
- /* bits 79:72 */
- unsigned int payload_1d:2;/* not currently used */
+ unsigned int msg_type:3; /* software type of the message*/
+ /* bits 61:59 */
+ unsigned int canceled:1; /* message canceled, resource to be freed*/
+ /* bit 62 */
+ unsigned int payload_1a:1;/* not currently used */
+ /* bit 63 */
+ unsigned int payload_1b:2;/* not currently used */
+ /* bits 65:64 */
+
+ /* bits 73:66 land in byte 13 */
+ unsigned int payload_1ca:6;/* not currently used */
+ /* bits 71:66 */
+ unsigned int payload_1c:2;/* not currently used */
+ /* bits 73:72 */
+
+ /* bits 81:74 land in byte 14 */
+ unsigned int payload_1d:6;/* not currently used */
+ /* bits 79:74 */
+ unsigned int payload_1e:2;/* not currently used */
/* bits 81:80 */
unsigned int rsvd_4:7; /* must be zero */
@@ -178,7 +209,7 @@ struct bau_msg_header {
/* bits 95:90 */
unsigned int rsvd_6:5; /* must be zero */
/* bits 100:96 */
- unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */
+ unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
/* bit 101*/
unsigned int fairness:3;/* usually zero */
/* bits 104:102 */
@@ -191,13 +222,18 @@ struct bau_msg_header {
/* bits 127:107 */
};
+/* see msg_type: */
+#define MSG_NOOP 0
+#define MSG_REGULAR 1
+#define MSG_RETRY 2
+
/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
* Should be 64 bytes
*/
struct bau_desc {
- struct bau_target_nodemask distribution;
+ struct bau_target_uvhubmask distribution;
/*
* message template, consisting of header and payload:
*/
@@ -237,19 +273,25 @@ struct bau_payload_queue_entry {
unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits, bytes 10-11 */
- unsigned short replied_to:1; /* sent as 0 by the source */
- /* 1 bit */
- unsigned short unused1:7; /* not currently using */
- /* 7 bits: byte 12) */
+ /* these next 3 bytes come from bits 58-81 of the message header */
+ unsigned short replied_to:1; /* sent as 0 by the source */
+ unsigned short msg_type:3; /* software message type */
+ unsigned short canceled:1; /* sent as 0 by the source */
+ unsigned short unused1:3; /* not currently using */
+ /* byte 12 */
- unsigned char unused2[2]; /* not currently using */
- /* bytes 13-14 */
+ unsigned char unused2a; /* not currently using */
+ /* byte 13 */
+ unsigned char unused2; /* not currently using */
+ /* byte 14 */
unsigned char sw_ack_vector; /* filled in by the hardware */
/* byte 15 (bits 127:120) */
- unsigned char unused4[3]; /* not currently using bytes 17-19 */
- /* bytes 17-19 */
+ unsigned short sequence; /* message sequence number */
+ /* bytes 16-17 */
+ unsigned char unused4[2]; /* not currently using bytes 18-19 */
+ /* bytes 18-19 */
int number_of_cpus; /* filled in at destination */
/* 32 bits, bytes 20-23 (aligned) */
@@ -259,63 +301,93 @@ struct bau_payload_queue_entry {
};
/*
- * one for every slot in the destination payload queue
- */
-struct bau_msg_status {
- struct bau_local_cpumask seen_by; /* map of cpu's */
-};
-
-/*
- * one for every slot in the destination software ack resources
- */
-struct bau_sw_ack_status {
- struct bau_payload_queue_entry *msg; /* associated message */
- int watcher; /* cpu monitoring, or -1 */
-};
-
-/*
- * one on every node and per-cpu; to locate the software tables
+ * one per-cpu; to locate the software tables
*/
struct bau_control {
struct bau_desc *descriptor_base;
- struct bau_payload_queue_entry *bau_msg_head;
struct bau_payload_queue_entry *va_queue_first;
struct bau_payload_queue_entry *va_queue_last;
- struct bau_msg_status *msg_statuses;
- int *watching; /* pointer to array */
+ struct bau_payload_queue_entry *bau_msg_head;
+ struct bau_control *uvhub_master;
+ struct bau_control *socket_master;
+ unsigned long timeout_interval;
+ atomic_t active_descriptor_count;
+ int max_concurrent;
+ int max_concurrent_constant;
+ int retry_message_scans;
+ int plugged_tries;
+ int timeout_tries;
+ int ipi_attempts;
+ int conseccompletes;
+ short cpu;
+ short uvhub_cpu;
+ short uvhub;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+ cycles_t send_message;
+ spinlock_t masks_lock;
+ spinlock_t uvhub_lock;
+ spinlock_t queue_lock;
};
/*
* This structure is allocated per_cpu for UV TLB shootdown statistics.
*/
struct ptc_stats {
- unsigned long ptc_i; /* number of IPI-style flushes */
- unsigned long requestor; /* number of nodes this cpu sent to */
- unsigned long requestee; /* times cpu was remotely requested */
- unsigned long alltlb; /* times all tlb's on this cpu were flushed */
- unsigned long onetlb; /* times just one tlb on this cpu was flushed */
- unsigned long s_retry; /* retries on source side timeouts */
- unsigned long d_retry; /* retries on destination side timeouts */
- unsigned long sflush; /* cycles spent in uv_flush_tlb_others */
- unsigned long dflush; /* cycles spent on destination side */
- unsigned long retriesok; /* successes on retries */
- unsigned long nomsg; /* interrupts with no message */
- unsigned long multmsg; /* interrupts with multiple messages */
- unsigned long ntargeted;/* nodes targeted */
+ /* sender statistics */
+ unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
+ unsigned long s_requestor; /* number of shootdown requests */
+ unsigned long s_stimeout; /* source side timeouts */
+ unsigned long s_dtimeout; /* destination side timeouts */
+ unsigned long s_time; /* time spent in sending side */
+ unsigned long s_retriesok; /* successful retries */
+ unsigned long s_ntargcpu; /* number of cpus targeted */
+ unsigned long s_ntarguvhub; /* number of uvhubs targeted */
+ unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */
+ unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */
+ unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */
+ unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */
+ unsigned long s_ntarguvhub1; /* number of times == 1 target hub */
+ unsigned long s_resets_plug; /* ipi-style resets from plug state */
+ unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
+ unsigned long s_busy; /* status stayed busy past s/w timer */
+ unsigned long s_throttles; /* waits in throttle */
+ unsigned long s_retry_messages; /* retry broadcasts */
+ /* destination statistics */
+ unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
+ unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
+ unsigned long d_multmsg; /* interrupts with multiple messages */
+ unsigned long d_nomsg; /* interrupts with no message */
+ unsigned long d_time; /* time spent on destination side */
+ unsigned long d_requestee; /* number of messages processed */
+ unsigned long d_retries; /* number of retry messages processed */
+ unsigned long d_canceled; /* number of messages canceled by retries */
+ unsigned long d_nocanceled; /* retries that found nothing to cancel */
+ unsigned long d_resets; /* number of ipi-style requests processed */
+ unsigned long d_rcanceled; /* number of messages canceled by resets */
};
-static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp)
+static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
{
- return constant_test_bit(node, &dstp->bits[0]);
+ return constant_test_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_node_set(int node, struct bau_target_nodemask *dstp)
+static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
{
- __set_bit(node, &dstp->bits[0]);
+ __set_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits)
+static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
+ int nbits)
{
bitmap_zero(&dstp->bits[0], nbits);
}
+static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
+{
+ return bitmap_weight((unsigned long *)&dstp->bits[0],
+ UV_DISTRIBUTION_SIZE);
+}
static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
{
@@ -328,4 +400,35 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
+struct atomic_short {
+ short counter;
+};
+
+/**
+ * atomic_read_short - read a short atomic variable
+ * @v: pointer of type atomic_short
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read_short(const struct atomic_short *v)
+{
+ return v->counter;
+}
+
+/**
+ * atomic_add_short_return - add and return a short int
+ * @i: short value to add
+ * @v: pointer of type atomic_short
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_short_return(short i, struct atomic_short *v)
+{
+ short __i = i;
+ asm volatile(LOCK_PREFIX "xaddw %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+}
+
#endif /* _ASM_X86_UV_UV_BAU_H */
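
The atomic_add_short_return() helper above leans on the x86 xadd instruction, which atomically exchanges the source register with memory while adding, so the register comes back holding the old counter value; adding back the saved increment yields the new value. A standalone userspace sketch of that behaviour follows (not part of the patch: LOCK_PREFIX is spelled as a literal "lock" prefix here, and the quiesce-counter usage is only an assumed illustration of how the BAU code might employ it).

#include <stdio.h>

struct atomic_short { short counter; };

static inline int atomic_add_short_return(short i, struct atomic_short *v)
{
	short __i = i;

	/* xaddw: memory += register, register = old memory value */
	asm volatile("lock; xaddw %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;		/* old value + increment = new value */
}

int main(void)
{
	struct atomic_short quiesce = { .counter = 0 };

	printf("after +1: %d\n", atomic_add_short_return(1, &quiesce));   /* 1 */
	printf("after +1: %d\n", atomic_add_short_return(1, &quiesce));   /* 2 */
	printf("after -2: %d\n", atomic_add_short_return(-2, &quiesce));  /* 0 */
	return 0;
}
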
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 14cc74ba5d23..bf6b88ef8eeb 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -307,7 +307,7 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset
* Access Global MMR space using the MMR space located at the top of physical
* memory.
*/
-static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
+static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
return __va(UV_GLOBAL_MMR64_BASE |
UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
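
Changing the return type to volatile void __iomem * lets callers hand the address straight to the normal MMIO accessors and lets sparse check the address-space annotation. A minimal sketch of such a caller is below; the example_* helpers are invented for illustration, UVH_LB_BAU_MISC_CONTROL is the offset added to uv_mmrs.h later in this patch, and uv_hub.h itself provides similar read/write wrappers.

#include <asm/io.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

/* Illustrative only: access a 64-bit UV MMR through the __iomem-typed
 * address returned by uv_global_mmr64_address(). */
static unsigned long example_read_bau_misc_control(int pnode)
{
	return readq(uv_global_mmr64_address(pnode, UVH_LB_BAU_MISC_CONTROL));
}

static void example_write_bau_misc_control(int pnode, unsigned long val)
{
	writeq(val, uv_global_mmr64_address(pnode, UVH_LB_BAU_MISC_CONTROL));
}
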
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 2cae46c7c8a2..b2f2d2e05cec 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -1,4 +1,3 @@
-
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -15,13 +14,25 @@
#define UV_MMR_ENABLE (1UL << 63)
/* ========================================================================= */
+/* UVH_BAU_DATA_BROADCAST */
+/* ========================================================================= */
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x0440
+
+#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
+#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+
+union uvh_bau_data_broadcast_u {
+ unsigned long v;
+ struct uvh_bau_data_broadcast_s {
+ unsigned long enable : 1; /* RW */
+ unsigned long rsvd_1_63: 63; /* */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
-/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
#define UVH_BAU_DATA_CONFIG 0x61680UL
#define UVH_BAU_DATA_CONFIG_32 0x0438
@@ -604,6 +615,68 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
/* ========================================================================= */
+/* UVH_LB_BAU_MISC_CONTROL */
+/* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
+
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+union uvh_lb_bau_misc_control_u {
+ unsigned long v;
+ struct uvh_lb_bau_misc_control_s {
+ unsigned long rejection_delay : 8; /* RW */
+ unsigned long apic_mode : 1; /* RW */
+ unsigned long force_broadcast : 1; /* RW */
+ unsigned long force_lock_nop : 1; /* RW */
+ unsigned long csi_agent_presence_vector : 3; /* RW */
+ unsigned long descriptor_fetch_mode : 1; /* RW */
+ unsigned long enable_intd_soft_ack_mode : 1; /* RW */
+ unsigned long intd_soft_ack_timeout_period : 4; /* RW */
+ unsigned long enable_dual_mapping_mode : 1; /* RW */
+ unsigned long vga_io_port_decode_enable : 1; /* RW */
+ unsigned long vga_io_port_16_bit_decode : 1; /* RW */
+ unsigned long suppress_dest_registration : 1; /* RW */
+ unsigned long programmed_initial_priority : 3; /* RW */
+ unsigned long use_incoming_priority : 1; /* RW */
+ unsigned long enable_programmed_initial_priority : 1; /* RW */
+ unsigned long rsvd_29_47 : 19; /* */
+ unsigned long fun : 16; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
@@ -681,334 +754,6 @@ union uvh_lb_bau_sb_descriptor_base_u {
};
/* ========================================================================= */
-/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
-/* ========================================================================= */
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
-
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
-
-union uvh_lb_mcast_aoerr0_rpt_enable_u {
- unsigned long v;
- struct uvh_lb_mcast_aoerr0_rpt_enable_s {
- unsigned long mcast_obese_msg : 1; /* RW */
- unsigned long mcast_data_sb_err : 1; /* RW */
- unsigned long mcast_nack_buff_parity : 1; /* RW */
- unsigned long mcast_timeout : 1; /* RW */
- unsigned long mcast_inactive_reply : 1; /* RW */
- unsigned long mcast_upgrade_error : 1; /* RW */
- unsigned long mcast_reg_count_underflow : 1; /* RW */
- unsigned long mcast_rep_obese_msg : 1; /* RW */
- unsigned long ucache_req_runt_msg : 1; /* RW */
- unsigned long ucache_req_obese_msg : 1; /* RW */
- unsigned long ucache_req_data_sb_err : 1; /* RW */
- unsigned long ucache_rep_runt_msg : 1; /* RW */
- unsigned long ucache_rep_obese_msg : 1; /* RW */
- unsigned long ucache_rep_data_sb_err : 1; /* RW */
- unsigned long ucache_rep_command_err : 1; /* RW */
- unsigned long ucache_pend_timeout : 1; /* RW */
- unsigned long macc_req_runt_msg : 1; /* RW */
- unsigned long macc_req_obese_msg : 1; /* RW */
- unsigned long macc_req_data_sb_err : 1; /* RW */
- unsigned long macc_rep_runt_msg : 1; /* RW */
- unsigned long macc_rep_obese_msg : 1; /* RW */
- unsigned long macc_rep_data_sb_err : 1; /* RW */
- unsigned long macc_amo_timeout : 1; /* RW */
- unsigned long macc_put_timeout : 1; /* RW */
- unsigned long macc_spurious_event : 1; /* RW */
- unsigned long ioh_destination_table_parity : 1; /* RW */
- unsigned long get_had_error_reply : 1; /* RW */
- unsigned long get_timeout : 1; /* RW */
- unsigned long lock_manager_had_error_reply : 1; /* RW */
- unsigned long put_had_error_reply : 1; /* RW */
- unsigned long put_timeout : 1; /* RW */
- unsigned long sb_activation_overrun : 1; /* RW */
- unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
- unsigned long completed_gb_activation_timeout : 1; /* RW */
- unsigned long descriptor_buffer_0_parity : 1; /* RW */
- unsigned long descriptor_buffer_1_parity : 1; /* RW */
- unsigned long socket_destination_table_parity : 1; /* RW */
- unsigned long bau_reply_payload_corruption : 1; /* RW */
- unsigned long io_port_destination_table_parity : 1; /* RW */
- unsigned long intd_soft_ack_timeout : 1; /* RW */
- unsigned long int_rep_obese_msg : 1; /* RW */
- unsigned long int_rep_command_err : 1; /* RW */
- unsigned long int_timeout : 1; /* RW */
- unsigned long rsvd_43_63 : 21; /* */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LOCAL_INT0_CONFIG */
-/* ========================================================================= */
-#define UVH_LOCAL_INT0_CONFIG 0x61000UL
-
-#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
-#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
-#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
-#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
-#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_local_int0_config_u {
- unsigned long v;
- struct uvh_local_int0_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LOCAL_INT0_ENABLE */
-/* ========================================================================= */
-#define UVH_LOCAL_INT0_ENABLE 0x65000UL
-
-#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
-#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
-#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
-#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
-#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
-#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
-#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
-#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
-#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
-#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
-#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
-#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
-#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
-#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
-#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
-#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
-#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
-#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
-#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
-#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
-#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
-#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-
-union uvh_local_int0_enable_u {
- unsigned long v;
- struct uvh_local_int0_enable_s {
- unsigned long lb_hcerr : 1; /* RW */
- unsigned long gr0_hcerr : 1; /* RW */
- unsigned long gr1_hcerr : 1; /* RW */
- unsigned long lh_hcerr : 1; /* RW */
- unsigned long rh_hcerr : 1; /* RW */
- unsigned long xn_hcerr : 1; /* RW */
- unsigned long si_hcerr : 1; /* RW */
- unsigned long lb_aoerr0 : 1; /* RW */
- unsigned long gr0_aoerr0 : 1; /* RW */
- unsigned long gr1_aoerr0 : 1; /* RW */
- unsigned long lh_aoerr0 : 1; /* RW */
- unsigned long rh_aoerr0 : 1; /* RW */
- unsigned long xn_aoerr0 : 1; /* RW */
- unsigned long si_aoerr0 : 1; /* RW */
- unsigned long lb_aoerr1 : 1; /* RW */
- unsigned long gr0_aoerr1 : 1; /* RW */
- unsigned long gr1_aoerr1 : 1; /* RW */
- unsigned long lh_aoerr1 : 1; /* RW */
- unsigned long rh_aoerr1 : 1; /* RW */
- unsigned long xn_aoerr1 : 1; /* RW */
- unsigned long si_aoerr1 : 1; /* RW */
- unsigned long rh_vpi_int : 1; /* RW */
- unsigned long system_shutdown_int : 1; /* RW */
- unsigned long lb_irq_int_0 : 1; /* RW */
- unsigned long lb_irq_int_1 : 1; /* RW */
- unsigned long lb_irq_int_2 : 1; /* RW */
- unsigned long lb_irq_int_3 : 1; /* RW */
- unsigned long lb_irq_int_4 : 1; /* RW */
- unsigned long lb_irq_int_5 : 1; /* RW */
- unsigned long lb_irq_int_6 : 1; /* RW */
- unsigned long lb_irq_int_7 : 1; /* RW */
- unsigned long lb_irq_int_8 : 1; /* RW */
- unsigned long lb_irq_int_9 : 1; /* RW */
- unsigned long lb_irq_int_10 : 1; /* RW */
- unsigned long lb_irq_int_11 : 1; /* RW */
- unsigned long lb_irq_int_12 : 1; /* RW */
- unsigned long lb_irq_int_13 : 1; /* RW */
- unsigned long lb_irq_int_14 : 1; /* RW */
- unsigned long lb_irq_int_15 : 1; /* RW */
- unsigned long l1_nmi_int : 1; /* RW */
- unsigned long stop_clock : 1; /* RW */
- unsigned long asic_to_l1 : 1; /* RW */
- unsigned long l1_to_asic : 1; /* RW */
- unsigned long ltc_int : 1; /* RW */
- unsigned long la_seq_trigger : 1; /* RW */
- unsigned long rsvd_45_63 : 19; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
@@ -1112,26 +857,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
};
/* ========================================================================= */
-/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
-/* ========================================================================= */
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
-
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_rh_gam_cfg_overlay_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_cfg_overlay_config_mmr_s {
- unsigned long rsvd_0_25: 26; /* */
- unsigned long base : 20; /* RW */
- unsigned long rsvd_46_62: 17; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -1263,101 +988,6 @@ union uvh_rtc1_int_config_u {
};
/* ========================================================================= */
-/* UVH_RTC2_INT_CONFIG */
-/* ========================================================================= */
-#define UVH_RTC2_INT_CONFIG 0x61600UL
-
-#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_RTC2_INT_CONFIG_P_SHFT 13
-#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_RTC2_INT_CONFIG_T_SHFT 15
-#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_RTC2_INT_CONFIG_M_SHFT 16
-#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
-#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_rtc2_int_config_u {
- unsigned long v;
- struct uvh_rtc2_int_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_RTC3_INT_CONFIG */
-/* ========================================================================= */
-#define UVH_RTC3_INT_CONFIG 0x61640UL
-
-#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_RTC3_INT_CONFIG_P_SHFT 13
-#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_RTC3_INT_CONFIG_T_SHFT 15
-#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_RTC3_INT_CONFIG_M_SHFT 16
-#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
-#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_rtc3_int_config_u {
- unsigned long v;
- struct uvh_rtc3_int_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_RTC_INC_RATIO */
-/* ========================================================================= */
-#define UVH_RTC_INC_RATIO 0x350000UL
-
-#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
-#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
-#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
-#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
-
-union uvh_rtc_inc_ratio_u {
- unsigned long v;
- struct uvh_rtc_inc_ratio_s {
- unsigned long fraction : 20; /* RW */
- unsigned long ratio : 3; /* RW */
- unsigned long rsvd_23_63: 41; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_SI_ADDR_MAP_CONFIG */
/* ========================================================================= */
#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
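
The new UVH_LB_BAU_MISC_CONTROL block follows the file's usual pattern: raw SHFT/MASK constants plus a union that overlays named bitfields on the 64-bit register value. A hedged sketch of how such a union is typically used for a read-modify-write follows; the helper name is invented, uv_read_global_mmr64()/uv_write_global_mmr64() come from uv_hub.h, and whether tlb_uv.c programs the register exactly this way is not shown in this hunk.

/* Sketch: enable INTD soft-ack mode and set its 4-bit timeout period via
 * the bitfield union instead of open-coded shifts and masks. */
static void example_set_soft_ack(int pnode, unsigned long period)
{
	union uvh_lb_bau_misc_control_u misc;

	misc.v = uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
	misc.s.enable_intd_soft_ack_mode = 1;
	misc.s.intd_soft_ack_timeout_period = period & 0xf;
	uv_write_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL, misc.v);
}
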
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
deleted file mode 100644
index e49ed6d2fd4e..000000000000
--- a/arch/x86/include/asm/vmware.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2008, VMware, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef ASM_X86__VMWARE_H
-#define ASM_X86__VMWARE_H
-
-extern void vmware_platform_setup(void);
-extern int vmware_platform(void);
-extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
-
-#endif
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index ddc04ccad03b..2c4390cae228 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
void __user *fpstate,
struct _fpx_sw_bytes *sw);
-static inline int xrstor_checking(struct xsave_struct *fx)
+static inline int fpu_xrstor_checking(struct fpu *fpu)
{
+ struct xsave_struct *fx = &fpu->state->xsave;
int err;
asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
: "memory");
}
-static inline void xsave(struct task_struct *tsk)
+static inline void fpu_xsave(struct fpu *fpu)
{
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
- : : "D" (&(tsk->thread.xstate->xsave)),
+ : : "D" (&(fpu->state->xsave)),
"a" (-1), "d"(-1) : "memory");
}
#endif
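
These xsave.h changes move the save/restore primitives from taking a task_struct to taking the struct fpu introduced by the companion i387 rework in this series. A hedged sketch of the resulting call-site shape is below; the example_* helpers are invented, and it assumes thread_struct now embeds "struct fpu fpu", as the i387.h/i387.c changes listed in the diffstat do.

#include <linux/sched.h>
#include <asm/i387.h>
#include <asm/xsave.h>

/* Hypothetical helpers, only to show that callers now address FPU state
 * through a struct fpu rather than through the task itself. */
static inline void example_save_xstate(struct task_struct *tsk)
{
	fpu_xsave(&tsk->thread.fpu);
}

static inline int example_restore_xstate(struct task_struct *tsk)
{
	return fpu_xrstor_checking(&tsk->thread.fpu);
}
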
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index cd40aba6aa95..9a5ed58f09dc 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -94,6 +94,53 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
/*
+ * ISA irqs by default are the first 16 gsis but can be
+ * any gsi as specified by an interrupt source override.
+ */
+static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+};
+
+static unsigned int gsi_to_irq(unsigned int gsi)
+{
+ unsigned int irq = gsi + NR_IRQS_LEGACY;
+ unsigned int i;
+
+ for (i = 0; i < NR_IRQS_LEGACY; i++) {
+ if (isa_irq_to_gsi[i] == gsi) {
+ return i;
+ }
+ }
+
+ /* Provide an identity mapping of gsi == irq
+ * except on truly weird platforms that have
+ * non isa irqs in the first 16 gsis.
+ */
+ if (gsi >= NR_IRQS_LEGACY)
+ irq = gsi;
+ else
+ irq = gsi_end + 1 + gsi;
+
+ return irq;
+}
+
+static u32 irq_to_gsi(int irq)
+{
+ unsigned int gsi;
+
+ if (irq < NR_IRQS_LEGACY)
+ gsi = isa_irq_to_gsi[irq];
+ else if (irq <= gsi_end)
+ gsi = irq;
+ else if (irq <= (gsi_end + NR_IRQS_LEGACY))
+ gsi = irq - gsi_end;
+ else
+ gsi = 0xffffffff;
+
+ return gsi;
+}
+
+/*
* Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
* to map the target physical address. The problem is that set_fixmap()
* provides a single page, and it is possible that the page is not
@@ -313,7 +360,7 @@ acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
-static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
@@ -333,7 +380,7 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
* If GSI is < 16, this will update its flags,
* else it will create a new mp_irqs[] entry.
*/
- mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
/*
* stash over-ride to indicate we've been here
@@ -357,9 +404,10 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
acpi_table_print_madt_entry(header);
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
- acpi_sci_ioapic_setup(intsrc->global_irq,
+ acpi_sci_ioapic_setup(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
- (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
+ (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
+ intsrc->global_irq);
return 0;
}
@@ -448,7 +496,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
- *irq = gsi;
+ *irq = gsi_to_irq(gsi);
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
@@ -458,6 +506,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
return 0;
}
+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
+{
+ if (isa_irq >= 16)
+ return -1;
+ *gsi = irq_to_gsi(isa_irq);
+ return 0;
+}
+
/*
* success: return IRQ number (>=0)
* failure: return < 0
@@ -482,7 +538,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
}
#endif
- irq = plat_gsi;
+ irq = gsi_to_irq(plat_gsi);
return irq;
}
@@ -867,29 +923,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
extern int es7000_plat;
#endif
-int __init acpi_probe_gsi(void)
-{
- int idx;
- int gsi;
- int max_gsi = 0;
-
- if (acpi_disabled)
- return 0;
-
- if (!acpi_ioapic)
- return 0;
-
- max_gsi = 0;
- for (idx = 0; idx < nr_ioapics; idx++) {
- gsi = mp_gsi_routing[idx].gsi_end;
-
- if (gsi > max_gsi)
- max_gsi = gsi;
- }
-
- return max_gsi + 1;
-}
-
static void assign_to_mp_irq(struct mpc_intsrc *m,
struct mpc_intsrc *mp_irq)
{
@@ -947,13 +980,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
mp_irq.dstirq = pin; /* INTIN# */
save_mp_irq(&mp_irq);
+
+ isa_irq_to_gsi[bus_irq] = gsi;
}
void __init mp_config_acpi_legacy_irqs(void)
{
int i;
- int ioapic;
- unsigned int dstapic;
struct mpc_intsrc mp_irq;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
@@ -974,19 +1007,27 @@ void __init mp_config_acpi_legacy_irqs(void)
#endif
/*
- * Locate the IOAPIC that manages the ISA IRQs (0-15).
- */
- ioapic = mp_find_ioapic(0);
- if (ioapic < 0)
- return;
- dstapic = mp_ioapics[ioapic].apicid;
-
- /*
* Use the default configuration for the IRQs 0-15. Unless
* overridden by (MADT) interrupt source override entries.
*/
for (i = 0; i < 16; i++) {
+ int ioapic, pin;
+ unsigned int dstapic;
int idx;
+ u32 gsi;
+
+ /* Locate the gsi that irq i maps to. */
+ if (acpi_isa_irq_to_gsi(i, &gsi))
+ continue;
+
+ /*
+ * Locate the IOAPIC that manages the ISA IRQ.
+ */
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
+ continue;
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+ dstapic = mp_ioapics[ioapic].apicid;
for (idx = 0; idx < mp_irq_entries; idx++) {
struct mpc_intsrc *irq = mp_irqs + idx;
@@ -996,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
break;
/* Do we already have a mapping for this IOAPIC pin */
- if (irq->dstapic == dstapic && irq->dstirq == i)
+ if (irq->dstapic == dstapic && irq->dstirq == pin)
break;
}
@@ -1011,7 +1052,7 @@ void __init mp_config_acpi_legacy_irqs(void)
mp_irq.dstapic = dstapic;
mp_irq.irqtype = mp_INT;
mp_irq.srcbusirq = i; /* Identity mapped */
- mp_irq.dstirq = i;
+ mp_irq.dstirq = pin;
save_mp_irq(&mp_irq);
}
@@ -1076,11 +1117,6 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
-#ifdef CONFIG_X86_32
- if (ioapic_renumber_irq)
- gsi = ioapic_renumber_irq(ioapic, gsi);
-#endif
-
if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
printk(KERN_ERR "Invalid reference to IOAPIC pin "
"%d-%d\n", mp_ioapics[ioapic].apicid,
@@ -1094,7 +1130,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
- io_apic_set_pci_routing(dev, gsi, &irq_attr);
+ io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);
return gsi;
}
@@ -1154,7 +1190,8 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
+ acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
+ acpi_gbl_FADT.sci_interrupt);
/* Fill in identity legacy mappings where no override */
mp_config_acpi_legacy_irqs();
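
The gsi_to_irq()/irq_to_gsi() pair added at the top of this file carries the core of the change: legacy ISA IRQs keep their numbers even when an MADT interrupt source override moves them to a different GSI, GSIs above the legacy range map to IRQs one-to-one, and low GSIs that are no longer claimed by an ISA IRQ are remapped above gsi_end. The standalone userspace sketch below mirrors that logic with sample values; gsi_end = 47 and the IRQ 9 to GSI 20 SCI override are assumptions made up for the example.

#include <stdio.h>

#define NR_IRQS_LEGACY 16

static unsigned int gsi_end = 47;		/* e.g. two 24-pin IO-APICs */
static unsigned int isa_irq_to_gsi[NR_IRQS_LEGACY] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static unsigned int gsi_to_irq(unsigned int gsi)
{
	unsigned int i;

	for (i = 0; i < NR_IRQS_LEGACY; i++)
		if (isa_irq_to_gsi[i] == gsi)
			return i;

	/* identity map above the legacy range, remap below it */
	return (gsi >= NR_IRQS_LEGACY) ? gsi : gsi_end + 1 + gsi;
}

int main(void)
{
	isa_irq_to_gsi[9] = 20;		/* typical SCI override: IRQ 9 -> GSI 20 */

	printf("gsi 20 -> irq %u\n", gsi_to_irq(20));	/* 9 (the ISA IRQ) */
	printf("gsi 33 -> irq %u\n", gsi_to_irq(33));	/* 33 (identity) */
	printf("gsi  9 -> irq %u\n", gsi_to_irq(9));	/* 57 (remapped past gsi_end) */
	return 0;
}
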
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1a160d5d44d0..70237732a6c7 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-extern u8 *__smp_locks[], *__smp_locks_end[];
+extern s32 __smp_locks[], __smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
@@ -235,37 +235,41 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
#ifdef CONFIG_SMP
-static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_lock(const s32 *start, const s32 *end,
+ u8 *text, u8 *text_end)
{
- u8 **ptr;
+ const s32 *poff;
mutex_lock(&text_mutex);
- for (ptr = start; ptr < end; ptr++) {
- if (*ptr < text)
- continue;
- if (*ptr > text_end)
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
+ if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn DS segment override prefix into lock prefix */
- text_poke(*ptr, ((unsigned char []){0xf0}), 1);
+ if (*ptr == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
};
mutex_unlock(&text_mutex);
}
-static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+ u8 *text, u8 *text_end)
{
- u8 **ptr;
+ const s32 *poff;
if (noreplace_smp)
return;
mutex_lock(&text_mutex);
- for (ptr = start; ptr < end; ptr++) {
- if (*ptr < text)
- continue;
- if (*ptr > text_end)
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
+ if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn lock prefix into DS segment override prefix */
- text_poke(*ptr, ((unsigned char []){0x3E}), 1);
+ if (*ptr == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
};
mutex_unlock(&text_mutex);
}
@@ -276,8 +280,8 @@ struct smp_alt_module {
char *name;
/* ptrs to lock prefixes */
- u8 **locks;
- u8 **locks_end;
+ const s32 *locks;
+ const s32 *locks_end;
/* .text segment, needed to avoid patching init code ;) */
u8 *text;
@@ -398,16 +402,19 @@ void alternatives_smp_switch(int smp)
int alternatives_text_reserved(void *start, void *end)
{
struct smp_alt_module *mod;
- u8 **ptr;
+ const s32 *poff;
u8 *text_start = start;
u8 *text_end = end;
list_for_each_entry(mod, &smp_alt_modules, next) {
if (mod->text > text_end || mod->text_end < text_start)
continue;
- for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
- if (text_start <= *ptr && text_end >= *ptr)
+ for (poff = mod->locks; poff < mod->locks_end; poff++) {
+ const u8 *ptr = (const u8 *)poff + *poff;
+
+ if (text_start <= ptr && text_end > ptr)
return 1;
+ }
}
return 0;
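
The switch from u8 ** to s32 for the .smp_locks entries means the section now stores 32-bit self-relative offsets instead of absolute pointers: each entry is resolved with ptr = (u8 *)poff + *poff, which halves the section on 64-bit kernels and assumes the lock byte lies within +/- 2 GiB of the entry (true for kernel text). A standalone illustration of that encoding, with invented names, is below.

#include <stdint.h>
#include <stdio.h>

static uint8_t text[64];		/* stand-in for kernel .text */
static int32_t smp_locks[1];		/* stand-in for the .smp_locks entries */

int main(void)
{
	uint8_t *lock_byte = &text[10];	/* pretend a lock prefix lives here */

	/* what the build records: target address minus the entry's own address */
	smp_locks[0] = (int32_t)((intptr_t)lock_byte - (intptr_t)&smp_locks[0]);

	/* what alternatives_smp_lock()/unlock() now do to recover the pointer */
	uint8_t *ptr = (uint8_t *)&smp_locks[0] + smp_locks[0];

	printf("recovered %p, expected %p\n", (void *)ptr, (void *)lock_byte);
	return 0;
}
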
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f854d89b7edf..fa5a1474cd18 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -731,18 +731,22 @@ static bool increase_address_space(struct protection_domain *domain,
static u64 *alloc_pte(struct protection_domain *domain,
unsigned long address,
- int end_lvl,
+ unsigned long page_size,
u64 **pte_page,
gfp_t gfp)
{
+ int level, end_lvl;
u64 *pte, *page;
- int level;
+
+ BUG_ON(!is_power_of_2(page_size));
while (address > PM_LEVEL_SIZE(domain->mode))
increase_address_space(domain, gfp);
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ address = PAGE_SIZE_ALIGN(address, page_size);
+ end_lvl = PAGE_SIZE_LEVEL(page_size);
while (level > end_lvl) {
if (!IOMMU_PTE_PRESENT(*pte)) {
@@ -752,6 +756,10 @@ static u64 *alloc_pte(struct protection_domain *domain,
*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
}
+ /* No level skipping support yet */
+ if (PM_PTE_LEVEL(*pte) != level)
+ return NULL;
+
level -= 1;
pte = IOMMU_PTE_PAGE(*pte);
@@ -769,28 +777,47 @@ static u64 *alloc_pte(struct protection_domain *domain,
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
-static u64 *fetch_pte(struct protection_domain *domain,
- unsigned long address, int map_size)
+static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
{
int level;
u64 *pte;
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ if (address > PM_LEVEL_SIZE(domain->mode))
+ return NULL;
+
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
- while (level > map_size) {
+ while (level > 0) {
+
+ /* Not Present */
if (!IOMMU_PTE_PRESENT(*pte))
return NULL;
+ /* Large PTE */
+ if (PM_PTE_LEVEL(*pte) == 0x07) {
+ unsigned long pte_mask, __pte;
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ pte_mask = PTE_PAGE_SIZE(*pte);
+ pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+ __pte = ((unsigned long)pte) & pte_mask;
+
+ return (u64 *)__pte;
+ }
+
+ /* No level skipping support yet */
+ if (PM_PTE_LEVEL(*pte) != level)
+ return NULL;
+
level -= 1;
+ /* Walk to the next level */
pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[PM_LEVEL_INDEX(level, address)];
-
- if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
- pte = NULL;
- break;
- }
}
return pte;
@@ -807,44 +834,84 @@ static int iommu_map_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long phys_addr,
int prot,
- int map_size)
+ unsigned long page_size)
{
u64 __pte, *pte;
-
- bus_addr = PAGE_ALIGN(bus_addr);
- phys_addr = PAGE_ALIGN(phys_addr);
-
- BUG_ON(!PM_ALIGNED(map_size, bus_addr));
- BUG_ON(!PM_ALIGNED(map_size, phys_addr));
+ int i, count;
if (!(prot & IOMMU_PROT_MASK))
return -EINVAL;
- pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
+ bus_addr = PAGE_ALIGN(bus_addr);
+ phys_addr = PAGE_ALIGN(phys_addr);
+ count = PAGE_SIZE_PTE_COUNT(page_size);
+ pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+
+ for (i = 0; i < count; ++i)
+ if (IOMMU_PTE_PRESENT(pte[i]))
+ return -EBUSY;
- if (IOMMU_PTE_PRESENT(*pte))
- return -EBUSY;
+ if (page_size > PAGE_SIZE) {
+ __pte = PAGE_SIZE_PTE(phys_addr, page_size);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
+ } else
+ __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
- __pte = phys_addr | IOMMU_PTE_P;
if (prot & IOMMU_PROT_IR)
__pte |= IOMMU_PTE_IR;
if (prot & IOMMU_PROT_IW)
__pte |= IOMMU_PTE_IW;
- *pte = __pte;
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
update_domain(dom);
return 0;
}
-static void iommu_unmap_page(struct protection_domain *dom,
- unsigned long bus_addr, int map_size)
+static unsigned long iommu_unmap_page(struct protection_domain *dom,
+ unsigned long bus_addr,
+ unsigned long page_size)
{
- u64 *pte = fetch_pte(dom, bus_addr, map_size);
+ unsigned long long unmap_size, unmapped;
+ u64 *pte;
+
+ BUG_ON(!is_power_of_2(page_size));
+
+ unmapped = 0;
- if (pte)
- *pte = 0;
+ while (unmapped < page_size) {
+
+ pte = fetch_pte(dom, bus_addr);
+
+ if (!pte) {
+ /*
+ * No PTE for this address
+ * move forward in 4kb steps
+ */
+ unmap_size = PAGE_SIZE;
+ } else if (PM_PTE_LEVEL(*pte) == 0) {
+ /* 4kb PTE found for this address */
+ unmap_size = PAGE_SIZE;
+ *pte = 0ULL;
+ } else {
+ int count, i;
+
+ /* Large PTE found which maps this address */
+ unmap_size = PTE_PAGE_SIZE(*pte);
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+ }
+
+ bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ BUG_ON(!is_power_of_2(unmapped));
+
+ return unmapped;
}
/*
@@ -878,7 +945,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
for (addr = e->address_start; addr < e->address_end;
addr += PAGE_SIZE) {
ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
- PM_MAP_4k);
+ PAGE_SIZE);
if (ret)
return ret;
/*
@@ -1006,7 +1073,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
u64 *pte, *pte_page;
for (i = 0; i < num_ptes; ++i) {
- pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
+ pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
&pte_page, gfp);
if (!pte)
goto out_free;
@@ -1042,7 +1109,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
for (i = dma_dom->aperture[index]->offset;
i < dma_dom->aperture_size;
i += PAGE_SIZE) {
- u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
+ u64 *pte = fetch_pte(&dma_dom->domain, i);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
@@ -1712,7 +1779,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
if (!pte) {
- pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
+ pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
GFP_ATOMIC);
aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
} else
@@ -2439,12 +2506,11 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return ret;
}
-static int amd_iommu_map_range(struct iommu_domain *dom,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int iommu_prot)
+static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
+ phys_addr_t paddr, int gfp_order, int iommu_prot)
{
+ unsigned long page_size = 0x1000UL << gfp_order;
struct protection_domain *domain = dom->priv;
- unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
int prot = 0;
int ret;
@@ -2453,61 +2519,50 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- iova &= PAGE_MASK;
- paddr &= PAGE_MASK;
-
mutex_lock(&domain->api_lock);
-
- for (i = 0; i < npages; ++i) {
- ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
- if (ret)
- return ret;
-
- iova += PAGE_SIZE;
- paddr += PAGE_SIZE;
- }
-
+ ret = iommu_map_page(domain, iova, paddr, prot, page_size);
mutex_unlock(&domain->api_lock);
- return 0;
+ return ret;
}
-static void amd_iommu_unmap_range(struct iommu_domain *dom,
- unsigned long iova, size_t size)
+static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ int gfp_order)
{
-
struct protection_domain *domain = dom->priv;
- unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);
+ unsigned long page_size, unmap_size;
- iova &= PAGE_MASK;
+ page_size = 0x1000UL << gfp_order;
mutex_lock(&domain->api_lock);
-
- for (i = 0; i < npages; ++i) {
- iommu_unmap_page(domain, iova, PM_MAP_4k);
- iova += PAGE_SIZE;
- }
+ unmap_size = iommu_unmap_page(domain, iova, page_size);
+ mutex_unlock(&domain->api_lock);
iommu_flush_tlb_pde(domain);
- mutex_unlock(&domain->api_lock);
+ return get_order(unmap_size);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
unsigned long iova)
{
struct protection_domain *domain = dom->priv;
- unsigned long offset = iova & ~PAGE_MASK;
+ unsigned long offset_mask;
phys_addr_t paddr;
- u64 *pte;
+ u64 *pte, __pte;
- pte = fetch_pte(domain, iova, PM_MAP_4k);
+ pte = fetch_pte(domain, iova);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
- paddr = *pte & IOMMU_PAGE_MASK;
- paddr |= offset;
+ if (PM_PTE_LEVEL(*pte) == 0)
+ offset_mask = PAGE_SIZE - 1;
+ else
+ offset_mask = PTE_PAGE_SIZE(*pte) - 1;
+
+ __pte = *pte & PM_ADDR_MASK;
+ paddr = (__pte & ~offset_mask) | (iova & offset_mask);
return paddr;
}
@@ -2523,8 +2578,8 @@ static struct iommu_ops amd_iommu_ops = {
.domain_destroy = amd_iommu_domain_destroy,
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map_range,
- .unmap = amd_iommu_unmap_range,
+ .map = amd_iommu_map,
+ .unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
};
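
The reworked .map/.unmap callbacks above take a page order instead of a byte count: the mapped size is 4 KiB shifted left by the order, and unmap reports back the order of what it actually tore down. A minimal stand-alone sketch of that arithmetic (user-space C, 4 KiB base pages assumed; the helper names are hypothetical):

#include <stdio.h>

/* page order -> size in bytes, as in amd_iommu_map() above */
static unsigned long order_to_size(int order)
{
        return 0x1000UL << order;
}

/* size in bytes -> smallest covering page order; roughly mirrors
 * get_order() under this sketch's 4 KiB base-page assumption */
static int size_to_order(unsigned long size)
{
        int order = 0;

        while ((0x1000UL << order) < size)
                order++;
        return order;
}

int main(void)
{
        printf("order 0 -> %lu bytes\n", order_to_size(0));        /* 4096 */
        printf("order 9 -> %lu bytes\n", order_to_size(9));        /* 2 MiB */
        printf("2 MiB   -> order %d\n", size_to_order(2UL << 20)); /* 9 */
        return 0;
}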
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 6360abf993d4..3bacb4d0844c 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -120,6 +120,7 @@ struct ivmd_header {
bool amd_iommu_dump;
static int __initdata amd_iommu_detected;
+static bool __initdata amd_iommu_disabled;
u16 amd_iommu_last_bdf; /* largest PCI device id we have
to handle */
@@ -1372,6 +1373,9 @@ void __init amd_iommu_detect(void)
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
return;
+ if (amd_iommu_disabled)
+ return;
+
if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
iommu_detected = 1;
amd_iommu_detected = 1;
@@ -1401,6 +1405,8 @@ static int __init parse_amd_iommu_options(char *str)
for (; *str; ++str) {
if (strncmp(str, "fullflush", 9) == 0)
amd_iommu_unmap_flush = true;
+ if (strncmp(str, "off", 3) == 0)
+ amd_iommu_disabled = true;
}
return 1;
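
The option parser above scans the amd_iommu= string character by character and latches boolean flags when it sees a known keyword. A tiny stand-alone sketch of the same scan (flag names here are illustrative only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool iommu_disabled;
static bool unmap_flush;

/* walk the option string the way parse_amd_iommu_options() does */
static void parse_options(const char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "fullflush", 9) == 0)
                        unmap_flush = true;
                if (strncmp(str, "off", 3) == 0)
                        iommu_disabled = true;
        }
}

int main(void)
{
        parse_options("off");
        printf("disabled=%d flush=%d\n", iommu_disabled, unmap_flush);
        return 0;
}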
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 03ba1b895f5e..425e53a87feb 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -131,24 +131,6 @@ int es7000_plat;
static unsigned int base;
-static int
-es7000_rename_gsi(int ioapic, int gsi)
-{
- if (es7000_plat == ES7000_ZORRO)
- return gsi;
-
- if (!base) {
- int i;
- for (i = 0; i < nr_ioapics; i++)
- base += nr_ioapic_registers[i];
- }
-
- if (!ioapic && (gsi < 16))
- gsi += base;
-
- return gsi;
-}
-
static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
{
unsigned long vect = 0, psaival = 0;
@@ -190,7 +172,6 @@ static void setup_unisys(void)
es7000_plat = ES7000_ZORRO;
else
es7000_plat = ES7000_CLASSIC;
- ioapic_renumber_irq = es7000_rename_gsi;
}
/*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index eb2789c3f721..33f3563a2a52 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -89,6 +89,9 @@ int nr_ioapics;
/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];
+/* The last gsi number used */
+u32 gsi_end;
+
/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
@@ -1013,10 +1016,9 @@ static inline int irq_trigger(int idx)
return MPBIOS_trigger(idx);
}
-int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
- int irq, i;
+ int irq;
int bus = mp_irqs[idx].srcbus;
/*
@@ -1028,18 +1030,12 @@ static int pin_2_irq(int idx, int apic, int pin)
if (test_bit(bus, mp_bus_not_pci)) {
irq = mp_irqs[idx].srcbusirq;
} else {
- /*
- * PCI IRQs are mapped in order
- */
- i = irq = 0;
- while (i < apic)
- irq += nr_ioapic_registers[i++];
- irq += pin;
- /*
- * For MPS mode, so far only needed by ES7000 platform
- */
- if (ioapic_renumber_irq)
- irq = ioapic_renumber_irq(apic, irq);
+ u32 gsi = mp_gsi_routing[apic].gsi_base + pin;
+
+ if (gsi >= NR_IRQS_LEGACY)
+ irq = gsi;
+ else
+ irq = gsi_end + 1 + gsi;
}
#ifdef CONFIG_X86_32
@@ -1950,20 +1946,8 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
void __init enable_IO_APIC(void)
{
- union IO_APIC_reg_01 reg_01;
int i8259_apic, i8259_pin;
int apic;
- unsigned long flags;
-
- /*
- * The number of IO-APIC IRQ registers (== #pins):
- */
- for (apic = 0; apic < nr_ioapics; apic++) {
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_01.raw = io_apic_read(apic, 1);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- nr_ioapic_registers[apic] = reg_01.bits.entries+1;
- }
if (!legacy_pic->nr_legacy_irqs)
return;
@@ -3858,27 +3842,20 @@ int __init io_apic_get_redir_entries (int ioapic)
reg_01.raw = io_apic_read(ioapic, 1);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- return reg_01.bits.entries;
+ /* The register returns the maximum redirection index
+ * supported, which is one less than the total number of redir
+ * entries.
+ */
+ return reg_01.bits.entries + 1;
}
void __init probe_nr_irqs_gsi(void)
{
- int nr = 0;
+ int nr;
- nr = acpi_probe_gsi();
- if (nr > nr_irqs_gsi) {
+ nr = gsi_end + 1 + NR_IRQS_LEGACY;
+ if (nr > nr_irqs_gsi)
nr_irqs_gsi = nr;
- } else {
- /* for acpi=off or acpi is not compiled in */
- int idx;
-
- nr = 0;
- for (idx = 0; idx < nr_ioapics; idx++)
- nr += io_apic_get_redir_entries(idx) + 1;
-
- if (nr > nr_irqs_gsi)
- nr_irqs_gsi = nr;
- }
printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
@@ -4085,22 +4062,27 @@ int __init io_apic_get_version(int ioapic)
return reg_01.bits.version;
}
-int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
- int i;
+ int ioapic, pin, idx;
if (skip_ioapic_setup)
return -1;
- for (i = 0; i < mp_irq_entries; i++)
- if (mp_irqs[i].irqtype == mp_INT &&
- mp_irqs[i].srcbusirq == bus_irq)
- break;
- if (i >= mp_irq_entries)
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
return -1;
- *trigger = irq_trigger(i);
- *polarity = irq_polarity(i);
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+ if (pin < 0)
+ return -1;
+
+ idx = find_irq_entry(ioapic, pin, mp_INT);
+ if (idx < 0)
+ return -1;
+
+ *trigger = irq_trigger(idx);
+ *polarity = irq_polarity(idx);
return 0;
}
@@ -4241,7 +4223,7 @@ void __init ioapic_insert_resources(void)
}
}
-int mp_find_ioapic(int gsi)
+int mp_find_ioapic(u32 gsi)
{
int i = 0;
@@ -4256,7 +4238,7 @@ int mp_find_ioapic(int gsi)
return -1;
}
-int mp_find_ioapic_pin(int ioapic, int gsi)
+int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
if (WARN_ON(ioapic == -1))
return -1;
@@ -4284,6 +4266,7 @@ static int bad_ioapic(unsigned long address)
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
int idx = 0;
+ int entries;
if (bad_ioapic(address))
return;
@@ -4302,9 +4285,17 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
* Build basic GSI lookup table to facilitate gsi->io_apic lookups
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
+ entries = io_apic_get_redir_entries(idx);
mp_gsi_routing[idx].gsi_base = gsi_base;
- mp_gsi_routing[idx].gsi_end = gsi_base +
- io_apic_get_redir_entries(idx);
+ mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;
+
+ /*
+ * The number of IO-APIC IRQ registers (== #pins):
+ */
+ nr_ioapic_registers[idx] = entries;
+
+ if (mp_gsi_routing[idx].gsi_end > gsi_end)
+ gsi_end = mp_gsi_routing[idx].gsi_end;
printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
"GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c085d52dbaf2..e46f98f36e31 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -735,9 +735,6 @@ void __init uv_system_init(void)
uv_node_to_blade[nid] = blade;
uv_cpu_to_blade[cpu] = blade;
max_pnode = max(pnode, max_pnode);
-
- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
- cpu, apicid, pnode, nid, lcpu, blade);
}
/* Add blade/pnode info for nodes without cpus */
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 031aa887b0eb..c4f9182ca3ac 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1224,7 +1224,7 @@ static void reinit_timer(void)
#ifdef INIT_TIMER_AFTER_SUSPEND
unsigned long flags;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/* set the clock to HZ */
outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
udelay(10);
@@ -1232,7 +1232,7 @@ static void reinit_timer(void)
udelay(10);
outb_pit(LATCH >> 8, PIT_CH0); /* MSB */
udelay(10);
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
#endif
}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index c202b62f3671..3a785da34b6f 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp)
obj-y := intel_cacheinfo.o addon_cpuid_features.o
obj-y += proc.o capflags.o powerflags.o common.o
-obj-y += vmware.o hypervisor.o sched.o
+obj-y += vmware.o hypervisor.o sched.o mshyperv.o
obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
obj-$(CONFIG_X86_64) += bugs_64.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 97ad79cdf688..10fa5684a662 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -30,12 +30,14 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
- { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a },
- { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a },
- { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a },
- { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
+ { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+ { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
+ { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006 },
+ { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007 },
+ { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a },
+ { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a },
+ { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a },
+ { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
{ 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 01a265212395..c39576cb3018 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -86,7 +86,7 @@ static void __init check_fpu(void)
static void __init check_hlt(void)
{
- if (paravirt_enabled())
+ if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
return;
printk(KERN_INFO "Checking 'hlt' instruction... ");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4868e4a951ee..c1c00d0b1692 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1243,10 +1243,7 @@ void __cpuinit cpu_init(void)
/*
* Force FPU initialization:
*/
- if (cpu_has_xsave)
- current_thread_info()->status = TS_XSAVE;
- else
- current_thread_info()->status = 0;
+ current_thread_info()->status = 0;
clear_used_math();
mxcsr_feature_mask_init();
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile
index 1840c0a5170b..bd54bf67e6fb 100644
--- a/arch/x86/kernel/cpu/cpufreq/Makefile
+++ b/arch/x86/kernel/cpu/cpufreq/Makefile
@@ -2,8 +2,8 @@
# K8 systems. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
-obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
-obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 459168083b77..1d3cddaa40ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -46,6 +46,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#include "mperf.h"
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"acpi-cpufreq", msg)
@@ -71,8 +72,6 @@ struct acpi_cpufreq_data {
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
-
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance *acpi_perf_data;
@@ -240,45 +239,6 @@ static u32 get_cur_val(const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_single(), on the target CPU */
-static void read_measured_perf_ctrs(void *_cur)
-{
- struct aperfmperf *am = _cur;
-
- get_aperfmperf(am);
-}
-
-/*
- * Return the measured active (C0) frequency on this CPU since last call
- * to this function.
- * Input: cpu number
- * Return: Average CPU frequency in terms of max frequency (zero on error)
- *
- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
- * over a period of time, while CPU is in C0 state.
- * IA32_MPERF counts at the rate of max advertised frequency
- * IA32_APERF counts at the rate of actual CPU frequency
- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
- * no meaning should be associated with absolute values of these MSRs.
- */
-static unsigned int get_measured_perf(struct cpufreq_policy *policy,
- unsigned int cpu)
-{
- struct aperfmperf perf;
- unsigned long ratio;
- unsigned int retval;
-
- if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
- return 0;
-
- ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
- per_cpu(acfreq_old_perf, cpu) = perf;
-
- retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
-
- return retval;
-}
-
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
@@ -702,7 +662,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* Check for APERF/MPERF support in hardware */
if (cpu_has(c, X86_FEATURE_APERFMPERF))
- acpi_cpufreq_driver.getavg = get_measured_perf;
+ acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
dprintk("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.c b/arch/x86/kernel/cpu/cpufreq/mperf.c
new file mode 100644
index 000000000000..911e193018ae
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.c
@@ -0,0 +1,51 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+
+#include "mperf.h"
+
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
+
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
+{
+ struct aperfmperf *am = _cur;
+
+ get_aperfmperf(am);
+}
+
+/*
+ * Return the measured active (C0) frequency on this CPU since last call
+ * to this function.
+ * Input: cpu number
+ * Return: Average CPU frequency in terms of max frequency (zero on error)
+ *
+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
+ * over a period of time, while CPU is in C0 state.
+ * IA32_MPERF counts at the rate of max advertised frequency
+ * IA32_APERF counts at the rate of actual CPU frequency
+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
+ * no meaning should be associated with absolute values of these MSRs.
+ */
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+ unsigned int cpu)
+{
+ struct aperfmperf perf;
+ unsigned long ratio;
+ unsigned int retval;
+
+ if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
+ return 0;
+
+ ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+ per_cpu(acfreq_old_perf, cpu) = perf;
+
+ retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
+MODULE_LICENSE("GPL");
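
The ratio computed above is the APERF delta over the MPERF delta in fixed point, scaled by the policy's maximum frequency. A user-space sketch of that calculation with made-up counter deltas (the shift constant is an assumption of this sketch):

#include <stdio.h>
#include <stdint.h>

#define APERFMPERF_SHIFT 10     /* assumed fixed-point shift */

/* average C0 frequency from APERF/MPERF deltas, as in
 * cpufreq_get_measured_perf() above */
static unsigned int measured_freq(uint64_t d_aperf, uint64_t d_mperf,
                                  unsigned int max_freq_khz)
{
        uint64_t ratio;

        if (!d_mperf)
                return 0;
        ratio = (d_aperf << APERFMPERF_SHIFT) / d_mperf;
        return (unsigned int)(((uint64_t)max_freq_khz * ratio) >> APERFMPERF_SHIFT);
}

int main(void)
{
        /* CPU ran at roughly 80% of its maximum frequency while in C0 */
        printf("%u kHz\n", measured_freq(800000, 1000000, 3000000));
        return 0;
}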
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.h b/arch/x86/kernel/cpu/cpufreq/mperf.h
new file mode 100644
index 000000000000..5dbf2950dc22
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.h
@@ -0,0 +1,9 @@
+/*
+ * (c) 2010 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ */
+
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+ unsigned int cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b6215b9798e2..6f3dc8fbbfdc 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,6 +1,5 @@
-
/*
- * (c) 2003-2006 Advanced Micro Devices, Inc.
+ * (c) 2003-2010 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
@@ -46,6 +45,7 @@
#define PFX "powernow-k8: "
#define VERSION "version 2.20.00"
#include "powernow-k8.h"
+#include "mperf.h"
/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);
@@ -54,6 +54,12 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON;
+/* core performance boost */
+static bool cpb_capable, cpb_enabled;
+static struct msr __percpu *msrs;
+
+static struct cpufreq_driver cpufreq_amd64_driver;
+
#ifndef CONFIG_SMP
static inline const struct cpumask *cpu_core_mask(int cpu)
{
@@ -1249,6 +1255,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
int rc;
+ struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
if (!cpu_online(pol->cpu))
return -ENODEV;
@@ -1323,6 +1330,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
return -EINVAL;
}
+ /* Check for APERF/MPERF support in hardware */
+ if (cpu_has(c, X86_FEATURE_APERFMPERF))
+ cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
+
cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
if (cpu_family == CPU_HW_PSTATE)
@@ -1394,8 +1405,77 @@ out:
return khz;
}
+static void _cpb_toggle_msrs(bool t)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ if (t)
+ reg->l &= ~BIT(25);
+ else
+ reg->l |= BIT(25);
+ }
+ wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ put_online_cpus();
+}
+
+/*
+ * Switch on/off core performance boosting.
+ *
+ * 0=disable
+ * 1=enable.
+ */
+static void cpb_toggle(bool t)
+{
+ if (!cpb_capable)
+ return;
+
+ if (t && !cpb_enabled) {
+ cpb_enabled = true;
+ _cpb_toggle_msrs(t);
+ printk(KERN_INFO PFX "Core Boosting enabled.\n");
+ } else if (!t && cpb_enabled) {
+ cpb_enabled = false;
+ _cpb_toggle_msrs(t);
+ printk(KERN_INFO PFX "Core Boosting disabled.\n");
+ }
+}
+
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ int ret = -EINVAL;
+ unsigned long val = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (!ret && (val == 0 || val == 1) && cpb_capable)
+ cpb_toggle(val);
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", cpb_enabled);
+}
+
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_one_rw(cpb);
+
static struct freq_attr *powernow_k8_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
+ &cpb,
NULL,
};
@@ -1411,10 +1491,51 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
+/*
+ * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
+ * cannot block the remaining ones from boosting. On the CPU_UP path we
+ * simply keep the boost-disable flag in sync with the current global
+ * state.
+ */
+static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action,
+ void *hcpu)
+{
+ unsigned cpu = (long)hcpu;
+ u32 lo, hi;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+
+ if (!cpb_enabled) {
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ lo |= BIT(25);
+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+ }
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ lo &= ~BIT(25);
+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata cpb_nb = {
+ .notifier_call = cpb_notify,
+};
+
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
- unsigned int i, supported_cpus = 0;
+ unsigned int i, supported_cpus = 0, cpu;
for_each_online_cpu(i) {
int rc;
@@ -1423,15 +1544,36 @@ static int __cpuinit powernowk8_init(void)
supported_cpus++;
}
- if (supported_cpus == num_online_cpus()) {
- printk(KERN_INFO PFX "Found %d %s "
- "processors (%d cpu cores) (" VERSION ")\n",
- num_online_nodes(),
- boot_cpu_data.x86_model_id, supported_cpus);
- return cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (supported_cpus != num_online_cpus())
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
+
+ if (boot_cpu_has(X86_FEATURE_CPB)) {
+
+ cpb_capable = true;
+
+ register_cpu_notifier(&cpb_nb);
+
+ msrs = msrs_alloc();
+ if (!msrs) {
+ printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
+ return -ENOMEM;
+ }
+
+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ cpb_enabled |= !(!!(reg->l & BIT(25)));
+ }
+
+ printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
+ (cpb_enabled ? "on" : "off"));
}
- return -ENODEV;
+ return cpufreq_register_driver(&cpufreq_amd64_driver);
}
/* driver entry point for term */
@@ -1439,6 +1581,13 @@ static void __exit powernowk8_exit(void)
{
dprintk("exit\n");
+ if (boot_cpu_has(X86_FEATURE_CPB)) {
+ msrs_free(msrs);
+ msrs = NULL;
+
+ unregister_cpu_notifier(&cpb_nb);
+ }
+
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
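
Bit 25 of MSR_K7_HWCR is the boost-disable flag, so the toggle paths above clear it to enable Core Performance Boost and set it to disable boosting. A plain-C sketch of just that bit handling (no MSR access; the register value fed in is made up):

#include <stdio.h>
#include <stdint.h>

#define CPB_DISABLE_BIT (1u << 25)      /* boost-disable bit in HWCR */

/* mirror of the per-CPU bit manipulation in _cpb_toggle_msrs() */
static uint32_t toggle_cpb(uint32_t hwcr_lo, int enable)
{
        if (enable)
                hwcr_lo &= ~CPB_DISABLE_BIT;
        else
                hwcr_lo |= CPB_DISABLE_BIT;
        return hwcr_lo;
}

int main(void)
{
        uint32_t lo = CPB_DISABLE_BIT;  /* boost currently disabled */

        lo = toggle_cpb(lo, 1);
        printf("boost %s\n", (lo & CPB_DISABLE_BIT) ? "off" : "on");
        return 0;
}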
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 02ce824073cb..df3529b1c02d 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,7 +5,6 @@
* http://www.gnu.org/licenses/gpl.html
*/
-
enum pstate {
HW_PSTATE_INVALID = 0xff,
HW_PSTATE_0 = 0,
@@ -55,7 +54,6 @@ struct powernow_k8_data {
struct cpumask *available_cores;
};
-
/* processor's cpuid instruction support */
#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
#define CPUID_XFAM 0x0ff00000 /* extended family */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 08be922de33a..dd531cc56a8f 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -21,37 +21,55 @@
*
*/
+#include <linux/module.h>
#include <asm/processor.h>
-#include <asm/vmware.h>
#include <asm/hypervisor.h>
-static inline void __cpuinit
-detect_hypervisor_vendor(struct cpuinfo_x86 *c)
+/*
+ * Hypervisor detection order. This is specified explicitly here because
+ * some hypervisors might implement compatibility modes for other
+ * hypervisors and therefore need to be detected in a specific sequence.
+ */
+static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
- if (vmware_platform())
- c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
- else
- c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
-}
+ &x86_hyper_vmware,
+ &x86_hyper_ms_hyperv,
+};
-static inline void __cpuinit
-hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
+const struct hypervisor_x86 *x86_hyper;
+EXPORT_SYMBOL(x86_hyper);
+
+static inline void __init
+detect_hypervisor_vendor(void)
{
- if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) {
- vmware_set_feature_bits(c);
- return;
+ const struct hypervisor_x86 *h, * const *p;
+
+ for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
+ h = *p;
+ if (h->detect()) {
+ x86_hyper = h;
+ printk(KERN_INFO "Hypervisor detected: %s\n", h->name);
+ break;
+ }
}
}
void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
{
- detect_hypervisor_vendor(c);
- hypervisor_set_feature_bits(c);
+ if (x86_hyper && x86_hyper->set_cpu_features)
+ x86_hyper->set_cpu_features(c);
}
void __init init_hypervisor_platform(void)
{
+
+ detect_hypervisor_vendor();
+
+ if (!x86_hyper)
+ return;
+
init_hypervisor(&boot_cpu_data);
- if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
- vmware_platform_setup();
+
+ if (x86_hyper->init_platform)
+ x86_hyper->init_platform();
}
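
Detection now walks a fixed-order table of candidates and keeps the first one whose detect() callback reports a hit. A minimal stand-alone sketch of that table-driven pattern (the names and callbacks below are illustrative only):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct hv_desc {
        const char *name;
        bool (*detect)(void);
};

static bool detect_a(void) { return false; }
static bool detect_b(void) { return true;  }

/* probed in array order, like the hypervisors[] table above */
static const struct hv_desc hypervisors[] = {
        { "A", detect_a },
        { "B", detect_b },
};

int main(void)
{
        const struct hv_desc *found = NULL;
        size_t i;

        for (i = 0; i < sizeof(hypervisors) / sizeof(hypervisors[0]); i++) {
                if (hypervisors[i].detect()) {
                        found = &hypervisors[i];
                        break;
                }
        }
        printf("Hypervisor detected: %s\n", found ? found->name : "none");
        return 0;
}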
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index f5e5390d3459..85f69cdeae10 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -372,12 +372,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
}
- if (c->cpuid_level > 6) {
- unsigned ecx = cpuid_ecx(6);
- if (ecx & 0x01)
- set_cpu_cap(c, X86_FEATURE_APERFMPERF);
- }
-
if (cpu_has_xmm2)
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
if (cpu_has_ds) {
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index b3eeb66c0a51..33eae2062cf5 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -148,13 +148,19 @@ union _cpuid4_leaf_ecx {
u32 full;
};
+struct amd_l3_cache {
+ struct pci_dev *dev;
+ bool can_disable;
+ unsigned indices;
+ u8 subcaches[4];
+};
+
struct _cpuid4_info {
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- bool can_disable;
- unsigned int l3_indices;
+ struct amd_l3_cache *l3;
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
@@ -164,8 +170,7 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- bool can_disable;
- unsigned int l3_indices;
+ struct amd_l3_cache *l3;
};
unsigned short num_cache_leaves;
@@ -302,87 +307,163 @@ struct _cache_attr {
};
#ifdef CONFIG_CPU_SUP_AMD
-static unsigned int __cpuinit amd_calc_l3_indices(void)
+
+/*
+ * L3 cache descriptors
+ */
+static struct amd_l3_cache **__cpuinitdata l3_caches;
+
+static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
- /*
- * We're called over smp_call_function_single() and therefore
- * are on the correct cpu.
- */
- int cpu = smp_processor_id();
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned int sc0, sc1, sc2, sc3;
u32 val = 0;
- pci_read_config_dword(dev, 0x1C4, &val);
+ pci_read_config_dword(l3->dev, 0x1C4, &val);
/* calculate subcache sizes */
- sc0 = !(val & BIT(0));
- sc1 = !(val & BIT(4));
- sc2 = !(val & BIT(8)) + !(val & BIT(9));
- sc3 = !(val & BIT(12)) + !(val & BIT(13));
+ l3->subcaches[0] = sc0 = !(val & BIT(0));
+ l3->subcaches[1] = sc1 = !(val & BIT(4));
+ l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
+ l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
- return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+ l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
+static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
+{
+ struct amd_l3_cache *l3;
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+
+ l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
+ if (!l3) {
+ printk(KERN_WARNING "Error allocating L3 struct\n");
+ return NULL;
+ }
+
+ l3->dev = dev;
+
+ amd_calc_l3_indices(l3);
+
+ return l3;
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
- if (index < 3)
+ int node;
+
+ if (boot_cpu_data.x86 != 0x10)
return;
- if (boot_cpu_data.x86 == 0x11)
+ if (index < 3)
return;
/* see errata #382 and #388 */
- if ((boot_cpu_data.x86 == 0x10) &&
- ((boot_cpu_data.x86_model < 0x8) ||
- (boot_cpu_data.x86_mask < 0x1)))
+ if (boot_cpu_data.x86_model < 0x8)
+ return;
+
+ if ((boot_cpu_data.x86_model == 0x8 ||
+ boot_cpu_data.x86_model == 0x9)
+ &&
+ boot_cpu_data.x86_mask < 0x1)
+ return;
+
+ /* not in virtualized environments */
+ if (num_k8_northbridges == 0)
return;
- this_leaf->can_disable = true;
- this_leaf->l3_indices = amd_calc_l3_indices();
+ /*
+ * Strictly speaking, the amount in @size below is leaked since it is
+ * never freed but this is done only on shutdown so it doesn't matter.
+ */
+ if (!l3_caches) {
+ int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+
+ l3_caches = kzalloc(size, GFP_ATOMIC);
+ if (!l3_caches)
+ return;
+ }
+
+ node = amd_get_nb_id(smp_processor_id());
+
+ if (!l3_caches[node]) {
+ l3_caches[node] = amd_init_l3_cache(node);
+ l3_caches[node]->can_disable = true;
+ }
+
+ WARN_ON(!l3_caches[node]);
+
+ this_leaf->l3 = l3_caches[node];
}
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int index)
+ unsigned int slot)
{
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = amd_get_nb_id(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
+ struct pci_dev *dev = this_leaf->l3->dev;
unsigned int reg = 0;
- if (!this_leaf->can_disable)
+ if (!this_leaf->l3 || !this_leaf->l3->can_disable)
return -EINVAL;
if (!dev)
return -EINVAL;
- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+ pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
return sprintf(buf, "0x%08x\n", reg);
}
-#define SHOW_CACHE_DISABLE(index) \
+#define SHOW_CACHE_DISABLE(slot) \
static ssize_t \
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \
{ \
- return show_cache_disable(this_leaf, buf, index); \
+ return show_cache_disable(this_leaf, buf, slot); \
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
+static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
+ unsigned slot, unsigned long idx)
+{
+ int i;
+
+ idx |= BIT(30);
+
+ /*
+ * disable index in all 4 subcaches
+ */
+ for (i = 0; i < 4; i++) {
+ u32 reg = idx | (i << 20);
+
+ if (!l3->subcaches[i])
+ continue;
+
+ pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+
+ /*
+ * We need to WBINVD on a core on the node containing the L3
+ * cache which indices we disable therefore a simple wbinvd()
+ * is not sufficient.
+ */
+ wbinvd_on_cpu(cpu);
+
+ reg |= BIT(31);
+ pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+ }
+}
+
+
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
- const char *buf, size_t count, unsigned int index)
+ const char *buf, size_t count,
+ unsigned int slot)
{
+ struct pci_dev *dev = this_leaf->l3->dev;
int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = amd_get_nb_id(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned long val = 0;
#define SUBCACHE_MASK (3UL << 20)
#define SUBCACHE_INDEX 0xfff
- if (!this_leaf->can_disable)
+ if (!this_leaf->l3 || !this_leaf->l3->can_disable)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
@@ -396,26 +477,20 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
/* do not allow writes outside of allowed bits */
if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
- ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+ ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
return -EINVAL;
- val |= BIT(30);
- pci_write_config_dword(dev, 0x1BC + index * 4, val);
- /*
- * We need to WBINVD on a core on the node containing the L3 cache which
- * indices we disable therefore a simple wbinvd() is not sufficient.
- */
- wbinvd_on_cpu(cpu);
- pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+ amd_l3_disable_index(this_leaf->l3, cpu, slot, val);
+
return count;
}
-#define STORE_CACHE_DISABLE(index) \
+#define STORE_CACHE_DISABLE(slot) \
static ssize_t \
-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
const char *buf, size_t count) \
{ \
- return store_cache_disable(this_leaf, buf, count, index); \
+ return store_cache_disable(this_leaf, buf, count, slot); \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
@@ -443,8 +518,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
amd_cpuid4(index, &eax, &ebx, &ecx);
- if (boot_cpu_data.x86 >= 0x10)
- amd_check_l3_disable(index, this_leaf);
+ amd_check_l3_disable(index, this_leaf);
} else {
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
}
@@ -701,6 +775,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
for (i = 0; i < num_cache_leaves; i++)
cache_remove_shared_cpu_map(cpu, i);
+ kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
kfree(per_cpu(ici_cpuid4_info, cpu));
per_cpu(ici_cpuid4_info, cpu) = NULL;
}
@@ -985,7 +1060,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_leaf = CPUID4_INFO_IDX(cpu, i);
- if (this_leaf->can_disable)
+ if (this_leaf->l3 && this_leaf->l3->can_disable)
ktype_cache.default_attrs = default_l3_attrs;
else
ktype_cache.default_attrs = default_attrs;
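
amd_calc_l3_indices() above derives the four subcache sizes from single bits of the L3 configuration register and sizes the index space from the largest of them. A stand-alone sketch of that computation (the register value fed in is made up):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

static unsigned int max4(unsigned int a, unsigned int b,
                         unsigned int c, unsigned int d)
{
        unsigned int m = a > b ? a : b;

        m = m > c ? m : c;
        return m > d ? m : d;
}

/* same subcache-size and index arithmetic as amd_calc_l3_indices() */
static unsigned int calc_l3_indices(uint32_t val)
{
        unsigned int sc0 = !(val & BIT(0));
        unsigned int sc1 = !(val & BIT(4));
        unsigned int sc2 = !(val & BIT(8))  + !(val & BIT(9));
        unsigned int sc3 = !(val & BIT(12)) + !(val & BIT(13));

        return (max4(sc0, sc1, sc2, sc3) << 10) - 1;
}

int main(void)
{
        /* all subcaches present -> 2047 usable indices */
        printf("indices = %u\n", calc_l3_indices(0x0));
        return 0;
}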
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8a6f0afa767e..7a355ddcc64b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -539,7 +539,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
struct mce m;
int i;
- __get_cpu_var(mce_poll_count)++;
+ percpu_inc(mce_poll_count);
mce_setup(&m);
@@ -934,7 +934,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
atomic_inc(&mce_entry);
- __get_cpu_var(mce_exception_count)++;
+ percpu_inc(mce_exception_count);
if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP)
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
new file mode 100644
index 000000000000..16f41bbe46b6
--- /dev/null
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -0,0 +1,55 @@
+/*
+ * HyperV Detection code.
+ *
+ * Copyright (C) 2010, Novell, Inc.
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <asm/hypervisor.h>
+#include <asm/hyperv.h>
+#include <asm/mshyperv.h>
+
+struct ms_hyperv_info ms_hyperv;
+
+static bool __init ms_hyperv_platform(void)
+{
+ u32 eax;
+ u32 hyp_signature[3];
+
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return false;
+
+ cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
+ &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
+
+ return eax >= HYPERV_CPUID_MIN &&
+ eax <= HYPERV_CPUID_MAX &&
+ !memcmp("Microsoft Hv", hyp_signature, 12);
+}
+
+static void __init ms_hyperv_init_platform(void)
+{
+ /*
+ * Extract the features and hints
+ */
+ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
+ ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
+
+ printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints);
+}
+
+const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+ .name = "Microsoft HyperV",
+ .detect = ms_hyperv_platform,
+ .init_platform = ms_hyperv_init_platform,
+};
+EXPORT_SYMBOL(x86_hyper_ms_hyperv);
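
ms_hyperv_platform() recognizes the hypervisor by comparing the 12 signature bytes that CPUID leaf 0x40000000 returns in EBX/ECX/EDX. A user-space sketch of that comparison with the register values hard-coded (little-endian byte order assumed; no CPUID is executed):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* "Microsoft Hv" packed little-endian into EBX, ECX, EDX */
        uint32_t sig[3] = { 0x7263694d, 0x666f736f, 0x76482074 };

        /* same raw 12-byte compare as in ms_hyperv_platform() */
        printf("match: %d\n", !memcmp("Microsoft Hv", sig, 12));
        return 0;
}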
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 424fc8de68e4..ae85d69644d1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -465,15 +465,21 @@ out:
return rc;
}
-static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
+static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
- unsigned long dummy;
+ int overflow = 0;
+ u32 low, high;
- rdmsrl(hwc->config_base + hwc->idx, dummy);
- if (dummy & P4_CCCR_OVF) {
+ rdmsr(hwc->config_base + hwc->idx, low, high);
+
+ /* we need to check high bit for unflagged overflows */
+ if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
+ overflow = 1;
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
- ((u64)dummy) & ~P4_CCCR_OVF);
+ ((u64)low) & ~P4_CCCR_OVF);
}
+
+ return overflow;
}
static inline void p4_pmu_disable_event(struct perf_event *event)
@@ -584,21 +590,15 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
WARN_ON_ONCE(hwc->idx != idx);
- /*
- * FIXME: Redundant call, actually not needed
- * but just to check if we're screwed
- */
- p4_pmu_clear_cccr_ovf(hwc);
+ /* it might be an unflagged overflow */
+ handled = p4_pmu_clear_cccr_ovf(hwc);
val = x86_perf_event_update(event);
- if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
+ if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
continue;
- /*
- * event overflow
- */
- handled = 1;
- data.period = event->hw.last_period;
+ /* event overflow for sure */
+ data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
@@ -670,7 +670,7 @@ static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
/*
* ESCR address hashing is tricky, ESCRs are not sequential
- * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03e0) and
+ * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03a0) and
* the metric between any ESCRs is laid in range [0xa0,0xe1]
*
* so we make ~70% filled hashtable
@@ -735,8 +735,9 @@ static int p4_get_escr_idx(unsigned int addr)
{
unsigned int idx = P4_ESCR_MSR_IDX(addr);
- if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
- !p4_escr_table[idx])) {
+ if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
+ !p4_escr_table[idx] ||
+ p4_escr_table[idx] != addr)) {
WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
return -1;
}
@@ -762,7 +763,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
{
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
- int cpu = raw_smp_processor_id();
+ int cpu = smp_processor_id();
struct hw_perf_event *hwc;
struct p4_event_bind *bind;
unsigned int i, thread, num;
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index dfdb4dba2320..b9d1ff588445 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,8 +24,8 @@
#include <linux/dmi.h>
#include <linux/module.h>
#include <asm/div64.h>
-#include <asm/vmware.h>
#include <asm/x86_init.h>
+#include <asm/hypervisor.h>
#define CPUID_VMWARE_INFO_LEAF 0x40000000
#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
@@ -65,7 +65,7 @@ static unsigned long vmware_get_tsc_khz(void)
return tsc_hz;
}
-void __init vmware_platform_setup(void)
+static void __init vmware_platform_setup(void)
{
uint32_t eax, ebx, ecx, edx;
@@ -83,26 +83,22 @@ void __init vmware_platform_setup(void)
* serial key should be enough, as this will always have a VMware
* specific string when running under VMware hypervisor.
*/
-int vmware_platform(void)
+static bool __init vmware_platform(void)
{
if (cpu_has_hypervisor) {
- unsigned int eax, ebx, ecx, edx;
- char hyper_vendor_id[13];
-
- cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx);
- memcpy(hyper_vendor_id + 0, &ebx, 4);
- memcpy(hyper_vendor_id + 4, &ecx, 4);
- memcpy(hyper_vendor_id + 8, &edx, 4);
- hyper_vendor_id[12] = '\0';
- if (!strcmp(hyper_vendor_id, "VMwareVMware"))
- return 1;
+ unsigned int eax;
+ unsigned int hyper_vendor_id[3];
+
+ cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
+ &hyper_vendor_id[1], &hyper_vendor_id[2]);
+ if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
+ return true;
} else if (dmi_available && dmi_name_in_serial("VMware") &&
__vmware_platform())
- return 1;
+ return true;
- return 0;
+ return false;
}
-EXPORT_SYMBOL(vmware_platform);
/*
* VMware hypervisor takes care of exporting a reliable TSC to the guest.
@@ -116,8 +112,16 @@ EXPORT_SYMBOL(vmware_platform);
* so that the kernel could just trust the hypervisor with providing a
* reliable virtual TSC that is suitable for timekeeping.
*/
-void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c)
+static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
}
+
+const __refconst struct hypervisor_x86 x86_hyper_vmware = {
+ .name = "VMware",
+ .detect = vmware_platform,
+ .set_cpu_features = vmware_set_cpu_features,
+ .init_platform = vmware_platform_setup,
+};
+EXPORT_SYMBOL(x86_hyper_vmware);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 44a8e0dc6737..cd49141cf153 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -53,6 +53,7 @@
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
+#include <asm/cpufeature.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
@@ -905,7 +906,25 @@ ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
pushl $0
CFI_ADJUST_CFA_OFFSET 4
+#ifdef CONFIG_X86_INVD_BUG
+ /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
+661: pushl $do_general_protection
+662:
+.section .altinstructions,"a"
+ .balign 4
+ .long 661b
+ .long 663f
+ .byte X86_FEATURE_XMM
+ .byte 662b-661b
+ .byte 664f-663f
+.previous
+.section .altinstr_replacement,"ax"
+663: pushl $do_simd_coprocessor_error
+664:
+.previous
+#else
pushl $do_simd_coprocessor_error
+#endif
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 54c31c285488..86cef6b32253 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -102,65 +102,62 @@ void __cpuinit fpu_init(void)
mxcsr_feature_mask_init();
/* clean state in init */
- if (cpu_has_xsave)
- current_thread_info()->status = TS_XSAVE;
- else
- current_thread_info()->status = 0;
+ current_thread_info()->status = 0;
clear_used_math();
}
#endif /* CONFIG_X86_64 */
-/*
- * The _current_ task is using the FPU for the first time
- * so initialize it and set the mxcsr to its default
- * value at reset if we support XMM instructions and then
- * remeber the current task has used the FPU.
- */
-int init_fpu(struct task_struct *tsk)
+static void fpu_finit(struct fpu *fpu)
{
- if (tsk_used_math(tsk)) {
- if (HAVE_HWFP && tsk == current)
- unlazy_fpu(tsk);
- return 0;
- }
-
- /*
- * Memory allocation at the first usage of the FPU and other state.
- */
- if (!tsk->thread.xstate) {
- tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
- GFP_KERNEL);
- if (!tsk->thread.xstate)
- return -ENOMEM;
- }
-
#ifdef CONFIG_X86_32
if (!HAVE_HWFP) {
- memset(tsk->thread.xstate, 0, xstate_size);
- finit_task(tsk);
- set_stopped_child_used_math(tsk);
- return 0;
+ finit_soft_fpu(&fpu->state->soft);
+ return;
}
#endif
if (cpu_has_fxsr) {
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
memset(fx, 0, xstate_size);
fx->cwd = 0x37f;
if (cpu_has_xmm)
fx->mxcsr = MXCSR_DEFAULT;
} else {
- struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+ struct i387_fsave_struct *fp = &fpu->state->fsave;
memset(fp, 0, xstate_size);
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;
fp->fos = 0xffff0000u;
}
+}
+
+/*
+ * The _current_ task is using the FPU for the first time
+ * so initialize it and set the mxcsr to its default
+ * value at reset if we support XMM instructions and then
+ * remember the current task has used the FPU.
+ */
+int init_fpu(struct task_struct *tsk)
+{
+ int ret;
+
+ if (tsk_used_math(tsk)) {
+ if (HAVE_HWFP && tsk == current)
+ unlazy_fpu(tsk);
+ return 0;
+ }
+
/*
- * Only the device not available exception or ptrace can call init_fpu.
+ * Memory allocation at the first usage of the FPU and other state.
*/
+ ret = fpu_alloc(&tsk->thread.fpu);
+ if (ret)
+ return ret;
+
+ fpu_finit(&tsk->thread.fpu);
+
set_stopped_child_used_math(tsk);
return 0;
}
@@ -194,7 +191,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -211,19 +208,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (cpu_has_xsave)
- target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
return ret;
}
@@ -246,14 +243,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
* memory layout in the thread struct, so that we can copy the entire
* xstateregs to the user using one user_regset_copyout().
*/
- memcpy(&target->thread.xstate->fxsave.sw_reserved,
+ memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
/*
* Copy the xstate memory layout.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->xsave, 0, -1);
+ &target->thread.fpu.state->xsave, 0, -1);
return ret;
}
@@ -272,14 +269,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->xsave, 0, -1);
+ &target->thread.fpu.state->xsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
- xsave_hdr = &target->thread.xstate->xsave.xsave_hdr;
+ xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
xsave_hdr->xstate_bv &= pcntxt_mask;
/*
@@ -365,7 +362,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
static void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -405,7 +402,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
const struct user_i387_ia32_struct *env)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -445,7 +442,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr) {
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fsave, 0,
+ &target->thread.fpu.state->fsave, 0,
-1);
}
@@ -475,7 +472,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr) {
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fsave, 0, -1);
+ &target->thread.fpu.state->fsave, 0, -1);
}
if (pos > 0 || count < sizeof(env))
@@ -490,7 +487,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
* presence of FP.
*/
if (cpu_has_xsave)
- target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
return ret;
}
@@ -501,7 +498,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+ struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
fp->status = fp->swd;
if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
@@ -512,7 +509,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
struct user_i387_ia32_struct env;
int err = 0;
@@ -547,7 +544,7 @@ static int save_i387_xsave(void __user *buf)
* header as well as change any contents in the memory layout.
* xrestore as part of sigreturn will capture all the changes.
*/
- tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+ tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
if (save_i387_fxsave(fx) < 0)
return -1;
@@ -599,7 +596,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- return __copy_from_user(&tsk->thread.xstate->fsave, buf,
+ return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
sizeof(struct i387_fsave_struct));
}
@@ -610,10 +607,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
struct user_i387_ia32_struct env;
int err;
- err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
+ err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
size);
/* mxcsr reserved bits must be masked to zero for security reasons */
- tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
if (err || __copy_from_user(&env, buf, sizeof(env)))
return 1;
convert_to_fxsr(tsk, &env);
@@ -629,7 +626,7 @@ static int restore_i387_xsave(void __user *buf)
struct i387_fxsave_struct __user *fx =
(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
struct xsave_hdr_struct *xsave_hdr =
- &current->thread.xstate->xsave.xsave_hdr;
+ &current->thread.fpu.state->xsave.xsave_hdr;
u64 mask;
int err;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 23c167925a5c..2dfd31597443 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -16,7 +16,7 @@
#include <asm/hpet.h>
#include <asm/smp.h>
-DEFINE_SPINLOCK(i8253_lock);
+DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
@@ -33,7 +33,7 @@ struct clock_event_device *global_clock_event;
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -62,7 +62,7 @@ static void init_pit_timer(enum clock_event_mode mode,
/* Nothing to do here */
break;
}
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
}
/*
@@ -72,10 +72,10 @@ static void init_pit_timer(enum clock_event_mode mode,
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
outb_pit(delta & 0xff , PIT_CH0); /* LSB */
outb_pit(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
return 0;
}
@@ -130,7 +130,7 @@ static cycle_t pit_read(struct clocksource *cs)
int count;
u32 jifs;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
@@ -176,7 +176,7 @@ static cycle_t pit_read(struct clocksource *cs)
old_count = count;
old_jifs = jifs;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
count = (LATCH - 1) - count;
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 0ed2d300cd46..990ae7cfc578 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
outb(0, 0xF0);
if (ignore_fpu_irq || !boot_cpu_data.hard_math)
return IRQ_NONE;
- math_error((void __user *)get_irq_regs()->ip);
+ math_error(get_irq_regs(), 0, 16);
return IRQ_HANDLED;
}
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index f2f56c0967b6..345a4b1fe144 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -542,20 +542,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
struct kprobe_ctlblk *kcb;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- * Back up over the (now missing) int3 and run
- * the original instruction.
- */
- regs->ip = (unsigned long)addr;
- return 1;
- }
-
/*
* We don't want to be preempted for the entire
* duration of kprobe processing. We conditionally
@@ -587,6 +573,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
setup_singlestep(p, regs, kcb, 0);
return 1;
}
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Back up over the (now missing) int3 and run
+ * the original instruction.
+ */
+ regs->ip = (unsigned long)addr;
+ preempt_enable_no_resched();
+ return 1;
} else if (kprobe_running()) {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index cceb5bc3c3c2..2cd8c544e41a 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -201,9 +201,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
return error;
}
-static int microcode_open(struct inode *unused1, struct file *unused2)
+static int microcode_open(struct inode *inode, struct file *file)
{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}
static ssize_t microcode_write(struct file *file, const char __user *buf,
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 85a343e28937..356170262a93 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -343,10 +343,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
int (*get_ucode_data)(void *, const void *, size_t))
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u8 *ucode_ptr = data, *new_mc = NULL, *mc;
+ u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
enum ucode_state state = UCODE_OK;
+ unsigned int curr_mc_size = 0;
while (leftover) {
struct microcode_header_intel mc_header;
@@ -361,9 +362,15 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
break;
}
- mc = vmalloc(mc_size);
- if (!mc)
- break;
+ /* For performance reasons, reuse mc area when possible */
+ if (!mc || mc_size > curr_mc_size) {
+ if (mc)
+ vfree(mc);
+ mc = vmalloc(mc_size);
+ if (!mc)
+ break;
+ curr_mc_size = mc_size;
+ }
if (get_ucode_data(mc, ucode_ptr, mc_size) ||
microcode_sanity_check(mc) < 0) {
@@ -376,13 +383,16 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
- } else
- vfree(mc);
+ mc = NULL; /* trigger new vmalloc */
+ }
ucode_ptr += mc_size;
leftover -= mc_size;
}
+ if (mc)
+ vfree(mc);
+
if (leftover) {
if (new_mc)
vfree(new_mc);
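The reuse logic above is a generic grow-only scratch-buffer pattern: keep one allocation alive across loop iterations and reallocate only when the next record is larger than the current buffer. A minimal user-space sketch of the same idea, with malloc()/free() standing in for vmalloc()/vfree() and all names illustrative rather than kernel API:

#include <stdlib.h>
#include <string.h>

/* Walk a stream of variable-sized records, reusing one scratch buffer. */
static int process_records(const unsigned char *data, size_t total,
			   size_t (*record_size)(const unsigned char *))
{
	unsigned char *buf = NULL;
	size_t buf_size = 0;

	while (total) {
		size_t rec = record_size(data);

		if (rec == 0 || rec > total)
			break;
		/* grow the scratch buffer only when this record won't fit */
		if (!buf || rec > buf_size) {
			free(buf);
			buf = malloc(rec);
			if (!buf)
				return -1;
			buf_size = rec;
		}
		memcpy(buf, data, rec);
		/* ... validate and consume buf here ... */
		data += rec;
		total -= rec;
	}
	free(buf);
	return 0;
}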
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e81030f71a8f..5ae5d2426edf 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -115,21 +115,6 @@ static void __init MP_bus_info(struct mpc_bus *m)
printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}
-static int bad_ioapic(unsigned long address)
-{
- if (nr_ioapics >= MAX_IO_APICS) {
- printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
- "(found %d)\n", MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS!\n");
- }
- if (!address) {
- printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
- " found in table, skipping!\n");
- return 1;
- }
- return 0;
-}
-
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
if (!(m->flags & MPC_APIC_USABLE))
@@ -138,15 +123,7 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
m->apicid, m->apicver, m->apicaddr);
- if (bad_ioapic(m->apicaddr))
- return;
-
- mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
- mp_ioapics[nr_ioapics].apicid = m->apicid;
- mp_ioapics[nr_ioapics].type = m->type;
- mp_ioapics[nr_ioapics].apicver = m->apicver;
- mp_ioapics[nr_ioapics].flags = m->flags;
- nr_ioapics++;
+ mp_register_ioapic(m->apicid, m->apicaddr, gsi_end + 1);
}
static void print_MP_intsrc_info(struct mpc_intsrc *m)
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
index 0aad8670858e..e796448f0eb5 100644
--- a/arch/x86/kernel/mrst.c
+++ b/arch/x86/kernel/mrst.c
@@ -237,4 +237,9 @@ void __init x86_mrst_early_setup(void)
x86_init.pci.fixup_irqs = x86_init_noop;
legacy_pic = &null_legacy_pic;
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index eccdb57094e3..e7e35219b32f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -31,24 +31,22 @@ struct kmem_cache *task_xstate_cachep;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
+ int ret;
+
*dst = *src;
- if (src->thread.xstate) {
- dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
- GFP_KERNEL);
- if (!dst->thread.xstate)
- return -ENOMEM;
- WARN_ON((unsigned long)dst->thread.xstate & 15);
- memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+ if (fpu_allocated(&src->thread.fpu)) {
+ memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
+ ret = fpu_alloc(&dst->thread.fpu);
+ if (ret)
+ return ret;
+ fpu_copy(&dst->thread.fpu, &src->thread.fpu);
}
return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
- if (tsk->thread.xstate) {
- kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
- tsk->thread.xstate = NULL;
- }
+ fpu_free(&tsk->thread.fpu);
}
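The helpers used here (fpu_allocated(), fpu_alloc(), fpu_copy(), fpu_free()) hide the xstate buffer behind a struct fpu so process.c no longer touches the kmem_cache directly. A rough user-space analogue of that encapsulation, with malloc() in place of the cache and the fixed size an assumption, might look like:

#include <stdlib.h>
#include <string.h>

#define XSTATE_SIZE 512			/* stand-in for the runtime xstate_size */

struct fpu {
	void *state;			/* NULL until first FPU use */
};

static int fpu_allocated(const struct fpu *fpu)
{
	return fpu->state != NULL;
}

static int fpu_alloc(struct fpu *fpu)
{
	fpu->state = malloc(XSTATE_SIZE);
	return fpu->state ? 0 : -1;	/* the kernel helper returns -ENOMEM */
}

static void fpu_copy(struct fpu *dst, const struct fpu *src)
{
	memcpy(dst->state, src->state, XSTATE_SIZE);
}

static void fpu_free(struct fpu *fpu)
{
	free(fpu->state);
	fpu->state = NULL;
}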
void free_thread_info(struct thread_info *ti)
@@ -548,11 +546,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
* check OSVW bit for CPUs that are not affected
* by erratum #400
*/
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
- if (val >= 2) {
- rdmsrl(MSR_AMD64_OSVW_STATUS, val);
- if (!(val & BIT(1)))
- goto no_c1e_idle;
+ if (cpu_has(c, X86_FEATURE_OSVW)) {
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+ if (val >= 2) {
+ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+ if (!(val & BIT(1)))
+ goto no_c1e_idle;
+ }
}
return 1;
}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 75090c589b7a..8d128783af47 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -309,7 +309,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
- prefetch(next->xstate);
+ prefetch(next->fpu.state);
/*
* Reload esp0.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 50cc84ac0a0d..3c2422a99f1f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -388,7 +388,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
- prefetch(next->xstate);
+ prefetch(next->fpu.state);
/*
* Reload esp0, LDT and the page table pointer:
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c
index 34e099382651..7ded57896c0a 100644
--- a/arch/x86/kernel/sfi.c
+++ b/arch/x86/kernel/sfi.c
@@ -81,7 +81,6 @@ static int __init sfi_parse_cpus(struct sfi_table_header *table)
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
-static u32 gsi_base;
static int __init sfi_parse_ioapic(struct sfi_table_header *table)
{
@@ -94,8 +93,7 @@ static int __init sfi_parse_ioapic(struct sfi_table_header *table)
pentry = (struct sfi_apic_table_entry *)sb->pentry;
for (i = 0; i < num; i++) {
- mp_register_ioapic(i, pentry->phys_addr, gsi_base);
- gsi_base += io_apic_get_redir_entries(i);
+ mp_register_ioapic(i, pentry->phys_addr, gsi_end + 1);
pentry++;
}
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 86c9f91b48ae..cc2c60474fd0 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -175,6 +175,9 @@ static void add_mac_region(phys_addr_t start, unsigned long size)
struct tboot_mac_region *mr;
phys_addr_t end = start + size;
+ if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS)
+ panic("tboot: Too many MAC regions\n");
+
if (start && size) {
mr = &tboot->mac_regions[tboot->num_mac_regions++];
mr->start = round_down(start, PAGE_SIZE);
@@ -184,18 +187,17 @@ static void add_mac_region(phys_addr_t start, unsigned long size)
static int tboot_setup_sleep(void)
{
+ int i;
+
tboot->num_mac_regions = 0;
- /* S3 resume code */
- add_mac_region(acpi_wakeup_address, WAKEUP_SIZE);
+ for (i = 0; i < e820.nr_map; i++) {
+ if ((e820.map[i].type != E820_RAM)
+ && (e820.map[i].type != E820_RESERVED_KERN))
+ continue;
-#ifdef CONFIG_X86_TRAMPOLINE
- /* AP trampoline code */
- add_mac_region(virt_to_phys(trampoline_base), TRAMPOLINE_SIZE);
-#endif
-
- /* kernel code + data + bss */
- add_mac_region(virt_to_phys(_text), _end - _text);
+ add_mac_region(e820.map[i].addr, e820.map[i].size);
+ }
tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
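add_mac_region() page-aligns each e820 range (the hunk above shows the start being rounded down with round_down(start, PAGE_SIZE)). For a power-of-two page size those helpers reduce to mask arithmetic; a minimal sketch assuming 4 KiB pages:

#include <stdint.h>

#define PAGE_SIZE 4096ULL		/* assumed x86 page size */

static uint64_t page_round_down(uint64_t x)
{
	return x & ~(PAGE_SIZE - 1);
}

static uint64_t page_round_up(uint64_t x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}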
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 17b03dd3a6b5..7fea555929e2 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -1,7 +1,7 @@
/*
* SGI UltraViolet TLB flush routines.
*
- * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
+ * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
*
* This code is released under the GNU General Public License version 2 or
* later.
@@ -20,42 +20,67 @@
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
+#include <asm/timer.h>
-static struct bau_control **uv_bau_table_bases __read_mostly;
-static int uv_bau_retry_limit __read_mostly;
+struct msg_desc {
+ struct bau_payload_queue_entry *msg;
+ int msg_slot;
+ int sw_ack_slot;
+ struct bau_payload_queue_entry *va_queue_first;
+ struct bau_payload_queue_entry *va_queue_last;
+};
-/* base pnode in this partition */
-static int uv_partition_base_pnode __read_mostly;
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+
+static int uv_bau_max_concurrent __read_mostly;
+
+static int nobau;
+static int __init setup_nobau(char *arg)
+{
+ nobau = 1;
+ return 0;
+}
+early_param("nobau", setup_nobau);
-static unsigned long uv_mmask __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
+/* position of pnode (which is nasid>>1): */
+static int uv_nshift __read_mostly;
+static unsigned long uv_mmask __read_mostly;
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
+static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
+
+struct reset_args {
+ int sender;
+};
/*
- * Determine the first node on a blade.
+ * Determine the first node on a uvhub. 'Nodes' are used for kernel
+ * memory allocation.
*/
-static int __init blade_to_first_node(int blade)
+static int __init uvhub_to_first_node(int uvhub)
{
int node, b;
for_each_online_node(node) {
b = uv_node_to_blade_id(node);
- if (blade == b)
+ if (uvhub == b)
return node;
}
- return -1; /* shouldn't happen */
+ return -1;
}
/*
- * Determine the apicid of the first cpu on a blade.
+ * Determine the apicid of the first cpu on a uvhub.
*/
-static int __init blade_to_first_apicid(int blade)
+static int __init uvhub_to_first_apicid(int uvhub)
{
int cpu;
for_each_present_cpu(cpu)
- if (blade == uv_cpu_to_blade_id(cpu))
+ if (uvhub == uv_cpu_to_blade_id(cpu))
return per_cpu(x86_cpu_to_apicid, cpu);
return -1;
}
@@ -68,195 +93,459 @@ static int __init blade_to_first_apicid(int blade)
* clear of the Timeout bit (as well) will free the resource. No reply will
* be sent (the hardware will only do one reply per message).
*/
-static void uv_reply_to_message(int resource,
- struct bau_payload_queue_entry *msg,
- struct bau_msg_status *msp)
+static inline void uv_reply_to_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
unsigned long dw;
+ struct bau_payload_queue_entry *msg;
- dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
+ msg = mdp->msg;
+ if (!msg->canceled) {
+ dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
+ msg->sw_ack_vector;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
+ }
msg->replied_to = 1;
msg->sw_ack_vector = 0;
- if (msp)
- msp->seen_by.bits = 0;
- uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
/*
- * Do all the things a cpu should do for a TLB shootdown message.
- * Other cpu's may come here at the same time for this message.
+ * Process the receipt of a RETRY message
*/
-static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
- int msg_slot, int sw_ack_slot)
+static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
- unsigned long this_cpu_mask;
- struct bau_msg_status *msp;
- int cpu;
+ int i;
+ int cancel_count = 0;
+ int slot2;
+ unsigned long msg_res;
+ unsigned long mmr = 0;
+ struct bau_payload_queue_entry *msg;
+ struct bau_payload_queue_entry *msg2;
+ struct ptc_stats *stat;
- msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
- cpu = uv_blade_processor_id();
- msg->number_of_cpus =
- uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
- this_cpu_mask = 1UL << cpu;
- if (msp->seen_by.bits & this_cpu_mask)
- return;
- atomic_or_long(&msp->seen_by.bits, this_cpu_mask);
+ msg = mdp->msg;
+ stat = &per_cpu(ptcstats, bcp->cpu);
+ stat->d_retries++;
+ /*
+ * cancel any message from msg+1 to the retry itself
+ */
+ for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
+ if (msg2 > mdp->va_queue_last)
+ msg2 = mdp->va_queue_first;
+ if (msg2 == msg)
+ break;
+
+ /* same conditions for cancellation as uv_do_reset */
+ if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
+ (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
+ msg->sw_ack_vector) == 0) &&
+ (msg2->sending_cpu == msg->sending_cpu) &&
+ (msg2->msg_type != MSG_NOOP)) {
+ slot2 = msg2 - mdp->va_queue_first;
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = ((msg2->sw_ack_vector << 8) |
+ msg2->sw_ack_vector);
+ /*
+ * This is a message retry; clear the resources held
+ * by the previous message only if they timed out.
+ * If it has not timed out we have an unexpected
+ * situation to report.
+ */
+ if (mmr & (msg_res << 8)) {
+ /*
+ * is the resource timed out?
+ * make everyone ignore the cancelled message.
+ */
+ msg2->canceled = 1;
+ stat->d_canceled++;
+ cancel_count++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ (msg_res << 8) | msg_res);
+ } else
+ printk(KERN_INFO "note bau retry: no effect\n");
+ }
+ }
+ if (!cancel_count)
+ stat->d_nocanceled++;
+}
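uv_bau_process_retry_msg() walks the payload queue as a ring: it starts one slot past the retry message, wraps from va_queue_last back to va_queue_first, and stops when it comes back around to the retry itself. The traversal, stripped of the BAU-specific cancellation test (names and size are illustrative):

#define QUEUE_SIZE 64			/* stand-in for DEST_Q_SIZE */

struct entry {
	int canceled;
	/* ... */
};

/* visit every other slot of a ring, starting just after 'start' */
static void scan_ring_after(struct entry *q, int start,
			    void (*visit)(struct entry *))
{
	int i, slot = start;

	for (i = 0; i < QUEUE_SIZE; i++) {
		slot++;
		if (slot >= QUEUE_SIZE)
			slot = 0;	/* wrap to the first slot */
		if (slot == start)
			break;		/* back at the starting message */
		visit(&q[slot]);
	}
}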
- if (msg->replied_to == 1)
- return;
+/*
+ * Do all the things a cpu should do for a TLB shootdown message.
+ * Other cpu's may come here at the same time for this message.
+ */
+static void uv_bau_process_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
+{
+ int msg_ack_count;
+ short socket_ack_count = 0;
+ struct ptc_stats *stat;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *smaster = bcp->socket_master;
+ /*
+ * This must be a normal message, or retry of a normal message
+ */
+ msg = mdp->msg;
+ stat = &per_cpu(ptcstats, bcp->cpu);
if (msg->address == TLB_FLUSH_ALL) {
local_flush_tlb();
- __get_cpu_var(ptcstats).alltlb++;
+ stat->d_alltlb++;
} else {
__flush_tlb_one(msg->address);
- __get_cpu_var(ptcstats).onetlb++;
+ stat->d_onetlb++;
}
+ stat->d_requestee++;
+
+ /*
+ * One cpu on each uvhub has the additional job on a RETRY
+ * of releasing the resource held by the message that is
+ * being retried. That message is identified by sending
+ * cpu number.
+ */
+ if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
+ uv_bau_process_retry_msg(mdp, bcp);
- __get_cpu_var(ptcstats).requestee++;
+ /*
+ * This is a sw_ack message, so we have to reply to it.
+ * Count each responding cpu on the socket. This avoids
+ * pinging the count's cache line back and forth between
+ * the sockets.
+ */
+ socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
+ &smaster->socket_acknowledge_count[mdp->msg_slot]);
+ if (socket_ack_count == bcp->cpus_in_socket) {
+ /*
+ * Both sockets dump their completed count total into
+ * the message's count.
+ */
+ smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
+ msg_ack_count = atomic_add_short_return(socket_ack_count,
+ (struct atomic_short *)&msg->acknowledge_count);
+
+ if (msg_ack_count == bcp->cpus_in_uvhub) {
+ /*
+ * All cpus in uvhub saw it; reply
+ */
+ uv_reply_to_message(mdp, bcp);
+ }
+ }
- atomic_inc_short(&msg->acknowledge_count);
- if (msg->number_of_cpus == msg->acknowledge_count)
- uv_reply_to_message(sw_ack_slot, msg, msp);
+ return;
}
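The two-level acknowledge count above is a cache-contention optimization: cpus first bump a per-socket counter owned by their socket master, and only the last cpu on a socket folds the socket total into the per-message count shared across the hub, so the hot cache line bounces once per socket rather than once per cpu. A simplified sketch with C11 atomics (structure and names are illustrative, not the BAU types):

#include <stdatomic.h>
#include <stdbool.h>

struct socket_ctr { atomic_int acks; };
struct msg_ctr    { atomic_int acks; };

/* returns true when every cpu on the hub has acknowledged the message */
static bool ack_message(struct socket_ctr *sock, struct msg_ctr *msg,
			int cpus_in_socket, int cpus_in_hub)
{
	int sacks = atomic_fetch_add(&sock->acks, 1) + 1;

	if (sacks != cpus_in_socket)
		return false;		/* other cpus on this socket still pending */

	/* last cpu on the socket: fold the socket total into the message */
	atomic_store(&sock->acks, 0);
	return atomic_fetch_add(&msg->acks, sacks) + sacks == cpus_in_hub;
}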
/*
- * Examine the payload queue on one distribution node to see
- * which messages have not been seen, and which cpu(s) have not seen them.
+ * Determine the first cpu on a uvhub.
+ */
+static int uvhub_to_first_cpu(int uvhub)
+{
+ int cpu;
+ for_each_present_cpu(cpu)
+ if (uvhub == uv_cpu_to_blade_id(cpu))
+ return cpu;
+ return -1;
+}
+
+/*
+ * Last resort when we get a large number of destination timeouts is
+ * to clear resources held by a given cpu.
+ * Do this with IPI so that all messages in the BAU message queue
+ * can be identified by their nonzero sw_ack_vector field.
*
- * Returns the number of cpu's that have not responded.
+ * This is entered for a single cpu on the uvhub.
+ * The sender wants this uvhub to free a specific message's
+ * sw_ack resources.
*/
-static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
+static void
+uv_do_reset(void *ptr)
{
- struct bau_payload_queue_entry *msg;
- struct bau_msg_status *msp;
- int count = 0;
int i;
- int j;
+ int slot;
+ int count = 0;
+ unsigned long mmr;
+ unsigned long msg_res;
+ struct bau_control *bcp;
+ struct reset_args *rap;
+ struct bau_payload_queue_entry *msg;
+ struct ptc_stats *stat;
- for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
- msg++, i++) {
- if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
- msp = bau_tablesp->msg_statuses + i;
- printk(KERN_DEBUG
- "blade %d: address:%#lx %d of %d, not cpu(s): ",
- i, msg->address, msg->acknowledge_count,
- msg->number_of_cpus);
- for (j = 0; j < msg->number_of_cpus; j++) {
- if (!((1L << j) & msp->seen_by.bits)) {
- count++;
- printk("%d ", j);
- }
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ rap = (struct reset_args *)ptr;
+ stat = &per_cpu(ptcstats, bcp->cpu);
+ stat->d_resets++;
+
+ /*
+ * We're looking for the given sender, and
+ * will free its sw_ack resource.
+ * If all cpu's finally responded after the timeout, its
+ * message 'replied_to' was set.
+ */
+ for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
+ /* uv_do_reset: same conditions for cancellation as
+ uv_bau_process_retry_msg() */
+ if ((msg->replied_to == 0) &&
+ (msg->canceled == 0) &&
+ (msg->sending_cpu == rap->sender) &&
+ (msg->sw_ack_vector) &&
+ (msg->msg_type != MSG_NOOP)) {
+ /*
+ * make everyone else ignore this message
+ */
+ msg->canceled = 1;
+ slot = msg - bcp->va_queue_first;
+ count++;
+ /*
+ * only reset the resource if it is still pending
+ */
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = ((msg->sw_ack_vector << 8) |
+ msg->sw_ack_vector);
+ if (mmr & msg_res) {
+ stat->d_rcanceled++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ msg_res);
}
- printk("\n");
}
}
- return count;
+ return;
}
/*
- * Examine the payload queue on all the distribution nodes to see
- * which messages have not been seen, and which cpu(s) have not seen them.
- *
- * Returns the number of cpu's that have not responded.
+ * Use IPI to get all target uvhubs to release resources held by
+ * a given sending cpu number.
*/
-static int uv_examine_destinations(struct bau_target_nodemask *distribution)
+static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
+ int sender)
{
- int sender;
- int i;
- int count = 0;
+ int uvhub;
+ int cpu;
+ cpumask_t mask;
+ struct reset_args reset_args;
+
+ reset_args.sender = sender;
- sender = smp_processor_id();
- for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
- if (!bau_node_isset(i, distribution))
+ cpus_clear(mask);
+ /* find a single cpu for each uvhub in this distribution mask */
+ for (uvhub = 0;
+ uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
+ uvhub++) {
+ if (!bau_uvhub_isset(uvhub, distribution))
continue;
- count += uv_examine_destination(uv_bau_table_bases[i], sender);
+ /* find a cpu for this uvhub */
+ cpu = uvhub_to_first_cpu(uvhub);
+ cpu_set(cpu, mask);
}
- return count;
+ /* IPI all cpus; Preemption is already disabled */
+ smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
+ return;
+}
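uv_reset_with_ipi() only needs one representative cpu per target hub, because a single cpu can release that hub's sw_ack resources on behalf of everyone. The selection step reduced to its essentials (cpu_to_hub() and the array sizes are placeholders, not kernel interfaces):

#include <stdbool.h>

#define NR_CPUS 256			/* illustrative limits */
#define NR_HUBS 32

/* mark the first cpu found on each hub that is set in target_hub[] */
static void one_cpu_per_hub(const bool target_hub[NR_HUBS],
			    int (*cpu_to_hub)(int cpu),
			    bool ipi_mask[NR_CPUS])
{
	bool covered[NR_HUBS] = { false };
	int cpu, hub;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		hub = cpu_to_hub(cpu);
		if (hub < 0 || hub >= NR_HUBS)
			continue;
		if (target_hub[hub] && !covered[hub]) {
			covered[hub] = true;
			ipi_mask[cpu] = true;	/* this cpu represents its hub */
		}
	}
}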
+
+static inline unsigned long
+cycles_2_us(unsigned long long cyc)
+{
+ unsigned long long ns;
+ unsigned long us;
+ ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
+ >> CYC2NS_SCALE_FACTOR;
+ us = ns / 1000;
+ return us;
}
/*
- * wait for completion of a broadcast message
- *
- * return COMPLETE, RETRY or GIVEUP
+ * wait for all cpus on this hub to finish their sends and go quiet
+ * leaves uvhub_quiesce set so that no new broadcasts are started by
+ * uv_flush_send_and_wait()
+ */
+static inline void
+quiesce_local_uvhub(struct bau_control *hmaster)
+{
+ atomic_add_short_return(1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * mark this quiet-requestor as done
+ */
+static inline void
+end_uvhub_quiesce(struct bau_control *hmaster)
+{
+ atomic_add_short_return(-1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * Wait for completion of a broadcast software ack message
+ * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
*/
static int uv_wait_completion(struct bau_desc *bau_desc,
- unsigned long mmr_offset, int right_shift)
+ unsigned long mmr_offset, int right_shift, int this_cpu,
+ struct bau_control *bcp, struct bau_control *smaster, long try)
{
- int exams = 0;
- long destination_timeouts = 0;
- long source_timeouts = 0;
+ int relaxes = 0;
unsigned long descriptor_status;
+ unsigned long mmr;
+ unsigned long mask;
+ cycles_t ttime;
+ cycles_t timeout_time;
+ struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
+ struct bau_control *hmaster;
+
+ hmaster = bcp->uvhub_master;
+ timeout_time = get_cycles() + bcp->timeout_interval;
+ /* spin on the status MMR, waiting for it to go idle */
while ((descriptor_status = (((unsigned long)
uv_read_local_mmr(mmr_offset) >>
right_shift) & UV_ACT_STATUS_MASK)) !=
DESC_STATUS_IDLE) {
- if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
- source_timeouts++;
- if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
- source_timeouts = 0;
- __get_cpu_var(ptcstats).s_retry++;
- return FLUSH_RETRY;
- }
/*
- * spin here looking for progress at the destinations
+ * Our software ack messages may be blocked because there are
+ * no swack resources available. As long as none of them
+ * has timed out, hardware will NACK our message and its
+ * state will stay IDLE.
*/
- if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
- destination_timeouts++;
- if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
- /*
- * returns number of cpus not responding
- */
- if (uv_examine_destinations
- (&bau_desc->distribution) == 0) {
- __get_cpu_var(ptcstats).d_retry++;
- return FLUSH_RETRY;
- }
- exams++;
- if (exams >= uv_bau_retry_limit) {
- printk(KERN_DEBUG
- "uv_flush_tlb_others");
- printk("giving up on cpu %d\n",
- smp_processor_id());
+ if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
+ stat->s_stimeout++;
+ return FLUSH_GIVEUP;
+ } else if (descriptor_status ==
+ DESC_STATUS_DESTINATION_TIMEOUT) {
+ stat->s_dtimeout++;
+ ttime = get_cycles();
+
+ /*
+ * Our retries may be blocked by all destination
+ * swack resources being consumed, and a timeout
+ * pending. In that case hardware returns the
+ * ERROR that looks like a destination timeout.
+ */
+ if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_PLUGGED;
+ }
+
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_TIMEOUT;
+ } else {
+ /*
+ * descriptor_status is still BUSY
+ */
+ cpu_relax();
+ relaxes++;
+ if (relaxes >= 10000) {
+ relaxes = 0;
+ if (get_cycles() > timeout_time) {
+ quiesce_local_uvhub(hmaster);
+
+ /* single-thread the register change */
+ spin_lock(&hmaster->masks_lock);
+ mmr = uv_read_local_mmr(mmr_offset);
+ mask = 0UL;
+ mask |= (3UL << right_shift);
+ mask = ~mask;
+ mmr &= mask;
+ uv_write_local_mmr(mmr_offset, mmr);
+ spin_unlock(&hmaster->masks_lock);
+ end_uvhub_quiesce(hmaster);
+ stat->s_busy++;
return FLUSH_GIVEUP;
}
- /*
- * delays can hang the simulator
- udelay(1000);
- */
- destination_timeouts = 0;
}
}
- cpu_relax();
}
+ bcp->conseccompletes++;
return FLUSH_COMPLETE;
}
+static inline cycles_t
+sec_2_cycles(unsigned long sec)
+{
+ unsigned long ns;
+ cycles_t cyc;
+
+ ns = sec * 1000000000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
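cycles_2_us() and sec_2_cycles() (and millisec_2_cycles() further down) are both fixed-point conversions through the per-cpu cyc2ns scale: ns = cycles * cyc2ns >> CYC2NS_SCALE_FACTOR, and the inverse for the other direction. A standalone illustration, assuming a scale factor of 10 and picking a cyc2ns value of 512 (about 0.5 ns/cycle, i.e. a 2 GHz cpu):

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10		/* assumed value of the kernel constant */

static const unsigned long long cyc2ns_scale = 512;	/* ~0.5 ns/cycle in fixed point */

static unsigned long cycles_to_us(unsigned long long cyc)
{
	unsigned long long ns = (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;

	return (unsigned long)(ns / 1000);
}

static unsigned long long sec_to_cycles(unsigned long sec)
{
	unsigned long long ns = sec * 1000000000ULL;

	return (ns << CYC2NS_SCALE_FACTOR) / cyc2ns_scale;
}

int main(void)
{
	/* 2,000,000 cycles at 2 GHz is 1000 us; one second is 2e9 cycles */
	printf("%lu us, %llu cycles\n", cycles_to_us(2000000ULL), sec_to_cycles(1));
	return 0;
}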
+
+/*
+ * conditionally add 1 to *v, unless *v is >= u
+ * return 0 if we cannot add 1 to *v because it is >= u
+ * return 1 if we can add 1 to *v because it is < u
+ * the add is atomic
+ *
+ * This is close to atomic_add_unless(), but this allows the 'u' value
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+{
+ spin_lock(lock);
+ if (atomic_read(v) >= u) {
+ spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+ spin_unlock(lock);
+ return 1;
+}
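atomic_inc_unless_ge() is the admission check for the per-hub throttle used at the top of uv_flush_send_and_wait(): a sender takes a descriptor slot only while the active count is below max_concurrent, and, unlike atomic_add_unless(), the bound can be lowered below the current count at any time (as the FLUSH_RETRY_TIMEOUT path does). A user-space sketch of the same spin-until-admitted pattern, with a pthread spinlock standing in for the uvhub lock:

#include <pthread.h>

struct throttle {
	pthread_spinlock_t lock;
	int active;
	int limit;		/* may be lowered while senders are active */
};

/* take one slot if the throttle is not full; returns 1 on success */
static int throttle_try_enter(struct throttle *t)
{
	int ok = 0;

	pthread_spin_lock(&t->lock);
	if (t->active < t->limit) {
		t->active++;
		ok = 1;
	}
	pthread_spin_unlock(&t->lock);
	return ok;
}

static void throttle_enter(struct throttle *t)
{
	while (!throttle_try_enter(t))
		;			/* spin, as the sender does with cpu_relax() */
}

static void throttle_exit(struct throttle *t)
{
	pthread_spin_lock(&t->lock);
	t->active--;
	pthread_spin_unlock(&t->lock);
}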
+
/**
* uv_flush_send_and_wait
*
- * Send a broadcast and wait for a broadcast message to complete.
+ * Send a broadcast and wait for it to complete.
*
- * The flush_mask contains the cpus the broadcast was sent to.
+ * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * cpus that are on the local uvhub.
*
- * Returns NULL if all remote flushing was done. The mask is zeroed.
+ * Returns NULL if all flushing represented in the mask was done. The mask
+ * is zeroed.
* Returns @flush_mask if some remote flushing remains to be done. The
- * mask will have some bits still set.
+ * mask will have some bits still set, representing any cpus on the local
+ * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
*/
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
- struct bau_desc *bau_desc,
- struct cpumask *flush_mask)
+const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ struct cpumask *flush_mask,
+ struct bau_control *bcp)
{
- int completion_status = 0;
int right_shift;
- int tries = 0;
- int pnode;
+ int uvhub;
int bit;
+ int completion_status = 0;
+ int seq_number = 0;
+ long try = 0;
+ int cpu = bcp->uvhub_cpu;
+ int this_cpu = bcp->cpu;
+ int this_uvhub = bcp->uvhub;
unsigned long mmr_offset;
unsigned long index;
cycles_t time1;
cycles_t time2;
+ struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
+ struct bau_control *smaster = bcp->socket_master;
+ struct bau_control *hmaster = bcp->uvhub_master;
+
+ /*
+ * Spin here while there are hmaster->max_concurrent or more active
+ * descriptors. This is the per-uvhub 'throttle'.
+ */
+ if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_concurrent)) {
+ stat->s_throttles++;
+ do {
+ cpu_relax();
+ } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_concurrent));
+ }
+
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
if (cpu < UV_CPUS_PER_ACT_STATUS) {
mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
@@ -268,24 +557,108 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
}
time1 = get_cycles();
do {
- tries++;
+ /*
+ * Every message from any given cpu gets a unique message
+ * sequence number. But retries use that same number.
+ * Our message may have timed out at the destination because
+ * all sw-ack resources are in use and there is a timeout
+ * pending there. In that case, our last send never got
+ * placed into the queue and we need to persist until it
+ * does.
+ *
+ * Make any retry a type MSG_RETRY so that the destination will
+ * free any resource held by a previous message from this cpu.
+ */
+ if (try == 0) {
+ /* use message type set by the caller the first time */
+ seq_number = bcp->message_number++;
+ } else {
+ /* use RETRY type on all the rest; same sequence */
+ bau_desc->header.msg_type = MSG_RETRY;
+ stat->s_retry_messages++;
+ }
+ bau_desc->header.sequence = seq_number;
index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
- cpu;
+ bcp->uvhub_cpu;
+ bcp->send_message = get_cycles();
+
uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+
+ try++;
completion_status = uv_wait_completion(bau_desc, mmr_offset,
- right_shift);
- } while (completion_status == FLUSH_RETRY);
+ right_shift, this_cpu, bcp, smaster, try);
+
+ if (completion_status == FLUSH_RETRY_PLUGGED) {
+ /*
+ * Our retries may be blocked by all destination swack
+ * resources being consumed, and a timeout pending. In
+ * that case hardware immediately returns the ERROR
+ * that looks like a destination timeout.
+ */
+ udelay(TIMEOUT_DELAY);
+ bcp->plugged_tries++;
+ if (bcp->plugged_tries >= PLUGSB4RESET) {
+ bcp->plugged_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution,
+ this_cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_plug++;
+ }
+ } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
+ hmaster->max_concurrent = 1;
+ bcp->timeout_tries++;
+ udelay(TIMEOUT_DELAY);
+ if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
+ bcp->timeout_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution,
+ this_cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_timeout++;
+ }
+ }
+ if (bcp->ipi_attempts >= 3) {
+ bcp->ipi_attempts = 0;
+ completion_status = FLUSH_GIVEUP;
+ break;
+ }
+ cpu_relax();
+ } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
+ (completion_status == FLUSH_RETRY_TIMEOUT));
time2 = get_cycles();
- __get_cpu_var(ptcstats).sflush += (time2 - time1);
- if (tries > 1)
- __get_cpu_var(ptcstats).retriesok++;
- if (completion_status == FLUSH_GIVEUP) {
+ if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
+ && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
+ hmaster->max_concurrent++;
+
+ /*
+ * hold any cpu not timing out here; no other cpu currently held by
+ * the 'throttle' should enter the activation code
+ */
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
+ atomic_dec(&hmaster->active_descriptor_count);
+
+ /* guard against cycles wrap */
+ if (time2 > time1)
+ stat->s_time += (time2 - time1);
+ else
+ stat->s_requestor--; /* don't count this one */
+ if (completion_status == FLUSH_COMPLETE && try > 1)
+ stat->s_retriesok++;
+ else if (completion_status == FLUSH_GIVEUP) {
/*
* Cause the caller to do an IPI-style TLB shootdown on
- * the cpu's, all of which are still in the mask.
+ * the target cpu's, all of which are still in the mask.
*/
- __get_cpu_var(ptcstats).ptc_i++;
+ stat->s_giveup++;
return flush_mask;
}
@@ -294,18 +667,17 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
* use the IPI method of shootdown on them.
*/
for_each_cpu(bit, flush_mask) {
- pnode = uv_cpu_to_pnode(bit);
- if (pnode == this_pnode)
+ uvhub = uv_cpu_to_blade_id(bit);
+ if (uvhub == this_uvhub)
continue;
cpumask_clear_cpu(bit, flush_mask);
}
if (!cpumask_empty(flush_mask))
return flush_mask;
+
return NULL;
}
-static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
-
/**
* uv_flush_tlb_others - globally purge translation cache of a virtual
* address or all TLB's
@@ -322,8 +694,8 @@ static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
* The caller has derived the cpumask from the mm_struct. This function
* is called only if there are bits set in the mask. (e.g. flush_tlb_page())
*
- * The cpumask is converted into a nodemask of the nodes containing
- * the cpus.
+ * The cpumask is converted into a uvhubmask of the uvhubs containing
+ * those cpus.
*
* Note that this function should be called with preemption disabled.
*
@@ -335,52 +707,82 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va, unsigned int cpu)
{
- struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
- int i;
- int bit;
- int pnode;
- int uv_cpu;
- int this_pnode;
+ int remotes;
+ int tcpu;
+ int uvhub;
int locals = 0;
struct bau_desc *bau_desc;
+ struct cpumask *flush_mask;
+ struct ptc_stats *stat;
+ struct bau_control *bcp;
- cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+ if (nobau)
+ return cpumask;
- uv_cpu = uv_blade_processor_id();
- this_pnode = uv_hub_info->pnode;
- bau_desc = __get_cpu_var(bau_control).descriptor_base;
- bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
+ bcp = &per_cpu(bau_control, cpu);
+ /*
+ * Each sending cpu has a per-cpu mask which it fills from the caller's
+ * cpu mask. Only remote cpus are converted to uvhubs and copied.
+ */
+ flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
+ /*
+ * copy cpumask to flush_mask, removing current cpu
+ * (current cpu should already have been flushed by the caller and
+ * should never be returned if we return flush_mask)
+ */
+ cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+ if (cpu_isset(cpu, *cpumask))
+ locals++; /* current cpu was targeted */
- bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ bau_desc = bcp->descriptor_base;
+ bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
- i = 0;
- for_each_cpu(bit, flush_mask) {
- pnode = uv_cpu_to_pnode(bit);
- BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
- if (pnode == this_pnode) {
+ bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ remotes = 0;
+ for_each_cpu(tcpu, flush_mask) {
+ uvhub = uv_cpu_to_blade_id(tcpu);
+ if (uvhub == bcp->uvhub) {
locals++;
continue;
}
- bau_node_set(pnode - uv_partition_base_pnode,
- &bau_desc->distribution);
- i++;
+ bau_uvhub_set(uvhub, &bau_desc->distribution);
+ remotes++;
}
- if (i == 0) {
+ if (remotes == 0) {
/*
- * no off_node flushing; return status for local node
+ * No off_hub flushing; return status for local hub.
+ * Return the caller's mask if all were local (the current
+ * cpu may be in that mask).
*/
if (locals)
- return flush_mask;
+ return cpumask;
else
return NULL;
}
- __get_cpu_var(ptcstats).requestor++;
- __get_cpu_var(ptcstats).ntargeted += i;
+ stat = &per_cpu(ptcstats, cpu);
+ stat->s_requestor++;
+ stat->s_ntargcpu += remotes;
+ remotes = bau_uvhub_weight(&bau_desc->distribution);
+ stat->s_ntarguvhub += remotes;
+ if (remotes >= 16)
+ stat->s_ntarguvhub16++;
+ else if (remotes >= 8)
+ stat->s_ntarguvhub8++;
+ else if (remotes >= 4)
+ stat->s_ntarguvhub4++;
+ else if (remotes >= 2)
+ stat->s_ntarguvhub2++;
+ else
+ stat->s_ntarguvhub1++;
bau_desc->payload.address = va;
bau_desc->payload.sending_cpu = cpu;
- return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
+ /*
+ * uv_flush_send_and_wait returns null if all cpu's were messaged, or
+ * the adjusted flush_mask if any cpu's were not messaged.
+ */
+ return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
}
/*
@@ -389,87 +791,70 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
*
* We received a broadcast assist message.
*
- * Interrupts may have been disabled; this interrupt could represent
+ * Interrupts are disabled; this interrupt could represent
* the receipt of several messages.
*
- * All cores/threads on this node get this interrupt.
- * The last one to see it does the s/w ack.
+ * All cores/threads on this hub get this interrupt.
+ * The last one to see it does the software ack.
* (the resource will not be freed until noninterruptable cpus see this
- * interrupt; hardware will timeout the s/w ack and reply ERROR)
+ * interrupt; hardware may timeout the s/w ack and reply ERROR)
*/
void uv_bau_message_interrupt(struct pt_regs *regs)
{
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
- struct bau_payload_queue_entry *msg;
- struct pt_regs *old_regs = set_irq_regs(regs);
- cycles_t time1;
- cycles_t time2;
- int msg_slot;
- int sw_ack_slot;
- int fw;
int count = 0;
- unsigned long local_pnode;
-
- ack_APIC_irq();
- exit_idle();
- irq_enter();
-
- time1 = get_cycles();
-
- local_pnode = uv_blade_to_pnode(uv_numa_blade_id());
-
- va_queue_first = __get_cpu_var(bau_control).va_queue_first;
- va_queue_last = __get_cpu_var(bau_control).va_queue_last;
-
- msg = __get_cpu_var(bau_control).bau_msg_head;
+ cycles_t time_start;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *bcp;
+ struct ptc_stats *stat;
+ struct msg_desc msgdesc;
+
+ time_start = get_cycles();
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ stat = &per_cpu(ptcstats, smp_processor_id());
+ msgdesc.va_queue_first = bcp->va_queue_first;
+ msgdesc.va_queue_last = bcp->va_queue_last;
+ msg = bcp->bau_msg_head;
while (msg->sw_ack_vector) {
count++;
- fw = msg->sw_ack_vector;
- msg_slot = msg - va_queue_first;
- sw_ack_slot = ffs(fw) - 1;
-
- uv_bau_process_message(msg, msg_slot, sw_ack_slot);
-
+ msgdesc.msg_slot = msg - msgdesc.va_queue_first;
+ msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
+ msgdesc.msg = msg;
+ uv_bau_process_message(&msgdesc, bcp);
msg++;
- if (msg > va_queue_last)
- msg = va_queue_first;
- __get_cpu_var(bau_control).bau_msg_head = msg;
+ if (msg > msgdesc.va_queue_last)
+ msg = msgdesc.va_queue_first;
+ bcp->bau_msg_head = msg;
}
+ stat->d_time += (get_cycles() - time_start);
if (!count)
- __get_cpu_var(ptcstats).nomsg++;
+ stat->d_nomsg++;
else if (count > 1)
- __get_cpu_var(ptcstats).multmsg++;
-
- time2 = get_cycles();
- __get_cpu_var(ptcstats).dflush += (time2 - time1);
-
- irq_exit();
- set_irq_regs(old_regs);
+ stat->d_multmsg++;
+ ack_APIC_irq();
}
/*
* uv_enable_timeouts
*
- * Each target blade (i.e. blades that have cpu's) needs to have
+ * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
* shootdown message timeouts enabled. The timeout does not cause
* an interrupt, but causes an error message to be returned to
* the sender.
*/
static void uv_enable_timeouts(void)
{
- int blade;
- int nblades;
+ int uvhub;
+ int nuvhubs;
int pnode;
unsigned long mmr_image;
- nblades = uv_num_possible_blades();
+ nuvhubs = uv_num_possible_blades();
- for (blade = 0; blade < nblades; blade++) {
- if (!uv_blade_nr_possible_cpus(blade))
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+ if (!uv_blade_nr_possible_cpus(uvhub))
continue;
- pnode = uv_blade_to_pnode(blade);
+ pnode = uv_blade_to_pnode(uvhub);
mmr_image =
uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
/*
@@ -479,16 +864,16 @@ static void uv_enable_timeouts(void)
* To program the period, the SOFT_ACK_MODE must be off.
*/
mmr_image &= ~((unsigned long)1 <<
- UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
/*
* Set the 4-bit period.
*/
mmr_image &= ~((unsigned long)0xf <<
- UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
- UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
/*
@@ -497,7 +882,7 @@ static void uv_enable_timeouts(void)
* indicated in bits 2:0 (7 causes all of them to timeout).
*/
mmr_image |= ((unsigned long)1 <<
- UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
}
@@ -522,9 +907,20 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
+static inline unsigned long long
+millisec_2_cycles(unsigned long millisec)
+{
+ unsigned long ns;
+ unsigned long long cyc;
+
+ ns = millisec * 1000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
+
/*
- * Display the statistics thru /proc
- * data points to the cpu number
+ * Display the statistics thru /proc.
+ * 'data' points to the cpu number
*/
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
@@ -535,78 +931,155 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
if (!cpu) {
seq_printf(file,
- "# cpu requestor requestee one all sretry dretry ptc_i ");
+ "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
seq_printf(file,
- "sw_ack sflush dflush sok dnomsg dmult starget\n");
+ "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
+ seq_printf(file,
+ "retries rok resetp resett giveup sto bz throt ");
+ seq_printf(file,
+ "sw_ack recv rtime all ");
+ seq_printf(file,
+ "one mult none retry canc nocan reset rcan\n");
}
if (cpu < num_possible_cpus() && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
- seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
- cpu, stat->requestor,
- stat->requestee, stat->onetlb, stat->alltlb,
- stat->s_retry, stat->d_retry, stat->ptc_i);
- seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
+ /* source side statistics */
+ seq_printf(file,
+ "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ cpu, stat->s_requestor, cycles_2_us(stat->s_time),
+ stat->s_ntarguvhub, stat->s_ntarguvhub16,
+ stat->s_ntarguvhub8, stat->s_ntarguvhub4,
+ stat->s_ntarguvhub2, stat->s_ntarguvhub1,
+ stat->s_ntargcpu, stat->s_dtimeout);
+ seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
+ stat->s_retry_messages, stat->s_retriesok,
+ stat->s_resets_plug, stat->s_resets_timeout,
+ stat->s_giveup, stat->s_stimeout,
+ stat->s_busy, stat->s_throttles);
+ /* destination side statistics */
+ seq_printf(file,
+ "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
- stat->sflush, stat->dflush,
- stat->retriesok, stat->nomsg,
- stat->multmsg, stat->ntargeted);
+ stat->d_requestee, cycles_2_us(stat->d_time),
+ stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
+ stat->d_nomsg, stat->d_retries, stat->d_canceled,
+ stat->d_nocanceled, stat->d_resets,
+ stat->d_rcanceled);
}
return 0;
}
/*
+ * -1: reset the statistics
* 0: display meaning of the statistics
- * >0: retry limit
+ * >0: maximum concurrent active descriptors per uvhub (throttle)
*/
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
size_t count, loff_t *data)
{
- long newmode;
+ int cpu;
+ long input_arg;
char optstr[64];
+ struct ptc_stats *stat;
+ struct bau_control *bcp;
if (count == 0 || count > sizeof(optstr))
return -EINVAL;
if (copy_from_user(optstr, user, count))
return -EFAULT;
optstr[count - 1] = '\0';
- if (strict_strtoul(optstr, 10, &newmode) < 0) {
+ if (strict_strtol(optstr, 10, &input_arg) < 0) {
printk(KERN_DEBUG "%s is invalid\n", optstr);
return -EINVAL;
}
- if (newmode == 0) {
+ if (input_arg == 0) {
printk(KERN_DEBUG "# cpu: cpu number\n");
+ printk(KERN_DEBUG "Sender statistics:\n");
+ printk(KERN_DEBUG
+ "sent: number of shootdown messages sent\n");
+ printk(KERN_DEBUG
+ "stime: time spent sending messages\n");
+ printk(KERN_DEBUG
+ "numuvhubs: number of hubs targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "numuvhubs16: number times 16 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs8: number times 8 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs4: number times 4 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs2: number times 2 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs1: number times 1 hub targeted\n");
+ printk(KERN_DEBUG
+ "numcpus: number of cpus targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "dto: number of destination timeouts\n");
+ printk(KERN_DEBUG
+ "retries: destination timeout retries sent\n");
+ printk(KERN_DEBUG
+ "rok: : destination timeouts successfully retried\n");
+ printk(KERN_DEBUG
+ "resetp: ipi-style resource resets for plugs\n");
+ printk(KERN_DEBUG
+ "resett: ipi-style resource resets for timeouts\n");
+ printk(KERN_DEBUG
+ "giveup: fall-backs to ipi-style shootdowns\n");
+ printk(KERN_DEBUG
+ "sto: number of source timeouts\n");
+ printk(KERN_DEBUG
+ "bz: number of stay-busy's\n");
+ printk(KERN_DEBUG
+ "throt: number times spun in throttle\n");
+ printk(KERN_DEBUG "Destination side statistics:\n");
printk(KERN_DEBUG
- "requestor: times this cpu was the flush requestor\n");
+ "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
printk(KERN_DEBUG
- "requestee: times this cpu was requested to flush its TLBs\n");
+ "recv: shootdown messages received\n");
printk(KERN_DEBUG
- "one: times requested to flush a single address\n");
+ "rtime: time spent processing messages\n");
printk(KERN_DEBUG
- "all: times requested to flush all TLB's\n");
+ "all: shootdown all-tlb messages\n");
printk(KERN_DEBUG
- "sretry: number of retries of source-side timeouts\n");
+ "one: shootdown one-tlb messages\n");
printk(KERN_DEBUG
- "dretry: number of retries of destination-side timeouts\n");
+ "mult: interrupts that found multiple messages\n");
printk(KERN_DEBUG
- "ptc_i: times UV fell through to IPI-style flushes\n");
+ "none: interrupts that found no messages\n");
printk(KERN_DEBUG
- "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
+ "retry: number of retry messages processed\n");
printk(KERN_DEBUG
- "sflush_us: cycles spent in uv_flush_tlb_others()\n");
+ "canc: number messages canceled by retries\n");
printk(KERN_DEBUG
- "dflush_us: cycles spent in handling flush requests\n");
- printk(KERN_DEBUG "sok: successes on retry\n");
- printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
+ "nocan: number retries that found nothing to cancel\n");
printk(KERN_DEBUG
- "dmult: interrupts with multiple messages\n");
- printk(KERN_DEBUG "starget: nodes targeted\n");
+ "reset: number of ipi-style reset requests processed\n");
+ printk(KERN_DEBUG
+ "rcan: number messages canceled by reset requests\n");
+ } else if (input_arg == -1) {
+ for_each_present_cpu(cpu) {
+ stat = &per_cpu(ptcstats, cpu);
+ memset(stat, 0, sizeof(struct ptc_stats));
+ }
} else {
- uv_bau_retry_limit = newmode;
- printk(KERN_DEBUG "timeout retry limit:%d\n",
- uv_bau_retry_limit);
+ uv_bau_max_concurrent = input_arg;
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ if (uv_bau_max_concurrent < 1 ||
+ uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
+ printk(KERN_DEBUG
+ "Error: BAU max concurrent %d; %d is invalid\n",
+ bcp->max_concurrent, uv_bau_max_concurrent);
+ return -EINVAL;
+ }
+ printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
+ uv_bau_max_concurrent);
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->max_concurrent = uv_bau_max_concurrent;
+ }
}
return count;
@@ -650,79 +1123,30 @@ static int __init uv_ptc_init(void)
}
/*
- * begin the initialization of the per-blade control structures
- */
-static struct bau_control * __init uv_table_bases_init(int blade, int node)
-{
- int i;
- struct bau_msg_status *msp;
- struct bau_control *bau_tabp;
-
- bau_tabp =
- kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
- BUG_ON(!bau_tabp);
-
- bau_tabp->msg_statuses =
- kmalloc_node(sizeof(struct bau_msg_status) *
- DEST_Q_SIZE, GFP_KERNEL, node);
- BUG_ON(!bau_tabp->msg_statuses);
-
- for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
- bau_cpubits_clear(&msp->seen_by, (int)
- uv_blade_nr_possible_cpus(blade));
-
- uv_bau_table_bases[blade] = bau_tabp;
-
- return bau_tabp;
-}
-
-/*
- * finish the initialization of the per-blade control structures
- */
-static void __init
-uv_table_bases_finish(int blade,
- struct bau_control *bau_tablesp,
- struct bau_desc *adp)
-{
- struct bau_control *bcp;
- int cpu;
-
- for_each_present_cpu(cpu) {
- if (blade != uv_cpu_to_blade_id(cpu))
- continue;
-
- bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
- bcp->bau_msg_head = bau_tablesp->va_queue_first;
- bcp->va_queue_first = bau_tablesp->va_queue_first;
- bcp->va_queue_last = bau_tablesp->va_queue_last;
- bcp->msg_statuses = bau_tablesp->msg_statuses;
- bcp->descriptor_base = adp;
- }
-}
-
-/*
* initialize the sending side's sending buffers
*/
-static struct bau_desc * __init
+static void
uv_activation_descriptor_init(int node, int pnode)
{
int i;
+ int cpu;
unsigned long pa;
unsigned long m;
unsigned long n;
- struct bau_desc *adp;
- struct bau_desc *ad2;
+ struct bau_desc *bau_desc;
+ struct bau_desc *bd2;
+ struct bau_control *bcp;
/*
* each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
- * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade
+ * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
*/
- adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
+ bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
- BUG_ON(!adp);
+ BUG_ON(!bau_desc);
- pa = uv_gpa(adp); /* need the real nasid*/
- n = uv_gpa_to_pnode(pa);
+ pa = uv_gpa(bau_desc); /* need the real nasid*/
+ n = pa >> uv_nshift;
m = pa & uv_mmask;
uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
@@ -731,96 +1155,188 @@ uv_activation_descriptor_init(int node, int pnode)
/*
* initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
* cpu even though we only use the first one; one descriptor can
- * describe a broadcast to 256 nodes.
+ * describe a broadcast to 256 uv hubs.
*/
- for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
- i++, ad2++) {
- memset(ad2, 0, sizeof(struct bau_desc));
- ad2->header.sw_ack_flag = 1;
+ for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
+ i++, bd2++) {
+ memset(bd2, 0, sizeof(struct bau_desc));
+ bd2->header.sw_ack_flag = 1;
/*
- * base_dest_nodeid is the first node in the partition, so
- * the bit map will indicate partition-relative node numbers.
- * note that base_dest_nodeid is actually a nasid.
+ * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+ * in the partition. The bit map will indicate uvhub numbers,
+ * which are 0-N in a partition. Pnodes are unique system-wide.
*/
- ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
- ad2->header.dest_subnodeid = 0x10; /* the LB */
- ad2->header.command = UV_NET_ENDPOINT_INTD;
- ad2->header.int_both = 1;
+ bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+ bd2->header.dest_subnodeid = 0x10; /* the LB */
+ bd2->header.command = UV_NET_ENDPOINT_INTD;
+ bd2->header.int_both = 1;
/*
* all others need to be set to zero:
* fairness chaining multilevel count replied_to
*/
}
- return adp;
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
+ continue;
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->descriptor_base = bau_desc;
+ }
}
/*
* initialize the destination side's receiving buffers
+ * entered for each uvhub in the partition
+ * - node is first node (kernel memory notion) on the uvhub
+ * - pnode is the uvhub's physical identifier
*/
-static struct bau_payload_queue_entry * __init
-uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
+static void
+uv_payload_queue_init(int node, int pnode)
{
- struct bau_payload_queue_entry *pqp;
- unsigned long pa;
int pn;
+ int cpu;
char *cp;
+ unsigned long pa;
+ struct bau_payload_queue_entry *pqp;
+ struct bau_payload_queue_entry *pqp_malloc;
+ struct bau_control *bcp;
pqp = (struct bau_payload_queue_entry *) kmalloc_node(
(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
GFP_KERNEL, node);
BUG_ON(!pqp);
+ pqp_malloc = pqp;
cp = (char *)pqp + 31;
pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
- bau_tablesp->va_queue_first = pqp;
+
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_cpu_to_pnode(cpu))
+ continue;
+ /* for every cpu on this pnode: */
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->va_queue_first = pqp;
+ bcp->bau_msg_head = pqp;
+ bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
+ }
/*
* need the pnode of where the memory was really allocated
*/
pa = uv_gpa(pqp);
- pn = uv_gpa_to_pnode(pa);
+ pn = pa >> uv_nshift;
uv_write_global_mmr64(pnode,
UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
uv_physnodeaddr(pqp));
uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
uv_physnodeaddr(pqp));
- bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
(unsigned long)
- uv_physnodeaddr(bau_tablesp->va_queue_last));
+ uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
+ /* in effect, all msg_type's are set to MSG_NOOP */
memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
-
- return pqp;
}
/*
- * Initialization of each UV blade's structures
+ * Initialization of each UV hub's structures
*/
-static int __init uv_init_blade(int blade)
+static void __init uv_init_uvhub(int uvhub, int vector)
{
int node;
int pnode;
- unsigned long pa;
unsigned long apicid;
- struct bau_desc *adp;
- struct bau_payload_queue_entry *pqp;
- struct bau_control *bau_tablesp;
-
- node = blade_to_first_node(blade);
- bau_tablesp = uv_table_bases_init(blade, node);
- pnode = uv_blade_to_pnode(blade);
- adp = uv_activation_descriptor_init(node, pnode);
- pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
- uv_table_bases_finish(blade, bau_tablesp, adp);
+
+ node = uvhub_to_first_node(uvhub);
+ pnode = uv_blade_to_pnode(uvhub);
+ uv_activation_descriptor_init(node, pnode);
+ uv_payload_queue_init(node, pnode);
/*
* the below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS
*/
- apicid = blade_to_first_apicid(blade);
- pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
+ apicid = uvhub_to_first_apicid(uvhub);
uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
- ((apicid << 32) | UV_BAU_MESSAGE));
- return 0;
+ ((apicid << 32) | vector));
+}
+
+/*
+ * initialize the bau_control structure for each cpu
+ */
+static void uv_init_per_cpu(int nuvhubs)
+{
+ int i, j, k;
+ int cpu;
+ int pnode;
+ int uvhub;
+ short socket = 0;
+ struct bau_control *bcp;
+ struct uvhub_desc *bdp;
+ struct socket_desc *sdp;
+ struct bau_control *hmaster = NULL;
+ struct bau_control *smaster = NULL;
+ struct socket_desc {
+ short num_cpus;
+ short cpu_number[16];
+ };
+ struct uvhub_desc {
+ short num_sockets;
+ short num_cpus;
+ short uvhub;
+ short pnode;
+ struct socket_desc socket[2];
+ };
+ struct uvhub_desc *uvhub_descs;
+
+ uvhub_descs = (struct uvhub_desc *)
+ kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ memset(bcp, 0, sizeof(struct bau_control));
+ spin_lock_init(&bcp->masks_lock);
+ bcp->max_concurrent = uv_bau_max_concurrent;
+ pnode = uv_cpu_hub_info(cpu)->pnode;
+ uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+ bdp = &uvhub_descs[uvhub];
+ bdp->num_cpus++;
+ bdp->uvhub = uvhub;
+ bdp->pnode = pnode;
+ /* time interval to catch a hardware stay-busy bug */
+ bcp->timeout_interval = millisec_2_cycles(3);
+ /* kludge: assume uv_hub.h is constant */
+ socket = (cpu_physical_id(cpu)>>5)&1;
+ if (socket >= bdp->num_sockets)
+ bdp->num_sockets = socket+1;
+ sdp = &bdp->socket[socket];
+ sdp->cpu_number[sdp->num_cpus] = cpu;
+ sdp->num_cpus++;
+ }
+ socket = 0;
+ for_each_possible_blade(uvhub) {
+ bdp = &uvhub_descs[uvhub];
+ for (i = 0; i < bdp->num_sockets; i++) {
+ sdp = &bdp->socket[i];
+ for (j = 0; j < sdp->num_cpus; j++) {
+ cpu = sdp->cpu_number[j];
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->cpu = cpu;
+ if (j == 0) {
+ smaster = bcp;
+ if (i == 0)
+ hmaster = bcp;
+ }
+ bcp->cpus_in_uvhub = bdp->num_cpus;
+ bcp->cpus_in_socket = sdp->num_cpus;
+ bcp->socket_master = smaster;
+ bcp->uvhub_master = hmaster;
+ for (k = 0; k < DEST_Q_SIZE; k++)
+ bcp->socket_acknowledge_count[k] = 0;
+ bcp->uvhub_cpu =
+ uv_cpu_hub_info(cpu)->blade_processor_id;
+ }
+ socket++;
+ }
+ }
+ kfree(uvhub_descs);
}
/*
@@ -828,38 +1344,54 @@ static int __init uv_init_blade(int blade)
*/
static int __init uv_bau_init(void)
{
- int blade;
- int nblades;
+ int uvhub;
+ int pnode;
+ int nuvhubs;
int cur_cpu;
+ int vector;
+ unsigned long mmr;
if (!is_uv_system())
return 0;
+ if (nobau)
+ return 0;
+
for_each_possible_cpu(cur_cpu)
zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
GFP_KERNEL, cpu_to_node(cur_cpu));
- uv_bau_retry_limit = 1;
+ uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
+ uv_nshift = uv_hub_info->m_val;
uv_mmask = (1UL << uv_hub_info->m_val) - 1;
- nblades = uv_num_possible_blades();
+ nuvhubs = uv_num_possible_blades();
- uv_bau_table_bases = (struct bau_control **)
- kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
- BUG_ON(!uv_bau_table_bases);
+ uv_init_per_cpu(nuvhubs);
uv_partition_base_pnode = 0x7fffffff;
- for (blade = 0; blade < nblades; blade++)
- if (uv_blade_nr_possible_cpus(blade) &&
- (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
- uv_partition_base_pnode = uv_blade_to_pnode(blade);
- for (blade = 0; blade < nblades; blade++)
- if (uv_blade_nr_possible_cpus(blade))
- uv_init_blade(blade);
-
- alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++)
+ if (uv_blade_nr_possible_cpus(uvhub) &&
+ (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
+ uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+
+ vector = UV_BAU_MESSAGE;
+ for_each_possible_blade(uvhub)
+ if (uv_blade_nr_possible_cpus(uvhub))
+ uv_init_uvhub(uvhub, vector);
+
uv_enable_timeouts();
+ alloc_intr_gate(vector, uv_bau_message_intr1);
+
+ for_each_possible_blade(uvhub) {
+ pnode = uv_blade_to_pnode(uvhub);
+ /* INIT the bau */
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
+ ((unsigned long)1 << 63));
+ mmr = 1; /* should be 1 to broadcast to both sockets */
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
+ }
return 0;
}
-__initcall(uv_bau_init);
-__initcall(uv_ptc_init);
+core_initcall(uv_bau_init);
+core_initcall(uv_ptc_init);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 36f1bd9f8e76..02cfb9b8f5b1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -108,15 +108,6 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
dec_preempt_count();
}
-#ifdef CONFIG_X86_32
-static inline void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
-{
- if (!user_mode_vm(regs))
- die(str, regs, err);
-}
-#endif
-
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
long error_code, siginfo_t *info)
@@ -585,55 +576,67 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
return;
}
-#ifdef CONFIG_X86_64
-static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-{
- if (fixup_exception(regs))
- return 1;
-
- notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
- /* Illegal floating point operation in the kernel */
- current->thread.trap_no = trapnr;
- die(str, regs, 0);
- return 0;
-}
-#endif
-
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
* IRQ13 behaviour
*/
-void math_error(void __user *ip)
+void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
- struct task_struct *task;
+ struct task_struct *task = current;
siginfo_t info;
- unsigned short cwd, swd, err;
+ unsigned short err;
+ char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+ return;
+ conditional_sti(regs);
+
+ if (!user_mode_vm(regs))
+ {
+ if (!fixup_exception(regs)) {
+ task->thread.error_code = error_code;
+ task->thread.trap_no = trapnr;
+ die(str, regs, error_code);
+ }
+ return;
+ }
/*
* Save the info for the exception handler and clear the error.
*/
- task = current;
save_init_fpu(task);
- task->thread.trap_no = 16;
- task->thread.error_code = 0;
+ task->thread.trap_no = trapnr;
+ task->thread.error_code = error_code;
info.si_signo = SIGFPE;
info.si_errno = 0;
- info.si_addr = ip;
- /*
- * (~cwd & swd) will mask out exceptions that are not set to unmasked
- * status. 0x3f is the exception bits in these regs, 0x200 is the
- * C1 reg you need in case of a stack fault, 0x040 is the stack
- * fault bit. We should only be taking one exception at a time,
- * so if this combination doesn't produce any single exception,
- * then we have a bad program that isn't synchronizing its FPU usage
- * and it will suffer the consequences since we won't be able to
- * fully reproduce the context of the exception
- */
- cwd = get_fpu_cwd(task);
- swd = get_fpu_swd(task);
+ info.si_addr = (void __user *)regs->ip;
+ if (trapnr == 16) {
+ unsigned short cwd, swd;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
- err = swd & ~cwd;
+ err = swd & ~cwd;
+ } else {
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
+ unsigned short mxcsr = get_fpu_mxcsr(task);
+ err = ~(mxcsr >> 7) & mxcsr;
+ }
if (err & 0x001) { /* Invalid op */
/*
@@ -662,97 +665,17 @@ void math_error(void __user *ip)
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
- conditional_sti(regs);
-
#ifdef CONFIG_X86_32
ignore_fpu_irq = 1;
-#else
- if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel x87 math error", 16))
- return;
#endif
- math_error((void __user *)regs->ip);
-}
-
-static void simd_math_error(void __user *ip)
-{
- struct task_struct *task;
- siginfo_t info;
- unsigned short mxcsr;
-
- /*
- * Save the info for the exception handler and clear the error.
- */
- task = current;
- save_init_fpu(task);
- task->thread.trap_no = 19;
- task->thread.error_code = 0;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = __SI_FAULT;
- info.si_addr = ip;
- /*
- * The SIMD FPU exceptions are handled a little differently, as there
- * is only a single status/control register. Thus, to determine which
- * unmasked exception was caught we must mask the exception mask bits
- * at 0x1f80, and then use these to mask the exception bits at 0x3f.
- */
- mxcsr = get_fpu_mxcsr(task);
- switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
- case 0x000:
- default:
- break;
- case 0x001: /* Invalid Op */
- info.si_code = FPE_FLTINV;
- break;
- case 0x002: /* Denormalize */
- case 0x010: /* Underflow */
- info.si_code = FPE_FLTUND;
- break;
- case 0x004: /* Zero Divide */
- info.si_code = FPE_FLTDIV;
- break;
- case 0x008: /* Overflow */
- info.si_code = FPE_FLTOVF;
- break;
- case 0x020: /* Precision */
- info.si_code = FPE_FLTRES;
- break;
- }
- force_sig_info(SIGFPE, &info, task);
+ math_error(regs, error_code, 16);
}
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
- conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
- if (cpu_has_xmm) {
- /* Handle SIMD FPU exceptions on PIII+ processors. */
- ignore_fpu_irq = 1;
- simd_math_error((void __user *)regs->ip);
- return;
- }
- /*
- * Handle strange cache flush from user space exception
- * in all other cases. This is undocumented behaviour.
- */
- if (regs->flags & X86_VM_MASK) {
- handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
- return;
- }
- current->thread.trap_no = 19;
- current->thread.error_code = error_code;
- die_if_kernel("cache flush denied", regs, error_code);
- force_sig(SIGSEGV, current);
-#else
- if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel simd math error", 19))
- return;
- simd_math_error((void __user *)regs->ip);
-#endif
+ math_error(regs, error_code, 19);
}
dotraplinkage void
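The unified math_error() above decodes x87 faults from CWD/SWD and SIMD faults from MXCSR alone. In MXCSR, bits 0-5 are the sticky exception flags and bits 7-12 the corresponding mask bits, so shifting the mask bits down by 7 and inverting them keeps only the flags whose exception is actually unmasked. A minimal sketch of that decode, not part of the patch; demo_simd_err() is a hypothetical helper, and the explicit 0x3f mask here is implicit in the bit tests the patch performs:

/* Returns the unmasked, raised SSE exception flags from MXCSR. */
static unsigned short demo_simd_err(unsigned short mxcsr)
{
	return ~(mxcsr >> 7) & mxcsr & 0x3f;
}

/*
 * Example: the power-on MXCSR value 0x1f80 (everything masked) yields 0.
 * Unmask divide-by-zero (clear bit 9) and let it fire (set bit 2):
 * demo_simd_err(0x1d84) == 0x004, which math_error() maps to FPE_FLTDIV.
 */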
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 1d40336b030a..1132129db792 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -44,7 +44,7 @@ static void uv_ack_apic(unsigned int irq)
ack_APIC_irq();
}
-struct irq_chip uv_irq_chip = {
+static struct irq_chip uv_irq_chip = {
.name = "UV-CORE",
.startup = uv_noop_ret,
.shutdown = uv_noop,
@@ -141,7 +141,7 @@ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
*/
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
- unsigned long mmr_offset, int restrict)
+ unsigned long mmr_offset, int limit)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
struct irq_desc *desc = irq_to_desc(irq);
@@ -160,7 +160,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
if (err != 0)
return err;
- if (restrict == UV_AFFINITY_CPU)
+ if (limit == UV_AFFINITY_CPU)
desc->status |= IRQ_NO_BALANCING;
else
desc->status |= IRQ_MOVE_PCNTXT;
@@ -214,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
unsigned long mmr_offset;
- unsigned mmr_pnode;
+ int mmr_pnode;
if (set_desc_affinity(desc, mask, &dest))
return -1;
@@ -248,7 +248,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
* interrupt is raised.
*/
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
- unsigned long mmr_offset, int restrict)
+ unsigned long mmr_offset, int limit)
{
int irq, ret;
@@ -258,7 +258,7 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
return -EBUSY;
ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
- restrict);
+ limit);
if (ret == irq)
uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
else
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 693920b22496..1b950d151e58 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(init_level4_pgt);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
#endif
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 782c3a362ec6..37e68fc5e24a 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -99,7 +99,7 @@ int save_i387_xstate(void __user *buf)
if (err)
return err;
- if (task_thread_info(tsk)->status & TS_XSAVE)
+ if (use_xsave())
err = xsave_user(buf);
else
err = fxsave_user(buf);
@@ -109,14 +109,14 @@ int save_i387_xstate(void __user *buf)
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
} else {
- if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
+ if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
xstate_size))
return -1;
}
clear_used_math(); /* trigger finit */
- if (task_thread_info(tsk)->status & TS_XSAVE) {
+ if (use_xsave()) {
struct _fpstate __user *fx = buf;
struct _xstate __user *x = buf;
u64 xstate_bv;
@@ -225,7 +225,7 @@ int restore_i387_xstate(void __user *buf)
clts();
task_thread_info(current)->status |= TS_USEDFPU;
}
- if (task_thread_info(tsk)->status & TS_XSAVE)
+ if (use_xsave())
err = restore_user_xstate(buf);
else
err = fxrstor_checking((__force struct i387_fxsave_struct *)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2ba58206812a..737361fcd503 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2067,7 +2067,7 @@ static int cpuid_interception(struct vcpu_svm *svm)
static int iret_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.nmi_window_exits;
- svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
return 1;
}
@@ -2479,7 +2479,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
- svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}
@@ -2539,10 +2539,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK;
- svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
} else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
- svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
}
}
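The 1UL to 1ULL changes matter only where unsigned long is 32 bits wide: vmcb->control.intercept is a 64-bit field, and ~(1UL << INTERCEPT_IRET) built from a 32-bit unsigned long zero-extends when it is ANDed in, silently clearing every intercept bit above bit 31. On an LP64 host the two forms behave identically. A minimal sketch of the difference, not part of the patch; the demo_ helpers are hypothetical:

#include <stdint.h>

/* On a target where unsigned long is 32 bits, the complemented mask loses its upper half. */
static uint64_t demo_clear_bit_narrow(uint64_t intercept, int bit)
{
	return intercept & ~(1UL << bit);	/* upper 32 intercept bits wiped */
}

/* The 1ULL form keeps the complemented mask 64 bits wide on every target. */
static uint64_t demo_clear_bit_wide(uint64_t intercept, int bit)
{
	return intercept & ~(1ULL << bit);
}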
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32022a8a5c3b..edca080407a5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2703,8 +2703,7 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
return 0;
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
- GUEST_INTR_STATE_NMI));
+ (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
}
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 73d854c36e39..dd9bc8fb81ab 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1713,6 +1713,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out_free;
+ vcpu_load(vcpu);
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -1730,6 +1731,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
r = 0;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ vcpu_put(vcpu);
out_free:
vfree(cpuid_entries);
@@ -1750,9 +1752,11 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
+ vcpu_load(vcpu);
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ vcpu_put(vcpu);
return 0;
out:
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index cbaf8f2b83df..f871e04b6965 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,11 +26,12 @@ obj-y += msr.o msr-reg.o msr-reg-export.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
+ lib-y += atomic64_cx8_32.o
lib-y += checksum_32.o
lib-y += strstr_32.o
lib-y += semaphore_32.o string_32.o
ifneq ($(CONFIG_X86_CMPXCHG64),y)
- lib-y += cmpxchg8b_emu.o
+ lib-y += cmpxchg8b_emu.o atomic64_386_32.o
endif
lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else
diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
index 824fa0be55a3..540179e8e9fa 100644
--- a/arch/x86/lib/atomic64_32.c
+++ b/arch/x86/lib/atomic64_32.c
@@ -6,225 +6,54 @@
#include <asm/cmpxchg.h>
#include <asm/atomic.h>
-static noinline u64 cmpxchg8b(u64 *ptr, u64 old, u64 new)
-{
- u32 low = new;
- u32 high = new >> 32;
-
- asm volatile(
- LOCK_PREFIX "cmpxchg8b %1\n"
- : "+A" (old), "+m" (*ptr)
- : "b" (low), "c" (high)
- );
- return old;
-}
-
-u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
-{
- return cmpxchg8b(&ptr->counter, old_val, new_val);
-}
-EXPORT_SYMBOL(atomic64_cmpxchg);
-
-/**
- * atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
- *
- * Atomically xchgs the value of @ptr to @new_val and returns
- * the old value.
- */
-u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
-{
- /*
- * Try first with a (possibly incorrect) assumption about
- * what we have there. We'll do two loops most likely,
- * but we'll get an ownership MESI transaction straight away
- * instead of a read transaction followed by a
- * flush-for-ownership transaction:
- */
- u64 old_val, real_val = 0;
-
- do {
- old_val = real_val;
-
- real_val = atomic64_cmpxchg(ptr, old_val, new_val);
-
- } while (real_val != old_val);
-
- return old_val;
-}
-EXPORT_SYMBOL(atomic64_xchg);
-
-/**
- * atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
- *
- * Atomically sets the value of @ptr to @new_val.
- */
-void atomic64_set(atomic64_t *ptr, u64 new_val)
-{
- atomic64_xchg(ptr, new_val);
-}
-EXPORT_SYMBOL(atomic64_set);
-
-EXPORT_SYMBOL(atomic64_read);
-
-/**
- * atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
- */
-noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
-{
- /*
- * Try first with a (possibly incorrect) assumption about
- * what we have there. We'll do two loops most likely,
- * but we'll get an ownership MESI transaction straight away
- * instead of a read transaction followed by a
- * flush-for-ownership transaction:
- */
- u64 old_val, new_val, real_val = 0;
-
- do {
- old_val = real_val;
- new_val = old_val + delta;
-
- real_val = atomic64_cmpxchg(ptr, old_val, new_val);
-
- } while (real_val != old_val);
-
- return new_val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
-{
- return atomic64_add_return(-delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_sub_return);
-
-u64 atomic64_inc_return(atomic64_t *ptr)
-{
- return atomic64_add_return(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc_return);
-
-u64 atomic64_dec_return(atomic64_t *ptr)
-{
- return atomic64_sub_return(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec_return);
-
-/**
- * atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr.
- */
-void atomic64_add(u64 delta, atomic64_t *ptr)
-{
- atomic64_add_return(delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-/**
- * atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr.
- */
-void atomic64_sub(u64 delta, atomic64_t *ptr)
-{
- atomic64_add(-delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-int atomic64_sub_and_test(u64 delta, atomic64_t *ptr)
-{
- u64 new_val = atomic64_sub_return(delta, ptr);
-
- return new_val == 0;
-}
-EXPORT_SYMBOL(atomic64_sub_and_test);
-
-/**
- * atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
- *
- * Atomically increments @ptr by 1.
- */
-void atomic64_inc(atomic64_t *ptr)
-{
- atomic64_add(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc);
-
-/**
- * atomic64_dec - decrement atomic64 variable
- * @ptr: pointer to type atomic64_t
- *
- * Atomically decrements @ptr by 1.
- */
-void atomic64_dec(atomic64_t *ptr)
-{
- atomic64_sub(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec);
-
-/**
- * atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
- *
- * Atomically decrements @ptr by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-int atomic64_dec_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec_and_test);
-
-/**
- * atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
- *
- * Atomically increments @ptr by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-int atomic64_inc_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(-1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc_and_test);
-
-/**
- * atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-int atomic64_add_negative(u64 delta, atomic64_t *ptr)
-{
- s64 new_val = atomic64_add_return(delta, ptr);
-
- return new_val < 0;
-}
-EXPORT_SYMBOL(atomic64_add_negative);
+long long atomic64_read_cx8(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_read_cx8);
+long long atomic64_set_cx8(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_set_cx8);
+long long atomic64_xchg_cx8(long long, unsigned high);
+EXPORT_SYMBOL(atomic64_xchg_cx8);
+long long atomic64_add_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_return_cx8);
+long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_return_cx8);
+long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_return_cx8);
+long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_return_cx8);
+long long atomic64_dec_if_positive_cx8(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
+int atomic64_inc_not_zero_cx8(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_not_zero_cx8);
+int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u);
+EXPORT_SYMBOL(atomic64_add_unless_cx8);
+
+#ifndef CONFIG_X86_CMPXCHG64
+long long atomic64_read_386(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_read_386);
+long long atomic64_set_386(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_set_386);
+long long atomic64_xchg_386(long long, unsigned high);
+EXPORT_SYMBOL(atomic64_xchg_386);
+long long atomic64_add_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_return_386);
+long long atomic64_sub_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_return_386);
+long long atomic64_inc_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_return_386);
+long long atomic64_dec_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_return_386);
+long long atomic64_add_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_386);
+long long atomic64_sub_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_386);
+long long atomic64_inc_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_386);
+long long atomic64_dec_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_386);
+long long atomic64_dec_if_positive_386(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_if_positive_386);
+int atomic64_inc_not_zero_386(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_not_zero_386);
+int atomic64_add_unless_386(atomic64_t *v, long long a, long long u);
+EXPORT_SYMBOL(atomic64_add_unless_386);
+#endif
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
new file mode 100644
index 000000000000..4a5979aa6883
--- /dev/null
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -0,0 +1,174 @@
+/*
+ * atomic64_t for 386/486
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/dwarf2.h>
+
+/* if you want SMP support, implement these with real spinlocks */
+.macro LOCK reg
+ pushfl
+ CFI_ADJUST_CFA_OFFSET 4
+ cli
+.endm
+
+.macro UNLOCK reg
+ popfl
+ CFI_ADJUST_CFA_OFFSET -4
+.endm
+
+.macro BEGIN func reg
+$v = \reg
+
+ENTRY(atomic64_\func\()_386)
+ CFI_STARTPROC
+ LOCK $v
+
+.macro RETURN
+ UNLOCK $v
+ ret
+.endm
+
+.macro END_
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_386)
+.purgem RETURN
+.purgem END_
+.purgem END
+.endm
+
+.macro END
+RETURN
+END_
+.endm
+.endm
+
+BEGIN read %ecx
+ movl ($v), %eax
+ movl 4($v), %edx
+END
+
+BEGIN set %esi
+ movl %ebx, ($v)
+ movl %ecx, 4($v)
+END
+
+BEGIN xchg %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ movl %ebx, ($v)
+ movl %ecx, 4($v)
+END
+
+BEGIN add %ecx
+ addl %eax, ($v)
+ adcl %edx, 4($v)
+END
+
+BEGIN add_return %ecx
+ addl ($v), %eax
+ adcl 4($v), %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN sub %ecx
+ subl %eax, ($v)
+ sbbl %edx, 4($v)
+END
+
+BEGIN sub_return %ecx
+ negl %edx
+ negl %eax
+ sbbl $0, %edx
+ addl ($v), %eax
+ adcl 4($v), %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN inc %esi
+ addl $1, ($v)
+ adcl $0, 4($v)
+END
+
+BEGIN inc_return %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ addl $1, %eax
+ adcl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN dec %esi
+ subl $1, ($v)
+ sbbl $0, 4($v)
+END
+
+BEGIN dec_return %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN add_unless %ecx
+ addl %eax, %esi
+ adcl %edx, %edi
+ addl ($v), %eax
+ adcl 4($v), %edx
+ cmpl %eax, %esi
+ je 3f
+1:
+ movl %eax, ($v)
+ movl %edx, 4($v)
+ movl $1, %eax
+2:
+RETURN
+3:
+ cmpl %edx, %edi
+ jne 1b
+ xorl %eax, %eax
+ jmp 2b
+END_
+
+BEGIN inc_not_zero %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ testl %eax, %eax
+ je 3f
+1:
+ addl $1, %eax
+ adcl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+ movl $1, %eax
+2:
+RETURN
+3:
+ testl %edx, %edx
+ jne 1b
+ jmp 2b
+END_
+
+BEGIN dec_if_positive %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ js 1f
+ movl %eax, ($v)
+ movl %edx, 4($v)
+1:
+END
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
new file mode 100644
index 000000000000..71e080de3352
--- /dev/null
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -0,0 +1,224 @@
+/*
+ * atomic64_t for 586+
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/dwarf2.h>
+
+.macro SAVE reg
+ pushl %\reg
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET \reg, 0
+.endm
+
+.macro RESTORE reg
+ popl %\reg
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE \reg
+.endm
+
+.macro read64 reg
+ movl %ebx, %eax
+ movl %ecx, %edx
+/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
+ LOCK_PREFIX
+ cmpxchg8b (\reg)
+.endm
+
+ENTRY(atomic64_read_cx8)
+ CFI_STARTPROC
+
+ read64 %ecx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_read_cx8)
+
+ENTRY(atomic64_set_cx8)
+ CFI_STARTPROC
+
+1:
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
+ * are atomic on 586 and newer */
+ cmpxchg8b (%esi)
+ jne 1b
+
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_set_cx8)
+
+ENTRY(atomic64_xchg_cx8)
+ CFI_STARTPROC
+
+ movl %ebx, %eax
+ movl %ecx, %edx
+1:
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_xchg_cx8)
+
+.macro addsub_return func ins insc
+ENTRY(atomic64_\func\()_return_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+ SAVE esi
+ SAVE edi
+
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %ecx, %ebp
+
+ read64 %ebp
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ \ins\()l %esi, %ebx
+ \insc\()l %edi, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+
+10:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE edi
+ RESTORE esi
+ RESTORE ebx
+ RESTORE ebp
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_return_cx8)
+.endm
+
+addsub_return add add adc
+addsub_return sub sub sbb
+
+.macro incdec_return func ins insc
+ENTRY(atomic64_\func\()_return_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ \ins\()l $1, %ebx
+ \insc\()l $0, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+10:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_return_cx8)
+.endm
+
+incdec_return inc add adc
+incdec_return dec sub sbb
+
+ENTRY(atomic64_dec_if_positive_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ subl $1, %ebx
+ sbb $0, %ecx
+ js 2f
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+2:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_dec_if_positive_cx8)
+
+ENTRY(atomic64_add_unless_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+/* these just push these two parameters on the stack */
+ SAVE edi
+ SAVE esi
+
+ movl %ecx, %ebp
+ movl %eax, %esi
+ movl %edx, %edi
+
+ read64 %ebp
+1:
+ cmpl %eax, 0(%esp)
+ je 4f
+2:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl %esi, %ebx
+ adcl %edi, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+
+ movl $1, %eax
+3:
+ addl $8, %esp
+ CFI_ADJUST_CFA_OFFSET -8
+ RESTORE ebx
+ RESTORE ebp
+ ret
+4:
+ cmpl %edx, 4(%esp)
+ jne 2b
+ xorl %eax, %eax
+ jmp 3b
+ CFI_ENDPROC
+ENDPROC(atomic64_add_unless_cx8)
+
+ENTRY(atomic64_inc_not_zero_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ testl %eax, %eax
+ je 4f
+2:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl $1, %ebx
+ adcl $0, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+ movl $1, %eax
+3:
+ RESTORE ebx
+ ret
+4:
+ testl %edx, %edx
+ jne 2b
+ jmp 3b
+ CFI_ENDPROC
+ENDPROC(atomic64_inc_not_zero_cx8)
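Every cx8 routine above follows the same retry pattern: read the current 64-bit value, build the desired value in ebx:ecx, and repeat the locked cmpxchg8b until no other CPU changed the counter in between. A rough C equivalent of the add_return variant, not part of the patch; it leans on the GCC __sync builtin and, unlike the read64 macro, reads the old value with a plain (non-atomic) load:

static long long demo_add_return(long long delta, volatile long long *v)
{
	long long old_val, new_val;

	do {
		old_val = *v;			/* the asm uses cmpxchg8b to read */
		new_val = old_val + delta;
	} while (!__sync_bool_compare_and_swap(v, old_val, new_val));

	return new_val;
}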
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
index aa0987088774..dc8adad10a2f 100644
--- a/arch/x86/math-emu/fpu_aux.c
+++ b/arch/x86/math-emu/fpu_aux.c
@@ -30,10 +30,10 @@ static void fclex(void)
}
/* Needs to be externally visible */
-void finit_task(struct task_struct *tsk)
+void finit_soft_fpu(struct i387_soft_struct *soft)
{
- struct i387_soft_struct *soft = &tsk->thread.xstate->soft;
struct address *oaddr, *iaddr;
+ memset(soft, 0, sizeof(*soft));
soft->cwd = 0x037f;
soft->swd = 0;
soft->ftop = 0; /* We don't keep top in the status word internally. */
@@ -52,7 +52,7 @@ void finit_task(struct task_struct *tsk)
void finit(void)
{
- finit_task(current);
+ finit_soft_fpu(&current->thread.fpu.state->soft);
}
/*
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 5d87f586f8d7..7718541541d4 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -681,7 +681,7 @@ int fpregs_soft_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+ struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
void *space = s387->st_space;
int ret;
int offset, other, i, tags, regnr, tag, newtop;
@@ -733,7 +733,7 @@ int fpregs_soft_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+ struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
const void *space = s387->st_space;
int ret;
int offset = (S387->ftop & 7) * 10, other = 80 - offset;
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 50fa0ec2c8a5..2c614410a5f3 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -31,7 +31,7 @@
#define SEG_EXPAND_DOWN(s) (((s).b & ((1 << 11) | (1 << 10))) \
== (1 << 10))
-#define I387 (current->thread.xstate)
+#define I387 (current->thread.fpu.state)
#define FPU_info (I387->soft.info)
#define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs))
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 06630d26e56d..a4c768397baa 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -6,6 +6,7 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_physaddr.o := $(nostackp)
CFLAGS_setup_nx.o := $(nostackp)
+obj-$(CONFIG_X86_PAT) += pat_rbtree.o
obj-$(CONFIG_SMP) += tlb.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index edc8b95afc1a..bbe5502ee1cb 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,6 +30,8 @@
#include <asm/pat.h>
#include <asm/io.h>
+#include "pat_internal.h"
+
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;
@@ -53,19 +55,15 @@ static inline void pat_disable(const char *reason)
#endif
-static int debug_enable;
+int pat_debug_enable;
static int __init pat_debug_setup(char *str)
{
- debug_enable = 1;
+ pat_debug_enable = 1;
return 0;
}
__setup("debugpat", pat_debug_setup);
-#define dprintk(fmt, arg...) \
- do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
-
-
static u64 __read_mostly boot_pat_state;
enum {
@@ -132,84 +130,7 @@ void pat_init(void)
#undef PAT
-static char *cattr_name(unsigned long flags)
-{
- switch (flags & _PAGE_CACHE_MASK) {
- case _PAGE_CACHE_UC: return "uncached";
- case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
- case _PAGE_CACHE_WB: return "write-back";
- case _PAGE_CACHE_WC: return "write-combining";
- default: return "broken";
- }
-}
-
-/*
- * The global memtype list keeps track of memory type for specific
- * physical memory areas. Conflicting memory types in different
- * mappings can cause CPU cache corruption. To avoid this we keep track.
- *
- * The list is sorted based on starting address and can contain multiple
- * entries for each address (this allows reference counting for overlapping
- * areas). All the aliases have the same cache attributes of course.
- * Zero attributes are represented as holes.
- *
- * The data structure is a list that is also organized as an rbtree
- * sorted on the start address of memtype range.
- *
- * memtype_lock protects both the linear list and rbtree.
- */
-
-struct memtype {
- u64 start;
- u64 end;
- unsigned long type;
- struct list_head nd;
- struct rb_node rb;
-};
-
-static struct rb_root memtype_rbroot = RB_ROOT;
-static LIST_HEAD(memtype_list);
-static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
-
-static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
-{
- struct rb_node *node = root->rb_node;
- struct memtype *last_lower = NULL;
-
- while (node) {
- struct memtype *data = container_of(node, struct memtype, rb);
-
- if (data->start < start) {
- last_lower = data;
- node = node->rb_right;
- } else if (data->start > start) {
- node = node->rb_left;
- } else
- return data;
- }
-
- /* Will return NULL if there is no entry with its start <= start */
- return last_lower;
-}
-
-static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
-{
- struct rb_node **new = &(root->rb_node);
- struct rb_node *parent = NULL;
-
- while (*new) {
- struct memtype *this = container_of(*new, struct memtype, rb);
-
- parent = *new;
- if (data->start <= this->start)
- new = &((*new)->rb_left);
- else if (data->start > this->start)
- new = &((*new)->rb_right);
- }
-
- rb_link_node(&data->rb, parent, new);
- rb_insert_color(&data->rb, root);
-}
+static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
/*
* Does intersection of PAT memory type and MTRR memory type and returns
@@ -237,33 +158,6 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
return req_type;
}
-static int
-chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
-{
- if (new->type != entry->type) {
- if (type) {
- new->type = entry->type;
- *type = entry->type;
- } else
- goto conflict;
- }
-
- /* check overlaps with more than one entry in the list */
- list_for_each_entry_continue(entry, &memtype_list, nd) {
- if (new->end <= entry->start)
- break;
- else if (new->type != entry->type)
- goto conflict;
- }
- return 0;
-
- conflict:
- printk(KERN_INFO "%s:%d conflicting memory types "
- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
- new->end, cattr_name(new->type), cattr_name(entry->type));
- return -EBUSY;
-}
-
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
int ram_page = 0, not_rampage = 0;
@@ -296,8 +190,6 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
* Here we do two pass:
* - Find the memtype of all the pages in the range, look for any conflicts
* - In case of no conflicts, set the new memtype for pages in the range
- *
- * Caller must hold memtype_lock for atomicity.
*/
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
unsigned long *new_type)
@@ -364,9 +256,8 @@ static int free_ram_pages_type(u64 start, u64 end)
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
unsigned long *new_type)
{
- struct memtype *new, *entry;
+ struct memtype *new;
unsigned long actual_type;
- struct list_head *where;
int is_range_ram;
int err = 0;
@@ -404,9 +295,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1) {
- spin_lock(&memtype_lock);
err = reserve_ram_pages_type(start, end, req_type, new_type);
- spin_unlock(&memtype_lock);
return err;
} else if (is_range_ram < 0) {
@@ -423,42 +312,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_lock(&memtype_lock);
- /* Search for existing mapping that overlaps the current range */
- where = NULL;
- list_for_each_entry(entry, &memtype_list, nd) {
- if (end <= entry->start) {
- where = entry->nd.prev;
- break;
- } else if (start <= entry->start) { /* end > entry->start */
- err = chk_conflict(new, entry, new_type);
- if (!err) {
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
- where = entry->nd.prev;
- }
- break;
- } else if (start < entry->end) { /* start > entry->start */
- err = chk_conflict(new, entry, new_type);
- if (!err) {
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
-
- /*
- * Move to right position in the linked
- * list to add this new entry
- */
- list_for_each_entry_continue(entry,
- &memtype_list, nd) {
- if (start <= entry->start) {
- where = entry->nd.prev;
- break;
- }
- }
- }
- break;
- }
- }
-
+ err = rbt_memtype_check_insert(new, new_type);
if (err) {
printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
"track %s, req %s\n",
@@ -469,13 +323,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
return err;
}
- if (where)
- list_add(&new->nd, where);
- else
- list_add_tail(&new->nd, &memtype_list);
-
- memtype_rb_insert(&memtype_rbroot, new);
-
spin_unlock(&memtype_lock);
dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -487,7 +334,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
int free_memtype(u64 start, u64 end)
{
- struct memtype *entry, *saved_entry;
int err = -EINVAL;
int is_range_ram;
@@ -501,9 +347,7 @@ int free_memtype(u64 start, u64 end)
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1) {
- spin_lock(&memtype_lock);
err = free_ram_pages_type(start, end);
- spin_unlock(&memtype_lock);
return err;
} else if (is_range_ram < 0) {
@@ -511,46 +355,7 @@ int free_memtype(u64 start, u64 end)
}
spin_lock(&memtype_lock);
-
- entry = memtype_rb_search(&memtype_rbroot, start);
- if (unlikely(entry == NULL))
- goto unlock_ret;
-
- /*
- * Saved entry points to an entry with start same or less than what
- * we searched for. Now go through the list in both directions to look
- * for the entry that matches with both start and end, with list stored
- * in sorted start address
- */
- saved_entry = entry;
- list_for_each_entry_from(entry, &memtype_list, nd) {
- if (entry->start == start && entry->end == end) {
- rb_erase(&entry->rb, &memtype_rbroot);
- list_del(&entry->nd);
- kfree(entry);
- err = 0;
- break;
- } else if (entry->start > start) {
- break;
- }
- }
-
- if (!err)
- goto unlock_ret;
-
- entry = saved_entry;
- list_for_each_entry_reverse(entry, &memtype_list, nd) {
- if (entry->start == start && entry->end == end) {
- rb_erase(&entry->rb, &memtype_rbroot);
- list_del(&entry->nd);
- kfree(entry);
- err = 0;
- break;
- } else if (entry->start < start) {
- break;
- }
- }
-unlock_ret:
+ err = rbt_memtype_erase(start, end);
spin_unlock(&memtype_lock);
if (err) {
@@ -583,10 +388,8 @@ static unsigned long lookup_memtype(u64 paddr)
if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
struct page *page;
- spin_lock(&memtype_lock);
page = pfn_to_page(paddr >> PAGE_SHIFT);
rettype = get_page_memtype(page);
- spin_unlock(&memtype_lock);
/*
* -1 from get_page_memtype() implies RAM page is in its
* default state and not reserved, and hence of type WB
@@ -599,7 +402,7 @@ static unsigned long lookup_memtype(u64 paddr)
spin_lock(&memtype_lock);
- entry = memtype_rb_search(&memtype_rbroot, paddr);
+ entry = rbt_memtype_lookup(paddr);
if (entry != NULL)
rettype = entry->type;
else
@@ -936,29 +739,25 @@ EXPORT_SYMBOL_GPL(pgprot_writecombine);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
-/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
- struct memtype *list_node, *print_entry;
- int i = 1;
+ struct memtype *print_entry;
+ int ret;
- print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+ print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
if (!print_entry)
return NULL;
spin_lock(&memtype_lock);
- list_for_each_entry(list_node, &memtype_list, nd) {
- if (pos == i) {
- *print_entry = *list_node;
- spin_unlock(&memtype_lock);
- return print_entry;
- }
- ++i;
- }
+ ret = rbt_memtype_copy_nth_element(print_entry, pos);
spin_unlock(&memtype_lock);
- kfree(print_entry);
- return NULL;
+ if (!ret) {
+ return print_entry;
+ } else {
+ kfree(print_entry);
+ return NULL;
+ }
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
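For callers nothing changes: reserve_memtype() still claims a cache type for a physical range and free_memtype() drops the claim; only the bookkeeping behind them moves from the linked list to the rbtree. A minimal usage sketch, not part of the patch; phys and size are hypothetical and the surrounding driver context is omitted:

static int demo_reserve_wc(u64 phys, u64 size)
{
	unsigned long got;
	int ret;

	ret = reserve_memtype(phys, phys + size, _PAGE_CACHE_WC, &got);
	if (ret)
		return ret;	/* e.g. -EBUSY on a conflicting alias */

	/* ... map the range using the possibly-downgraded type in 'got' ... */

	free_memtype(phys, phys + size);
	return 0;
}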
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
new file mode 100644
index 000000000000..4f39eefa3e61
--- /dev/null
+++ b/arch/x86/mm/pat_internal.h
@@ -0,0 +1,46 @@
+#ifndef __PAT_INTERNAL_H_
+#define __PAT_INTERNAL_H_
+
+extern int pat_debug_enable;
+
+#define dprintk(fmt, arg...) \
+ do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+struct memtype {
+ u64 start;
+ u64 end;
+ u64 subtree_max_end;
+ unsigned long type;
+ struct rb_node rb;
+};
+
+static inline char *cattr_name(unsigned long flags)
+{
+ switch (flags & _PAGE_CACHE_MASK) {
+ case _PAGE_CACHE_UC: return "uncached";
+ case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
+ case _PAGE_CACHE_WB: return "write-back";
+ case _PAGE_CACHE_WC: return "write-combining";
+ default: return "broken";
+ }
+}
+
+#ifdef CONFIG_X86_PAT
+extern int rbt_memtype_check_insert(struct memtype *new,
+ unsigned long *new_type);
+extern int rbt_memtype_erase(u64 start, u64 end);
+extern struct memtype *rbt_memtype_lookup(u64 addr);
+extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
+#else
+static inline int rbt_memtype_check_insert(struct memtype *new,
+ unsigned long *new_type)
+{ return 0; }
+static inline int rbt_memtype_erase(u64 start, u64 end)
+{ return 0; }
+static inline struct memtype *rbt_memtype_lookup(u64 addr)
+{ return NULL; }
+static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{ return 0; }
+#endif
+
+#endif /* __PAT_INTERNAL_H_ */
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
new file mode 100644
index 000000000000..07de4cb8cc30
--- /dev/null
+++ b/arch/x86/mm/pat_rbtree.c
@@ -0,0 +1,273 @@
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Interval tree (augmented rbtree) used to store the PAT memory type
+ * reservations.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/pat.h>
+
+#include "pat_internal.h"
+
+/*
+ * The memtype tree keeps track of memory type for specific
+ * physical memory areas. Without proper tracking, conflicting memory
+ * types in different mappings can cause CPU cache corruption.
+ *
+ * The tree is an interval tree (augmented rbtree) with tree ordered
+ * on starting address. Tree can contain multiple entries for
+ * different regions which overlap. All the aliases have the same
+ * cache attributes of course.
+ *
+ * memtype_lock protects the rbtree.
+ */
+
+static void memtype_rb_augment_cb(struct rb_node *node);
+static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+
+static int is_node_overlap(struct memtype *node, u64 start, u64 end)
+{
+ if (node->start >= end || node->end <= start)
+ return 0;
+
+ return 1;
+}
+
+static u64 get_subtree_max_end(struct rb_node *node)
+{
+ u64 ret = 0;
+ if (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+ ret = data->subtree_max_end;
+ }
+ return ret;
+}
+
+/* Update 'subtree_max_end' for a node, based on node and its children */
+static void update_node_max_end(struct rb_node *node)
+{
+ struct memtype *data;
+ u64 max_end, child_max_end;
+
+ if (!node)
+ return;
+
+ data = container_of(node, struct memtype, rb);
+ max_end = data->end;
+
+ child_max_end = get_subtree_max_end(node->rb_right);
+ if (child_max_end > max_end)
+ max_end = child_max_end;
+
+ child_max_end = get_subtree_max_end(node->rb_left);
+ if (child_max_end > max_end)
+ max_end = child_max_end;
+
+ data->subtree_max_end = max_end;
+}
+
+/* Update 'subtree_max_end' for a node and all its ancestors */
+static void update_path_max_end(struct rb_node *node)
+{
+ u64 old_max_end, new_max_end;
+
+ while (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+
+ old_max_end = data->subtree_max_end;
+ update_node_max_end(node);
+ new_max_end = data->subtree_max_end;
+
+ if (new_max_end == old_max_end)
+ break;
+
+ node = rb_parent(node);
+ }
+}
+
+/* Find the first (lowest start addr) overlapping range from rb tree */
+static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
+ u64 start, u64 end)
+{
+ struct rb_node *node = root->rb_node;
+ struct memtype *last_lower = NULL;
+
+ while (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+
+ if (get_subtree_max_end(node->rb_left) > start) {
+ /* Lowest overlap if any must be on left side */
+ node = node->rb_left;
+ } else if (is_node_overlap(data, start, end)) {
+ last_lower = data;
+ break;
+ } else if (start >= data->start) {
+ /* Lowest overlap if any must be on right side */
+ node = node->rb_right;
+ } else {
+ break;
+ }
+ }
+ return last_lower; /* Returns NULL if there is no overlap */
+}
+
+static struct memtype *memtype_rb_exact_match(struct rb_root *root,
+ u64 start, u64 end)
+{
+ struct memtype *match;
+
+ match = memtype_rb_lowest_match(root, start, end);
+ while (match != NULL && match->start < end) {
+ struct rb_node *node;
+
+ if (match->start == start && match->end == end)
+ return match;
+
+ node = rb_next(&match->rb);
+ if (node)
+ match = container_of(node, struct memtype, rb);
+ else
+ match = NULL;
+ }
+
+ return NULL; /* Returns NULL if there is no exact match */
+}
+
+static int memtype_rb_check_conflict(struct rb_root *root,
+ u64 start, u64 end,
+ unsigned long reqtype, unsigned long *newtype)
+{
+ struct rb_node *node;
+ struct memtype *match;
+ int found_type = reqtype;
+
+ match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
+ if (match == NULL)
+ goto success;
+
+ if (match->type != found_type && newtype == NULL)
+ goto failure;
+
+ dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
+ found_type = match->type;
+
+ node = rb_next(&match->rb);
+ while (node) {
+ match = container_of(node, struct memtype, rb);
+
+ if (match->start >= end) /* Checked all possible matches */
+ goto success;
+
+ if (is_node_overlap(match, start, end) &&
+ match->type != found_type) {
+ goto failure;
+ }
+
+ node = rb_next(&match->rb);
+ }
+success:
+ if (newtype)
+ *newtype = found_type;
+
+ return 0;
+
+failure:
+ printk(KERN_INFO "%s:%d conflicting memory types "
+ "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
+ end, cattr_name(found_type), cattr_name(match->type));
+ return -EBUSY;
+}
+
+static void memtype_rb_augment_cb(struct rb_node *node)
+{
+ if (node)
+ update_path_max_end(node);
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
+{
+ struct rb_node **node = &(root->rb_node);
+ struct rb_node *parent = NULL;
+
+ while (*node) {
+ struct memtype *data = container_of(*node, struct memtype, rb);
+
+ parent = *node;
+ if (newdata->start <= data->start)
+ node = &((*node)->rb_left);
+ else if (newdata->start > data->start)
+ node = &((*node)->rb_right);
+ }
+
+ rb_link_node(&newdata->rb, parent, node);
+ rb_insert_color(&newdata->rb, root);
+}
+
+int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
+{
+ int err = 0;
+
+ err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
+ new->type, ret_type);
+
+ if (!err) {
+ if (ret_type)
+ new->type = *ret_type;
+
+ memtype_rb_insert(&memtype_rbroot, new);
+ }
+ return err;
+}
+
+int rbt_memtype_erase(u64 start, u64 end)
+{
+ struct memtype *data;
+
+ data = memtype_rb_exact_match(&memtype_rbroot, start, end);
+ if (!data)
+ return -EINVAL;
+
+ rb_erase(&data->rb, &memtype_rbroot);
+ return 0;
+}
+
+struct memtype *rbt_memtype_lookup(u64 addr)
+{
+ struct memtype *data;
+ data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
+ return data;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{
+ struct rb_node *node;
+ int i = 1;
+
+ node = rb_first(&memtype_rbroot);
+ while (node && pos != i) {
+ node = rb_next(node);
+ i++;
+ }
+
+ if (node) { /* pos == i */
+ struct memtype *this = container_of(node, struct memtype, rb);
+ *out = *this;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+#endif
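The lookup helpers above rest on two small pieces of arithmetic: the half-open overlap test in is_node_overlap() and the cached subtree_max_end that lets memtype_rb_lowest_match() skip a whole left subtree once its maximum end is at or below the query start. A minimal sketch of both tests, not part of the patch; the demo_ names are stand-ins:

/* Half-open [start, end) ranges overlap iff neither lies wholly before the other. */
static int demo_ranges_overlap(u64 a_start, u64 a_end, u64 b_start, u64 b_end)
{
	return !(a_start >= b_end || a_end <= b_start);
}

/* Pruning rule used during descent: recurse left only if something there can still overlap. */
static int demo_left_may_overlap(u64 left_subtree_max_end, u64 query_start)
{
	return left_subtree_max_end > query_start;
}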
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 28c68762648f..f9897f7a9ef1 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -363,6 +363,54 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
for (i = 0; i < MAX_NUMNODES; i++)
cutoff_node(i, start, end);
+ /*
+ * Join together blocks on the same node, holes between
+ * which don't overlap with memory on other nodes.
+ */
+ for (i = 0; i < num_node_memblks; ++i) {
+ int j, k;
+
+ for (j = i + 1; j < num_node_memblks; ++j) {
+ unsigned long start, end;
+
+ if (memblk_nodeid[i] != memblk_nodeid[j])
+ continue;
+ start = min(node_memblk_range[i].end,
+ node_memblk_range[j].end);
+ end = max(node_memblk_range[i].start,
+ node_memblk_range[j].start);
+ for (k = 0; k < num_node_memblks; ++k) {
+ if (memblk_nodeid[i] == memblk_nodeid[k])
+ continue;
+ if (start < node_memblk_range[k].end &&
+ end > node_memblk_range[k].start)
+ break;
+ }
+ if (k < num_node_memblks)
+ continue;
+ start = min(node_memblk_range[i].start,
+ node_memblk_range[j].start);
+ end = max(node_memblk_range[i].end,
+ node_memblk_range[j].end);
+ printk(KERN_INFO "SRAT: Node %d "
+ "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
+ memblk_nodeid[i],
+ node_memblk_range[i].start,
+ node_memblk_range[i].end,
+ node_memblk_range[j].start,
+ node_memblk_range[j].end,
+ start, end);
+ node_memblk_range[i].start = start;
+ node_memblk_range[i].end = end;
+ k = --num_node_memblks - j;
+ memmove(memblk_nodeid + j, memblk_nodeid + j+1,
+ k * sizeof(*memblk_nodeid));
+ memmove(node_memblk_range + j, node_memblk_range + j+1,
+ k * sizeof(*node_memblk_range));
+ --j;
+ }
+ }
+
memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
memblk_nodeid);
if (memnode_shift < 0) {
@@ -461,7 +509,8 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
* node, it must now point to the fake node ID.
*/
for (j = 0; j < MAX_LOCAL_APIC; j++)
- if (apicid_to_node[j] == nid)
+ if (apicid_to_node[j] == nid &&
+ fake_apicid_to_node[j] == NUMA_NO_NODE)
fake_apicid_to_node[j] = i;
}
for (i = 0; i < num_nodes; i++)
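With half-open ranges, the hole between two blocks of the same node is [min(end_i, end_j), max(start_i, start_j)), which is exactly the start/end pair the loop computes before scanning the other nodes' blocks; only if that hole touches no foreign block are the two merged into their union. A small worked sketch, not part of the patch; demo_range and demo_gap() are stand-ins:

struct demo_range { unsigned long start, end; };

/* Gap between two blocks of the same node, e.g. [0,2G) and [3G,4G) -> [2G,3G). */
static struct demo_range demo_gap(struct demo_range a, struct demo_range b)
{
	struct demo_range gap;

	gap.start = a.end < b.end ? a.end : b.end;
	gap.end = a.start > b.start ? a.start : b.start;
	return gap;
}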
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2c505ee71014..b28d2f1253bb 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -31,8 +31,9 @@ static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
-/* 0 == registered but off, 1 == registered and on */
-static int nmi_enabled = 0;
+/* must be protected with get_online_cpus()/put_online_cpus(): */
+static int nmi_enabled;
+static int ctr_running;
struct op_counter_config counter_config[OP_MAX_COUNTER];
@@ -61,12 +62,16 @@ static int profile_exceptions_notify(struct notifier_block *self,
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
- int cpu = smp_processor_id();
switch (val) {
case DIE_NMI:
case DIE_NMI_IPI:
- model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
+ if (ctr_running)
+ model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
+ else if (!nmi_enabled)
+ break;
+ else
+ model->stop(&__get_cpu_var(cpu_msrs));
ret = NOTIFY_STOP;
break;
default:
@@ -95,24 +100,36 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
static void nmi_cpu_start(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->start(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->start(msrs);
}
static int nmi_start(void)
{
+ get_online_cpus();
on_each_cpu(nmi_cpu_start, NULL, 1);
+ ctr_running = 1;
+ put_online_cpus();
return 0;
}
static void nmi_cpu_stop(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->stop(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->stop(msrs);
}
static void nmi_stop(void)
{
+ get_online_cpus();
on_each_cpu(nmi_cpu_stop, NULL, 1);
+ ctr_running = 0;
+ put_online_cpus();
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
@@ -252,7 +269,10 @@ static int nmi_switch_event(void)
if (nmi_multiplex_on() < 0)
return -EINVAL; /* not necessary */
- on_each_cpu(nmi_cpu_switch, NULL, 1);
+ get_online_cpus();
+ if (ctr_running)
+ on_each_cpu(nmi_cpu_switch, NULL, 1);
+ put_online_cpus();
return 0;
}
@@ -295,6 +315,7 @@ static void free_msrs(void)
kfree(per_cpu(cpu_msrs, i).controls);
per_cpu(cpu_msrs, i).controls = NULL;
}
+ nmi_shutdown_mux();
}
static int allocate_msrs(void)
@@ -307,14 +328,21 @@ static int allocate_msrs(void)
per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).counters)
- return 0;
+ goto fail;
per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).controls)
- return 0;
+ goto fail;
}
+ if (!nmi_setup_mux())
+ goto fail;
+
return 1;
+
+fail:
+ free_msrs();
+ return 0;
}
static void nmi_cpu_setup(void *dummy)
@@ -336,49 +364,6 @@ static struct notifier_block profile_exceptions_nb = {
.priority = 2
};
-static int nmi_setup(void)
-{
- int err = 0;
- int cpu;
-
- if (!allocate_msrs())
- err = -ENOMEM;
- else if (!nmi_setup_mux())
- err = -ENOMEM;
- else
- err = register_die_notifier(&profile_exceptions_nb);
-
- if (err) {
- free_msrs();
- nmi_shutdown_mux();
- return err;
- }
-
- /* We need to serialize save and setup for HT because the subset
- * of msrs are distinct for save and setup operations
- */
-
- /* Assume saved/restored counters are the same on all CPUs */
- model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
- for_each_possible_cpu(cpu) {
- if (!cpu)
- continue;
-
- memcpy(per_cpu(cpu_msrs, cpu).counters,
- per_cpu(cpu_msrs, 0).counters,
- sizeof(struct op_msr) * model->num_counters);
-
- memcpy(per_cpu(cpu_msrs, cpu).controls,
- per_cpu(cpu_msrs, 0).controls,
- sizeof(struct op_msr) * model->num_controls);
-
- mux_clone(cpu);
- }
- on_each_cpu(nmi_cpu_setup, NULL, 1);
- nmi_enabled = 1;
- return 0;
-}
-
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
struct op_msr *counters = msrs->counters;
@@ -412,20 +397,24 @@ static void nmi_cpu_shutdown(void *dummy)
apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
apic_write(APIC_LVTERR, v);
nmi_cpu_restore_registers(msrs);
+ if (model->cpu_down)
+ model->cpu_down();
}
-static void nmi_shutdown(void)
+static void nmi_cpu_up(void *dummy)
{
- struct op_msrs *msrs;
+ if (nmi_enabled)
+ nmi_cpu_setup(dummy);
+ if (ctr_running)
+ nmi_cpu_start(dummy);
+}
- nmi_enabled = 0;
- on_each_cpu(nmi_cpu_shutdown, NULL, 1);
- unregister_die_notifier(&profile_exceptions_nb);
- nmi_shutdown_mux();
- msrs = &get_cpu_var(cpu_msrs);
- model->shutdown(msrs);
- free_msrs();
- put_cpu_var(cpu_msrs);
+static void nmi_cpu_down(void *dummy)
+{
+ if (ctr_running)
+ nmi_cpu_stop(dummy);
+ if (nmi_enabled)
+ nmi_cpu_shutdown(dummy);
}
static int nmi_create_files(struct super_block *sb, struct dentry *root)
@@ -457,7 +446,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
return 0;
}
-#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
void *data)
{
@@ -465,10 +453,10 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
switch (action) {
case CPU_DOWN_FAILED:
case CPU_ONLINE:
- smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+ smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
break;
case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+ smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
break;
}
return NOTIFY_DONE;
@@ -477,7 +465,75 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
static struct notifier_block oprofile_cpu_nb = {
.notifier_call = oprofile_cpu_notifier
};
-#endif
+
+static int nmi_setup(void)
+{
+ int err = 0;
+ int cpu;
+
+ if (!allocate_msrs())
+ return -ENOMEM;
+
+ /* We need to serialize save and setup for HT because the subset
+ * of msrs are distinct for save and setup operations
+ */
+
+ /* Assume saved/restored counters are the same on all CPUs */
+ err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+ if (err)
+ goto fail;
+
+ for_each_possible_cpu(cpu) {
+ if (!cpu)
+ continue;
+
+ memcpy(per_cpu(cpu_msrs, cpu).counters,
+ per_cpu(cpu_msrs, 0).counters,
+ sizeof(struct op_msr) * model->num_counters);
+
+ memcpy(per_cpu(cpu_msrs, cpu).controls,
+ per_cpu(cpu_msrs, 0).controls,
+ sizeof(struct op_msr) * model->num_controls);
+
+ mux_clone(cpu);
+ }
+
+ nmi_enabled = 0;
+ ctr_running = 0;
+ barrier();
+ err = register_die_notifier(&profile_exceptions_nb);
+ if (err)
+ goto fail;
+
+ get_online_cpus();
+ register_cpu_notifier(&oprofile_cpu_nb);
+ on_each_cpu(nmi_cpu_setup, NULL, 1);
+ nmi_enabled = 1;
+ put_online_cpus();
+
+ return 0;
+fail:
+ free_msrs();
+ return err;
+}
+
+static void nmi_shutdown(void)
+{
+ struct op_msrs *msrs;
+
+ get_online_cpus();
+ unregister_cpu_notifier(&oprofile_cpu_nb);
+ on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+ nmi_enabled = 0;
+ ctr_running = 0;
+ put_online_cpus();
+ barrier();
+ unregister_die_notifier(&profile_exceptions_nb);
+ msrs = &get_cpu_var(cpu_msrs);
+ model->shutdown(msrs);
+ free_msrs();
+ put_cpu_var(cpu_msrs);
+}
#ifdef CONFIG_PM
@@ -687,9 +743,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
return -ENODEV;
}
-#ifdef CONFIG_SMP
- register_cpu_notifier(&oprofile_cpu_nb);
-#endif
/* default values, can be overwritten by model */
ops->create_files = nmi_create_files;
ops->setup = nmi_setup;
@@ -716,12 +769,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
void op_nmi_exit(void)
{
- if (using_nmi) {
+ if (using_nmi)
exit_sysfs();
-#ifdef CONFIG_SMP
- unregister_cpu_notifier(&oprofile_cpu_nb);
-#endif
- }
- if (model->exit)
- model->exit();
}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 090cbbec7dbd..b67a6b5aa8d4 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -30,13 +30,10 @@
#include "op_counter.h"
#define NUM_COUNTERS 4
-#define NUM_CONTROLS 4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS 32
-#define NUM_VIRT_CONTROLS 32
#else
#define NUM_VIRT_COUNTERS NUM_COUNTERS
-#define NUM_VIRT_CONTROLS NUM_CONTROLS
#endif
#define OP_EVENT_MASK 0x0FFF
@@ -105,102 +102,6 @@ static u32 get_ibs_caps(void)
return ibs_caps;
}
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* enable active counters */
- for (i = 0; i < NUM_COUNTERS; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
- int i;
-
- for (i = 0; i < NUM_COUNTERS; i++) {
- if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
- msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
- }
-
- for (i = 0; i < NUM_CONTROLS; i++) {
- if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
- msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
- }
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* setup reset_value */
- for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
- if (counter_config[i].enabled
- && msrs->counters[op_x86_virt_to_phys(i)].addr)
- reset_value[i] = counter_config[i].count;
- else
- reset_value[i] = 0;
- }
-
- /* clear all counters */
- for (i = 0; i < NUM_CONTROLS; ++i) {
- if (unlikely(!msrs->controls[i].addr)) {
- if (counter_config[i].enabled && !smp_processor_id())
- /*
- * counter is reserved, this is on all
- * cpus, so report only for cpu #0
- */
- op_x86_warn_reserved(i);
- continue;
- }
- rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- op_x86_warn_in_use(i);
- val &= model->reserved;
- wrmsrl(msrs->controls[i].addr, val);
- }
-
- /* avoid a false detection of ctr overflows in NMI handler */
- for (i = 0; i < NUM_COUNTERS; ++i) {
- if (unlikely(!msrs->counters[i].addr))
- continue;
- wrmsrl(msrs->counters[i].addr, -1LL);
- }
-
- /* enable active counters */
- for (i = 0; i < NUM_COUNTERS; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
-
- /* setup counter registers */
- wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
- /* setup control registers */
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
/*
* 16-bit Linear Feedback Shift Register (LFSR)
*
@@ -365,6 +266,125 @@ static void op_amd_stop_ibs(void)
wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
+{
+ u64 val;
+ int i;
+
+ /* enable active counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (!reset_value[virt])
+ continue;
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[virt]);
+ wrmsrl(msrs->controls[i].addr, val);
+ }
+}
+
+#endif
+
+/* functions for op_amd_spec */
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+ int i;
+
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (!msrs->counters[i].addr)
+ continue;
+ release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+ release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+ }
+}
+
+static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+ int i;
+
+ for (i = 0; i < NUM_COUNTERS; i++) {
+ if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+ goto fail;
+ if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
+ release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+ goto fail;
+ }
+ /* both registers must be reserved */
+ msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+ msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+ continue;
+ fail:
+ if (!counter_config[i].enabled)
+ continue;
+ op_x86_warn_reserved(i);
+ op_amd_shutdown(msrs);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
+{
+ u64 val;
+ int i;
+
+ /* setup reset_value */
+ for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+ if (counter_config[i].enabled
+ && msrs->counters[op_x86_virt_to_phys(i)].addr)
+ reset_value[i] = counter_config[i].count;
+ else
+ reset_value[i] = 0;
+ }
+
+ /* clear all counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (!msrs->controls[i].addr)
+ continue;
+ rdmsrl(msrs->controls[i].addr, val);
+ if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+ op_x86_warn_in_use(i);
+ val &= model->reserved;
+ wrmsrl(msrs->controls[i].addr, val);
+ /*
+ * avoid a false detection of ctr overflows in NMI
+ * handler
+ */
+ wrmsrl(msrs->counters[i].addr, -1LL);
+ }
+
+ /* enable active counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (!reset_value[virt])
+ continue;
+
+ /* setup counter registers */
+ wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
+
+ /* setup control registers */
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[virt]);
+ wrmsrl(msrs->controls[i].addr, val);
+ }
+
+ if (ibs_caps)
+ setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
+}
+
+static void op_amd_cpu_shutdown(void)
+{
+ if (ibs_caps)
+ setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
+}
+
static int op_amd_check_ctrs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
@@ -425,42 +445,16 @@ static void op_amd_stop(struct op_msrs const * const msrs)
op_amd_stop_ibs();
}
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < NUM_COUNTERS; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
- }
- for (i = 0; i < NUM_CONTROLS; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
- }
-}
-
-static u8 ibs_eilvt_off;
-
-static inline void apic_init_ibs_nmi_per_cpu(void *arg)
-{
- ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
-}
-
-static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
-{
- setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
-}
-
-static int init_ibs_nmi(void)
+static int __init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL (1 << 8)
#define IBSCTL 0x1cc
struct pci_dev *cpu_cfg;
int nodes;
u32 value = 0;
+ u8 ibs_eilvt_off;
- /* per CPU setup */
- on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
+ ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
nodes = 0;
cpu_cfg = NULL;
@@ -490,22 +484,15 @@ static int init_ibs_nmi(void)
return 0;
}
-/* uninitialize the APIC for the IBS interrupts if needed */
-static void clear_ibs_nmi(void)
-{
- if (ibs_caps)
- on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
-}
-
/* initialize the APIC for the IBS interrupts if available */
-static void ibs_init(void)
+static void init_ibs(void)
{
ibs_caps = get_ibs_caps();
if (!ibs_caps)
return;
- if (init_ibs_nmi()) {
+ if (__init_ibs_nmi()) {
ibs_caps = 0;
return;
}
@@ -514,14 +501,6 @@ static void ibs_init(void)
(unsigned)ibs_caps);
}
-static void ibs_exit(void)
-{
- if (!ibs_caps)
- return;
-
- clear_ibs_nmi();
-}
-
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
static int setup_ibs_files(struct super_block *sb, struct dentry *root)
@@ -570,27 +549,22 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
static int op_amd_init(struct oprofile_operations *ops)
{
- ibs_init();
+ init_ibs();
create_arch_files = ops->create_files;
ops->create_files = setup_ibs_files;
return 0;
}
-static void op_amd_exit(void)
-{
- ibs_exit();
-}
-
struct op_x86_model_spec op_amd_spec = {
.num_counters = NUM_COUNTERS,
- .num_controls = NUM_CONTROLS,
+ .num_controls = NUM_COUNTERS,
.num_virt_counters = NUM_VIRT_COUNTERS,
.reserved = MSR_AMD_EVENTSEL_RESERVED,
.event_mask = OP_EVENT_MASK,
.init = op_amd_init,
- .exit = op_amd_exit,
.fill_in_addresses = &op_amd_fill_in_addresses,
.setup_ctrs = &op_amd_setup_ctrs,
+ .cpu_down = &op_amd_cpu_shutdown,
.check_ctrs = &op_amd_check_ctrs,
.start = &op_amd_start,
.stop = &op_amd_stop,
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index e6a160a4684a..182558dd5515 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -385,8 +385,26 @@ static unsigned int get_stagger(void)
static unsigned long reset_value[NUM_COUNTERS_NON_HT];
+static void p4_shutdown(struct op_msrs const * const msrs)
+{
+ int i;
-static void p4_fill_in_addresses(struct op_msrs * const msrs)
+ for (i = 0; i < num_counters; ++i) {
+ if (msrs->counters[i].addr)
+ release_perfctr_nmi(msrs->counters[i].addr);
+ }
+ /*
+ * some of the control registers are specially reserved in
+ * conjunction with the counter registers (hence the starting offset).
+ * This saves a few bits.
+ */
+ for (i = num_counters; i < num_controls; ++i) {
+ if (msrs->controls[i].addr)
+ release_evntsel_nmi(msrs->controls[i].addr);
+ }
+}
+
+static int p4_fill_in_addresses(struct op_msrs * const msrs)
{
unsigned int i;
unsigned int addr, cccraddr, stag;
@@ -468,6 +486,18 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
}
}
+
+ for (i = 0; i < num_counters; ++i) {
+ if (!counter_config[i].enabled)
+ continue;
+ if (msrs->controls[i].addr)
+ continue;
+ op_x86_warn_reserved(i);
+ p4_shutdown(msrs);
+ return -EBUSY;
+ }
+
+ return 0;
}
@@ -668,26 +698,6 @@ static void p4_stop(struct op_msrs const * const msrs)
}
}
-static void p4_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(msrs->counters[i].addr);
- }
- /*
- * some of the control registers are specially reserved in
- * conjunction with the counter registers (hence the starting offset).
- * This saves a few bits.
- */
- for (i = num_counters; i < num_controls; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(msrs->controls[i].addr);
- }
-}
-
-
#ifdef CONFIG_SMP
struct op_x86_model_spec op_p4_ht2_spec = {
.num_counters = NUM_COUNTERS_HT2,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index c8abc4d1bf35..d769cda54082 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -30,19 +30,46 @@ static int counter_width = 32;
static u64 *reset_value;
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static void ppro_shutdown(struct op_msrs const * const msrs)
{
int i;
- for (i = 0; i < num_counters; i++) {
- if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
- msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+ for (i = 0; i < num_counters; ++i) {
+ if (!msrs->counters[i].addr)
+ continue;
+ release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+ release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
+ }
+ if (reset_value) {
+ kfree(reset_value);
+ reset_value = NULL;
}
+}
+
+static int ppro_fill_in_addresses(struct op_msrs * const msrs)
+{
+ int i;
for (i = 0; i < num_counters; i++) {
- if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
- msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+ if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
+ goto fail;
+ if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
+ release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+ goto fail;
+ }
+ /* both registers must be reserved */
+ msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+ msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+ continue;
+ fail:
+ if (!counter_config[i].enabled)
+ continue;
+ op_x86_warn_reserved(i);
+ ppro_shutdown(msrs);
+ return -EBUSY;
}
+
+ return 0;
}
@@ -78,26 +105,17 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
/* clear all counters */
for (i = 0; i < num_counters; ++i) {
- if (unlikely(!msrs->controls[i].addr)) {
- if (counter_config[i].enabled && !smp_processor_id())
- /*
- * counter is reserved, this is on all
- * cpus, so report only for cpu #0
- */
- op_x86_warn_reserved(i);
+ if (!msrs->controls[i].addr)
continue;
- }
rdmsrl(msrs->controls[i].addr, val);
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
op_x86_warn_in_use(i);
val &= model->reserved;
wrmsrl(msrs->controls[i].addr, val);
- }
-
- /* avoid a false detection of ctr overflows in NMI handler */
- for (i = 0; i < num_counters; ++i) {
- if (unlikely(!msrs->counters[i].addr))
- continue;
+ /*
+ * avoid a false detection of ctr overflows in NMI
+ * handler
+ */
wrmsrl(msrs->counters[i].addr, -1LL);
}
@@ -189,25 +207,6 @@ static void ppro_stop(struct op_msrs const * const msrs)
}
}
-static void ppro_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
- }
- for (i = 0; i < num_counters; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
- }
- if (reset_value) {
- kfree(reset_value);
- reset_value = NULL;
- }
-}
-
-
struct op_x86_model_spec op_ppro_spec = {
.num_counters = 2,
.num_controls = 2,
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index ff82a755edd4..89017fa1fd63 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -40,10 +40,10 @@ struct op_x86_model_spec {
u64 reserved;
u16 event_mask;
int (*init)(struct oprofile_operations *ops);
- void (*exit)(void);
- void (*fill_in_addresses)(struct op_msrs * const msrs);
+ int (*fill_in_addresses)(struct op_msrs * const msrs);
void (*setup_ctrs)(struct op_x86_model_spec const *model,
struct op_msrs const * const msrs);
+ void (*cpu_down)(void);
int (*check_ctrs)(struct pt_regs * const regs,
struct op_msrs const * const msrs);
void (*start)(struct op_msrs const * const msrs);
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 8bf2fcb88d04..7ef3a2735df3 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -109,7 +109,7 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
decode++;
decode = ~(decode - 1);
} else {
- decode = ~0;
+ decode = 0;
}
/*
@@ -247,6 +247,10 @@ static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
u32 size;
int i;
+ /* Must have extended configuration space */
+ if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
+ return;
+
/* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
offset = fixed_bar_cap(dev->bus, dev->devfn);
if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||