Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/memblock.h     |  2
-rw-r--r--  arch/x86/include/asm/pvclock.h      |  9
-rw-r--r--  arch/x86/kernel/amd_iommu.c         | 48
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c    |  8
-rw-r--r--  arch/x86/kernel/apic/apic.c         |  3
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c  |  8
-rw-r--r--  arch/x86/kernel/devicetree.c        | 11
-rw-r--r--  arch/x86/kernel/process.c           |  4
-rw-r--r--  arch/x86/kernel/process_32.c        |  1
-rw-r--r--  arch/x86/kernel/process_64.c        |  1
-rw-r--r--  arch/x86/kernel/smpboot.c           | 13
-rw-r--r--  arch/x86/kvm/emulate.c              | 82
-rw-r--r--  arch/x86/kvm/mmu.c                  |  2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h          |  2
-rw-r--r--  arch/x86/kvm/vmx.c                  |  3
-rw-r--r--  arch/x86/mm/memblock.c              |  4
-rw-r--r--  arch/x86/oprofile/op_model_amd.c    | 13
-rw-r--r--  arch/x86/platform/efi/efi.c         | 29
-rw-r--r--  arch/x86/xen/enlighten.c            |  9
-rw-r--r--  arch/x86/xen/mmu.c                  | 12
-rw-r--r--  arch/x86/xen/multicalls.c           | 12
-rw-r--r--  arch/x86/xen/setup.c                | 10
-rw-r--r--  arch/x86/xen/smp.c                  |  7
23 files changed, 213 insertions(+), 80 deletions(-)
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
#define ARCH_DISCARD_MEMBLOCK
u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
void memblock_x86_reserve_range(u64 start, u64 end, char *name);
void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
#endif
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
u64 product;
#ifdef __i386__
u32 tmp1, tmp2;
+#else
+ ulong tmp;
#endif
if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
__asm__ (
- "mul %%rdx ; shrd $32,%%rdx,%%rax"
- : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+ "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+ : [lo]"=a"(product),
+ [hi]"=d"(tmp)
+ : "0"(delta),
+ [mul_frac]"rm"((u64)mul_frac));
#else
#error implement me!
#endif
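
For reference, a portable sketch of what the rewritten x86-64 asm computes (a userspace stand-in, not kernel code): the 128-bit product of delta and the 32.32 fixed-point mul_frac, keeping bits 95:32 -- exactly what "mul %[mul_frac]; shrd $32, %[hi], %[lo]" leaves in %rax.

    #include <stdint.h>

    /* Model of pvclock_scale_delta(); unsigned __int128 is a GCC/Clang
     * extension standing in for the rdx:rax double-width product. */
    static uint64_t scale_delta_ref(uint64_t delta, uint32_t mul_frac, int shift)
    {
        if (shift < 0)
            delta >>= -shift;
        else
            delta <<= shift;

        /* mul yields a 128-bit product; shrd $32 selects bits 95:32 */
        return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
    }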
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
+#include <asm/dma.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
if (pdev)
dev_data->alias = &pdev->dev;
+ else {
+ kfree(dev_data);
+ return -ENOTSUPP;
+ }
atomic_set(&dev_data->bind, 0);
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
return 0;
}
+static void iommu_ignore_device(struct device *dev)
+{
+ u16 devid, alias;
+
+ devid = get_device_id(dev);
+ alias = amd_iommu_alias_table[devid];
+
+ memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+ amd_iommu_rlookup_table[devid] = NULL;
+ amd_iommu_rlookup_table[alias] = NULL;
+}
+
static void iommu_uninit_device(struct device *dev)
{
kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
continue;
ret = iommu_init_device(&pdev->dev);
- if (ret)
+ if (ret == -ENOTSUPP)
+ iommu_ignore_device(&pdev->dev);
+ else if (ret)
goto out_free;
}
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
.dma_supported = amd_iommu_dma_supported,
};
+static unsigned device_dma_ops_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ unsigned unhandled = 0;
+
+ for_each_pci_dev(pdev) {
+ if (!check_device(&pdev->dev)) {
+ unhandled += 1;
+ continue;
+ }
+
+ pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ }
+
+ return unhandled;
+}
+
/*
* The function which clues the AMD IOMMU driver into dma_ops.
*/
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
struct amd_iommu *iommu;
- int ret;
+ int ret, unhandled;
/*
* first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
swiotlb = 0;
/* Make the driver finally visible to the drivers */
- dma_ops = &amd_iommu_dma_ops;
+ unhandled = device_dma_ops_init();
+ if (unhandled && max_pfn > MAX_DMA32_PFN) {
+ /* There are unhandled devices - initialize swiotlb for them */
+ swiotlb = 1;
+ }
amd_iommu_stats_init();
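
A reduced model of the fallback decision above (stand-in types; check_device() and the DMA32 boundary test are simplified, not the kernel's): per-device ops are installed only where the IOMMU can handle the device, and swiotlb stays enabled only if some unhandled device might face memory above 4 GB.

    #include <stdbool.h>
    #include <stddef.h>

    struct dev { bool iommu_capable; const void *dma_ops; };

    static const int iommu_ops;              /* stand-in for amd_iommu_dma_ops */
    #define MAX_DMA32_PFN (1UL << (32 - 12)) /* first pfn above 4 GB */

    static unsigned install_dma_ops(struct dev *devs, size_t n)
    {
        unsigned unhandled = 0;

        for (size_t i = 0; i < n; i++) {
            if (!devs[i].iommu_capable) {    /* ~check_device() failing */
                unhandled++;
                continue;
            }
            devs[i].dma_ops = &iommu_ops;
        }
        return unhandled;
    }

    static bool need_swiotlb(unsigned unhandled, unsigned long max_pfn)
    {
        /* bounce buffers only if an unhandled device may see >4 GB memory */
        return unhandled && max_pfn > MAX_DMA32_PFN;
    }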
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
{
u8 *p = (u8 *)h;
u8 *end = p, flags = 0;
- u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
- u32 ext_flags = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0;
+ u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
- u16 i;
+ u32 i;
for (i = iommu->first_device; i <= iommu->last_device; ++i)
set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
*/
static void init_device_table(void)
{
- u16 devid;
+ u32 devid;
for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
set_dev_entry_bit(devid, DEV_ENTRY_VALID);
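
The index widening above matters because amd_iommu_last_bdf can legitimately be 0xffff; with a u16 counter the condition "devid <= 0xffff" is always true and the counter wraps forever. A minimal standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t last_bdf = 0xffff;         /* worst-case last device id */
        uint32_t steps = 0;

        for (uint16_t devid = 0; devid <= last_bdf; ++devid) {
            if (++steps > 0x20000) {        /* loop never ends; bail out */
                puts("u16 index wrapped: infinite loop");
                break;
            }
        }
        /* with a uint32_t index the loop terminates after 0x10000 steps */
        return 0;
    }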
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af86bfea..b9338b8cf420 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
/*
* If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * enables the vector. See also the BKDGs. Must be called with
+ * preemption disabled.
*/
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b511a011b7d0..adc66c3a1fef 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
- unsigned int command_bits, bool change_bridge)
+ unsigned int command_bits, u32 flags)
{
int domain, bus, rc;
- PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
- pdev->devfn, decode, command_bits, change_bridge);
+ PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
+ pdev->devfn, decode, command_bits, flags);
- if (!change_bridge)
+ if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
return 0;
if ((command_bits & PCI_COMMAND_IO) == 0)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc8461835..9aeb78a23de4 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/of_pci.h>
+#include <linux/initrd.h>
#include <asm/hpet.h>
#include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
void __init add_dtb(u64 data)
{
initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2e4928d45a2d..e1ba8cb24e4e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif
@@ -399,7 +399,7 @@ void default_idle(void)
cpu_relax();
}
}
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..a3d0dc59067b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
set_user_gs(regs, 0);
regs->fs = 0;
- set_fs(USER_DS);
regs->ds = __USER_DS;
regs->es = __USER_DS;
regs->ss = __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..ca6f7ab8df33 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
regs->cs = _cs;
regs->ss = _ss;
regs->flags = X86_EFLAGS_IF;
- set_fs(USER_DS);
/*
* Free the old FP and other extended state
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11797de..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
x86_platform.nmi_init();
+ /*
+ * Wait until the cpu which brought this one up marked it
+ * online before enabling interrupts. If we don't do that then
+ * we can end up waking up the softirq thread before this cpu
+ * reached the active state, which makes the scheduler unhappy
+ * and schedule the softirq thread on the wrong cpu. This is
+ * only observable with forced threaded interrupts, but in
+ * theory it could also happen w/o them. It's just way harder
+ * to achieve.
+ */
+ while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+ cpu_relax();
+
/* enable local interrupts */
local_irq_enable();
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..6df88c7885c0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
#define DstDI (5<<1) /* Destination is in ES:(E)DI */
#define DstMem64 (6<<1) /* 64bit memory operand */
#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
-#define DstMask (7<<1)
+#define DstDX (8<<1) /* Destination is in DX register */
+#define DstMask (0xf<<1)
/* Source operand type. */
-#define SrcNone (0<<4) /* No source operand. */
-#define SrcReg (1<<4) /* Register operand. */
-#define SrcMem (2<<4) /* Memory operand. */
-#define SrcMem16 (3<<4) /* Memory operand (16-bit). */
-#define SrcMem32 (4<<4) /* Memory operand (32-bit). */
-#define SrcImm (5<<4) /* Immediate operand. */
-#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
-#define SrcOne (7<<4) /* Implied '1' */
-#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
-#define SrcImmU (9<<4) /* Immediate operand, unsigned */
-#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
-#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
-#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
-#define SrcAcc (0xd<<4) /* Source Accumulator */
-#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
-#define SrcMask (0xf<<4)
+#define SrcNone (0<<5) /* No source operand. */
+#define SrcReg (1<<5) /* Register operand. */
+#define SrcMem (2<<5) /* Memory operand. */
+#define SrcMem16 (3<<5) /* Memory operand (16-bit). */
+#define SrcMem32 (4<<5) /* Memory operand (32-bit). */
+#define SrcImm (5<<5) /* Immediate operand. */
+#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */
+#define SrcOne (7<<5) /* Implied '1' */
+#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */
+#define SrcImmU (9<<5) /* Immediate operand, unsigned */
+#define SrcSI (0xa<<5) /* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */
+#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */
+#define SrcAcc (0xd<<5) /* Source Accumulator */
+#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */
+#define SrcDX (0xf<<5) /* Source is in DX register */
+#define SrcMask (0xf<<5)
/* Generic ModRM decode. */
-#define ModRM (1<<8)
+#define ModRM (1<<9)
/* Destination is only written; never read. */
-#define Mov (1<<9)
-#define BitOp (1<<10)
-#define MemAbs (1<<11) /* Memory operand is absolute displacement */
-#define String (1<<12) /* String instruction (rep capable) */
-#define Stack (1<<13) /* Stack instruction (push/pop) */
-#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */
-#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */
-#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */
-#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */
-#define Sse (1<<17) /* SSE Vector instruction */
+#define Mov (1<<10)
+#define BitOp (1<<11)
+#define MemAbs (1<<12) /* Memory operand is absolute displacement */
+#define String (1<<13) /* String instruction (rep capable) */
+#define Stack (1<<14) /* Stack instruction (push/pop) */
+#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
+#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
+#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
+#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse (1<<18) /* SSE Vector instruction */
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
- D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
- D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
+ D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
+ D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte)),
/* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
/* 0xE8 - 0xEF */
D(SrcImm | Stack), D(SrcImm | ImplicitOps),
D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
- D2bvIP(SrcNone | DstAcc, in, check_perm_in),
- D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
+ D2bvIP(SrcDX | DstAcc, in, check_perm_in),
+ D2bvIP(SrcAcc | DstDX, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3613,6 +3615,12 @@ done_prefixes:
memop.bytes = c->op_bytes + 2;
goto srcmem_common;
break;
+ case SrcDX:
+ c->src.type = OP_REG;
+ c->src.bytes = 2;
+ c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&c->src);
+ break;
}
if (rc != X86EMUL_CONTINUE)
@@ -3682,6 +3690,12 @@ done_prefixes:
c->dst.addr.mem.seg = VCPU_SREG_ES;
c->dst.val = 0;
break;
+ case DstDX:
+ c->dst.type = OP_REG;
+ c->dst.bytes = 2;
+ c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&c->dst);
+ break;
case ImplicitOps:
/* Special instructions do their own operand decoding. */
default:
@@ -4027,7 +4041,6 @@ special_insn:
break;
case 0xec: /* in al,dx */
case 0xed: /* in (e/r)ax,dx */
- c->src.val = c->regs[VCPU_REGS_RDX];
do_io_in:
if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
&c->dst.val))
@@ -4035,7 +4048,6 @@ special_insn:
break;
case 0xee: /* out dx,al */
case 0xef: /* out dx,(e/r)ax */
- c->dst.val = c->regs[VCPU_REGS_RDX];
do_io_out:
ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
&c->src.val, 1);
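
A toy model of what the new SrcDX/DstDX decode flags buy (the flag value and struct layout are invented for illustration, not the kernel's encoding): the port number in DX is fetched by the generic operand decoder, so the in/out handlers above can drop their hand-written c->regs[VCPU_REGS_RDX] loads.

    #include <stdint.h>
    #include <stdio.h>

    #define SRC_DX (1u << 0)        /* illustrative flag value only */

    struct insn {
        uint32_t flags;
        uint16_t src_val;           /* decoded source operand */
    };

    static void decode_src(struct insn *in, uint64_t rdx)
    {
        if (in->flags & SRC_DX)
            in->src_val = (uint16_t)rdx;    /* DX is the low 16 bits */
    }

    int main(void)
    {
        struct insn out_dx_al = { .flags = SRC_DX };

        decode_src(&out_dx_al, 0x3f8);      /* e.g. a UART data port */
        printf("port = 0x%x\n", out_dx_al.src_val);
        return 0;
    }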
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
- return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+ return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
}
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gva_t addr, u32 access)
{
pt_element_t pte;
- pt_element_t __user *ptep_user;
+ pt_element_t __user *uninitialized_var(ptep_user);
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
struct kvm_vcpu *vcpu)
{
- vmx_decache_cr3(vcpu);
+ if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+ vmx_decache_cr3(vcpu);
if (!(cr0 & X86_CR0_PG)) {
/* From paging/starting to nonpaging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
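
A sketch of the register-availability cache the new guard consults (names and types simplified): "decaching" CR3 forces a re-read from the VMCS, which is wasted work when the regs_avail bit says the cached value is already current.

    #include <stdint.h>

    enum { EXREG_CR3 = 0 };             /* stand-in for VCPU_EXREG_CR3 */

    struct vcpu {
        unsigned long regs_avail;       /* bit set => cached copy is valid */
        uint64_t cr3_cache;
    };

    static uint64_t read_cr3_from_vmcs(void) { return 0xdeadb000; } /* stub */

    static void decache_cr3(struct vcpu *v)
    {
        v->cr3_cache = read_cr3_from_vmcs();    /* expensive VMCS access */
        v->regs_avail |= 1UL << EXREG_CR3;
    }

    static void maybe_decache_cr3(struct vcpu *v)
    {
        /* mirrors the fix: skip the VMCS read if CR3 is already cached */
        if (!(v->regs_avail & (1UL << EXREG_CR3)))
            decache_cr3(v);
    }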
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
#include <linux/range.h>
/* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
struct memblock_region *r;
u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
if (addr >= ei_last)
continue;
*sizep = ei_last - addr;
- while (check_with_memblock_reserved_size(&addr, sizep, align))
+ while (memblock_x86_check_reserved_size(&addr, sizep, align))
;
if (*sizep)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a567fe1e..9cbb710dc94b 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
return 0;
}
+/*
+ * This runs only on the current cpu. We try to find an LVT offset and
+ * setup the local APIC. For this we must disable preemption. On
+ * success we initialize all nodes with this offset. This then updates
+ * the offset in the per-node IBS_CTL msr. The per-core APIC setup of
+ * the IBS interrupt vector is called from op_amd_setup_ctrs() and
+ * op_amd_cpu_shutdown() using the new offset.
+ */
static int force_ibs_eilvt_setup(void)
{
int offset;
int ret;
- /*
- * find the next free available EILVT entry, skip offset 0,
- * pin search to this cpu
- */
preempt_disable();
+ /* find the next free available EILVT entry, skip offset 0 */
for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
if (get_eilvt(offset))
break;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..474356b98ede 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
efi_memory_desc_t *md = p;
- unsigned long long start = md->phys_addr;
- unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 start = md->phys_addr;
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
continue;
-
- memblock_x86_reserve_range(start, start + size, "EFI Boot");
+ /* Only reserve where possible:
+ * - Not within any already allocated areas
+ * - Not over any memory area (really needed, if above?)
+ * - Not within any part of the kernel
+ * - Not the bios reserved area
+ */
+ if ((start+size >= virt_to_phys(_text)
+ && start <= virt_to_phys(_end)) ||
+ !e820_all_mapped(start, start+size, E820_RAM) ||
+ memblock_x86_check_reserved_size(&start, &size,
+ 1<<EFI_PAGE_SHIFT)) {
+ /* Could not reserve, skip it */
+ md->num_pages = 0;
+ memblock_dbg(PFX "Could not reserve boot range "
+ "[0x%010llx-0x%010llx]\n",
+ start, start+size-1);
+ } else
+ memblock_x86_reserve_range(start, start+size,
+ "EFI Boot");
}
}
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
md->type != EFI_BOOT_SERVICES_DATA)
continue;
+ /* Could not reserve boot area */
+ if (!size)
+ continue;
+
free_bootmem_late(start, size);
}
}
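
The reserve/free pair above communicates through md->num_pages: a range that failed any of the safety checks is given size zero, so the later free pass leaves it alone. A compact model of that two-pass mark-and-skip pattern (simplified descriptor and invented names, not the EFI structs):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct desc { uint64_t start, num_pages; };

    static bool safe_to_reserve(const struct desc *d) { (void)d; return true; } /* stub */

    static void reserve_pass(struct desc *map, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (!safe_to_reserve(&map[i]))
                map[i].num_pages = 0;   /* mark: nothing was reserved */
            /* else: reserve the range */
        }
    }

    static void free_pass(struct desc *map, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (!map[i].num_pages)      /* skip what was never reserved */
                continue;
            /* free the range */
        }
    }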
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f2ec7a..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
xen_reboot(SHUTDOWN_poweroff);
}
+static void xen_machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+ xen_reboot(SHUTDOWN_poweroff);
+}
+
static void xen_crash_shutdown(struct pt_regs *regs)
{
xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
static const struct machine_ops xen_machine_ops __initconst = {
.restart = xen_restart,
.halt = xen_machine_halt,
- .power_off = xen_machine_halt,
+ .power_off = xen_machine_power_off,
.shutdown = xen_machine_halt,
.crash_shutdown = xen_crash_shutdown,
.emergency_restart = xen_emergency_restart,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..673e968df3cf 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
+#include <asm/smp.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
{
struct {
struct mmuext_op op;
- DECLARE_BITMAP(mask, NR_CPUS);
+ DECLARE_BITMAP(mask, num_processors);
} *args;
struct multicall_space mcs;
@@ -1599,6 +1600,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pte_t pte;
+#ifdef CONFIG_X86_32
+ if (pfn > max_pfn_mapped)
+ max_pfn_mapped = pfn;
+#endif
+
if (!pte_none(pte_page[pteidx]))
continue;
@@ -1766,7 +1772,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
initial_kernel_pmd =
extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
- max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+ xen_start_info->nr_pt_frames * PAGE_SIZE +
+ 512*1024);
kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
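
The new estimate reads as "everything up to the page tables, the page tables themselves, plus ~512 KB of slack". A worked example with made-up boot values (the constants below are hypothetical, only the arithmetic is from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PFN_DOWN(x) ((x) >> 12)

    int main(void)
    {
        uint64_t pt_base = 0x01000000;  /* hypothetical __pa(pt_base) */
        uint64_t nr_pt_frames = 8;      /* hypothetical page-table pages */

        uint64_t pfn = PFN_DOWN(pt_base + nr_pt_frames * PAGE_SIZE +
                                512 * 1024);
        printf("max_pfn_mapped = %llu\n", (unsigned long long)pfn);
        return 0;
    }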
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 8bff7e7c290b..1b2b73ff0a6e 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
unsigned argidx = roundup(b->argidx, sizeof(u64));
BUG_ON(preemptible());
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
if (b->mcidx == MC_BATCH ||
- (argidx + args) > MC_ARGS) {
+ (argidx + args) >= MC_ARGS) {
mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
xen_mc_flush();
argidx = roundup(b->argidx, sizeof(u64));
@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
ret.args = &b->args[argidx];
b->argidx = argidx + args;
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
return ret;
}
@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
struct multicall_space ret = { NULL, NULL };
BUG_ON(preemptible());
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
if (b->mcidx == 0)
return ret;
@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
if (b->entries[b->mcidx - 1].op != op)
return ret;
- if ((b->argidx + size) > MC_ARGS)
+ if ((b->argidx + size) >= MC_ARGS)
return ret;
ret.mc = &b->entries[b->mcidx - 1];
ret.args = &b->args[b->argidx];
b->argidx += size;
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
return ret;
}
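
Why > became >= throughout: args[] holds MC_ARGS bytes, so an index equal to MC_ARGS already points one past the buffer, and a request that would land there must flush the batch first. A minimal model of the corrected bump allocation (simplified from the kernel's mc_buffer):

    #include <assert.h>
    #include <stddef.h>

    #define MC_ARGS 256                 /* illustrative capacity in bytes */

    struct mc_buffer {
        unsigned char args[MC_ARGS];
        size_t argidx;
    };

    /* Returns space for 'bytes' argument bytes, or NULL when the caller
     * must flush the batch and retry. */
    static void *mc_args_alloc(struct mc_buffer *b, size_t bytes)
    {
        if (b->argidx + bytes >= MC_ARGS)   /* '>' here was the off-by-one */
            return NULL;

        void *p = &b->args[b->argidx];
        b->argidx += bytes;
        assert(b->argidx < MC_ARGS);        /* mirrors BUG_ON(argidx >= MC_ARGS) */
        return p;
    }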
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464f6d66..60aeeb56948f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
-#ifdef CONFIG_X86_32
xen_extra_mem_start = mem_end;
-#else
- xen_extra_mem_start = max((1ULL << 32), mem_end);
-#endif
for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end;
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
if (map[i].size > 0)
e820_add_region(map[i].addr, map[i].size, map[i].type);
}
+ /* Align the balloon area so that max_low_pfn does not get set
+ * to be at the _end_ of the PCI gap at the far end (fee01000).
+ * Note that xen_extra_mem_start gets set in the loop above to be
+ * past the last E820 region. */
+ if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
+ xen_extra_mem_start = (1ULL<<32);
/*
* In domU, the ISA region is normal, usable memory, but we
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c01de40..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
unsigned cpu;
+ unsigned int i;
xen_init_lock_cpu(0);
smp_store_cpu_info(0);
cpu_data(0).x86_max_cores = 1;
+
+ for_each_possible_cpu(i) {
+ zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+ }
set_cpu_sibling_map(0);
if (xen_smp_intr_init(0))