author    James Hogan <james.hogan@imgtec.com>  2016-11-28 23:04:52 +0000
committer James Hogan <james.hogan@imgtec.com>  2017-02-03 15:21:11 +0000
commit    1880afd6057f34586919715e8ffe9c5858f4a326
tree      25fff058093cb8d7efddefe0b31558b15c7f3374 /arch/mips/kvm
parent    91737ea205856c41183c2530fdb6b407ceeb3836
KVM: MIPS/T&E: Add lockless GVA access helpers
Add helpers to allow for lockless direct access to the GVA space, by
changing the VCPU mode to READING_SHADOW_PAGE_TABLES for the duration of
the access. This allows asynchronous TLB flush requests in future patches
to safely trigger either a TLB flush before the direct GVA space access,
or a delay until the in-progress lockless direct access is complete.

The kvm_trap_emul_gva_lockless_begin() and kvm_trap_emul_gva_lockless_end()
helpers take care of guarding the direct GVA accesses, and
kvm_trap_emul_gva_fault() tries to handle a uaccess fault resulting from a
flush having taken place.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
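As an illustration of the intended calling pattern, here is a minimal
sketch of a caller reading one word from the GVA space. The helper name
kvm_gva_read_word_sketch() and the retry policy are assumptions made for
illustration; only kvm_trap_emul_gva_lockless_begin()/_end() and
kvm_trap_emul_gva_fault() are added by this patch.

#include <linux/kvm_host.h>
#include <linux/uaccess.h>

/* Illustrative sketch only; the helper name and retry policy are assumed. */
static int kvm_gva_read_word_sketch(struct kvm_vcpu *vcpu, unsigned long gva,
				    u32 *out)
{
	int err;

retry:
	/* Guard the direct GVA access against asynchronous TLB flushes. */
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, (u32 __user *)gva);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * The uaccess may have faulted because a TLB flush removed
		 * the GVA mapping; try to map it back in and retry.
		 */
		if (kvm_trap_emul_gva_fault(vcpu, gva, false) !=
		    KVM_MIPS_MAPPED)
			return -EFAULT;
		goto retry;
	}
	return 0;
}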
Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/mmu.c        | 51
-rw-r--r--  arch/mips/kvm/trap_emul.c  | 65
2 files changed, 116 insertions(+), 0 deletions(-)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index e41ee36dd626..32c317de6c0a 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -732,6 +732,57 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
	local_irq_restore(flags);
}
+/**
+ * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
+ * @vcpu:	Virtual CPU.
+ * @gva:	Guest virtual address to be accessed.
+ * @write:	True if write attempted (must be dirtied and made writable).
+ *
+ * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
+ * dirtying the page if @write so that guest instructions can be modified.
+ *
+ * Returns:	KVM_MIPS_MAPPED on success.
+ *		KVM_MIPS_GVA if bad guest virtual address.
+ *		KVM_MIPS_GPA if bad guest physical address.
+ *		KVM_MIPS_TLB if guest TLB not present.
+ *		KVM_MIPS_TLBINV if guest TLB present but not valid.
+ *		KVM_MIPS_TLBMOD if guest TLB read only.
+ */
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+						   unsigned long gva,
+						   bool write)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_mips_tlb *tlb;
+	int index;
+
+	if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
+		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu) < 0)
+			return KVM_MIPS_GPA;
+	} else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
+		   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
+		/* Address should be in the guest TLB */
+		index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
+				(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
+		if (index < 0)
+			return KVM_MIPS_TLB;
+		tlb = &vcpu->arch.guest_tlb[index];
+
+		/* Entry should be valid, and dirty for writes */
+		if (!TLB_IS_VALID(*tlb, gva))
+			return KVM_MIPS_TLBINV;
+		if (write && !TLB_IS_DIRTY(*tlb, gva))
+			return KVM_MIPS_TLBMOD;
+
+		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva))
+			return KVM_MIPS_GPA;
+	} else {
+		return KVM_MIPS_GVA;
+	}
+
+	return KVM_MIPS_MAPPED;
+}
+
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index edda8f039026..e20369d45f24 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -786,6 +786,71 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
	}
}
+/**
+ * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
+ * @vcpu:	VCPU pointer.
+ *
+ * Call before a GVA space access outside of guest mode, to ensure that
+ * asynchronous TLB flush requests are handled or delayed until completion of
+ * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
+ *
+ * Should be called with IRQs already enabled.
+ */
+void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
+{
+	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
+	WARN_ON_ONCE(irqs_disabled());
+
+	/*
+	 * The caller is about to access the GVA space, so we set the mode to
+	 * force TLB flush requests to send an IPI, and also disable IRQs to
+	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
+	 */
+	local_irq_disable();
+
+	/*
+	 * Make sure the read of VCPU requests is not reordered ahead of the
+	 * write to vcpu->mode, or we could miss a TLB flush request while
+	 * the requester sees the VCPU as outside of guest mode and not needing
+	 * an IPI.
+	 */
+	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+
+	/*
+	 * If a TLB flush has been requested (potentially while
+	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
+	 * before accessing the GVA space, and be sure to reload the ASID if
+	 * necessary as it'll be immediately used.
+	 *
+	 * TLB flush requests after this check will trigger an IPI due to the
+	 * mode change above, which will be delayed due to IRQs disabled.
+	 */
+	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
+}
+
+/**
+ * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
+ * @vcpu:	VCPU pointer.
+ *
+ * Called after a GVA space access outside of guest mode. Should have a matching
+ * call to kvm_trap_emul_gva_lockless_begin().
+ */
+void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Make sure the write to vcpu->mode is not reordered in front of GVA
+	 * accesses, or a TLB flush requester may not think it necessary to send
+	 * an IPI.
+	 */
+	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+
+	/*
+	 * Now that the access to GVA space is complete, it's safe for pending
+	 * TLB flush request IPIs to be handled (which indicates completion).
+	 */
+	local_irq_enable();
+}
+
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
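The comments in these helpers describe one side of a cross-CPU handshake.
For illustration, here is a sketch of the other side, a TLB flush
requester, assuming the generic KVM vcpu->mode/kick pattern; the function
below is hypothetical and not part of this patch.

#include <linux/kvm_host.h>

/* Hypothetical requester-side sketch; not part of this patch. */
static void kvm_sketch_request_gva_flush(struct kvm_vcpu *vcpu)
{
	/* Publish the flush request before reading vcpu->mode... */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	/*
	 * ...with a full barrier pairing with the smp_store_mb() in
	 * kvm_trap_emul_gva_lockless_begin(). Either the VCPU sees the
	 * request in kvm_trap_emul_check_requests(), or we see the mode
	 * change and kick it; the resulting IPI is serviced only after
	 * local_irq_enable() in kvm_trap_emul_gva_lockless_end(), i.e.
	 * after the lockless GVA access has completed.
	 */
	smp_mb();
	if (READ_ONCE(vcpu->mode) == READING_SHADOW_PAGE_TABLES)
		kvm_vcpu_kick(vcpu);
}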