From 331468b11b94428a9eb2ed8b3240c17612533a99 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 1 Dec 2010 15:23:31 -0800
Subject: xen: drop all the special iomap pte paths.

Xen can work out for itself when we're doing IO mappings, so we don't need to do anything special, and the extra tests just clog things up.

Signed-off-by: Jeremy Fitzhardinge
---
 arch/x86/xen/mmu.c | 25 -------------------------
 1 file changed, 25 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0684f3c74d53..4f5e0dc5f6e5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -243,11 +243,6 @@ static bool xen_page_pinned(void *ptr)
 	return PagePinned(page);
 }
 
-static bool xen_iomap_pte(pte_t pte)
-{
-	return pte_flags(pte) & _PAGE_IOMAP;
-}
-
 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 {
 	struct multicall_space mcs;
@@ -266,11 +261,6 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 }
 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
 
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
-{
-	xen_set_domain_pte(ptep, pteval, DOMID_IO);
-}
-
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
 	struct multicall_space mcs;
@@ -347,11 +337,6 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
-	if (xen_iomap_pte(pteval)) {
-		xen_set_iomap_pte(ptep, pteval);
-		goto out;
-	}
-
 	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
 	ADD_STATS(set_pte_at_current, mm == current->mm);
@@ -632,11 +617,6 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
-	if (xen_iomap_pte(pte)) {
-		xen_set_iomap_pte(ptep, pte);
-		return;
-	}
-
 	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
 	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
@@ -653,11 +633,6 @@ void xen_set_pte(pte_t *ptep, pte_t pte)
 #ifdef CONFIG_X86_PAE
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-	if (xen_iomap_pte(pte)) {
-		xen_set_iomap_pte(ptep, pte);
-		return;
-	}
-
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
-- cgit v1.2.3

From a99ac5e8619c27dbb8e7fb5a4e0ca8c8aa214909 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 1 Dec 2010 15:13:34 -0800
Subject: xen: use mmu_update for xen_set_pte_at()

In principle update_va_mapping is a good match for set_pte_at, since it gets the address being mapped, which allows Xen to use its linear pagetable mapping. However that assumes that the pmd for the address is attached to the current pagetable, which may not be true for a given user address space because the kernel pmd is not shared (at least on 32-bit guests). Normally the kernel will transparently sync a missing part of the pagetable from the init_mm pagetable via faults, but that can't happen when the missing address is handed straight to Xen: the hypercall simply fails rather than faulting.

And while the linear pagetable mapping is very useful for 32-bit Xen (as it avoids an explicit domain mapping), 32-bit Xen is deprecated. 64-bit Xen has all memory mapped all the time, so it makes no real difference.

The upshot is that we should use mmu_update, since it can operate on non-current pagetables or detached pagetables.
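As an illustrative sketch (not part of the patch itself): update_va_mapping is keyed by virtual address, so Xen has to reach the pte through the currently installed pagetable, while mmu_update is keyed by the machine address of the pte slot, so the pagetable containing it need not be current at all. The helper name below is hypothetical; it mirrors what the patch does:

	/* hypothetical helper mirroring the mmu_update path added below */
	static void set_pte_via_mmu_update(pte_t *ptep, pte_t pteval)
	{
		struct mmu_update u;

		/* address the pte slot directly, by machine address */
		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);

		if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
			BUG();

		/* by contrast, this needs 'addr' reachable from the current
		 * pagetable:
		 *	HYPERVISOR_update_va_mapping(addr, pteval, 0);
		 */
	}
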
Signed-off-by: Jeremy Fitzhardinge
---
 arch/x86/xen/mmu.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 4f5e0dc5f6e5..fb3e92e077e2 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -342,22 +342,18 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	ADD_STATS(set_pte_at_current, mm == current->mm);
 	ADD_STATS(set_pte_at_kernel, mm == &init_mm);
 
-	if (mm == current->mm || mm == &init_mm) {
-		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-			struct multicall_space mcs;
-			mcs = xen_mc_entry(0);
-
-			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
-			ADD_STATS(set_pte_at_batched, 1);
-			xen_mc_issue(PARAVIRT_LAZY_MMU);
-			goto out;
-		} else
-			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
-				goto out;
-	}
-	xen_set_pte(ptep, pteval);
+	if(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		struct mmu_update u;
+
+		xen_mc_batch();
+
+		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+		u.val = pte_val_ma(pteval);
+		xen_extend_mmu_update(&u);
 
-out:	return;
+		xen_mc_issue(PARAVIRT_LAZY_MMU);
+	} else
+		native_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
-- cgit v1.2.3

From 4a35c13cb808c63dd151bdd507b749e97231ef91 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 1 Dec 2010 15:30:41 -0800
Subject: xen: condense everything onto xen_set_pte

xen_set_pte_at and xen_pte_clear are essentially identical to xen_set_pte, so just make them all common.

When batched, set_pte and pte_clear are the same, but the unbatched operations must differ: they need to update the two halves of the pte in opposite orders.

Signed-off-by: Jeremy Fitzhardinge
---
 arch/x86/xen/mmu.c | 73 ++++++++++++++++++++----------------------------
 1 file changed, 27 insertions(+), 46 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index fb3e92e077e2..11d7ef07d623 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -108,12 +108,6 @@ static struct {
 
 	u32 prot_commit;
 	u32 prot_commit_batched;
-
-	u32 set_pte_at;
-	u32 set_pte_at_batched;
-	u32 set_pte_at_pinned;
-	u32 set_pte_at_current;
-	u32 set_pte_at_kernel;
 } mmu_stats;
 
 static u8 zero_stats;
@@ -334,28 +328,39 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
 }
 
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-		    pte_t *ptep, pte_t pteval)
+static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 {
-	ADD_STATS(set_pte_at, 1);
-//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
-	ADD_STATS(set_pte_at_current, mm == current->mm);
-	ADD_STATS(set_pte_at_kernel, mm == &init_mm);
+	struct mmu_update u;
 
-	if(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		struct mmu_update u;
+	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
+		return false;
 
-		xen_mc_batch();
+	xen_mc_batch();
+
+	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+	u.val = pte_val_ma(pteval);
+	xen_extend_mmu_update(&u);
 
-		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
-		u.val = pte_val_ma(pteval);
-		xen_extend_mmu_update(&u);
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	return true;
+}
+
+void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+	ADD_STATS(pte_update, 1);
+//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
 
-		xen_mc_issue(PARAVIRT_LAZY_MMU);
-	} else
+	if (!xen_batched_set_pte(ptep, pteval))
 		native_set_pte(ptep, pteval);
 }
 
+void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+		    pte_t 
*ptep, pte_t pteval) +{ + xen_set_pte(ptep, pteval); +} + pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -611,21 +616,6 @@ void xen_set_pud(pud_t *ptr, pud_t val) xen_set_pud_hyper(ptr, val); } -void xen_set_pte(pte_t *ptep, pte_t pte) -{ - ADD_STATS(pte_update, 1); -// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep)); - ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); - -#ifdef CONFIG_X86_PAE - ptep->pte_high = pte.pte_high; - smp_wmb(); - ptep->pte_low = pte.pte_low; -#else - *ptep = pte; -#endif -} - #ifdef CONFIG_X86_PAE void xen_set_pte_atomic(pte_t *ptep, pte_t pte) { @@ -634,9 +624,8 @@ void xen_set_pte_atomic(pte_t *ptep, pte_t pte) void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - ptep->pte_low = 0; - smp_wmb(); /* make sure low gets written first */ - ptep->pte_high = 0; + if (!xen_batched_set_pte(ptep, native_make_pte(0))) + native_pte_clear(mm, addr, ptep); } void xen_pmd_clear(pmd_t *pmdp) @@ -2452,14 +2441,6 @@ static int __init xen_mmu_debugfs(void) xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug, mmu_stats.mmu_update_histo, 20); - debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at); - debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug, - &mmu_stats.set_pte_at_batched); - debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug, - &mmu_stats.set_pte_at_current); - debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug, - &mmu_stats.set_pte_at_kernel); - debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit); debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, &mmu_stats.prot_commit_batched); -- cgit v1.2.3 From ef691947d8a3d479e67652312783aedcf629320a Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 1 Dec 2010 15:45:48 -0800 Subject: vmalloc: remove vmalloc_sync_all() from alloc_vm_area() There's no need for it: it will get faulted into the current pagetable as needed. 
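The faulting this relies on is the existing vmalloc fault fixup: when the kernel touches a vmalloc-range address whose top-level entry is missing from the current pagetable, the fault handler copies that entry over from the reference init_mm pagetable. A simplified sketch follows; the real code is the arch fault path (e.g. vmalloc_fault()), and on 32-bit the sync actually happens at the pmd level:

	/* simplified sketch of the lazy sync a vmalloc-range fault performs */
	static void sync_vmalloc_pgd(unsigned long address)
	{
		pgd_t *pgd_ref = pgd_offset_k(address);	/* init_mm's entry */
		pgd_t *pgd = pgd_offset(current->active_mm, address);

		if (pgd_none(*pgd) && !pgd_none(*pgd_ref))
			set_pgd(pgd, *pgd_ref);
	}
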
Signed-off-by: Jeremy Fitzhardinge --- mm/vmalloc.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 5d6030235d7a..fdf4b1e88e53 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2148,10 +2148,6 @@ struct vm_struct *alloc_vm_area(size_t size) return NULL; } - /* Make sure the pagetables are constructed in process kernel - mappings */ - vmalloc_sync_all(); - return area; } EXPORT_SYMBOL_GPL(alloc_vm_area); -- cgit v1.2.3 From 4c13629f816b1aeff92971a40819b4c25b0622f5 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 1 Dec 2010 22:57:39 -0800 Subject: xen: make a pile of mmu pvop functions static Signed-off-by: Jeremy Fitzhardinge --- arch/x86/xen/mmu.c | 46 +++++++++++++++++++++++----------------------- arch/x86/xen/mmu.h | 37 ------------------------------------- 2 files changed, 23 insertions(+), 60 deletions(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 11d7ef07d623..a87b6b4caa74 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -283,7 +283,7 @@ static void xen_extend_mmu_update(const struct mmu_update *update) *u = *update; } -void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) +static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) { struct mmu_update u; @@ -303,7 +303,7 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) preempt_enable(); } -void xen_set_pmd(pmd_t *ptr, pmd_t val) +static void xen_set_pmd(pmd_t *ptr, pmd_t val) { ADD_STATS(pmd_update, 1); @@ -346,7 +346,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) return true; } -void xen_set_pte(pte_t *ptep, pte_t pteval) +static void xen_set_pte(pte_t *ptep, pte_t pteval) { ADD_STATS(pte_update, 1); // ADD_STATS(pte_update_pinned, xen_page_pinned(ptep)); @@ -355,7 +355,7 @@ void xen_set_pte(pte_t *ptep, pte_t pteval) native_set_pte(ptep, pteval); } -void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, +static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { xen_set_pte(ptep, pteval); @@ -449,7 +449,7 @@ static pteval_t iomap_pte(pteval_t val) return val; } -pteval_t xen_pte_val(pte_t pte) +static pteval_t xen_pte_val(pte_t pte) { pteval_t pteval = pte.pte; @@ -466,7 +466,7 @@ pteval_t xen_pte_val(pte_t pte) } PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); -pgdval_t xen_pgd_val(pgd_t pgd) +static pgdval_t xen_pgd_val(pgd_t pgd) { return pte_mfn_to_pfn(pgd.pgd); } @@ -497,7 +497,7 @@ void xen_set_pat(u64 pat) WARN_ON(pat != 0x0007010600070106ull); } -pte_t xen_make_pte(pteval_t pte) +static pte_t xen_make_pte(pteval_t pte) { phys_addr_t addr = (pte & PTE_PFN_MASK); @@ -567,20 +567,20 @@ pte_t xen_make_pte_debug(pteval_t pte) PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); #endif -pgd_t xen_make_pgd(pgdval_t pgd) +static pgd_t xen_make_pgd(pgdval_t pgd) { pgd = pte_pfn_to_mfn(pgd); return native_make_pgd(pgd); } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); -pmdval_t xen_pmd_val(pmd_t pmd) +static pmdval_t xen_pmd_val(pmd_t pmd) { return pte_mfn_to_pfn(pmd.pmd); } PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); -void xen_set_pud_hyper(pud_t *ptr, pud_t val) +static void xen_set_pud_hyper(pud_t *ptr, pud_t val) { struct mmu_update u; @@ -600,7 +600,7 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val) preempt_enable(); } -void xen_set_pud(pud_t *ptr, pud_t val) +static void xen_set_pud(pud_t *ptr, pud_t val) { ADD_STATS(pud_update, 1); @@ -617,24 +617,24 @@ void xen_set_pud(pud_t *ptr, pud_t val) } #ifdef CONFIG_X86_PAE -void xen_set_pte_atomic(pte_t *ptep, pte_t pte) +static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) { 
set_64bit((u64 *)ptep, native_pte_val(pte)); } -void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { if (!xen_batched_set_pte(ptep, native_make_pte(0))) native_pte_clear(mm, addr, ptep); } -void xen_pmd_clear(pmd_t *pmdp) +static void xen_pmd_clear(pmd_t *pmdp) { set_pmd(pmdp, __pmd(0)); } #endif /* CONFIG_X86_PAE */ -pmd_t xen_make_pmd(pmdval_t pmd) +static pmd_t xen_make_pmd(pmdval_t pmd) { pmd = pte_pfn_to_mfn(pmd); return native_make_pmd(pmd); @@ -642,13 +642,13 @@ pmd_t xen_make_pmd(pmdval_t pmd) PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); #if PAGETABLE_LEVELS == 4 -pudval_t xen_pud_val(pud_t pud) +static pudval_t xen_pud_val(pud_t pud) { return pte_mfn_to_pfn(pud.pud); } PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); -pud_t xen_make_pud(pudval_t pud) +static pud_t xen_make_pud(pudval_t pud) { pud = pte_pfn_to_mfn(pud); @@ -656,7 +656,7 @@ pud_t xen_make_pud(pudval_t pud) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); -pgd_t *xen_get_user_pgd(pgd_t *pgd) +static pgd_t *xen_get_user_pgd(pgd_t *pgd) { pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); unsigned offset = pgd - pgd_page; @@ -688,7 +688,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) * 2. It is always pinned * 3. It has no user pagetable attached to it */ -void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) +static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) { preempt_disable(); @@ -701,7 +701,7 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) preempt_enable(); } -void xen_set_pgd(pgd_t *ptr, pgd_t val) +static void xen_set_pgd(pgd_t *ptr, pgd_t val) { pgd_t *user_ptr = xen_get_user_pgd(ptr); @@ -1122,14 +1122,14 @@ void xen_mm_unpin_all(void) spin_unlock(&pgd_lock); } -void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) +static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) { spin_lock(&next->page_table_lock); xen_pgd_pin(next); spin_unlock(&next->page_table_lock); } -void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { spin_lock(&mm->page_table_lock); xen_pgd_pin(mm); @@ -1216,7 +1216,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm) * pagetable because of lazy tlb flushing. This means we need need to * switch all CPUs off this pagetable before we can unpin it. 
*/ -void xen_exit_mmap(struct mm_struct *mm) +static void xen_exit_mmap(struct mm_struct *mm) { get_cpu(); /* make sure we don't move around */ xen_drop_mm_ref(mm); diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h index 537bb9aab777..73809bb951b4 100644 --- a/arch/x86/xen/mmu.h +++ b/arch/x86/xen/mmu.h @@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); - -void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next); -void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm); -void xen_exit_mmap(struct mm_struct *mm); - -pteval_t xen_pte_val(pte_t); -pmdval_t xen_pmd_val(pmd_t); -pgdval_t xen_pgd_val(pgd_t); - -pte_t xen_make_pte(pteval_t); -pmd_t xen_make_pmd(pmdval_t); -pgd_t xen_make_pgd(pgdval_t); - -void xen_set_pte(pte_t *ptep, pte_t pteval); -void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval); - -#ifdef CONFIG_X86_PAE -void xen_set_pte_atomic(pte_t *ptep, pte_t pte); -void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -void xen_pmd_clear(pmd_t *pmdp); -#endif /* CONFIG_X86_PAE */ - -void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval); -void xen_set_pud(pud_t *ptr, pud_t val); -void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval); -void xen_set_pud_hyper(pud_t *ptr, pud_t val); - -#if PAGETABLE_LEVELS == 4 -pudval_t xen_pud_val(pud_t pud); -pud_t xen_make_pud(pudval_t pudval); -void xen_set_pgd(pgd_t *pgdp, pgd_t pgd); -void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd); -#endif - -pgd_t *xen_get_user_pgd(pgd_t *pgd); - pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); -- cgit v1.2.3 From d5108316b894a172f891795dbad4975ab7ed7a41 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 22 Dec 2010 13:09:40 -0800 Subject: xen: use normal virt_to_machine for ptes We no longer support HIGHPTE allocations, so ptes should always be within the kernel's direct map, and don't need pagetable walks to convert to machine addresses. 
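For direct-map addresses virt_to_machine() is pure arithmetic, with no pagetable walk. Roughly, as a simplified sketch of the existing helper (valid only when vaddr really is in the direct map):

	static xmaddr_t virt_to_machine_sketch(void *vaddr)
	{
		/* __pa() is simple arithmetic for direct-map addresses */
		unsigned long pfn = PFN_DOWN(__pa(vaddr));

		/* the p2m lookup turns the pseudo-physical frame into a
		 * machine frame */
		return XMADDR(((phys_addr_t)pfn_to_mfn(pfn) << PAGE_SHIFT) +
			      offset_in_page(vaddr));
	}

arbitrary_virt_to_machine(), by contrast, walks the pagetables via lookup_address(), which is what made it safe for kmapped or ioremapped ptes but costs more.
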
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/xen/mmu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index a87b6b4caa74..1a41e9257076 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -246,7 +246,7 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) u = mcs.args; /* ptep might be kmapped when using 32-bit HIGHPTE */ - u->ptr = arbitrary_virt_to_machine(ptep).maddr; + u->ptr = virt_to_machine(ptep).maddr; u->val = pte_val_ma(pteval); MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); @@ -292,7 +292,7 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) xen_mc_batch(); /* ptr may be ioremapped for 64-bit pagetable setup */ - u.ptr = arbitrary_virt_to_machine(ptr).maddr; + u.ptr = virt_to_machine(ptr).maddr; u.val = pmd_val_ma(val); xen_extend_mmu_update(&u); @@ -375,7 +375,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, xen_mc_batch(); - u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; + u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; u.val = pte_val_ma(pte); xen_extend_mmu_update(&u); @@ -589,7 +589,7 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val) xen_mc_batch(); /* ptr may be ioremapped for 64-bit pagetable setup */ - u.ptr = arbitrary_virt_to_machine(ptr).maddr; + u.ptr = virt_to_machine(ptr).maddr; u.val = pud_val_ma(val); xen_extend_mmu_update(&u); @@ -2331,7 +2331,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, struct remap_data *rmd = data; pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); - rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr; + rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; rmd->mmu_update->val = pte_val_ma(pte); rmd->mmu_update++; -- cgit v1.2.3 From c86d8077b3ec048e42e26372b02dae26b38b0d6b Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 16 Dec 2010 15:50:17 -0800 Subject: xen/mmu: remove all ad-hoc stats stuff To make way for tracing. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/xen/mmu.c | 138 ----------------------------------------------------- 1 file changed, 138 deletions(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 1a41e9257076..eb6d83a458c9 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -75,61 +75,12 @@ #include "mmu.h" #include "debugfs.h" -#define MMU_UPDATE_HISTO 30 - /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and balloon lists. */ DEFINE_SPINLOCK(xen_reservation_lock); -#ifdef CONFIG_XEN_DEBUG_FS - -static struct { - u32 pgd_update; - u32 pgd_update_pinned; - u32 pgd_update_batched; - - u32 pud_update; - u32 pud_update_pinned; - u32 pud_update_batched; - - u32 pmd_update; - u32 pmd_update_pinned; - u32 pmd_update_batched; - - u32 pte_update; - u32 pte_update_pinned; - u32 pte_update_batched; - - u32 mmu_update; - u32 mmu_update_extended; - u32 mmu_update_histo[MMU_UPDATE_HISTO]; - - u32 prot_commit; - u32 prot_commit_batched; -} mmu_stats; - -static u8 zero_stats; - -static inline void check_zero(void) -{ - if (unlikely(zero_stats)) { - memset(&mmu_stats, 0, sizeof(mmu_stats)); - zero_stats = 0; - } -} - -#define ADD_STATS(elem, val) \ - do { check_zero(); mmu_stats.elem += (val); } while(0) - -#else /* !CONFIG_XEN_DEBUG_FS */ - -#define ADD_STATS(elem, val) do { (void)(val); } while(0) - -#endif /* CONFIG_XEN_DEBUG_FS */ - - /* * Identity map, in addition to plain kernel map. 
This needs to be * large enough to allocate page table pages to allocate the rest. @@ -263,20 +214,10 @@ static void xen_extend_mmu_update(const struct mmu_update *update) mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); if (mcs.mc != NULL) { - ADD_STATS(mmu_update_extended, 1); - ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1); - mcs.mc->args[1]++; - - if (mcs.mc->args[1] < MMU_UPDATE_HISTO) - ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1); - else - ADD_STATS(mmu_update_histo[0], 1); } else { - ADD_STATS(mmu_update, 1); mcs = __xen_mc_entry(sizeof(*u)); MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); - ADD_STATS(mmu_update_histo[1], 1); } u = mcs.args; @@ -296,8 +237,6 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) u.val = pmd_val_ma(val); xen_extend_mmu_update(&u); - ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); - xen_mc_issue(PARAVIRT_LAZY_MMU); preempt_enable(); @@ -305,8 +244,6 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) static void xen_set_pmd(pmd_t *ptr, pmd_t val) { - ADD_STATS(pmd_update, 1); - /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { @@ -314,8 +251,6 @@ static void xen_set_pmd(pmd_t *ptr, pmd_t val) return; } - ADD_STATS(pmd_update_pinned, 1); - xen_set_pmd_hyper(ptr, val); } @@ -348,9 +283,6 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) static void xen_set_pte(pte_t *ptep, pte_t pteval) { - ADD_STATS(pte_update, 1); -// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep)); - if (!xen_batched_set_pte(ptep, pteval)) native_set_pte(ptep, pteval); } @@ -379,9 +311,6 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, u.val = pte_val_ma(pte); xen_extend_mmu_update(&u); - ADD_STATS(prot_commit, 1); - ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); - xen_mc_issue(PARAVIRT_LAZY_MMU); } @@ -593,8 +522,6 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val) u.val = pud_val_ma(val); xen_extend_mmu_update(&u); - ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); - xen_mc_issue(PARAVIRT_LAZY_MMU); preempt_enable(); @@ -602,8 +529,6 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val) static void xen_set_pud(pud_t *ptr, pud_t val) { - ADD_STATS(pud_update, 1); - /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { @@ -611,8 +536,6 @@ static void xen_set_pud(pud_t *ptr, pud_t val) return; } - ADD_STATS(pud_update_pinned, 1); - xen_set_pud_hyper(ptr, val); } @@ -705,8 +628,6 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val) { pgd_t *user_ptr = xen_get_user_pgd(ptr); - ADD_STATS(pgd_update, 1); - /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { @@ -718,9 +639,6 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val) return; } - ADD_STATS(pgd_update_pinned, 1); - ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); - /* If it's pinned, then we can at least batch the kernel and user updates together. 
*/ xen_mc_batch(); @@ -2384,8 +2302,6 @@ out: } EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); -#ifdef CONFIG_XEN_DEBUG_FS - static int p2m_dump_open(struct inode *inode, struct file *filp) { return single_open(filp, p2m_dump_show, NULL); @@ -2397,57 +2313,3 @@ static const struct file_operations p2m_dump_fops = { .llseek = seq_lseek, .release = single_release, }; - -static struct dentry *d_mmu_debug; - -static int __init xen_mmu_debugfs(void) -{ - struct dentry *d_xen = xen_init_debugfs(); - - if (d_xen == NULL) - return -ENOMEM; - - d_mmu_debug = debugfs_create_dir("mmu", d_xen); - - debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats); - - debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update); - debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug, - &mmu_stats.pgd_update_pinned); - debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug, - &mmu_stats.pgd_update_pinned); - - debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update); - debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug, - &mmu_stats.pud_update_pinned); - debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug, - &mmu_stats.pud_update_pinned); - - debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update); - debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug, - &mmu_stats.pmd_update_pinned); - debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug, - &mmu_stats.pmd_update_pinned); - - debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update); -// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug, -// &mmu_stats.pte_update_pinned); - debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug, - &mmu_stats.pte_update_pinned); - - debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update); - debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug, - &mmu_stats.mmu_update_extended); - xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug, - mmu_stats.mmu_update_histo, 20); - - debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit); - debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, - &mmu_stats.prot_commit_batched); - - debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); - return 0; -} -fs_initcall(xen_mmu_debugfs); - -#endif /* CONFIG_XEN_DEBUG_FS */ -- cgit v1.2.3 From f05608d278633988db39058a8649fe90e30e6194 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 24 Mar 2011 14:26:51 +0000 Subject: Use arbitrary_virt_to_machine() to deal with ioremapped pmd updates. Signed-off-by: Jeremy Fitzhardinge --- arch/x86/xen/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index eb6d83a458c9..c03101e47425 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -233,7 +233,7 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) xen_mc_batch(); /* ptr may be ioremapped for 64-bit pagetable setup */ - u.ptr = virt_to_machine(ptr).maddr; + u.ptr = arbitrary_virt_to_machine(ptr).maddr; u.val = pmd_val_ma(val); xen_extend_mmu_update(&u); -- cgit v1.2.3 From 2a001f6482643239b8a069df5c2bdb2082c98be4 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 6 Apr 2011 16:43:33 -0700 Subject: Use arbitrary_virt_to_machine() to deal with ioremapped pud updates. 
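For context, the walk that makes arbitrary_virt_to_machine() safe for ioremapped addresses looks roughly like the sketch below (modelled on the actual helper; the function name here is illustrative):

	/* reads the mfn out of the live pte via a pagetable walk, rather
	 * than assuming a direct-map linear relationship, so ioremapped
	 * addresses work too */
	static xmaddr_t walk_virt_to_machine(void *vaddr)
	{
		unsigned long address = (unsigned long)vaddr;
		unsigned long offset = address & ~PAGE_MASK;
		unsigned int level;
		pte_t *pte = lookup_address(address, &level);

		BUG_ON(pte == NULL);

		return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) +
			      offset);
	}
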
Signed-off-by: Jeremy Fitzhardinge --- arch/x86/xen/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c03101e47425..083b835d2dd0 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -518,7 +518,7 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val) xen_mc_batch(); /* ptr may be ioremapped for 64-bit pagetable setup */ - u.ptr = virt_to_machine(ptr).maddr; + u.ptr = arbitrary_virt_to_machine(ptr).maddr; u.val = pud_val_ma(val); xen_extend_mmu_update(&u); -- cgit v1.2.3 From 4bf0ff24e371ce71521ccb21513203facfd8491f Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 20 May 2011 16:34:44 -0700 Subject: xen: fix compile without CONFIG_XEN_DEBUG_FS Signed-off-by: Jeremy Fitzhardinge --- arch/x86/xen/mmu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 083b835d2dd0..285335448143 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -2302,6 +2302,7 @@ out: } EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); +#ifdef CONFIG_XEN_DEBUG_FS static int p2m_dump_open(struct inode *inode, struct file *filp) { return single_open(filp, p2m_dump_show, NULL); @@ -2313,3 +2314,4 @@ static const struct file_operations p2m_dump_fops = { .llseek = seq_lseek, .release = single_release, }; +#endif /* CONFIG_XEN_DEBUG_FS */ -- cgit v1.2.3