author     Jeff Dike <jdike@addtoit.com>          2005-09-03 15:57:52 -0700
committer  Linus Torvalds <torvalds@evo.osdl.org> 2005-09-05 00:06:24 -0700
commit     7ef939054139ef857cebbec07cbd12d7cf7beedd
tree       c6e1c0722336b6155b0f7cf985b2ea4eb9c5d9e1 /arch
parent     f9dfefe423a7633d81310c7b06c5566c74f9167b
[PATCH] uml: fix x86_64 page leak
We were leaking pmd pages when 3_LEVEL_PGTABLES was enabled.  This fixes that.

Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
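The leak is easy to model outside the kernel: with three-level page tables, mapping the stub page allocates a mid-level (pmd) page on the way down to the leaf page table, and a teardown that frees only the leaf page table loses that pmd page. The sketch below is a hypothetical userspace analogy, not UML code; names such as struct table_ctx, ctx_init() and ctx_destroy_*() are invented for illustration.

/*
 * Hypothetical userspace analogy of the pmd leak fixed by this patch.
 * The names (struct table_ctx, ctx_init, ctx_destroy_*) are invented
 * for illustration; they are not UML or kernel interfaces.
 */
#include <stdlib.h>

struct table_ctx {
	unsigned long *pmd;		/* mid-level table page (the one that leaked) */
	unsigned long *page_table;	/* leaf page table page */
};

static int ctx_init(struct table_ctx *ctx)
{
	/* Walking down to map a single page allocates both levels. */
	ctx->pmd = calloc(512, sizeof(unsigned long));
	ctx->page_table = calloc(512, sizeof(unsigned long));
	if (!ctx->pmd || !ctx->page_table) {
		free(ctx->pmd);
		free(ctx->page_table);
		return -1;
	}
	return 0;
}

static void ctx_destroy_leaky(struct table_ctx *ctx)
{
	/* Old behaviour: only the leaf page table is released;
	 * the mid-level (pmd) page is never freed. */
	free(ctx->page_table);
}

static void ctx_destroy_fixed(struct table_ctx *ctx)
{
	/* Fixed behaviour: remember the pmd at init time and
	 * release it along with the leaf page table. */
	free(ctx->page_table);
	free(ctx->pmd);
}

int main(void)
{
	struct table_ctx a, b;

	if (ctx_init(&a) == 0)
		ctx_destroy_fixed(&a);	/* frees both levels */

	if (ctx_init(&b) == 0)
		ctx_destroy_leaky(&b);	/* leaks b.pmd, as the old code did */

	return 0;
}

The patch below applies the same idea in destroy_context_skas(): record the pmd's kernel address in last_pmd when the stub pte is set up, then free it alongside the last page table when the context is torn down.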
Diffstat (limited to 'arch')
-rw-r--r--  arch/um/kernel/skas/include/mmu-skas.h  4
-rw-r--r--  arch/um/kernel/skas/mmu.c               9
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
index 278b72f1d9ad..09536f81ee42 100644
--- a/arch/um/kernel/skas/include/mmu-skas.h
+++ b/arch/um/kernel/skas/include/mmu-skas.h
@@ -6,11 +6,15 @@
 #ifndef __SKAS_MMU_H
 #define __SKAS_MMU_H
 
+#include "linux/config.h"
 #include "mm_id.h"
 
 struct mmu_context_skas {
 	struct mm_id id;
 	unsigned long last_page_table;
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	unsigned long last_pmd;
+#endif
 };
 
 extern void switch_mm_skas(struct mm_id * mm_idp);
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d837223e22af..240143b616a2 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -56,6 +56,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	 */
 	mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
+#endif
 
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkexec(*pte);
@@ -144,6 +147,10 @@ void destroy_context_skas(struct mm_struct *mm)
 
 	if(!proc_mm || !ptrace_faultinfo){
 		free_page(mmu->id.stack);
-		free_page(mmu->last_page_table);
+		pte_free_kernel((pte_t *) mmu->last_page_table);
+		dec_page_state(nr_page_table_pages);
+#ifdef CONFIG_3_LEVEL_PGTABLES
+		pmd_free((pmd_t *) mmu->last_pmd);
+#endif
 	}
 }