author    Kumar Gala <galak@kernel.crashing.org>    2008-04-16 05:52:22 +1000
committer Paul Mackerras <paulus@samba.org>         2008-04-17 07:46:13 +1000
commit    d7917ba7051e3fd12ebe2d5a09b29fb3a2b38190 (patch)
tree      339d273aee5eb2cb83e226945f0319b51f4bdf3c /arch/powerpc/mm
parent    99c62dd773797b68f3b1ca6bb3274725d1852fa2 (diff)
[POWERPC] Introduce lowmem_end_addr to distinguish from total_lowmem
total_lowmem represents the amount of low memory, not the physical address that low memory ends at. If the start of memory is at 0, it happens that total_lowmem can be used as both the size and the address that lowmem ends at (or, more specifically, one byte beyond the end).

To make the code a bit more clear, and to deal with the case when the start of memory isn't at physical 0, we introduce lowmem_end_addr, which represents one byte beyond the last physical address in the lowmem region.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
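To illustrate the distinction, here is a minimal standalone sketch (not code from this patch; the variable names mirror the kernel ones, but the program and its sample memory-start value are hypothetical):

#include <stdio.h>

typedef unsigned long long phys_addr_t;

int main(void)
{
	/* Hypothetical board: 256MB of lowmem starting at a non-zero
	 * physical address (values made up for illustration). */
	phys_addr_t memstart_addr = 0x20000000ULL;  /* first physical address of RAM */
	phys_addr_t total_lowmem  = 0x10000000ULL;  /* size of the lowmem region     */

	/* One byte beyond the last physical address in the lowmem region. */
	phys_addr_t lowmem_end_addr = memstart_addr + total_lowmem;

	/* When memstart_addr == 0 the two values coincide, which is why the
	 * old code could pass total_lowmem where an end address was expected;
	 * with a non-zero start of memory they differ by memstart_addr. */
	printf("total_lowmem    = 0x%llx (a size)\n",
	       (unsigned long long)total_lowmem);
	printf("lowmem_end_addr = 0x%llx (an end address)\n",
	       (unsigned long long)lowmem_end_addr);

	return 0;
}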
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/44x_mmu.c     2
-rw-r--r--   arch/powerpc/mm/init_32.c     4
-rw-r--r--   arch/powerpc/mm/init_64.c     2
-rw-r--r--   arch/powerpc/mm/mem.c        16
-rw-r--r--   arch/powerpc/mm/mmu_decl.h    1
5 files changed, 16 insertions, 9 deletions
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 04dc08798d3d..953fb919eb06 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -67,7 +67,7 @@ unsigned long __init mmu_mapin_ram(void)
 	/* Pin in enough TLBs to cover any lowmem not covered by the
 	 * initial 256M mapping established in head_44x.S */
-	for (addr = PPC_PIN_SIZE; addr < total_lowmem;
+	for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
 	     addr += PPC_PIN_SIZE)
 		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 1d7e5b8ade6a..ba61d088d2a2 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -146,6 +146,7 @@ void __init MMU_init(void)
 	}
 	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+	lowmem_end_addr = memstart_addr + total_lowmem;
 #ifdef CONFIG_FSL_BOOKE
 	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
@@ -156,9 +157,10 @@ void __init MMU_init(void)
 	if (total_lowmem > __max_low_memory) {
 		total_lowmem = __max_low_memory;
+		lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
 		total_memory = total_lowmem;
-		lmb_enforce_memory_limit(total_lowmem);
+		lmb_enforce_memory_limit(lowmem_end_addr);
 		lmb_analyze();
 #endif /* CONFIG_HIGHMEM */
 	}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 5f553991698b..9ea65d978685 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -75,6 +75,8 @@
 /* max amount of RAM to use */
 unsigned long __max_memory;
+phys_addr_t memstart_addr;
+
 void free_initmem(void)
 {
 	unsigned long addr;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e3349ead3959..16def4dcff6d 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -216,9 +216,11 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
-	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
+	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
 #endif
 	/*
@@ -244,18 +246,18 @@ void __init do_init_bootmem(void)
 	 * present.
 	 */
 #ifdef CONFIG_HIGHMEM
-	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
+	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 	/* reserve the sections we're already using */
 	for (i = 0; i < lmb.reserved.cnt; i++) {
 		unsigned long addr = lmb.reserved.region[i].base +
 				     lmb_size_bytes(&lmb.reserved, i) - 1;
-		if (addr < total_lowmem)
+		if (addr < lowmem_end_addr)
 			reserve_bootmem(lmb.reserved.region[i].base,
 					lmb_size_bytes(&lmb.reserved, i),
 					BOOTMEM_DEFAULT);
-		else if (lmb.reserved.region[i].base < total_lowmem) {
-			unsigned long adjusted_size = total_lowmem -
+		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+			unsigned long adjusted_size = lowmem_end_addr -
 				lmb.reserved.region[i].base;
 			reserve_bootmem(lmb.reserved.region[i].base,
 					adjusted_size, BOOTMEM_DEFAULT);
@@ -325,7 +327,7 @@ void __init paging_init(void)
 	       (top_of_ram - total_ram) >> 20);
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_HIGHMEM
-	max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
 	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
 #else
 	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
@@ -380,7 +382,7 @@ void __init mem_init(void)
 	{
 		unsigned long pfn, highmem_mapnr;
-		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 			struct page *page = pfn_to_page(pfn);
 			if (lmb_is_reserved(pfn << PAGE_SHIFT))
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 5bc11f5933a9..67477e772958 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -52,6 +52,7 @@ extern unsigned long __initial_memory_limit;
 extern unsigned long total_memory;
 extern unsigned long total_lowmem;
 extern phys_addr_t memstart_addr;
+extern phys_addr_t lowmem_end_addr;
 /* ...and now those things that may be slightly different between processor
  * architectures. -- Dan