author     Russell King <rmk@dyn-67.arm.linux.org.uk>     2009-09-07 15:06:42 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>     2009-09-12 11:48:09 +0100
commit     b7cfda9fc3d7aa60cffab5367f2a72a4a70060cd (patch)
tree       a52c6cdbbc7973a4904ec499a607732832e4ca38 /arch/arm/include/asm/memory.h
parent     5eb38f44839ec1aade167b125d86d265c886c195 (diff)
ARM: Fix pfn_valid() for sparse memory
On OMAP platforms, some people want to segment the memory between the kernel and a separate application, such that there is a hole in the middle of the memory as far as Linux is concerned. However, they want to be able to mmap() the hole.

This currently causes problems, because update_mmu_cache() thinks that there are valid struct pages for the "hole". Fix this by making pfn_valid() slightly more expensive: check whether the PFN is contained within the meminfo array.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Khasim Syed Mohammed <khasim@ti.com>
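The replacement check itself is not part of this header diff (the header only loses its old definitions); the commit message describes it as a lookup against the meminfo bank array. Below is a minimal, stand-alone C sketch of that idea, assuming a simplified bank layout -- the struct names, fields and example PFNs are illustrative assumptions, not the kernel's actual code.

/*
 * Stand-alone model of the check described above -- an illustrative sketch,
 * not the kernel's implementation (which lives outside this header).
 * The bank layout loosely mirrors ARM's struct meminfo/struct membank of
 * that era; all names and values here are assumptions for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct membank {
	unsigned long start_pfn;   /* first PFN covered by this bank */
	unsigned long nr_pages;    /* number of pages in this bank   */
};

struct meminfo {
	unsigned int nr_banks;
	struct membank bank[4];
};

/*
 * A PFN is valid only if it falls inside one of the registered banks,
 * so a hole punched between banks is reported as invalid.
 */
static bool model_pfn_valid(const struct meminfo *mi, unsigned long pfn)
{
	unsigned int i;

	for (i = 0; i < mi->nr_banks; i++) {
		const struct membank *b = &mi->bank[i];

		if (pfn >= b->start_pfn && pfn < b->start_pfn + b->nr_pages)
			return true;
	}
	return false;
}

int main(void)
{
	/* Two banks with a hole between them (PFNs 0x90000-0x9ffff unbacked). */
	struct meminfo mi = {
		.nr_banks = 2,
		.bank = {
			{ .start_pfn = 0x80000, .nr_pages = 0x10000 },
			{ .start_pfn = 0xa0000, .nr_pages = 0x10000 },
		},
	};
	unsigned long pfns[] = { 0x85000, 0x95000, 0xa5000 };
	unsigned int i;

	for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++) {
		/*
		 * Caller pattern the commit message alludes to: code such as
		 * update_mmu_cache() must skip PFNs that are not backed by a
		 * struct page rather than dereference one for the hole.
		 */
		if (!model_pfn_valid(&mi, pfns[i])) {
			printf("pfn 0x%lx: in the hole, skip\n", pfns[i]);
			continue;
		}
		printf("pfn 0x%lx: backed by a bank, safe to use\n", pfns[i]);
	}
	return 0;
}

The point of the change is visible in the removed flat-memory macro below: a check against the single range [PHYS_PFN_OFFSET, PHYS_PFN_OFFSET + max_mapnr) cannot express a hole in the middle of memory, whereas a per-bank lookup can.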
Diffstat (limited to 'arch/arm/include/asm/memory.h')
-rw-r--r--    arch/arm/include/asm/memory.h    17
1 file changed, 0 insertions, 17 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 85763db87449..b3b54c6216a4 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -212,7 +212,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
*
* page_to_pfn(page) convert a struct page * to a PFN number
* pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
- * pfn_valid(pfn) indicates whether a PFN number is valid
*
* virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid
@@ -221,10 +220,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
-#ifndef CONFIG_SPARSEMEM
-#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
-#endif
-
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
@@ -241,18 +236,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define arch_pfn_to_nid(pfn) PFN_TO_NID(pfn)
#define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)
-#define pfn_valid(pfn) \
- ({ \
- unsigned int nid = PFN_TO_NID(pfn); \
- int valid = nid < MAX_NUMNODES; \
- if (valid) { \
- pg_data_t *node = NODE_DATA(nid); \
- valid = (pfn - node->node_start_pfn) < \
- node->node_spanned_pages; \
- } \
- valid; \
- })
-
#define virt_to_page(kaddr) \
(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))