Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/Makefile  |   1
-rw-r--r--  arch/arc/mm/cache.c   |  91
-rw-r--r--  arch/arc/mm/fault.c   |  13
-rw-r--r--  arch/arc/mm/highmem.c | 140
-rw-r--r--  arch/arc/mm/init.c    | 104
-rw-r--r--  arch/arc/mm/tlb.c     | 234
-rw-r--r--  arch/arc/mm/tlbex.S   |  51
7 files changed, 521 insertions, 113 deletions
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 7beb941556c3..3703a4969349 100644
--- a/arch/arc/mm/Makefile
+++ b/arch/arc/mm/Makefile
@@ -8,3 +8,4 @@
obj-y := extable.o ioremap.o dma.o fault.o init.o
obj-y += tlb.o tlbex.o cache.o mmap.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 0d1a6e96839f..ff7ff6cbb811 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -25,7 +25,7 @@ static int l2_line_sz;
int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;
-void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
+void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int cacheop);
void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
@@ -37,7 +37,6 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
int n = 0;
struct cpuinfo_arc_cache *p;
-#define IS_USED_RUN(v) ((v) ? "" : "(disabled) ")
#define PR_CACHE(p, cfg, str) \
if (!(p)->ver) \
n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
@@ -47,7 +46,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
(p)->sz_k, (p)->assoc, (p)->line_len, \
(p)->vipt ? "VIPT" : "PIPT", \
(p)->alias ? " aliasing" : "", \
- IS_ENABLED(cfg) ? "" : " (not used)");
+ IS_USED_CFG(cfg));
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
@@ -63,7 +62,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
if (ioc_exists)
n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
- IS_USED_RUN(ioc_enable));
+ IS_DISABLED_RUN(ioc_enable));
return buf;
}
@@ -217,7 +216,7 @@ slc_chk:
*/
static inline
-void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
unsigned int aux_cmd;
@@ -254,8 +253,12 @@ void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
}
}
+/*
+ * For ARC700 MMUv3 I-cache and D-cache flushes
+ * Also reused for HS38 aliasing I-cache configuration
+ */
static inline
-void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
unsigned int aux_cmd, aux_tag;
@@ -290,6 +293,16 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
if (full_page)
write_aux_reg(aux_tag, paddr);
+ /*
+ * This is technically for MMU v4, using the MMU v3 programming model
+ * Special work for HS38 aliasing I-cache configuration with PAE40
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and need to be written before the lower 32 bits)
+ * Note that PTAG_HI is hoisted outside the line loop
+ */
+ if (is_pae40_enabled() && op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+
while (num_lines-- > 0) {
if (!full_page) {
write_aux_reg(aux_tag, paddr);
@@ -302,14 +315,20 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
}
/*
- * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
- * maintenance ops (in IVIL reg), as long as icache doesn't alias.
+ * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
+ * Here's how cache ops are implemented
+ *
+ * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
+ * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
+ * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
+ * respectively, similar to MMU v3 programming model, hence
+ * __cache_line_loop_v3() is used)
*
- * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
- * specified in PTAG (similar to MMU v3)
+ * If PAE40 is enabled, independent of aliasing considerations, the higher bits
+ * need to be written into PTAG_HI
*/
static inline
-void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int cacheop)
{
unsigned int aux_cmd;
@@ -336,6 +355,22 @@ void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+ /*
+ * For HS38 PAE40 configuration
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and need to be written before the lower 32 bits)
+ */
+ if (is_pae40_enabled()) {
+ if (cacheop == OP_INV_IC)
+ /*
+ * Non aliasing I-cache in HS38,
+ * aliasing I-cache handled in __cache_line_loop_v3()
+ */
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
while (num_lines-- > 0) {
write_aux_reg(aux_cmd, paddr);
paddr += L1_CACHE_BYTES;
@@ -413,7 +448,7 @@ static inline void __dc_entire_op(const int op)
/*
* D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
*/
-static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
unsigned long flags;
@@ -446,7 +481,7 @@ static inline void __ic_entire_inv(void)
}
static inline void
-__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
+__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
unsigned long flags;
@@ -463,7 +498,7 @@ __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
#else
struct ic_inv_args {
- unsigned long paddr, vaddr;
+ phys_addr_t paddr, vaddr;
int sz;
};
@@ -474,7 +509,7 @@ static void __ic_line_inv_vaddr_helper(void *info)
__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
struct ic_inv_args ic_inv = {
@@ -495,7 +530,7 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
#endif /* CONFIG_ARC_HAS_ICACHE */
-noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
+noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -585,7 +620,7 @@ void flush_dcache_page(struct page *page)
} else if (page_mapped(page)) {
/* kernel reading from page with U-mapping */
- unsigned long paddr = (unsigned long)page_address(page);
+ phys_addr_t paddr = (unsigned long)page_address(page);
unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
if (addr_not_cache_congruent(paddr, vaddr))
@@ -733,14 +768,14 @@ EXPORT_SYMBOL(flush_icache_range);
* builtin kernel page will not have any virtual mappings.
* kprobe on loadable module will be kernel vaddr.
*/
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
__ic_line_inv_vaddr(paddr, vaddr, len);
}
/* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
@@ -749,7 +784,7 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
* wrapper to clearout kernel or userspace mappings of a page
* For kernel mappings @vaddr == @paddr
*/
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}
@@ -807,8 +842,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
- unsigned long kfrom = (unsigned long)page_address(from);
- unsigned long kto = (unsigned long)page_address(to);
+ void *kfrom = kmap_atomic(from);
+ void *kto = kmap_atomic(to);
int clean_src_k_mappings = 0;
/*
@@ -818,13 +853,16 @@ void copy_user_highpage(struct page *to, struct page *from,
*
* Note that while @u_vaddr refers to DST page's userspace vaddr, it is
* equally valid for SRC page as well
+ *
+ * For !VIPT cache, all of this gets compiled out as
+ * addr_not_cache_congruent() is 0
*/
if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
- __flush_dcache_page(kfrom, u_vaddr);
+ __flush_dcache_page((unsigned long)kfrom, u_vaddr);
clean_src_k_mappings = 1;
}
- copy_page((void *)kto, (void *)kfrom);
+ copy_page(kto, kfrom);
/*
* Mark DST page K-mapping as dirty for a later finalization by
@@ -841,11 +879,14 @@ void copy_user_highpage(struct page *to, struct page *from,
* sync the kernel mapping back to physical page
*/
if (clean_src_k_mappings) {
- __flush_dcache_page(kfrom, kfrom);
+ __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
set_bit(PG_dc_clean, &from->flags);
} else {
clear_bit(PG_dc_clean, &from->flags);
}
+
+ kunmap_atomic(kto);
+ kunmap_atomic(kfrom);
}
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
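
Note: the PTAG_HI hunks above all follow the same ordering rule. A minimal
sketch of that rule, with write_ptag_hi()/write_ptag_lo() as hypothetical
stand-ins for the write_aux_reg() calls on ARC_REG_{IC,DC}_PTAG_HI and the
PTAG register:

	/*
	 * Under PAE40 the upper 8 bits of the 40-bit paddr must reach
	 * PTAG_HI before the lower 32 bits are programmed.
	 */
	static void program_ptag(phys_addr_t paddr)
	{
		write_ptag_hi((u32)((u64)paddr >> 32));	/* bits [39:32] first */
		write_ptag_lo((u32)paddr);		/* then bits [31:0]   */
	}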
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d948e4e9d89c..af63f4a13e60 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -18,7 +18,14 @@
#include <asm/pgalloc.h>
#include <asm/mmu.h>
-static int handle_vmalloc_fault(unsigned long address)
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
/*
* Synchronize this task's top level page-table
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
* only copy the information from the master page table,
* nothing more.
*/
- if (address >= VMALLOC_START && address <= VMALLOC_END) {
- ret = handle_vmalloc_fault(address);
+ if (address >= VMALLOC_START) {
+ ret = handle_kernel_vaddr_fault(address);
if (unlikely(ret))
goto bad_area_nosemaphore;
else
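
For reference, a minimal sketch (using generic pgtable helpers; the patch
itself only renames the handler and widens its range check) of the PMD copy
the new comment describes:

	static noinline int handle_kernel_vaddr_fault_sketch(unsigned long address)
	{
		pgd_t *pgd   = pgd_offset(current->active_mm, address);
		pgd_t *pgd_k = pgd_offset_k(address);	/* swapper pgdir */

		if (!pgd_present(*pgd_k))
			return 1;		/* no master entry: genuine fault */

		set_pgd(pgd, *pgd_k);		/* share the 2nd level table/page */
		return 0;
	}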
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
new file mode 100644
index 000000000000..065ee6bfa82a
--- /dev/null
+++ b/arch/arc/mm/highmem.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * HIGHMEM API:
+ *
+ * kmap() API provides sleep semantics, hence referred to as "permanent maps"
+ * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
+ * for book-keeping
+ *
+ * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
+ * short-lived, a la "temporary" mappings, which historically were implemented
+ * as fixmaps (compile time addr etc). Their book-keeping is done per cpu.
+ *
+ * Both these facts combined (preemption disabled and per-cpu allocation)
+ * mean the total number of concurrent fixmaps is limited to the maximum
+ * number of such allocations in a single control path. Thus KM_TYPE_NR (another
+ * historic relic) is a small'ish number which caps max percpu fixmaps
+ *
+ * ARC HIGHMEM Details
+ *
+ * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
+ * is now shared between vmalloc and kmap (non overlapping though)
+ *
+ * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
+ * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
+ * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
+ *
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ * slots across NR_CPUS would be more than sufficient (generic code defines
+ * KM_TYPE_NR as 20).
+ *
+ * - pkmap being preemptible, in theory could do with more than 256 concurrent
+ * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
+ * the PGD and only works with a single page table @pkmap_page_table, hence
+ * sets the limit
+ */
+
+extern pte_t * pkmap_page_table;
+static pte_t * fixmap_page_table;
+
+void *kmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ return kmap_high(page);
+}
+
+void *kmap_atomic(struct page *page)
+{
+ int idx, cpu_idx;
+ unsigned long vaddr;
+
+ preempt_disable();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ cpu_idx = kmap_atomic_idx_push();
+ idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+ vaddr = FIXMAP_ADDR(idx);
+
+ set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+ mk_pte(page, kmap_prot));
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kv)
+{
+ unsigned long kvaddr = (unsigned long)kv;
+
+ if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+
+ /*
+ * Because preemption is disabled, this vaddr can be associated
+ * with the current allocated index.
+ * But in case of multiple live kmap_atomic(), it still relies on
+ * callers to unmap in the right order.
+ */
+ int cpu_idx = kmap_atomic_idx();
+ int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+
+ WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+
+ pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+ local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+
+ kmap_atomic_idx_pop();
+ }
+
+ pagefault_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
+{
+ pgd_t *pgd_k;
+ pud_t *pud_k;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ pgd_k = pgd_offset_k(kvaddr);
+ pud_k = pud_offset(pgd_k, kvaddr);
+ pmd_k = pmd_offset(pud_k, kvaddr);
+
+ pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd_k, pte_k);
+ return pte_k;
+}
+
+void kmap_init(void)
+{
+ /* Due to recursive include hell, we can't do this in processor.h */
+ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+
+ BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
+ pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+ fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+}
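
Usage sketch (not part of the patch): fixmap slots are pushed/popped per cpu,
so nested kmap_atomic() mappings must be released in reverse order, exactly
as copy_user_highpage() in cache.c above now does:

	static void copy_page_sketch(struct page *dst, struct page *src)
	{
		void *kdst = kmap_atomic(dst);	/* pushes a fixmap index */
		void *ksrc = kmap_atomic(src);	/* pushes the next index */

		copy_page(kdst, ksrc);

		kunmap_atomic(ksrc);		/* LIFO: unmap ksrc first */
		kunmap_atomic(kdst);
	}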
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index d44eedd8c322..a9305b5a2cd4 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -15,6 +15,7 @@
#endif
#include <linux/swap.h>
#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
@@ -24,16 +25,22 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
EXPORT_SYMBOL(empty_zero_page);
-/* Default tot mem from .config */
-static unsigned long arc_mem_sz = 0x20000000; /* some default */
+static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
+static unsigned long low_mem_sz;
+
+#ifdef CONFIG_HIGHMEM
+static unsigned long min_high_pfn;
+static u64 high_mem_start;
+static u64 high_mem_sz;
+#endif
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
static int __init setup_mem_sz(char *str)
{
- arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
+ low_mem_sz = memparse(str, NULL) & PAGE_MASK;
/* early console might not be setup yet - it will show up later */
- pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
+ pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
return 0;
}
@@ -41,8 +48,22 @@ early_param("mem", setup_mem_sz);
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
- arc_mem_sz = size & PAGE_MASK;
- pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
+ int in_use = 0;
+
+ if (!low_mem_sz) {
+ BUG_ON(base != low_mem_start);
+ low_mem_sz = size;
+ in_use = 1;
+ } else {
+#ifdef CONFIG_HIGHMEM
+ high_mem_start = base;
+ high_mem_sz = size;
+ in_use = 1;
+#endif
+ }
+
+ pr_info("Memory @ %llx [%lldM] %s\n",
+ base, TO_MB(size), !in_use ? "Not used":"");
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -72,46 +93,62 @@ early_param("initrd", early_initrd);
void __init setup_arch_memory(void)
{
unsigned long zones_size[MAX_NR_ZONES];
- unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
+ unsigned long zones_holes[MAX_NR_ZONES];
init_mm.start_code = (unsigned long)_text;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
- /*
- * We do it here, so that memory is correctly instantiated
- * even if "mem=xxx" cmline over-ride is given and/or
- * DT has memory node. Each causes an update to @arc_mem_sz
- * and we finally add memory one here
- */
- memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
-
- /*------------- externs in mm need setting up ---------------*/
-
/* first page of system - kernel .vector starts here */
min_low_pfn = ARCH_PFN_OFFSET;
- /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
- max_low_pfn = max_pfn = PFN_DOWN(end_mem);
+ /* Last usable page of low mem */
+ max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
- max_mapnr = max_low_pfn - min_low_pfn;
+#ifdef CONFIG_HIGHMEM
+ min_high_pfn = PFN_DOWN(high_mem_start);
+ max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#endif
+
+ max_mapnr = max_pfn - min_low_pfn;
- /*------------- reserve kernel image -----------------------*/
- memblock_reserve(CONFIG_LINUX_LINK_BASE,
- __pa(_end) - CONFIG_LINUX_LINK_BASE);
+ /*------------- bootmem allocator setup -----------------------*/
+
+ /*
+ * seed the bootmem allocator after any DT memory node parsing or
+ * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
+ *
+ * Only low mem is added, otherwise we have crashes when allocating
+ * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
+ * avail memory, ending in highmem with a > 32-bit address. However
+ * it then tries to memset it with a truncated 32-bit handle, causing
+ * the crash
+ */
+
+ memblock_add(low_mem_start, low_mem_sz);
+ memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
#ifdef CONFIG_BLK_DEV_INITRD
- /*------------- reserve initrd image -----------------------*/
if (initrd_start)
memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif
memblock_dump_all();
- /*-------------- node setup --------------------------------*/
+ /*----------------- node/zones setup --------------------------*/
memset(zones_size, 0, sizeof(zones_size));
- zones_size[ZONE_NORMAL] = max_mapnr;
+ memset(zones_holes, 0, sizeof(zones_holes));
+
+ zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
+ zones_holes[ZONE_NORMAL] = 0;
+
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+
+ /* This handles the peripheral address space hole */
+ zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
+#endif
/*
* We can't use the helper free_area_init(zones[]) because it uses
@@ -122,9 +159,12 @@ void __init setup_arch_memory(void)
free_area_init_node(0, /* node-id */
zones_size, /* num pages per zone */
min_low_pfn, /* first pfn of node */
- NULL); /* NO holes */
+ zones_holes); /* holes */
- high_memory = (void *)end_mem;
+#ifdef CONFIG_HIGHMEM
+ high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+ kmap_init();
+#endif
}
/*
@@ -135,6 +175,14 @@ void __init setup_arch_memory(void)
*/
void __init mem_init(void)
{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ reset_all_zones_managed_pages();
+ for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
+#endif
+
free_all_bootmem();
mem_init_print_info(NULL);
}
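
Worked example of the zone arithmetic above, with hypothetical bank addresses
(8K pages, 512M of low mem @ 0x8000_0000, 256M of high mem @ 0x1_0000_0000):

	min_low_pfn  = 0x80000000  >> 13 = 0x40000
	max_low_pfn  = 0xa0000000  >> 13 = 0x50000
	min_high_pfn = 0x100000000 >> 13 = 0x80000
	max_pfn      = 0x110000000 >> 13 = 0x88000

	zones_size[ZONE_NORMAL]   = max_low_pfn - min_low_pfn  = 0x10000
	zones_size[ZONE_HIGHMEM]  = max_pfn - max_low_pfn      = 0x38000
	zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn = 0x30000
	(the 1.5G peripheral hole between the two banks)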
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 2c7ce8bb7475..0ee739846847 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -109,6 +109,10 @@ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
static inline void __tlb_entry_erase(void)
{
write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
write_aux_reg(ARC_REG_TLBPD0, 0);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
@@ -182,7 +186,7 @@ static void utlb_invalidate(void)
}
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
unsigned int idx;
@@ -225,10 +229,14 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
write_aux_reg(ARC_REG_TLBPD0, pd0);
write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
+
write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}
@@ -240,22 +248,39 @@ static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
noinline void local_flush_tlb_all(void)
{
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
unsigned long flags;
unsigned int entry;
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ int num_tlb = mmu->sets * mmu->ways;
local_irq_save(flags);
/* Load PD0 and PD1 with template for a Blank Entry */
write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
write_aux_reg(ARC_REG_TLBPD0, 0);
- for (entry = 0; entry < mmu->num_tlb; entry++) {
+ for (entry = 0; entry < num_tlb; entry++) {
/* write this entry to the TLB */
write_aux_reg(ARC_REG_TLBINDEX, entry);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ const int stlb_idx = 0x800;
+
+ /* Blank sTLB entry */
+ write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
+
+ for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+ }
+ }
+
utlb_invalidate();
local_irq_restore(flags);
@@ -409,6 +434,15 @@ static inline void ipi_flush_tlb_range(void *arg)
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ipi_flush_pmd_tlb_range(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+#endif
+
static inline void ipi_flush_tlb_kernel_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
@@ -449,6 +483,20 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
+}
+#endif
+
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct tlb_args ta = {
@@ -463,11 +511,12 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/*
* Routine to create a TLB entry
*/
-void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
- unsigned long pd0, pd1;
+ unsigned long pd0;
+ pte_t pd1;
/*
* create_tlb() assumes that current->mm == vma->mm, since
@@ -499,9 +548,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
local_irq_save(flags);
- tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
+ tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
- address &= PAGE_MASK;
+ vaddr &= PAGE_MASK;
/* update this PTE credentials */
pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -511,7 +560,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
/* ASID for this task */
asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
- pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
+ pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
/*
* ARC MMU provides fully orthogonal access bits for K/U mode,
@@ -547,7 +596,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
- unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);
@@ -580,6 +629,95 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
}
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/*
+ * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
+ * support.
+ *
+ * Normal and Super pages can co-exist (of course not overlapping) in TLB with a
+ * new bit "SZ" in the TLB page descriptor to distinguish between them.
+ * Super Page size is configurable in hardware (4K to 16M), but fixed once
+ * the RTL is built.
+ *
+ * The exact THP size a Linux configuration will support is a function of:
+ * - MMU page size (typical 8K, RTL fixed)
+ * - software page walker address split between PGD:PTE:PFN (typical
+ * 11:8:13, but can be changed with 1 line)
+ * So for above default, THP size supported is 8K * (2^8) = 2M
+ *
+ * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
+ * reduces to 1 level (as PTE is folded into PGD and canonically referred
+ * to as PMD).
+ * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
+ */
+
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd)
+{
+ pte_t pte = __pte(pmd_val(*pmd));
+ update_mmu_cache(vma, addr, &pte);
+}
+
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable)
+{
+ struct list_head *lh = (struct list_head *) pgtable;
+
+ assert_spin_locked(&mm->page_table_lock);
+
+ /* FIFO */
+ if (!pmd_huge_pte(mm, pmdp))
+ INIT_LIST_HEAD(lh);
+ else
+ list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+ pmd_huge_pte(mm, pmdp) = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+{
+ struct list_head *lh;
+ pgtable_t pgtable;
+
+ assert_spin_locked(&mm->page_table_lock);
+
+ pgtable = pmd_huge_pte(mm, pmdp);
+ lh = (struct list_head *) pgtable;
+ if (list_empty(lh))
+ pmd_huge_pte(mm, pmdp) = NULL;
+ else {
+ pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+ list_del(lh);
+ }
+
+ pte_val(pgtable[0]) = 0;
+ pte_val(pgtable[1]) = 0;
+
+ return pgtable;
+}
+
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned int cpu;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+ if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
+ unsigned int asid = hw_pid(vma->vm_mm, cpu);
+
+ /* No need to loop here: this will always be for 1 Huge Page */
+ tlb_entry_erase(start | _PAGE_HW_SZ | asid);
+ }
+
+ local_irq_restore(flags);
+}
+
+#endif
+
/* Read the Cache Build Configuration Registers, Decode them and save into
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
@@ -598,10 +736,10 @@ void read_decode_mmu_bcr(void)
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+ unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
- unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
ways:4, ver:8;
#endif
} *mmu3;
@@ -622,7 +760,7 @@ void read_decode_mmu_bcr(void)
if (mmu->ver <= 2) {
mmu2 = (struct bcr_mmu_1_2 *)&tmp;
- mmu->pg_sz_k = TO_KB(PAGE_SIZE);
+ mmu->pg_sz_k = TO_KB(0x2000);
mmu->sets = 1 << mmu2->sets;
mmu->ways = 1 << mmu2->ways;
mmu->u_dtlb = mmu2->u_dtlb;
@@ -634,6 +772,7 @@ void read_decode_mmu_bcr(void)
mmu->ways = 1 << mmu3->ways;
mmu->u_dtlb = mmu3->u_dtlb;
mmu->u_itlb = mmu3->u_itlb;
+ mmu->sasid = mmu3->sasid;
} else {
mmu4 = (struct bcr_mmu_4 *)&tmp;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
@@ -642,9 +781,9 @@ void read_decode_mmu_bcr(void)
mmu->ways = mmu4->n_ways * 2;
mmu->u_dtlb = mmu4->u_dtlb * 4;
mmu->u_itlb = mmu4->u_itlb * 4;
+ mmu->sasid = mmu4->sasid;
+ mmu->pae = mmu4->pae;
}
-
- mmu->num_tlb = mmu->sets * mmu->ways;
}
char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
@@ -655,14 +794,15 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
if (p_mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "%dM Super Page%s, ",
- p_mmu->s_pg_sz_m, " (not used)");
+ p_mmu->s_pg_sz_m,
+ IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
+ "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
- p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
+ p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");
+ IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
}
@@ -690,6 +830,14 @@ void arc_mmu_init(void)
if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
+ panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
+ (unsigned long)TO_MB(HPAGE_PMD_SIZE));
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
+ panic("Hardware doesn't support PAE40\n");
+
/* Enable the MMU */
write_aux_reg(ARC_REG_PID, MMU_ENABLE);
@@ -725,15 +873,15 @@ void arc_mmu_init(void)
* the duplicate one.
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
*/
-volatile int dup_pd_verbose = 1;/* Be slient abt it or complain (default) */
+volatile int dup_pd_silent; /* Be silent about it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
- int set, way, n;
- unsigned long flags, is_valid;
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- unsigned int pd0[mmu->ways], pd1[mmu->ways];
+ unsigned int pd0[mmu->ways];
+ unsigned long flags;
+ int set;
local_irq_save(flags);
@@ -743,14 +891,16 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {
+ int is_valid, way;
+
/* read out all the ways of current set */
for (way = 0, is_valid = 0; way < mmu->ways; way++) {
write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
- pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
is_valid |= pd0[way] & _PAGE_PRESENT;
+ pd0[way] &= PAGE_MASK;
}
/* If all the WAYS in SET are empty, skip to next SET */
@@ -759,30 +909,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
/* Scan the set for duplicate ways: needs a nested loop */
for (way = 0; way < mmu->ways - 1; way++) {
+
+ int n;
+
if (!pd0[way])
continue;
for (n = way + 1; n < mmu->ways; n++) {
- if ((pd0[way] & PAGE_MASK) ==
- (pd0[n] & PAGE_MASK)) {
-
- if (dup_pd_verbose) {
- pr_info("Duplicate PD's @"
- "[%d:%d]/[%d:%d]\n",
- set, way, set, n);
- pr_info("TLBPD0[%u]: %08x\n",
- way, pd0[way]);
- }
-
- /*
- * clear entry @way and not @n. This is
- * critical to our optimised loop
- */
- pd0[way] = pd1[way] = 0;
- write_aux_reg(ARC_REG_TLBINDEX,
+ if (pd0[way] != pd0[n])
+ continue;
+
+ if (!dup_pd_silent)
+ pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
+ pd0[way], set, way, n);
+
+ /*
+ * clear entry @way and not @n.
+ * This is critical to our optimised loop
+ */
+ pd0[way] = 0;
+ write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
- __tlb_entry_erase();
- }
+ __tlb_entry_erase();
}
}
}
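
The THP comment block above derives the super page size from the address
split; a standalone illustration of that arithmetic (user-space C, values
assume the default 8K page and 11:8:13 split):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 13;	/* 8K MMU page (RTL fixed)   */
		unsigned long pte_bits   = 8;	/* 11:8:13 PGD:PTE:PFN split */
		unsigned long thp_sz = (1UL << page_shift) << pte_bits;

		printf("THP size: %luM\n", thp_sz >> 20);	/* prints 2 */
		return 0;
	}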
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index f6f4c3cb505d..63860adc4814 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -205,20 +205,38 @@ ex_saved_reg1:
#endif
lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
- ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr
- and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags
- ; contains Ptr to Page Table
- bz.d do_slow_path_pf ; if no Page Table, do page fault
+ ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr
+ tst r3, r3
+ bz do_slow_path_pf ; if no Page Table, do page fault
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp)
+ add2.nz r1, r1, r0
+ bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk
+ mov.nz r0, r3
+
+#endif
+ and r1, r3, PAGE_MASK
; Get the PTE entry: The idea is
; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
- ; (3) z = pgtbl[y]
- ; To avoid the multiply by in end, we do the -2, <<2 below
+ ; (3) z = (pgtbl + y * 4)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */
+#else
+#define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */
+#endif
+
+ ; multiply in step (3) above is avoided by shifting less in step (1)
+ lsr r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
+ and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
+ ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40)
+ ; r1: PTE ptr
+
+2:
- lsr r0, r2, (PAGE_SHIFT - 2)
- and r0, r0, ( (PTRS_PER_PTE - 1) << 2)
- ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
and.f 0, r0, _PAGE_PRESENT
bz 1f
@@ -233,18 +251,23 @@ ex_saved_reg1:
;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; (for PAE40, a two-word PTE and a three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE
.macro CONV_PTE_TO_TLB
- and r3, r0, PTE_BITS_RWX ; r w x
- lsl r2, r3, 3 ; r w x 0 0 0 (GLOBAL, kernel only)
+ and r3, r0, PTE_BITS_RWX ; r w x
+ lsl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only)
and.f 0, r0, _PAGE_GLOBAL
- or.z r2, r2, r3 ; r w x r w x (!GLOBAL, user page)
+ or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
or r3, r3, r2
- sr r3, [ARC_REG_TLBPD1] ; these go in PD1
+ sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
+#ifdef CONFIG_ARC_HAS_PAE40
+ ld r3, [r1, 4] ; paddr[39..32]
+ sr r3, [ARC_REG_TLBPD1HI]
+#endif
and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
@@ -365,7 +388,7 @@ ENTRY(EV_TLBMissD)
lr r3, [ecr]
or r0, r0, _PAGE_ACCESSED ; Accessed bit always
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
- or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
+ or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well
st_s r0, [r1] ; Write back PTE
CONV_PTE_TO_TLB
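
A C rendering (illustrative only) of the scaled PTE lookup the rewritten
assembly performs: shifting the fault address by PAGE_SHIFT - PTE_SIZE_LOG
instead of PAGE_SHIFT leaves the table index pre-multiplied into a byte
offset, so no separate multiply is needed:

	static unsigned long pte_byte_offset(unsigned long vaddr)
	{
		/* PTE_SIZE_LOG is 3 under PAE40 (8-byte PTEs), else 2 */
		unsigned long off = vaddr >> (PAGE_SHIFT - PTE_SIZE_LOG);

		return off & ((PTRS_PER_PTE - 1) << PTE_SIZE_LOG);
	}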