Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--  arch/sh/include/asm/addrspace.h        4
-rw-r--r--  arch/sh/include/asm/atomic-irq.h      16
-rw-r--r--  arch/sh/include/asm/bitops-llsc.h     72
-rw-r--r--  arch/sh/include/asm/clock.h            1
-rw-r--r--  arch/sh/include/asm/cmpxchg-llsc.h    38
-rw-r--r--  arch/sh/include/asm/cpu-features.h     1
-rw-r--r--  arch/sh/include/asm/dma-sh.h         118
-rw-r--r--  arch/sh/include/asm/dma.h              4
-rw-r--r--  arch/sh/include/asm/entry-macros.S     5
-rw-r--r--  arch/sh/include/asm/gpio.h            70
-rw-r--r--  arch/sh/include/asm/hd64461.h          1
-rw-r--r--  arch/sh/include/asm/io.h               4
-rw-r--r--  arch/sh/include/asm/kprobes.h          2
-rw-r--r--  arch/sh/include/asm/mmu_context.h     15
-rw-r--r--  arch/sh/include/asm/mmu_context_32.h  12
-rw-r--r--  arch/sh/include/asm/page.h             7
-rw-r--r--  arch/sh/include/asm/processor.h        2
-rw-r--r--  arch/sh/include/asm/processor_32.h    15
-rw-r--r--  arch/sh/include/asm/processor_64.h    14
-rw-r--r--  arch/sh/include/asm/ptrace.h           8
-rw-r--r--  arch/sh/include/asm/sections.h         1
-rw-r--r--  arch/sh/include/asm/suspend.h         22
-rw-r--r--  arch/sh/include/asm/timer.h            4
-rw-r--r--  arch/sh/include/asm/tlb.h            100
24 files changed, 400 insertions, 136 deletions
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 36736c7e93db..80d40813e057 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -31,7 +31,7 @@
/* Returns the physical address of a PnSEG (n=1,2) address */
#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
/*
* Map an address to a certain privileged segment
*/
@@ -43,7 +43,7 @@
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
#define P4SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT */
+#endif /* 29BIT || PMB_FIXED */
#endif /* P1SEG */
/* Check if an address can be reached in 29 bits */
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 74f7943cff6f..a0b348068cae 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -11,7 +11,7 @@ static inline void atomic_add(int i, atomic_t *v)
unsigned long flags;
local_irq_save(flags);
- *(long *)v += i;
+ v->counter += i;
local_irq_restore(flags);
}
@@ -20,7 +20,7 @@ static inline void atomic_sub(int i, atomic_t *v)
unsigned long flags;
local_irq_save(flags);
- *(long *)v -= i;
+ v->counter -= i;
local_irq_restore(flags);
}
@@ -29,9 +29,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
unsigned long temp, flags;
local_irq_save(flags);
- temp = *(long *)v;
+ temp = v->counter;
temp += i;
- *(long *)v = temp;
+ v->counter = temp;
local_irq_restore(flags);
return temp;
@@ -42,9 +42,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
unsigned long temp, flags;
local_irq_save(flags);
- temp = *(long *)v;
+ temp = v->counter;
temp -= i;
- *(long *)v = temp;
+ v->counter = temp;
local_irq_restore(flags);
return temp;
@@ -55,7 +55,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
unsigned long flags;
local_irq_save(flags);
- *(long *)v &= ~mask;
+ v->counter &= ~mask;
local_irq_restore(flags);
}
@@ -64,7 +64,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
unsigned long flags;
local_irq_save(flags);
- *(long *)v |= mask;
+ v->counter |= mask;
local_irq_restore(flags);
}
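Note: the switch from `*(long *)v` to `v->counter` replaces a type-punning cast with the named `int` member of `atomic_t`, which the compiler can actually type-check. A minimal usage sketch of these IRQ-disabled fallbacks (illustrative only; `ATOMIC_INIT` is the usual initializer):

static atomic_t nr_pending = ATOMIC_INIT(0);

static void submit_work(void)
{
	/* expands to: save flags, disable IRQs, counter++, restore */
	atomic_add(1, &nr_pending);
}

static int complete_work(void)
{
	/* returns the post-decrement value atomically */
	return atomic_sub_return(1, &nr_pending);
}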
diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h
index 1d2fc0b010ad..d8328be06191 100644
--- a/arch/sh/include/asm/bitops-llsc.h
+++ b/arch/sh/include/asm/bitops-llsc.h
@@ -1,7 +1,7 @@
#ifndef __ASM_SH_BITOPS_LLSC_H
#define __ASM_SH_BITOPS_LLSC_H
-static inline void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
@@ -13,16 +13,16 @@ static inline void set_bit(int nr, volatile void * addr)
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! set_bit \n\t"
- "or %3, %0 \n\t"
+ "or %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
- : "=&z" (tmp), "=r" (a)
- : "1" (a), "r" (mask)
+ : "=&z" (tmp)
+ : "r" (a), "r" (mask)
: "t", "memory"
);
}
-static inline void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
@@ -34,16 +34,16 @@ static inline void clear_bit(int nr, volatile void * addr)
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! clear_bit \n\t"
- "and %3, %0 \n\t"
+ "and %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
- : "=&z" (tmp), "=r" (a)
- : "1" (a), "r" (~mask)
+ : "=&z" (tmp)
+ : "r" (a), "r" (~mask)
: "t", "memory"
);
}
-static inline void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
@@ -55,16 +55,16 @@ static inline void change_bit(int nr, volatile void * addr)
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! change_bit \n\t"
- "xor %3, %0 \n\t"
+ "xor %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
- : "=&z" (tmp), "=r" (a)
- : "1" (a), "r" (mask)
+ : "=&z" (tmp)
+ : "r" (a), "r" (mask)
: "t", "memory"
);
}
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
@@ -75,21 +75,21 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! test_and_set_bit \n\t"
- "mov %0, %2 \n\t"
- "or %4, %0 \n\t"
- "movco.l %0, @%1 \n\t"
+ "movli.l @%2, %0 ! test_and_set_bit \n\t"
+ "mov %0, %1 \n\t"
+ "or %3, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
"bf 1b \n\t"
- "and %4, %2 \n\t"
- : "=&z" (tmp), "=r" (a), "=&r" (retval)
- : "1" (a), "r" (mask)
+ "and %3, %1 \n\t"
+ : "=&z" (tmp), "=&r" (retval)
+ : "r" (a), "r" (mask)
: "t", "memory"
);
return retval != 0;
}
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
@@ -100,22 +100,22 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! test_and_clear_bit \n\t"
- "mov %0, %2 \n\t"
- "and %5, %0 \n\t"
- "movco.l %0, @%1 \n\t"
+ "movli.l @%2, %0 ! test_and_clear_bit \n\t"
+ "mov %0, %1 \n\t"
+ "and %4, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
"bf 1b \n\t"
- "and %4, %2 \n\t"
+ "and %3, %1 \n\t"
"synco \n\t"
- : "=&z" (tmp), "=r" (a), "=&r" (retval)
- : "1" (a), "r" (mask), "r" (~mask)
+ : "=&z" (tmp), "=&r" (retval)
+ : "r" (a), "r" (mask), "r" (~mask)
: "t", "memory"
);
return retval != 0;
}
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
@@ -126,15 +126,15 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! test_and_change_bit \n\t"
- "mov %0, %2 \n\t"
- "xor %4, %0 \n\t"
- "movco.l %0, @%1 \n\t"
+ "movli.l @%2, %0 ! test_and_change_bit \n\t"
+ "mov %0, %1 \n\t"
+ "xor %3, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
"bf 1b \n\t"
- "and %4, %2 \n\t"
+ "and %3, %1 \n\t"
"synco \n\t"
- : "=&z" (tmp), "=r" (a), "=&r" (retval)
- : "1" (a), "r" (mask)
+ : "=&z" (tmp), "=&r" (retval)
+ : "r" (a), "r" (mask)
: "t", "memory"
);
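Note: the constraint change is the substance of this hunk. The old asm declared `a` as an output ("=r" (a)) tied back to itself through a matching-digit input ("1" (a)), even though the assembly never modifies `a`; dropping the bogus output renumbers every %n reference, which is what the body edits track. Callers are unaffected. A hedged sketch of the classic pattern these primitives support (illustrative; `cpu_relax()` is the standard busy-wait hint):

static unsigned long lock_word;

static void spin_take(void)
{
	/* test_and_set_bit() returns the old value of bit 0:
	 * loop until we observe it clear, i.e. until we set it. */
	while (test_and_set_bit(0, &lock_word))
		cpu_relax();
}

static void spin_release(void)
{
	clear_bit(0, &lock_word);
}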
diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h
index f9c88583d90a..2f6c9627bc1f 100644
--- a/arch/sh/include/asm/clock.h
+++ b/arch/sh/include/asm/clock.h
@@ -15,6 +15,7 @@ struct clk_ops {
void (*disable)(struct clk *clk);
void (*recalc)(struct clk *clk);
int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id);
+ int (*set_parent)(struct clk *clk, struct clk *parent);
long (*round_rate)(struct clk *clk, unsigned long rate);
};
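Note: a sketch of what the new set_parent hook enables — a clock fed through a mux can be re-parented at runtime. The register MUXCR and the clocks pll_clk/extal_clk are hypothetical, and the clk->parent field is assumed from the wider header:

static int mux_clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned short val = ctrl_inw(MUXCR);	/* hypothetical mux register */

	if (parent == &pll_clk)
		ctrl_outw(val | 1, MUXCR);
	else if (parent == &extal_clk)
		ctrl_outw(val & ~1, MUXCR);
	else
		return -EINVAL;

	clk->parent = parent;
	return 0;
}

static struct clk_ops mux_clk_ops = {
	.set_parent	= mux_clk_set_parent,
};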
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index aee3bf286581..0fac3da536ca 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -8,14 +8,14 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! xchg_u32 \n\t"
- "mov %0, %2 \n\t"
- "mov %4, %0 \n\t"
- "movco.l %0, @%1 \n\t"
+ "movli.l @%2, %0 ! xchg_u32 \n\t"
+ "mov %0, %1 \n\t"
+ "mov %3, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
- : "=&z"(tmp), "=r" (m), "=&r" (retval)
- : "1" (m), "r" (val)
+ : "=&z"(tmp), "=&r" (retval)
+ : "r" (m), "r" (val)
: "t", "memory"
);
@@ -29,14 +29,14 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! xchg_u8 \n\t"
- "mov %0, %2 \n\t"
- "mov %4, %0 \n\t"
- "movco.l %0, @%1 \n\t"
+ "movli.l @%2, %0 ! xchg_u8 \n\t"
+ "mov %0, %1 \n\t"
+ "mov %3, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
- : "=&z"(tmp), "=r" (m), "=&r" (retval)
- : "1" (m), "r" (val & 0xff)
+ : "=&z"(tmp), "=&r" (retval)
+ : "r" (m), "r" (val & 0xff)
: "t", "memory"
);
@@ -51,17 +51,17 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __cmpxchg_u32 \n\t"
- "mov %0, %2 \n\t"
- "cmp/eq %2, %4 \n\t"
+ "movli.l @%2, %0 ! __cmpxchg_u32 \n\t"
+ "mov %0, %1 \n\t"
+ "cmp/eq %1, %3 \n\t"
"bf 2f \n\t"
- "mov %5, %0 \n\t"
+ "mov %3, %0 \n\t"
"2: \n\t"
- "movco.l %0, @%1 \n\t"
+ "movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
- : "=&z" (tmp), "=r" (m), "=&r" (retval)
- : "1" (m), "r" (old), "r" (new)
+ : "=&z" (tmp), "=&r" (retval)
+ : "r" (m), "r" (old), "r" (new)
: "t", "memory"
);
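Note: for reference, the semantics the movli.l/movco.l loop implements, written as plain (non-atomic) C. Illustration only — in the real code the store only succeeds if nothing else touched the reservation, otherwise "bf 1b" retries:

static unsigned long
cmpxchg_u32_model(volatile int *m, unsigned long old, unsigned long new)
{
	unsigned long retval = *m;	/* movli.l: load-linked          */

	if (retval == old)		/* cmp/eq %1, %3                 */
		*m = new;		/* mov %4, %0; movco.l: store-   */
					/* conditional, retried on fail  */
	return retval;
}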
diff --git a/arch/sh/include/asm/cpu-features.h b/arch/sh/include/asm/cpu-features.h
index 86308aa39731..694abe490edb 100644
--- a/arch/sh/include/asm/cpu-features.h
+++ b/arch/sh/include/asm/cpu-features.h
@@ -21,5 +21,6 @@
#define CPU_HAS_LLSC 0x0040 /* movli.l/movco.l */
#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */
#define CPU_HAS_OP32 0x0100 /* 32-bit instruction support */
+#define CPU_HAS_PTEAEX 0x0200 /* PTE ASID Extension support */
#endif /* __ASM_SH_CPU_FEATURES_H */
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
new file mode 100644
index 000000000000..0c8f8e14622a
--- /dev/null
+++ b/arch/sh/include/asm/dma-sh.h
@@ -0,0 +1,118 @@
+/*
+ * arch/sh/include/asm/dma-sh.h
+ *
+ * Copyright (C) 2000 Takashi YOSHII
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __DMA_SH_H
+#define __DMA_SH_H
+
+#include <asm/dma.h>
+#include <cpu/dma.h>
+
+/* DMAOR control: the DMAOR access size differs between CPUs. */
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785)
+#define dmaor_read_reg(n) \
+ (n ? ctrl_inw(SH_DMAC_BASE1 + DMAOR) \
+ : ctrl_inw(SH_DMAC_BASE0 + DMAOR))
+#define dmaor_write_reg(n, data) \
+ (n ? ctrl_outw(data, SH_DMAC_BASE1 + DMAOR) \
+ : ctrl_outw(data, SH_DMAC_BASE0 + DMAOR))
+#else /* Other CPU */
+#define dmaor_read_reg(n) ctrl_inw(SH_DMAC_BASE0 + DMAOR)
+#define dmaor_write_reg(n, data) ctrl_outw(data, SH_DMAC_BASE0 + DMAOR)
+#endif
+
+static int dmte_irq_map[] __maybe_unused = {
+#if (MAX_DMA_CHANNELS >= 4)
+ DMTE0_IRQ,
+ DMTE0_IRQ + 1,
+ DMTE0_IRQ + 2,
+ DMTE0_IRQ + 3,
+#endif
+#if (MAX_DMA_CHANNELS >= 6)
+ DMTE4_IRQ,
+ DMTE4_IRQ + 1,
+#endif
+#if (MAX_DMA_CHANNELS >= 8)
+ DMTE6_IRQ,
+ DMTE6_IRQ + 1,
+#endif
+#if (MAX_DMA_CHANNELS >= 12)
+ DMTE8_IRQ,
+ DMTE9_IRQ,
+ DMTE10_IRQ,
+ DMTE11_IRQ,
+#endif
+};
+
+/* Definitions for the SuperH DMAC */
+#define REQ_L 0x00000000
+#define REQ_E 0x00080000
+#define RACK_H 0x00000000
+#define RACK_L 0x00040000
+#define ACK_R 0x00000000
+#define ACK_W 0x00020000
+#define ACK_H 0x00000000
+#define ACK_L 0x00010000
+#define DM_INC 0x00004000
+#define DM_DEC 0x00008000
+#define SM_INC 0x00001000
+#define SM_DEC 0x00002000
+#define RS_IN 0x00000200
+#define RS_OUT 0x00000300
+#define TS_BLK 0x00000040
+#define TM_BUR 0x00000020
+#define CHCR_DE 0x00000001
+#define CHCR_TE 0x00000002
+#define CHCR_IE 0x00000004
+
+/* DMAOR definitions */
+#define DMAOR_AE 0x00000004
+#define DMAOR_NMIF 0x00000002
+#define DMAOR_DME 0x00000001
+
+/*
+ * Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ */
+#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32)
+
+/* DMA base address */
+static u32 dma_base_addr[] __maybe_unused = {
+#if (MAX_DMA_CHANNELS >= 4)
+ SH_DMAC_BASE0 + 0x00, /* channel 0 */
+ SH_DMAC_BASE0 + 0x10,
+ SH_DMAC_BASE0 + 0x20,
+ SH_DMAC_BASE0 + 0x30,
+#endif
+#if (MAX_DMA_CHANNELS >= 6)
+ SH_DMAC_BASE0 + 0x50,
+ SH_DMAC_BASE0 + 0x60,
+#endif
+#if (MAX_DMA_CHANNELS >= 8)
+ SH_DMAC_BASE1 + 0x00,
+ SH_DMAC_BASE1 + 0x10,
+#endif
+#if (MAX_DMA_CHANNELS >= 12)
+ SH_DMAC_BASE1 + 0x20,
+ SH_DMAC_BASE1 + 0x30,
+ SH_DMAC_BASE1 + 0x50,
+ SH_DMAC_BASE1 + 0x60, /* channel 11 */
+#endif
+};
+
+/* DMA register */
+#define SAR 0x00
+#define DAR 0x04
+#define TCR 0x08
+#define CHCR 0x0C
+#define DMAOR 0x40
+
+#endif /* __DMA_SH_H */
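Note: a hedged bring-up sketch using the accessors and bit definitions above — clear any latched error flags and set the master enable bit on controller 0 (illustrative, not driver code):

static void dmac0_enable(void)
{
	unsigned short dmaor = dmaor_read_reg(0);

	dmaor &= ~(DMAOR_AE | DMAOR_NMIF);	/* clear address-error/NMI */
	dmaor_write_reg(0, dmaor | DMAOR_DME);	/* master enable           */
}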
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index beca7128e2ab..6bd178473878 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -25,9 +25,9 @@
#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x10000000)
#ifdef CONFIG_NR_DMA_CHANNELS
-# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS)
+# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS)
#else
-# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS)
+# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS)
#endif
/*
diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S
index 2dab0b8d9454..3a4752a65722 100644
--- a/arch/sh/include/asm/entry-macros.S
+++ b/arch/sh/include/asm/entry-macros.S
@@ -31,3 +31,8 @@
#endif
.endm
+#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
+# define PREF(x) pref @x
+#else
+# define PREF(x) nop
+#endif
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h
index 90673658eb14..61f93da2c62e 100644
--- a/arch/sh/include/asm/gpio.h
+++ b/arch/sh/include/asm/gpio.h
@@ -19,8 +19,42 @@
#include <cpu/gpio.h>
#endif
+#define ARCH_NR_GPIOS 512
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+static inline int gpio_get_value(unsigned gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+static inline int gpio_to_irq(unsigned gpio)
+{
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
typedef unsigned short pinmux_enum_t;
-typedef unsigned char pinmux_flag_t;
+typedef unsigned short pinmux_flag_t;
#define PINMUX_TYPE_NONE 0
#define PINMUX_TYPE_FUNCTION 1
@@ -34,6 +68,11 @@ typedef unsigned char pinmux_flag_t;
#define PINMUX_FLAG_WANT_PULLUP (1 << 3)
#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4)
+#define PINMUX_FLAG_DBIT_SHIFT 5
+#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
+#define PINMUX_FLAG_DREG_SHIFT 10
+#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT)
+
struct pinmux_gpio {
pinmux_enum_t enum_id;
pinmux_flag_t flags;
@@ -54,7 +93,7 @@ struct pinmux_cfg_reg {
.enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \
struct pinmux_data_reg {
- unsigned long reg, reg_width;
+ unsigned long reg, reg_width, reg_shadow;
pinmux_enum_t *enum_ids;
};
@@ -89,34 +128,9 @@ struct pinmux_info {
unsigned int gpio_data_size;
unsigned long *gpio_in_use;
+ struct gpio_chip chip;
};
int register_pinmux(struct pinmux_info *pip);
-int __gpio_request(unsigned gpio);
-static inline int gpio_request(unsigned gpio, const char *label)
-{
- return __gpio_request(gpio);
-}
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-
-/* IRQ modes are unspported */
-static inline int gpio_to_irq(unsigned gpio)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-#include <asm-generic/gpio.h>
-
#endif /* __ASM_SH_GPIO_H */
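Note: widening pinmux_flag_t from unsigned char to unsigned short is forced by the two new fields — PINMUX_FLAG_DREG occupies bits 10-15, which no longer fit in eight bits. Together the fields let a GPIO's data register index and bit position ride along in its flags. A sketch of decoding them (the gpios/data_regs members of pinmux_info are assumed from the wider header):

static int pinmux_read_bit(struct pinmux_info *gpioc, unsigned gpio)
{
	pinmux_flag_t flags = gpioc->gpios[gpio].flags;
	int dreg = (flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
	int dbit = (flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
	struct pinmux_data_reg *dr = &gpioc->data_regs[dreg];

	return (ctrl_inl(dr->reg) >> dbit) & 1;	/* assumes a 32-bit register */
}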
diff --git a/arch/sh/include/asm/hd64461.h b/arch/sh/include/asm/hd64461.h
index 8c1353baf00f..52b4b6238277 100644
--- a/arch/sh/include/asm/hd64461.h
+++ b/arch/sh/include/asm/hd64461.h
@@ -242,7 +242,6 @@
#include <asm/io_generic.h>
/* arch/sh/cchips/hd6446x/hd64461/setup.c */
-int hd64461_irq_demux(int irq);
void hd64461_register_irq_demux(int irq,
int (*demux) (int irq, void *dev), void *dev);
void hd64461_unregister_irq_demux(int irq);
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 61f6dae40534..0454f8d68059 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -238,7 +238,7 @@ extern void onchip_unmap(unsigned long vaddr);
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
unsigned long last_addr = offset + size - 1;
#endif
void __iomem *ret;
@@ -247,7 +247,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
if (ret)
return ret;
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
/*
* For P1 and P2 space this is trivial, as everything is already
* mapped. Uncached access for P1 addresses are done through P2.
diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h
index 6078d8e551d4..613644a758e8 100644
--- a/arch/sh/include/asm/kprobes.h
+++ b/arch/sh/include/asm/kprobes.h
@@ -16,7 +16,7 @@ typedef u16 kprobe_opcode_t;
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
-#define regs_return_value(regs) ((regs)->regs[0])
+#define regs_return_value(_regs) ((_regs)->regs[0])
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
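Note: the parameter rename is not cosmetic. The preprocessor substitutes every occurrence of a macro parameter in the replacement list, including the identifier used as the struct member after "->", so the old spelling only expanded correctly when the argument happened to be named regs:

#define OLD_return_value(regs)   ((regs)->regs[0])
/* OLD_return_value(r)  expands to  ((r)->r[0])    -- member captured, wrong */

#define NEW_return_value(_regs)  ((_regs)->regs[0])
/* NEW_return_value(r)  expands to  ((r)->regs[0]) -- correct for any name   */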
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 5d9157bd474d..2a9c55f1a83f 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -19,13 +19,18 @@
* (a) TLB cache version (or round, cycle whatever expression you like)
* (b) ASID (Address Space IDentifier)
*/
+#ifdef CONFIG_CPU_HAS_PTEAEX
+#define MMU_CONTEXT_ASID_MASK 0x0000ffff
+#else
#define MMU_CONTEXT_ASID_MASK 0x000000ff
-#define MMU_CONTEXT_VERSION_MASK 0xffffff00
-#define MMU_CONTEXT_FIRST_VERSION 0x00000100
-#define NO_CONTEXT 0UL
+#endif
-/* ASID is 8-bit value, so it can't be 0x100 */
-#define MMU_NO_ASID 0x100
+#define MMU_CONTEXT_VERSION_MASK (~0UL & ~MMU_CONTEXT_ASID_MASK)
+#define MMU_CONTEXT_FIRST_VERSION (MMU_CONTEXT_ASID_MASK + 1)
+
+/* Impossible ASID value, to differentiate from NO_CONTEXT. */
+#define MMU_NO_ASID MMU_CONTEXT_FIRST_VERSION
+#define NO_CONTEXT 0UL
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
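Note: deriving VERSION_MASK and FIRST_VERSION from the ASID mask puts the version counter right above the ASID field — bits 8-31 for classic 8-bit ASIDs, bits 16-31 with PTEAEX. A simplified model of how allocation rolls over (patterned after get_mmu_context(); not the verbatim implementation):

static unsigned long next_context(unsigned long ctx)
{
	if (!(++ctx & MMU_CONTEXT_ASID_MASK)) {
		/* ASIDs of this version exhausted: flush everything
		 * and begin a new version cycle. */
		flush_tlb_all();
		if (!ctx)	/* full 32-bit wrap lands on NO_CONTEXT */
			ctx = MMU_CONTEXT_FIRST_VERSION;
	}
	return ctx;
}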
diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h
index f4f9aebd68b7..8ef800c549ab 100644
--- a/arch/sh/include/asm/mmu_context_32.h
+++ b/arch/sh/include/asm/mmu_context_32.h
@@ -10,6 +10,17 @@ static inline void destroy_context(struct mm_struct *mm)
/* Do nothing */
}
+#ifdef CONFIG_CPU_HAS_PTEAEX
+static inline void set_asid(unsigned long asid)
+{
+ __raw_writel(asid, MMU_PTEAEX);
+}
+
+static inline unsigned long get_asid(void)
+{
+ return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
+}
+#else
static inline void set_asid(unsigned long asid)
{
unsigned long __dummy;
@@ -33,6 +44,7 @@ static inline unsigned long get_asid(void)
asid &= MMU_CONTEXT_ASID_MASK;
return asid;
}
+#endif /* CONFIG_CPU_HAS_PTEAEX */
/* MMU_TTB is used for optimizing the fault handling. */
static inline void set_TTB(pgd_t *pgd)
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 5871d78e47e5..9c6d21ec0240 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -129,7 +129,12 @@ typedef struct page *pgtable_t;
* is not visible (it is part of the PMB mapping) and so needs to be
* added or subtracted as required.
*/
-#ifdef CONFIG_32BIT
+#if defined(CONFIG_PMB_FIXED)
+/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
+#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
+#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET))
+#elif defined(CONFIG_32BIT)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
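Note: a worked example of the new PMB_FIXED translation (numbers are illustrative only):

/*
 * PAGE_OFFSET    = 0x80000000
 * __MEMORY_START = 0x40000000  =>  PXSEG(__MEMORY_START) = 0x40000000
 * PMB_OFFSET     = 0x80000000 - 0x40000000 = 0x40000000
 *
 * __pa(0x80001000) = 0x80001000 - 0x40000000 = 0x40001000
 * __va(0x40001000) = 0x40001000 + 0x40000000 = 0x80001000
 */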
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 1ef4b24d7619..1fd58b421438 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -31,7 +31,7 @@ enum cpu_type {
CPU_SH7760, CPU_SH4_202, CPU_SH4_501,
/* SH-4A types */
- CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785,
+ CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SH7786,
CPU_SH7723, CPU_SHX3,
/* SH4AL-DSP types */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index d79063c5eb9c..efdd78a53b11 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -108,12 +108,12 @@ extern int ubc_usercnt;
/*
* Do necessary setup to start up a newly executed thread.
*/
-#define start_thread(regs, new_pc, new_sp) \
+#define start_thread(_regs, new_pc, new_sp) \
set_fs(USER_DS); \
- regs->pr = 0; \
- regs->sr = SR_FD; /* User mode. */ \
- regs->pc = new_pc; \
- regs->regs[15] = new_sp
+ _regs->pr = 0; \
+ _regs->sr = SR_FD; /* User mode. */ \
+ _regs->pc = new_pc; \
+ _regs->regs[15] = new_sp
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -189,10 +189,9 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15])
-#define user_stack_pointer(regs) ((regs)->regs[15])
+#define user_stack_pointer(_regs) ((_regs)->regs[15])
-#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \
- defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
#define PREFETCH_STRIDE L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 803177fcf086..5727d31b0ccf 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -145,13 +145,13 @@ struct thread_struct {
*/
#define SR_USER (SR_MMU | SR_FD)
-#define start_thread(regs, new_pc, new_sp) \
+#define start_thread(_regs, new_pc, new_sp) \
set_fs(USER_DS); \
- regs->sr = SR_USER; /* User mode. */ \
- regs->pc = new_pc - 4; /* Compensate syscall exit */ \
- regs->pc |= 1; /* Set SHmedia ! */ \
- regs->regs[18] = 0; \
- regs->regs[15] = new_sp
+ _regs->sr = SR_USER; /* User mode. */ \
+ _regs->pc = new_pc - 4; /* Compensate syscall exit */ \
+ _regs->pc |= 1; /* Set SHmedia ! */ \
+ _regs->regs[18] = 0; \
+ _regs->regs[15] = new_sp
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -226,7 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
-#define user_stack_pointer(regs) ((regs)->regs[15])
+#define user_stack_pointer(_regs) ((_regs)->regs[15])
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PROCESSOR_64_H */
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index 12912ab80c15..81c6568fdb3e 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -122,14 +122,12 @@ extern void user_disable_single_step(struct task_struct *);
#ifdef CONFIG_SH_DSP
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
- - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
+ - sizeof(struct pt_dspregs)) - 1)
#define task_pt_dspregs(task) \
- ((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE \
- - sizeof(unsigned long)) - 1)
+ ((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE) - 1)
#else
#define task_pt_regs(task) \
- ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
- - sizeof(unsigned long)) - 1)
+ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1)
#endif
static inline unsigned long profile_pc(struct pt_regs *regs)
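Note: dropping the sizeof(unsigned long) term removes a stray padding word, so the register frame now sits flush against the top of the stack page:

/*
 * task_stack_page(task) + THREAD_SIZE     <- top of stack page
 *   [ struct pt_dspregs ]                 (CONFIG_SH_DSP only)
 *   [ struct pt_regs    ]                 <- task_pt_regs(task)
 *   ...                                      kernel stack grows down
 */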
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 8f8f4ad400df..01a4076a3719 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,6 +3,7 @@
#include <asm-generic/sections.h>
+extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
extern char _ebss[];
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
new file mode 100644
index 000000000000..b1b995370e79
--- /dev/null
+++ b/arch/sh/include/asm/suspend.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_SH_SUSPEND_H
+#define _ASM_SH_SUSPEND_H
+
+#ifndef __ASSEMBLY__
+static inline int arch_prepare_suspend(void) { return 0; }
+
+#include <asm/ptrace.h>
+
+struct swsusp_arch_regs {
+ struct pt_regs user_regs;
+ unsigned long bank1_regs[8];
+};
+#endif
+
+/* flags passed to assembly suspend code */
+#define SUSP_SH_SLEEP (1 << 0) /* Regular sleep mode */
+#define SUSP_SH_STANDBY (1 << 1) /* SH-Mobile Software standby mode */
+#define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */
+#define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */
+#define SUSP_SH_SF (1 << 4) /* Enable self-refresh */
+
+#endif /* _ASM_SH_SUSPEND_H */
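Note: a sketch of how the flags combine when requesting a low-power state; sh_mobile_call_standby() is an assumed name for the assembly entry point that consumes them:

static void enter_self_refresh_standby(void)
{
	/* software standby with SDRAM kept in self-refresh */
	sh_mobile_call_standby(SUSP_SH_STANDBY | SUSP_SH_SF);
}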
diff --git a/arch/sh/include/asm/timer.h b/arch/sh/include/asm/timer.h
index a7ca3a195bb5..4c3b66e30af2 100644
--- a/arch/sh/include/asm/timer.h
+++ b/arch/sh/include/asm/timer.h
@@ -9,7 +9,6 @@ struct sys_timer_ops {
int (*init)(void);
int (*start)(void);
int (*stop)(void);
- cycle_t (*read)(void);
#ifndef CONFIG_GENERIC_TIME
unsigned long (*get_offset)(void);
#endif
@@ -39,6 +38,7 @@ struct sys_timer *get_sys_timer(void);
/* arch/sh/kernel/time.c */
void handle_timer_tick(void);
-extern unsigned long sh_hpt_frequency;
+
+extern struct clocksource clocksource_sh;
#endif /* __ASM_SH_TIMER_H */
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 88ff1ae8a6b8..9c16f737074a 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -6,22 +6,106 @@
#endif
#ifndef __ASSEMBLY__
+#include <linux/pagemap.h>
+
+#ifdef CONFIG_MMU
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * TLB handling. This allows us to remove pages from the page
+ * tables, and efficiently handle the TLB issues.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int fullmm;
+ unsigned long start, end;
+};
-#define tlb_start_vma(tlb, vma) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end)
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-#define tlb_end_vma(tlb, vma) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+ if (tlb->fullmm) {
+ tlb->start = 0;
+ tlb->end = TASK_SIZE;
+ }
+}
+
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+ tlb->mm = mm;
+ tlb->fullmm = full_mm_flush;
+
+ init_tlb_gather(tlb);
+
+ return tlb;
+}
+
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ if (tlb->fullmm)
+ flush_tlb_mm(tlb->mm);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ put_cpu_var(mmu_gathers);
+}
+
+static inline void
+tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
+{
+ if (tlb->start > address)
+ tlb->start = address;
+ if (tlb->end < address + PAGE_SIZE)
+ tlb->end = address + PAGE_SIZE;
+}
/*
- * Flush whole TLBs for MM
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
*/
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (!tlb->fullmm)
+ flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (!tlb->fullmm && tlb->end) {
+ flush_tlb_range(vma, tlb->start, tlb->end);
+ init_tlb_gather(tlb);
+ }
+}
+
+#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
+#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp)
+
+#define tlb_migrate_finish(mm) do { } while (0)
+
+#else /* CONFIG_MMU */
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+#define tlb_flush(tlb) do { } while (0)
-#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
+#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */
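Note: for orientation, how the core mm code drives this interface when tearing down a range — a simplified sketch under stated assumptions, not verbatim kernel code; lookup_pte() is hypothetical:

static void unmap_range_sketch(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* partial flush */
	unsigned long addr;

	tlb_start_vma(tlb, vma);		/* flush_cache_range() */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t *pte = lookup_pte(mm, addr);	/* hypothetical     */
		tlb_remove_tlb_entry(tlb, pte, addr);	/* widens start..end */
	}
	tlb_end_vma(tlb, vma);		/* one flush_tlb_range() over the span */
	tlb_finish_mmu(tlb, start, end);
}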