Diffstat (limited to 'arch/mips/include/asm/netlogic/mips-extns.h')
-rw-r--r--	arch/mips/include/asm/netlogic/mips-extns.h	223
1 file changed, 222 insertions(+), 1 deletion(-)
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 8c53d0ba4bf2..8ad2e0f81719 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -49,7 +49,7 @@
*/
#define write_c0_eimr(val) \
do { \
- if (sizeof(unsigned long) == 4) { \
+ if (sizeof(unsigned long) == 4) { \
unsigned long __flags; \
\
local_irq_save(__flags); \
@@ -68,9 +68,230 @@ do { \
__write_64bit_c0_register($9, 7, (val)); \
} while (0)
+/*
+ * Handling the 64-bit EIMR and EIRR registers in 32-bit mode with the
+ * standard functions would be very inefficient, so optimized versions
+ * of the common operations on these registers are provided here.
+ *
+ * Call with interrupts disabled.
+ */
+static inline void ack_c0_eirr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv $1, $1, %0\n\t"
+ "dmtc0 $1, $9, 6\n\t"
+ ".set pop"
+ : : "r" (irq));
+}
+
+static inline void set_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
+
+static inline void clear_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "xor $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
+
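As a usage illustration (not part of the patch): a per-IRQ handler might combine the three helpers above as in the sketch below. The function name and flow are hypothetical; the actual netlogic IRQ chip code may wire this up differently.

/* Hypothetical sketch: mask, acknowledge and re-enable one netlogic IRQ.
 * Runs in hardirq context, so interrupts are already disabled, as the
 * comment above requires. "irq" is the EIRR/EIMR bit number. */
static void example_handle_nlm_irq(int irq)
{
	clear_c0_eimr(irq);	/* mask the source in EIMR */
	ack_c0_eirr(irq);	/* clear the pending bit in EIRR */
	/* ... service the device here ... */
	set_c0_eimr(irq);	/* unmask the source again */
}
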
+/*
+ * Read c0 eimr and c0 eirr, and AND the two values; the result is the
+ * set of interrupts which are raised and not masked.
+ */
+static inline uint64_t read_c0_eirr_and_eimr(void)
+{
+ uint64_t val;
+
+#ifdef CONFIG_64BIT
+ val = read_c0_eimr() & read_c0_eirr();
+#else
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "dmfc0 %M0, $9, 6\n\t"
+ "dmfc0 %L0, $9, 7\n\t"
+ "and %M0, %L0\n\t"
+ "dsll %L0, %M0, 32\n\t"
+ "dsra %M0, %M0, 32\n\t"
+ "dsra %L0, %L0, 32\n\t"
+ ".set pop"
+ : "=r" (val));
+#endif
+
+ return val;
+}
+
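For illustration, a dispatch loop built on this helper might look like the sketch below. __ffs64() is from <linux/bitops.h>; do_IRQ() stands in for the platform's IRQ entry point. The wiring is an assumption, not taken from this patch.

/* Hypothetical dispatch sketch: service every pending, unmasked source. */
static void example_dispatch_eirr(void)
{
	uint64_t pending;

	pending = read_c0_eirr_and_eimr();
	while (pending) {
		int irq = __ffs64(pending);	/* lowest pending IRQ number */

		do_IRQ(irq);			/* assumed platform entry point */
		pending &= pending - 1;		/* clear that bit, continue */
	}
}
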
static inline int hard_smp_processor_id(void)
{
return __read_32bit_c0_register($15, 1) & 0x3ff;
}
+static inline int nlm_nodeid(void)
+{
+ return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
+}
+
+static inline unsigned int nlm_core_id(void)
+{
+ return (read_c0_ebase() & 0x1c) >> 2;
+}
+
+static inline unsigned int nlm_thread_id(void)
+{
+ return read_c0_ebase() & 0x3;
+}
+
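Taken together, the three fields above identify one hardware thread. A composed id could be built as in this sketch; the 2-bit thread / 3-bit core / 2-bit node packing mirrors the masks above, but nlm_hw_thread_id() itself is hypothetical and not part of this patch.

/* Hypothetical: pack node/core/thread into a single hardware thread id,
 * using the same bit widths as the extractors above. */
static inline unsigned int nlm_hw_thread_id(void)
{
	return (nlm_nodeid() << 5) | (nlm_core_id() << 2) | nlm_thread_id();
}
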
+#define __read_64bit_c2_split(source, sel) \
+({ \
+ unsigned long long __val; \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%M0, " #source "\n\t" \
+ "dsll\t%L0, %M0, 32\n\t" \
+ "dsra\t%M0, %M0, 32\n\t" \
+ "dsra\t%L0, %L0, 32\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__val)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%M0, " #source ", " #sel "\n\t" \
+ "dsll\t%L0, %M0, 32\n\t" \
+ "dsra\t%M0, %M0, 32\n\t" \
+ "dsra\t%L0, %L0, 32\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__val)); \
+ local_irq_restore(__flags); \
+ \
+ __val; \
+})
+
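A note on the constraint modifiers: %M0 and %L0 name the most- and least-significant 32-bit registers of the 64-bit __val pair, and the dsll/dsra sequence leaves each half sign-extended so it is valid in a 32-bit register. In plain C the end state corresponds to this sketch (illustrative only):

/* Illustration of what the split leaves in the register pair:
 * high word in %M0, low word in %L0, recombined on read-out. */
static unsigned long long example_split_readback(unsigned long long cp2val)
{
	unsigned int hi = (unsigned int)(cp2val >> 32);	/* lands in %M0 */
	unsigned int lo = (unsigned int)cp2val;		/* lands in %L0 */

	return ((unsigned long long)hi << 32) | lo;
}
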
+#define __write_64bit_c2_split(source, sel, val) \
+do { \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc2\t%L0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "r" (val)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc2\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "r" (val)); \
+ local_irq_restore(__flags); \
+} while (0)
+
+#define __read_32bit_c2_register(source, sel) \
+({ uint32_t __res; \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mfc2\t%0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mfc2\t%0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __read_64bit_c2_register(source, sel) \
+({ unsigned long long __res; \
+ if (sizeof(unsigned long) == 4) \
+ __res = __read_64bit_c2_split(source, sel); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __write_64bit_c2_register(register, sel, value) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_64bit_c2_split(register, sel, value); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmtc2\t%z0, " #register "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmtc2\t%z0, " #register ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+} while (0)
+
+#define __write_32bit_c2_register(reg, sel, value) \
+({ \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mtc2\t%z0, " #reg "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mtc2\t%z0, " #reg ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+})
+
#endif /*_ASM_NLM_MIPS_EXTS_H */
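
A typical consumer of the CP2 accessor macros above wraps them in named per-register accessors, along the lines of the sketch below (in the style of the XLR fast messaging network definitions). The names, register numbers and selects here are placeholders, not taken from this patch.

/* Hypothetical named accessors built on the macros above;
 * $1/$2/$3 and the selects are illustrative only. */
#define example_read_c2_status()	__read_32bit_c2_register($2, 0)
#define example_write_c2_config(v)	__write_32bit_c2_register($3, 0, v)
#define example_read_c2_rxmsg()		__read_64bit_c2_register($1, 0)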