@@ -14,37 +14,6 @@
 #define CPU_ARCH_ARMv6 8
 #define CPU_ARCH_ARMv7 9
 
-/*
- * CR1 bits (CP#15 CR1)
- */
-#define CR_M    (1 << 0)   /* MMU enable */
-#define CR_A    (1 << 1)   /* Alignment abort enable */
-#define CR_C    (1 << 2)   /* Dcache enable */
-#define CR_W    (1 << 3)   /* Write buffer enable */
-#define CR_P    (1 << 4)   /* 32-bit exception handler */
-#define CR_D    (1 << 5)   /* 32-bit data address range */
-#define CR_L    (1 << 6)   /* Implementation defined */
-#define CR_B    (1 << 7)   /* Big endian */
-#define CR_S    (1 << 8)   /* System MMU protection */
-#define CR_R    (1 << 9)   /* ROM MMU protection */
-#define CR_F    (1 << 10)  /* Implementation defined */
-#define CR_Z    (1 << 11)  /* Implementation defined */
-#define CR_I    (1 << 12)  /* Icache enable */
-#define CR_V    (1 << 13)  /* Vectors relocated to 0xffff0000 */
-#define CR_RR   (1 << 14)  /* Round Robin cache replacement */
-#define CR_L4   (1 << 15)  /* LDR pc can set T bit */
-#define CR_DT   (1 << 16)
-#define CR_IT   (1 << 18)
-#define CR_ST   (1 << 19)
-#define CR_FI   (1 << 21)  /* Fast interrupt (lower latency mode) */
-#define CR_U    (1 << 22)  /* Unaligned access operation */
-#define CR_XP   (1 << 23)  /* Extended page tables */
-#define CR_VE   (1 << 24)  /* Vectored interrupts */
-#define CR_EE   (1 << 25)  /* Exception (Big) Endian */
-#define CR_TRE  (1 << 28)  /* TEX remap enable */
-#define CR_AFE  (1 << 29)  /* Access flag enable */
-#define CR_TE   (1 << 30)  /* Thumb exception enable */
-
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
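For review context: the CR_* constants removed above name individual bits of the CP15 c1 control register (the SCTLR), and callers consume them as plain bitmask tests against the value returned by get_cr() (itself removed in the last hunk of this patch). A minimal illustrative sketch of the usual read-modify-write pattern, not code from the tree:

    unsigned int cr = get_cr();     /* MRC p15, 0, <Rd>, c1, c0, 0 */

    if (cr & CR_M)                  /* MMU currently enabled? */
            pr_info("vectors at %s\n",
                    (cr & CR_V) ? "0xffff0000" : "0x00000000");

    set_cr(cr | CR_I);              /* e.g. switch on the I-cache */

Note that set_cr() ends with an isb(), so the new control register value is in force before the next instruction executes.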
@@ -119,12 +88,6 @@ extern void (*arm_pm_restart)(char str, const char *cmd);
 
 extern unsigned int user_debug;
 
-#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high()  (cr_alignment & CR_V)
-#else
-#define vectors_high()  (0)
-#endif
-
 #if __LINUX_ARM_ARCH__ >= 7 || \
     (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
 #define sev() __asm__ __volatile__ ("sev" : : : "memory")
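The vectors_high() macro deleted here just reports the cached CR_V bit: only ARMv4 and later can relocate the exception vectors to 0xffff0000, hence the (0) fallback on older architectures. A hypothetical consumer for illustration (vectors_base() is invented for this sketch, not a kernel function):

    /* Illustrative only: derive the exception vector base from CR_V. */
    static unsigned long vectors_base(void)
    {
            return vectors_high() ? 0xffff0000UL : 0UL;
    }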
@@ -185,46 +148,6 @@ extern unsigned int user_debug;
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-extern unsigned long cr_no_alignment;  /* defined in entry-armv.S */
-extern unsigned long cr_alignment;     /* defined in entry-armv.S */
-
-static inline unsigned int get_cr(void)
-{
-	unsigned int val;
-	asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
-	return val;
-}
-
-static inline void set_cr(unsigned int val)
-{
-	asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
-	  : : "r" (val) : "cc");
-	isb();
-}
-
-#ifndef CONFIG_SMP
-extern void adjust_cr(unsigned long mask, unsigned long set);
-#endif
-
-#define CPACC_FULL(n)     (3 << (n * 2))
-#define CPACC_SVC(n)      (1 << (n * 2))
-#define CPACC_DISABLE(n)  (0 << (n * 2))
-
-static inline unsigned int get_copro_access(void)
-{
-	unsigned int val;
-	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
-	  : "=r" (val) : : "cc");
-	return val;
-}
-
-static inline void set_copro_access(unsigned int val)
-{
-	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
-	  : : "r" (val) : "cc");
-	isb();
-}
-
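The CPACC_* macros and copro access helpers removed above drive the CP15 coprocessor access control register (CPACR, c1/c0/2): each coprocessor n owns a two-bit field, where 3 grants full (user and kernel) access, 1 grants privileged-only access, and 0 disables the coprocessor. A minimal sketch of the classic consumer, roughly how vfpmodule.c grants access to the VFP coprocessors cp10 and cp11 (illustrative, not part of this patch):

    /* Enable user and kernel access to cp10/cp11 (VFP). */
    unsigned int access = get_copro_access();
    set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));

set_copro_access() already issues the isb() required before the new permissions are guaranteed to apply to subsequent coprocessor instructions.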
 /*
  * switch_mm() may do a full cache flush over the context switch,
  * so enable interrupts over the context switch to avoid high