#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
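
/*
 * A minimal usage sketch: the CPUID_* index above selects the opc2
 * field of the MRC, so reading the main ID and cache type registers
 * looks like this (variable names are illustrative only):
 *
 *	unsigned int id    = read_cpuid(CPUID_ID);
 *	unsigned int cache = read_cpuid(CPUID_CACHETYPE);
 */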
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
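
/*
 * A minimal sketch of the intended use (the value and the asm body are
 * illustrative, not taken from a real caller): pin an argument to a
 * fixed register, then prepend the check to the asm template.  If the
 * compiler allocates anything other than r0, assembly stops on .err:
 *
 *	register unsigned long r0 asm("r0") = val;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "mov	r0, r0"
 *		     : "+r" (r0));
 */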
#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
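
/*
 * A minimal usage sketch: xchg() atomically stores the new value and
 * returns the previous one, and tas() is simply xchg(ptr, 1), so a
 * crude test-and-set spin (the name `lock' is illustrative only) is:
 *
 *	static unsigned long lock;
 *
 *	while (tas(&lock))		spin: old value 1 means held
 *		continue;
 *	...				critical section
 *	xchg(&lock, 0);			atomic release
 */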
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})
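
/*
 * A minimal sketch of the intended read-modify-write pattern on the
 * control register, using the CR_* bits defined above.  The function
 * name is illustrative, not an existing kernel interface.
 */
static inline void __example_enable_alignment_aborts(void)
{
	/* set the A bit, preserving all other CP15 CR1 bits */
	set_cr(get_cr() | CR_A);
}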
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				   : : "r" (0) : "memory")
#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
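
/*
 * A minimal ordering sketch: on the producer side, wmb() makes the
 * payload visible before the flag that announces it.  The parameters
 * and function name are illustrative only.
 */
static inline void __example_publish(unsigned long *data,
				     volatile unsigned long *ready,
				     unsigned long val)
{
	*data = val;
	wmb();		/* order the payload before the ready flag */
	*ready = 1;
}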
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
	"cpsid	i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")

#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
		/* the comparison only typechecks x against temp */ \
		(void) (&temp == &x);				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory", "cc");					\
	})
/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable FIQs
 */
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable FIQs
 */
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

#endif

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags"	\
	: "=r" (x) : : "memory", "cc");				\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory", "cc")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(int)(flags & PSR_I_BIT);	\
})
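
/*
 * A minimal critical-section sketch: local_irq_save()/local_irq_restore()
 * nest safely because the old CPSR is kept in `flags'.  The function
 * name is illustrative only.
 */
static inline unsigned long __example_read_atomically(unsigned long *p)
{
	unsigned long flags, val;

	local_irq_save(flags);		/* mask IRQs, remembering prior state */
	val = *p;
	local_irq_restore(flags);	/* re-enable only if they were enabled */
	return val;
}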
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif