system_32.h

#ifndef __ASM_SH_SYSTEM_32_H
#define __ASM_SH_SYSTEM_32_H

#include <linux/types.h>
#include <asm/mmu.h>
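
/*
 * DSP context handling: tsk->thread.dsp_status holds the saved DSP state
 * as 14 32-bit values in the order a0, a1, a0g, a1g, m0, m1, x0, x1, y0,
 * y1, dsr, rs, re, mod.  __save_dsp() stores them with pre-decrement from
 * the end of that block (hence the "+ 14" offset), while __restore_dsp()
 * reloads them with post-increment from the start, so both walk the same
 * memory layout.
 */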
#ifdef CONFIG_SH_DSP
#define is_dsp_enabled(tsk) \
	(!!(tsk->thread.dsp_status.status & SR_DSP))

#define __restore_dsp(tsk) \
do { \
	register u32 *__ts2 __asm__ ("r2") = \
		(u32 *)&tsk->thread.dsp_status; \
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"movs.l @r2+, a0\n\t" \
		"movs.l @r2+, a1\n\t" \
		"movs.l @r2+, a0g\n\t" \
		"movs.l @r2+, a1g\n\t" \
		"movs.l @r2+, m0\n\t" \
		"movs.l @r2+, m1\n\t" \
		"movs.l @r2+, x0\n\t" \
		"movs.l @r2+, x1\n\t" \
		"movs.l @r2+, y0\n\t" \
		"movs.l @r2+, y1\n\t" \
		"lds.l @r2+, dsr\n\t" \
		"ldc.l @r2+, rs\n\t" \
		"ldc.l @r2+, re\n\t" \
		"ldc.l @r2+, mod\n\t" \
		: : "r" (__ts2)); \
} while (0)

#define __save_dsp(tsk) \
do { \
	register u32 *__ts2 __asm__ ("r2") = \
		(u32 *)&tsk->thread.dsp_status + 14; \
\
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"stc.l mod, @-r2\n\t" \
		"stc.l re, @-r2\n\t" \
		"stc.l rs, @-r2\n\t" \
		"sts.l dsr, @-r2\n\t" \
		"movs.l y1, @-r2\n\t" \
		"movs.l y0, @-r2\n\t" \
		"movs.l x1, @-r2\n\t" \
		"movs.l x0, @-r2\n\t" \
		"movs.l m1, @-r2\n\t" \
		"movs.l m0, @-r2\n\t" \
		"movs.l a1g, @-r2\n\t" \
		"movs.l a0g, @-r2\n\t" \
		"movs.l a1, @-r2\n\t" \
		"movs.l a0, @-r2\n\t" \
		: : "r" (__ts2)); \
} while (0)
#else
#define is_dsp_enabled(tsk)	(0)
#define __save_dsp(tsk)		do { } while (0)
#define __restore_dsp(tsk)	do { } while (0)
#endif
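
/*
 * Cache block operations.  icbi invalidates an instruction cache block
 * (SH-4A only; older parts fall back to a plain memory barrier), ocbp
 * writes back and invalidates an operand cache block, ocbi invalidates a
 * block without write-back, and ocbwb writes a block back while leaving
 * it valid.
 */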
#if defined(CONFIG_CPU_SH4A)
#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
#else
#define __icbi(addr)	mb()
#endif

#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))

struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);

/*
 * switch_to() should switch tasks to task nr n, first checking that n
 * isn't the current task, in which case it does nothing.
 */
#define switch_to(prev, next, last) \
do { \
	register u32 *__ts1 __asm__ ("r1"); \
	register u32 *__ts2 __asm__ ("r2"); \
	register u32 *__ts4 __asm__ ("r4"); \
	register u32 *__ts5 __asm__ ("r5"); \
	register u32 *__ts6 __asm__ ("r6"); \
	register u32 __ts7 __asm__ ("r7"); \
	struct task_struct *__last; \
\
	if (is_dsp_enabled(prev)) \
		__save_dsp(prev); \
\
	__ts1 = (u32 *)&prev->thread.sp; \
	__ts2 = (u32 *)&prev->thread.pc; \
	__ts4 = (u32 *)prev; \
	__ts5 = (u32 *)next; \
	__ts6 = (u32 *)&next->thread.sp; \
	__ts7 = next->thread.pc; \
\
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"stc.l gbr, @-r15\n\t" \
		"sts.l pr, @-r15\n\t" \
		"mov.l r8, @-r15\n\t" \
		"mov.l r9, @-r15\n\t" \
		"mov.l r10, @-r15\n\t" \
		"mov.l r11, @-r15\n\t" \
		"mov.l r12, @-r15\n\t" \
		"mov.l r13, @-r15\n\t" \
		"mov.l r14, @-r15\n\t" \
		"mov.l r15, @r1\t! save SP\n\t" \
		"mov.l @r6, r15\t! change to new stack\n\t" \
		"mova 1f, %0\n\t" \
		"mov.l %0, @r2\t! save PC\n\t" \
		"mov.l 2f, %0\n\t" \
		"jmp @%0\t! call __switch_to\n\t" \
		" lds r7, pr\t! with return to new PC\n\t" \
		".balign 4\n" \
		"2:\n\t" \
		".long __switch_to\n" \
		"1:\n\t" \
		"mov.l @r15+, r14\n\t" \
		"mov.l @r15+, r13\n\t" \
		"mov.l @r15+, r12\n\t" \
		"mov.l @r15+, r11\n\t" \
		"mov.l @r15+, r10\n\t" \
		"mov.l @r15+, r9\n\t" \
		"mov.l @r15+, r8\n\t" \
		"lds.l @r15+, pr\n\t" \
		"ldc.l @r15+, gbr\n\t" \
		: "=z" (__last) \
		: "r" (__ts1), "r" (__ts2), "r" (__ts4), \
		  "r" (__ts5), "r" (__ts6), "r" (__ts7) \
		: "r3", "t"); \
\
	last = __last; \
} while (0)
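
/*
 * The inline asm above builds the switch frame on the outgoing task's
 * kernel stack: gbr, pr and r8-r14 are pushed, the resulting r15 is
 * stored in prev->thread.sp and the address of label 1 in
 * prev->thread.pc, then the incoming task's stack pointer and return
 * address are loaded before jumping to __switch_to.  When the outgoing
 * task is later switched back in, it resumes at label 1 and pops the
 * same registers in reverse order.
 */
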
#define finish_arch_switch(prev) \
do { \
	if (is_dsp_enabled(prev)) \
		__restore_dsp(prev); \
} while (0)

/*
 * Jump to uncached area.
 * When handling TLB or caches, we need to do it from an uncached area.
 */
#define jump_to_uncached() \
do { \
	unsigned long __dummy; \
\
	__asm__ __volatile__( \
		"mova 1f, %0\n\t" \
		"add %1, %0\n\t" \
		"jmp @%0\n\t" \
		" nop\n\t" \
		".balign 4\n" \
		"1:" \
		: "=&z" (__dummy) \
		: "r" (cached_to_uncached)); \
} while (0)
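
/*
 * jump_to_uncached() takes the PC-relative address of label 1 with mova,
 * adds cached_to_uncached (the offset between the cached and uncached
 * mappings of the kernel) and jumps there, so execution continues at
 * label 1 but through the uncached mapping.
 */
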
/*
 * Back to cached area.
 */
#define back_to_cached() \
do { \
	unsigned long __dummy; \
	ctrl_barrier(); \
	__asm__ __volatile__( \
		"mov.l 1f, %0\n\t" \
		"jmp @%0\n\t" \
		" nop\n\t" \
		".balign 4\n" \
		"1: .long 2f\n" \
		"2:" \
		: "=&r" (__dummy)); \
} while (0)
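
/*
 * On CPUs with SR.RB (a banked register set for exception handling), the
 * exception entry code leaves the vector in r2_bank; without register
 * banks it is picked up from r4 instead.
 */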
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
	unsigned long _vec; \
\
	__asm__ __volatile__ ( \
		"stc r2_bank, %0\n\t" \
		: "=r" (_vec) \
	); \
\
	_vec; \
})
#else
#define lookup_exception_vector() \
({ \
	unsigned long _vec; \
	__asm__ __volatile__ ( \
		"mov r4, %0\n\t" \
		: "=r" (_vec) \
	); \
\
	_vec; \
})
#endif

static inline reg_size_t register_align(void *val)
{
	return (unsigned long)(signed long)val;
}

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int);
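
/*
 * trigger_address_error() deliberately raises an address error exception
 * when running in 29-bit mode: it loads SR with 0x10000000 (setting the
 * BL bit) and then performs a longword load from the misaligned address
 * 0x80000001.
 */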
static inline void trigger_address_error(void)
{
	if (__in_29bit_mode())
		__asm__ __volatile__ (
			"ldc %0, sr\n\t"
			"mov.l @%1, %0"
			:
			: "r" (0x10000000), "r" (0x80000001)
		);
}

asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address);
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs __regs);
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				     unsigned long r6, unsigned long r7,
				     struct pt_regs __regs);
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs __regs);
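
/*
 * SR.BL (bit 28, mask 0x10000000) blocks exceptions and interrupts while
 * set.  set_bl_bit() sets it and also clears the IMASK field (the
 * 0xffffff0f mask covers SR bits 4-7); clear_bl_bit() simply clears BL.
 */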
static inline void set_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc sr, %0\n\t"
		"or %2, %0\n\t"
		"and %3, %0\n\t"
		"ldc %0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "r" (0x10000000), "r" (0xffffff0f)
		: "memory"
	);
}

static inline void clear_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc sr, %0\n\t"
		"and %2, %0\n\t"
		"ldc %0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "1" (~0x10000000)
		: "memory"
	);
}

#endif /* __ASM_SH_SYSTEM_32_H */