/* system_32.h */

#ifndef __ASM_SH_SYSTEM_32_H
#define __ASM_SH_SYSTEM_32_H

#include <linux/types.h>

#ifdef CONFIG_SH_DSP

#define is_dsp_enabled(tsk) \
	(!!(tsk->thread.dsp_status.status & SR_DSP))

#define __restore_dsp(tsk) \
do { \
	register u32 *__ts2 __asm__ ("r2") = \
		(u32 *)&tsk->thread.dsp_status; \
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"movs.l @r2+, a0\n\t" \
		"movs.l @r2+, a1\n\t" \
		"movs.l @r2+, a0g\n\t" \
		"movs.l @r2+, a1g\n\t" \
		"movs.l @r2+, m0\n\t" \
		"movs.l @r2+, m1\n\t" \
		"movs.l @r2+, x0\n\t" \
		"movs.l @r2+, x1\n\t" \
		"movs.l @r2+, y0\n\t" \
		"movs.l @r2+, y1\n\t" \
		"lds.l @r2+, dsr\n\t" \
		"ldc.l @r2+, rs\n\t" \
		"ldc.l @r2+, re\n\t" \
		"ldc.l @r2+, mod\n\t" \
		: : "r" (__ts2)); \
} while (0)

#define __save_dsp(tsk) \
do { \
	register u32 *__ts2 __asm__ ("r2") = \
		(u32 *)&tsk->thread.dsp_status + 14; \
	\
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"stc.l mod, @-r2\n\t" \
		"stc.l re, @-r2\n\t" \
		"stc.l rs, @-r2\n\t" \
		"sts.l dsr, @-r2\n\t" \
		"movs.l y1, @-r2\n\t" \
		"movs.l y0, @-r2\n\t" \
		"movs.l x1, @-r2\n\t" \
		"movs.l x0, @-r2\n\t" \
		"movs.l m1, @-r2\n\t" \
		"movs.l m0, @-r2\n\t" \
		"movs.l a1g, @-r2\n\t" \
		"movs.l a0g, @-r2\n\t" \
		"movs.l a1, @-r2\n\t" \
		"movs.l a0, @-r2\n\t" \
		: : "r" (__ts2)); \
} while (0)

#else

#define is_dsp_enabled(tsk)	(0)
#define __save_dsp(tsk)		do { } while (0)
#define __restore_dsp(tsk)	do { } while (0)

#endif
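
/*
 * Note on the layout assumed above: __save_dsp() starts at the end of
 * thread.dsp_status (base + 14 32-bit words) and pushes the DSP context
 * with pre-decrement, so __restore_dsp() can reload it with
 * post-increment from the base in the reverse order (a0, a1, a0g, a1g,
 * m0, m1, x0, x1, y0, y1, dsr, rs, re, mod).  Callers are expected to
 * guard both with is_dsp_enabled(), as switch_to() and
 * finish_arch_switch() below do.
 */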
#if defined(CONFIG_CPU_SH4A)
#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
#else
#define __icbi(addr)	mb()
#endif

#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
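
/*
 * Illustrative sketch, not taken from this header: the block ops above
 * act on the cache line containing 'addr' -- icbi invalidates an
 * instruction cache block, ocbi invalidates, ocbp purges (writes back
 * and invalidates) and ocbwb writes back an operand cache block.  A
 * minimal example of combining them to make freshly written code
 * visible to the instruction cache could look like the function below.
 * EXAMPLE_CACHE_LINE is a hypothetical line size used only for this
 * sketch; real code should use the kernel's own flush helpers.
 */
#define EXAMPLE_CACHE_LINE	32

static inline void example_sync_icache_range(unsigned long start,
					      unsigned long end)
{
	unsigned long v;

	for (v = start & ~(EXAMPLE_CACHE_LINE - 1UL); v < end;
	     v += EXAMPLE_CACHE_LINE) {
		__ocbwb(v);	/* write the operand cache block back */
		__icbi(v);	/* drop any stale instruction cache block */
	}
}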

struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);

/*
 * switch_to(prev, next, last) switches from task 'prev' to task 'next',
 * saving prev's callee-saved registers, stack pointer and PC in its
 * thread struct and loading next's.  'last' receives the task that was
 * actually running before control returned to this context.
 */
#define switch_to(prev, next, last) \
do { \
	register u32 *__ts1 __asm__ ("r1"); \
	register u32 *__ts2 __asm__ ("r2"); \
	register u32 *__ts4 __asm__ ("r4"); \
	register u32 *__ts5 __asm__ ("r5"); \
	register u32 *__ts6 __asm__ ("r6"); \
	register u32 __ts7 __asm__ ("r7"); \
	struct task_struct *__last; \
	\
	if (is_dsp_enabled(prev)) \
		__save_dsp(prev); \
	\
	__ts1 = (u32 *)&prev->thread.sp; \
	__ts2 = (u32 *)&prev->thread.pc; \
	__ts4 = (u32 *)prev; \
	__ts5 = (u32 *)next; \
	__ts6 = (u32 *)&next->thread.sp; \
	__ts7 = next->thread.pc; \
	\
	__asm__ __volatile__ ( \
		".balign 4\n\t" \
		"stc.l gbr, @-r15\n\t" \
		"sts.l pr, @-r15\n\t" \
		"mov.l r8, @-r15\n\t" \
		"mov.l r9, @-r15\n\t" \
		"mov.l r10, @-r15\n\t" \
		"mov.l r11, @-r15\n\t" \
		"mov.l r12, @-r15\n\t" \
		"mov.l r13, @-r15\n\t" \
		"mov.l r14, @-r15\n\t" \
		"mov.l r15, @r1\t! save SP\n\t" \
		"mov.l @r6, r15\t! change to new stack\n\t" \
		"mova 1f, %0\n\t" \
		"mov.l %0, @r2\t! save PC\n\t" \
		"mov.l 2f, %0\n\t" \
		"jmp @%0\t! call __switch_to\n\t" \
		" lds r7, pr\t! with return to new PC\n\t" \
		".balign 4\n" \
		"2:\n\t" \
		".long __switch_to\n" \
		"1:\n\t" \
		"mov.l @r15+, r14\n\t" \
		"mov.l @r15+, r13\n\t" \
		"mov.l @r15+, r12\n\t" \
		"mov.l @r15+, r11\n\t" \
		"mov.l @r15+, r10\n\t" \
		"mov.l @r15+, r9\n\t" \
		"mov.l @r15+, r8\n\t" \
		"lds.l @r15+, pr\n\t" \
		"ldc.l @r15+, gbr\n\t" \
		: "=z" (__last) \
		: "r" (__ts1), "r" (__ts2), "r" (__ts4), \
		  "r" (__ts5), "r" (__ts6), "r" (__ts7) \
		: "r3", "t"); \
	\
	last = __last; \
} while (0)
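
/*
 * The "=z" output ties __last to r0, which is where __switch_to()
 * returns the task that was switched away from; that value is what
 * ends up in 'last' once this context runs again.
 */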
#define finish_arch_switch(prev) \
do { \
	if (is_dsp_enabled(prev)) \
		__restore_dsp(prev); \
} while (0)

#define __uses_jump_to_uncached \
	noinline __attribute__ ((__section__ (".uncached.text")))
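
/*
 * __uses_jump_to_uncached marks a function noinline and places it in
 * the .uncached.text section, so the jump_to_uncached()/back_to_cached()
 * pair below can run it through the uncached mapping.
 */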

/*
 * Jump to uncached area.
 * When handling TLB or caches, we need to do it from an uncached area.
 */
#define jump_to_uncached() \
do { \
	unsigned long __dummy; \
	\
	__asm__ __volatile__( \
		"mova 1f, %0\n\t" \
		"add %1, %0\n\t" \
		"jmp @%0\n\t" \
		" nop\n\t" \
		".balign 4\n" \
		"1:" \
		: "=&z" (__dummy) \
		: "r" (cached_to_uncached)); \
} while (0)

/*
 * Back to cached area.
 */
#define back_to_cached() \
do { \
	unsigned long __dummy; \
	ctrl_barrier(); \
	__asm__ __volatile__( \
		"mov.l 1f, %0\n\t" \
		"jmp @%0\n\t" \
		" nop\n\t" \
		".balign 4\n" \
		"1: .long 2f\n" \
		"2:" \
		: "=&r" (__dummy)); \
} while (0)
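
/*
 * Typical usage (an illustrative sketch, not taken from this header):
 * a routine tagged __uses_jump_to_uncached brackets its critical
 * section with the two macros above, e.g.
 *
 *	static void __uses_jump_to_uncached example_cache_op(void)
 *	{
 *		jump_to_uncached();
 *		... poke cache/TLB control registers here ...
 *		back_to_cached();
 *	}
 *
 * cached_to_uncached is assumed to be the offset between the cached
 * and uncached mappings of the kernel text, provided by the SH mm
 * setup code.
 */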

#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
	unsigned long _vec; \
	\
	__asm__ __volatile__ ( \
		"stc r2_bank, %0\n\t" \
		: "=r" (_vec) \
	); \
	\
	_vec; \
})
#else
#define lookup_exception_vector() \
({ \
	unsigned long _vec; \
	__asm__ __volatile__ ( \
		"mov r4, %0\n\t" \
		: "=r" (_vec) \
	); \
	\
	_vec; \
})
#endif
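
/*
 * lookup_exception_vector() reads the exception event code left behind
 * by the low-level entry code: in the banked register r2_bank when the
 * CPU has SR.RB, otherwise in r4.  It is only meaningful early in an
 * exception handler, before those registers are clobbered.
 */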

static inline reg_size_t register_align(void *val)
{
	return (unsigned long)(signed long)val;
}

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int);

asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address);
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs __regs);
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				     unsigned long r6, unsigned long r7,
				     struct pt_regs __regs);
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs __regs);

#endif /* __ASM_SH_SYSTEM_32_H */