#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/entry.h>
/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to most recently
 * used the math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior if a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab:
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);

#define switch_to(prev,next,last) \
{ \
        void *_last; \
        __asm__ __volatile__( \
                "movel %1, %%a0\n\t" \
                "movel %2, %%a1\n\t" \
                "jbsr resume\n\t" \
                "movel %%d1, %0\n\t" \
                : "=d" (_last) \
                : "d" (prev), "d" (next) \
                : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
        (last) = _last; \
}
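
/*
 * Illustrative sketch only (not part of this header): how a scheduler
 * core might invoke switch_to() with the register conventions described
 * above. pick_next_task() and the surrounding function are hypothetical
 * names used purely for illustration.
 */
#if 0
static void example_context_switch(struct task_struct *prev)
{
        struct task_struct *next = pick_next_task();    /* hypothetical helper */
        struct task_struct *last;

        /* switch_to() does nothing useful if prev == next, so skip it */
        if (prev != next)
                switch_to(prev, next, last);    /* 'last' receives the task we came from */
}
#endif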
#ifdef CONFIG_COLDFIRE
#define local_irq_enable() __asm__ __volatile__ ( \
        "move %/sr,%%d0\n\t" \
        "andi.l #0xf8ff,%%d0\n\t" \
        "move %%d0,%/sr\n" \
        : /* no outputs */ \
        : \
        : "cc", "%d0", "memory")

#define local_irq_disable() __asm__ __volatile__ ( \
        "move %/sr,%%d0\n\t" \
        "ori.l #0x0700,%%d0\n\t" \
        "move %%d0,%/sr\n" \
        : /* no outputs */ \
        : \
        : "cc", "%d0", "memory")

/* For spinlocks etc */
#define local_irq_save(x) __asm__ __volatile__ ( \
        "movew %%sr,%0\n\t" \
        "movew #0x0700,%%d0\n\t" \
        "or.l %0,%%d0\n\t" \
        "movew %%d0,%/sr" \
        : "=d" (x) \
        : \
        : "cc", "%d0", "memory")

#else

/* portable version */ /* FIXME - see entry.h */
#define ALLOWINT 0xf8ff

#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#endif

#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

/* For spinlocks etc */
#ifndef local_irq_save
#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0)
#endif

#define irqs_disabled() \
({ \
        unsigned long flags; \
        local_save_flags(flags); \
        ((flags & 0x0700) == 0x0700); \
})
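
/*
 * Illustrative sketch only: the canonical critical-section pattern built
 * on local_irq_save()/local_irq_restore() above. Restoring the saved SR,
 * rather than unconditionally re-enabling interrupts, makes the pattern
 * safe when nested or called from interrupt context. The function name
 * is hypothetical.
 */
#if 0
static int example_protected_update(volatile int *counter)
{
        unsigned long flags;
        int val;

        local_irq_save(flags);          /* save SR, raise IPL to 7 */
        val = ++*counter;               /* protected read-modify-write */
        local_irq_restore(flags);       /* restore previous interrupt level */
        return val;
}
#endif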
#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop() asm volatile ("nop"::)
#define mb()  asm volatile ("" : : :"memory")
#define rmb() asm volatile ("" : : :"memory")
#define wmb() asm volatile ("" : : :"memory")
#define set_rmb(var, value) do { xchg(&var, value); } while (0)
#define set_mb(var, value)  set_rmb(var, value)

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
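
/*
 * Illustrative sketch only: how wmb()/rmb() pair up in a producer/consumer
 * handoff. On this family they compile to plain compiler barriers (see
 * above), but the pattern documents the ordering the code relies on.
 * The variables and function names are hypothetical.
 */
#if 0
static int example_data;
static volatile int example_ready;

static void example_producer(int v)
{
        example_data = v;       /* publish payload first */
        wmb();                  /* order payload before flag */
        example_ready = 1;      /* then raise the flag */
}

static int example_consumer(void)
{
        while (!example_ready)
                ;               /* wait for the flag */
        rmb();                  /* order flag before payload */
        return example_data;
}
#endif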
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        unsigned long tmp, flags;

        local_irq_save(flags);

        switch (size) {
        case 1:
                __asm__ __volatile__
                ("moveb %2,%0\n\t"
                 "moveb %1,%2"
                : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
                break;
        case 2:
                __asm__ __volatile__
                ("movew %2,%0\n\t"
                 "movew %1,%2"
                : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
                break;
        case 4:
                __asm__ __volatile__
                ("movel %2,%0\n\t"
                 "movel %1,%2"
                : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
                break;
        }
        local_irq_restore(flags);
        return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__
                ("moveb %2,%0\n\t"
                 "1:\n\t"
                 "casb %0,%1,%2\n\t"
                 "jne 1b"
                : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
                break;
        case 2:
                __asm__ __volatile__
                ("movew %2,%0\n\t"
                 "1:\n\t"
                 "casw %0,%1,%2\n\t"
                 "jne 1b"
                : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
                break;
        case 4:
                __asm__ __volatile__
                ("movel %2,%0\n\t"
                 "1:\n\t"
                 "casl %0,%1,%2\n\t"
                 "jne 1b"
                : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
                break;
        }
        return x;
}
#endif
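
/*
 * Illustrative sketch only: a minimal busy-wait lock built on tas()
 * above. tas() atomically stores 1 and returns the previous value, so
 * a return of 0 means this caller took the lock. The variable and
 * function names are hypothetical.
 */
#if 0
static volatile int example_lock;

static void example_spin_lock(void)
{
        while (tas(&example_lock))
                ;                       /* spin until the old value was 0 */
}

static void example_spin_unlock(void)
{
        example_lock = 0;               /* release: plain store is enough here */
}
#endif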
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __HAVE_ARCH_CMPXCHG 1

static __inline__ unsigned long
cmpxchg(volatile int *p, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        if ((prev = *p) == old)
                *p = new;
        local_irq_restore(flags);
        return(prev);
}
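
/*
 * Illustrative sketch only: the usual cmpxchg() retry loop. Success is
 * detected by comparing the returned value with 'old', exactly as the
 * comment above describes. The function name is hypothetical.
 */
#if 0
static void example_atomic_add(volatile int *p, int inc)
{
        int old;

        do {
                old = *p;               /* snapshot the current value */
        } while (cmpxchg(p, old, old + inc) != old);    /* retry if it changed */
}
#endif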
#ifdef CONFIG_M68332
#define HARD_RESET_NOW() ({ \
        local_irq_disable(); \
        asm(" \
        movew #0x0000, 0xfffa6a; \
        reset; \
        /*movew #0x1557, 0xfffa44;*/ \
        /*movew #0x0155, 0xfffa46;*/ \
        moveal #0, %a0; \
        movec %a0, %vbr; \
        moveal 0, %sp; \
        moveal 4, %a0; \
        jmp (%a0); \
        "); \
})
#endif

#if defined( CONFIG_M68328 ) || defined( CONFIG_M68EZ328 ) || \
        defined (CONFIG_M68360) || defined( CONFIG_M68VZ328 )
#define HARD_RESET_NOW() ({ \
        local_irq_disable(); \
        asm(" \
        moveal #0x10c00000, %a0; \
        moveb #0, 0xFFFFF300; \
        moveal 0(%a0), %sp; \
        moveal 4(%a0), %a0; \
        jmp (%a0); \
        "); \
})
#endif
#ifdef CONFIG_COLDFIRE
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
/*
 * Need to account for broken early mask of 5272 silicon. So don't
 * jump through the original start address. Jump straight into the
 * known start of the FLASH code.
 */
#define HARD_RESET_NOW() ({ \
        asm(" \
        movew #0x2700, %sr; \
        jmp 0xf0000400; \
        "); \
})
#elif defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
        defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
        defined(CONFIG_CLEOPATRA)
#define HARD_RESET_NOW() ({ \
        asm(" \
        movew #0x2700, %sr; \
        moveal #0x10000044, %a0; \
        movel #0xffffffff, (%a0); \
        moveal #0x10000001, %a0; \
        moveb #0x00, (%a0); \
        moveal #0xf0000004, %a0; \
        moveal (%a0), %a0; \
        jmp (%a0); \
        "); \
})
#elif defined(CONFIG_M5272)
/*
 * Retrieve the boot address in flash using CSBR0 and CSOR0,
 * find the reset vector at flash_address + 4 (e.g. 0x400),
 * remap it to the flash's current location (e.g. 0xf0000400),
 * and jump there.
 */
#define HARD_RESET_NOW() ({ \
        asm(" \
        movew #0x2700, %%sr; \
        move.l %0+0x40,%%d0; \
        and.l %0+0x44,%%d0; \
        andi.l #0xfffff000,%%d0; \
        mov.l %%d0,%%a0; \
        or.l 4(%%a0),%%d0; \
        mov.l %%d0,%%a0; \
        jmp (%%a0);" \
        : /* No output */ \
        : "o" (*(char *)MCF_MBAR) ); \
})
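
/*
 * Illustrative sketch only: the same boot-address computation as the asm
 * above, expressed in C. Per the asm offsets, CSBR0 and CSOR0 live at
 * MCF_MBAR + 0x40 and MCF_MBAR + 0x44; masking with 0xfffff000 yields
 * the flash base, and the reset vector sits 4 bytes into the flash.
 * The function name is hypothetical.
 */
#if 0
static unsigned long example_m5272_boot_address(void)
{
        unsigned long base;

        base  = *(volatile unsigned long *)(MCF_MBAR + 0x40);   /* CSBR0 */
        base &= *(volatile unsigned long *)(MCF_MBAR + 0x44);   /* CSOR0 */
        base &= 0xfffff000;                                     /* flash base */
        /* combine with the reset vector stored at flash_address + 4 */
        return base | *(volatile unsigned long *)(base + 4);
}
#endif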
#elif defined(CONFIG_M528x)
/*
 * The MCF528x has a bit (SOFTRST) in memory (the Reset Control Register,
 * RCR) that, when set, resets the MCF528x.
 */
#define HARD_RESET_NOW() \
({ \
        volatile unsigned char *reset; \
        asm("move.w #0x2700, %sr"); \
        reset = ((volatile unsigned char *)(MCF_IPSBAR + 0x110000)); \
        while(1) \
                *reset |= (0x01 << 7); \
})
#elif defined(CONFIG_M523x)
#define HARD_RESET_NOW() ({ \
        asm(" \
        movew #0x2700, %sr; \
        movel #0x01000000, %sp; \
        moveal #0x40110000, %a0; \
        moveb #0x80, (%a0); \
        "); \
})
#elif defined(CONFIG_M520x)
/*
 * The MCF5208 has a bit (SOFTRST) in memory (the Reset Control Register,
 * RCR) that, when set, resets the MCF5208.
 */
#define HARD_RESET_NOW() \
({ \
        volatile unsigned char *reset; \
        asm("move.w #0x2700, %sr"); \
        reset = ((volatile unsigned char *)(MCF_IPSBAR + 0xA0000)); \
        while(1) \
                *reset |= 0x80; \
})
#else
#define HARD_RESET_NOW() ({ \
        asm(" \
        movew #0x2700, %sr; \
        moveal #0x4, %a0; \
        moveal (%a0), %a0; \
        jmp (%a0); \
        "); \
})
#endif
#endif

#define arch_align_stack(x) (x)

#endif /* _M68KNOMMU_SYSTEM_H */