system.h
#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H

#include <linux/config.h>	/* get configuration macros */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/entry.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to has used the
 * math co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors
 * and so we might see unexpected behavior when a task returns
 * with unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);

#define switch_to(prev, next, last)				\
{								\
	void *_last;						\
	__asm__ __volatile__(					\
		"movel	%1, %%a0\n\t"				\
		"movel	%2, %%a1\n\t"				\
		"jbsr	resume\n\t"				\
		"movel	%%d1, %0\n\t"				\
		: "=d" (_last)					\
		: "d" (prev), "d" (next)			\
		: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
	(last) = _last;						\
}
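
/*
 * Usage sketch (illustrative only, not part of this header): the scheduler
 * core hands switch_to() the outgoing and incoming tasks; when the outgoing
 * task is eventually scheduled back in, "last" names the task that ran
 * immediately before it. The surrounding function is an assumption made for
 * illustration, not real kernel code.
 *
 *	static void context_switch_example(struct task_struct *next)
 *	{
 *		struct task_struct *prev = current;
 *		struct task_struct *last;
 *
 *		switch_to(prev, next, last);
 *		// running again: "last" is whoever ran right before us
 *	}
 */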

#ifdef CONFIG_COLDFIRE
#define local_irq_enable() __asm__ __volatile__ (	\
	"move	%/sr,%%d0\n\t"				\
	"andi.l	#0xf8ff,%%d0\n\t"			\
	"move	%%d0,%/sr\n"				\
	: /* no outputs */				\
	:						\
	: "cc", "%d0", "memory")

#define local_irq_disable() __asm__ __volatile__ (	\
	"move	%/sr,%%d0\n\t"				\
	"ori.l	#0x0700,%%d0\n\t"			\
	"move	%%d0,%/sr\n"				\
	: /* no outputs */				\
	:						\
	: "cc", "%d0", "memory")

/* For spinlocks etc */
#define local_irq_save(x) __asm__ __volatile__ (	\
	"movew	%%sr,%0\n\t"				\
	"movew	#0x0700,%%d0\n\t"			\
	"or.l	%0,%%d0\n\t"				\
	"movew	%%d0,%/sr"				\
	: "=d" (x)					\
	:						\
	: "cc", "%d0", "memory")

#else

/* portable version */ /* FIXME - see entry.h */
#define ALLOWINT 0xf8ff

#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#endif
#define local_save_flags(x) asm volatile ("movew %%sr,%0" : "=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr" : : "d" (x) : "memory")

/* For spinlocks etc */
#ifndef local_irq_save
#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0)
#endif

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	((flags & 0x0700) == 0x0700);	\
})
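
/*
 * Usage sketch (illustrative only): protecting a short critical section
 * against local interrupts, e.g. data also touched by an interrupt handler.
 * "shared_count" is a hypothetical shared variable, not something defined
 * here.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// save SR, then mask interrupts
 *	shared_count++;			// cannot be interrupted here
 *	local_irq_restore(flags);	// restore SR (and the IRQ mask)
 */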

#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()	asm volatile ("nop"::)
#define mb()	asm volatile ("" : : : "memory")
#define rmb()	asm volatile ("" : : : "memory")
#define wmb()	asm volatile ("" : : : "memory")

#define set_rmb(var, value)	do { xchg(&var, value); } while (0)
#define set_mb(var, value)	set_rmb(var, value)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif
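
/*
 * Usage sketch (illustrative only): the classic publish pattern. On this
 * uniprocessor port mb()/rmb()/wmb() compile down to compiler barriers,
 * but the idiom is written as if ordering mattered. "msg" is a hypothetical
 * structure shared with an interrupt handler or DMA consumer.
 *
 *	msg->payload = value;	// fill in the data first
 *	wmb();			// data must be visible before the flag
 *	msg->ready = 1;		// then publish
 *
 * set_wmb() folds the store and the barrier into one macro; set_mb()
 * achieves the same ordering by going through xchg().
 */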

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "moveb %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "movew %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "movel %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
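
/*
 * Usage sketch (illustrative only): a minimal test-and-set style lock on
 * top of tas()/xchg(). Real code should use the spinlock API; "my_lock"
 * is a hypothetical byte-sized lock word (0 = free, 1 = held).
 *
 *	static volatile unsigned char my_lock;
 *
 *	while (tas(&my_lock))
 *		;		// old value was 1: someone else holds it, spin
 *	// ... critical section ...
 *	my_lock = 0;		// release
 */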

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
cmpxchg(volatile int *p, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	if ((prev = *p) == old)
		*p = new;
	local_irq_restore(flags);
	return prev;
}
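
/*
 * Usage sketch (illustrative only): the standard cmpxchg() retry loop,
 * here building an atomic increment. "counter" is a hypothetical shared
 * variable, not something defined by this header.
 *
 *	static volatile int counter;
 *
 *	int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */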

#ifdef CONFIG_M68332
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	movew #0x0000, 0xfffa6a;	\
	reset;				\
	/*movew #0x1557, 0xfffa44;*/	\
	/*movew #0x0155, 0xfffa46;*/	\
	moveal #0, %a0;			\
	movec %a0, %vbr;		\
	moveal 0, %sp;			\
	moveal 4, %a0;			\
	jmp (%a0);			\
	");				\
})
#endif

#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
    defined(CONFIG_M68360) || defined(CONFIG_M68VZ328)
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	moveal #0x10c00000, %a0;	\
	moveb #0, 0xFFFFF300;		\
	moveal 0(%a0), %sp;		\
	moveal 4(%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#endif

#ifdef CONFIG_COLDFIRE
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
/*
 * Need to account for broken early mask of 5272 silicon. So don't
 * jump through the original start address. Jump straight into the
 * known start of the FLASH code.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	jmp 0xf0000400;			\
	");				\
})
#elif defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
      defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
      defined(CONFIG_CLEOPATRA)
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	moveal #0x10000044, %a0;	\
	movel #0xffffffff, (%a0);	\
	moveal #0x10000001, %a0;	\
	moveb #0x00, (%a0);		\
	moveal #0xf0000004, %a0;	\
	moveal (%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#elif defined(CONFIG_M5272)
/*
 * Retrieve the boot address in flash using CSBR0 and CSOR0,
 * find the reset vector at flash_address + 4 (e.g. 0x400),
 * remap it in the flash's current location (e.g. 0xf0000400)
 * and jump there.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %%sr;		\
	move.l %0+0x40,%%d0;		\
	and.l %0+0x44,%%d0;		\
	andi.l #0xfffff000,%%d0;	\
	mov.l %%d0,%%a0;		\
	or.l 4(%%a0),%%d0;		\
	mov.l %%d0,%%a0;		\
	jmp (%%a0);"			\
	: /* No output */		\
	: "o" (*(char *)MCF_MBAR) );	\
})
#elif defined(CONFIG_M528x)
/*
 * The MCF528x has a bit (SOFTRST) in memory (Reset Control Register RCR)
 * that, when set, resets the MCF528x.
 */
#define HARD_RESET_NOW()		\
({					\
	unsigned char volatile *reset;	\
	asm("move.w #0x2700, %sr");	\
	reset = ((volatile unsigned char *)(MCF_IPSBAR + 0x110000)); \
	while(1)			\
		*reset |= (0x01 << 7);	\
})
#elif defined(CONFIG_M523x)
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	movel #0x01000000, %sp;		\
	moveal #0x40110000, %a0;	\
	moveb #0x80, (%a0);		\
	");				\
})
#elif defined(CONFIG_M520x)
/*
 * The MCF5208 has a bit (SOFTRST) in memory (Reset Control Register
 * RCR) that, when set, resets the MCF5208.
 */
#define HARD_RESET_NOW()		\
({					\
	unsigned char volatile *reset;	\
	asm("move.w #0x2700, %sr");	\
	reset = ((volatile unsigned char *)(MCF_IPSBAR + 0xA0000)); \
	while(1)			\
		*reset |= 0x80;		\
})
#else
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	moveal #0x4, %a0;		\
	moveal (%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#endif
#endif
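
/*
 * Usage sketch (illustrative only): a board's restart hook would typically
 * just invoke the macro; it does not return. The function name below is an
 * assumption made for illustration, not something defined by this header.
 *
 *	static void example_machine_restart(char *cmd)
 *	{
 *		HARD_RESET_NOW();
 *	}
 */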

#define arch_align_stack(x) (x)

#endif /* _M68KNOMMU_SYSTEM_H */