system.h

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do { \
        unsigned long esi,edi; \
        asm volatile("pushl %%ebp\n\t" \
                "movl %%esp,%0\n\t"	/* save ESP */ \
                "movl %5,%%esp\n\t"	/* restore ESP */ \
                "movl $1f,%1\n\t"	/* save EIP */ \
                "pushl %6\n\t"		/* restore EIP */ \
                "jmp __switch_to\n" \
                "1:\t" \
                "popl %%ebp\n\t" \
                :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
                 "=a" (last),"=S" (esi),"=D" (edi) \
                :"m" (next->thread.esp),"m" (next->thread.eip), \
                 "2" (prev), "d" (next)); \
} while (0)

/* Write the base address field of a segment descriptor. */
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

/* Write the limit field of a segment descriptor. */
#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

/* Read the base address back out of a segment descriptor. */
static inline unsigned long _get_base(char * addr)
{
        unsigned long __base;
        __asm__("movb %3,%%dh\n\t"
                "movb %2,%%dl\n\t"
                "shll $16,%%edx\n\t"
                "movw %1,%%dx"
                :"=&d" (__base)
                :"m" (*((addr)+2)),
                 "m" (*((addr)+4)),
                 "m" (*((addr)+7)));
        return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
        asm volatile("\n" \
                "1:\t" \
                "mov %0,%%" #seg "\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3:\t" \
                "pushl $0\n\t" \
                "popl %%" #seg "\n\t" \
                "jmp 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n\t" \
                ".align 4\n\t" \
                ".long 1b,3b\n" \
                ".previous" \
                : :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
        asm volatile("mov %%" #seg ",%0":"=rm" (value))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

#define read_cr0() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
                "movl %%cr0,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr0(x) \
        __asm__ __volatile__("movl %0,%%cr0": :"r" (x));

#define read_cr2() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
                "movl %%cr2,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr2(x) \
        __asm__ __volatile__("movl %0,%%cr2": :"r" (x));

#define read_cr3() ({ \
        unsigned int __dummy; \
        __asm__ ( \
                "movl %%cr3,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr3(x) \
        __asm__ __volatile__("movl %0,%%cr3": :"r" (x));

#define read_cr4() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr4,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr4(x) \
        __asm__ __volatile__("movl %0,%%cr4": :"r" (x));

/* 8 == CR0.TS (bit 3) */
#define stts() write_cr0(8 | read_cr0())

#endif	/* __KERNEL__ */

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        __asm__("lsll %1,%0"
                :"=r" (__limit):"r" (segment));
        return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of cmpxchg8b are a bit strange; this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                unsigned int low, unsigned int high)
{
        __asm__ __volatile__ (
                "\n1:\t"
                "movl (%0), %%eax\n\t"
                "movl 4(%0), %%edx\n\t"
                "lock cmpxchg8b (%0)\n\t"
                "jnz 1b"
                : /* no outputs */
                :       "D"(ptr),
                        "b"(low),
                        "c"(high)
                :       "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                unsigned long long value)
{
        __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}

#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                unsigned long long value)
{
        __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

#endif
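
/*
 * Illustrative sketch (not part of the original header): set_64bit() lets
 * 32-bit code publish a full 64-bit value in one atomic step, so a concurrent
 * reader can never observe a torn half-old/half-new pair.  The helper name
 * below is hypothetical.
 *
 *	static inline void example_publish_u64(unsigned long long *shared,
 *					       unsigned long long v)
 *	{
 *		set_64bit(shared, v);
 *	}
 */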
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *	   strictly speaking the constraints are not quite right, since
 *	   *ptr is really an output argument as well. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 1:
                        __asm__ __volatile__("xchgb %b0,%1"
                                :"=q" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 2:
                        __asm__ __volatile__("xchgw %w0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 4:
                        __asm__ __volatile__("xchgl %0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
        }
        return x;
}
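
/*
 * Illustrative sketch (not part of the original header): xchg() is an
 * unconditional atomic swap, so tas() can serve as a crude test-and-set
 * lock.  The names below are hypothetical; a real lock would also avoid
 * hammering the bus while spinning.
 *
 *	static inline void example_lock(volatile int *l)
 *	{
 *		while (tas(l))
 *			;	(spin until the old value was 0)
 *	}
 *
 *	static inline void example_unlock(volatile int *l)
 *	{
 *		*l = 0;
 *	}
 */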
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))

#endif
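
/*
 * Illustrative sketch (not part of the original header): the usual cmpxchg()
 * pattern is a read/modify/retry loop; success is detected by comparing the
 * returned value with the old value the update was based on.  The function
 * name is hypothetical.
 *
 *	static inline void example_atomic_add(volatile unsigned long *p,
 *					      unsigned long n)
 *	{
 *		unsigned long old;
 *		do {
 *			old = *p;
 *		} while (cmpxchg(p, old, old + n) != old);
 *	}
 */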
#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
                                      unsigned long long new)
{
        unsigned long long prev;
        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
                             : "=A"(prev)
                             : "b"((unsigned long)new),
                               "c"((unsigned long)(new >> 32)),
                               "m"(*__xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
}

#define cmpxchg64(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
                                        (unsigned long long)(n)))

#endif

#ifdef __KERNEL__
struct alt_instr {
        __u8 *instr;		/* original instruction */
        __u8 *replacement;
        __u8  cpuid;		/* cpuid bit set for replacement */
        __u8  instrlen;		/* length of original instruction */
        __u8  replacementlen;	/* length of new instruction, <= instrlen */
        __u8  pad;
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature) \
        asm volatile ("661:\n\t" oldinstr "\n662:\n" \
                ".section .altinstructions,\"a\"\n" \
                " .align 4\n" \
                " .long 661b\n"			/* label */ \
                " .long 663f\n"			/* new instruction */ \
                " .byte %c0\n"			/* feature bit */ \
                " .byte 662b-661b\n"		/* sourcelen */ \
                " .byte 664f-663f\n"		/* replacementlen */ \
                ".previous\n" \
                ".section .altinstr_replacement,\"ax\"\n" \
                "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
                ".previous" :: "i" (feature) : "memory")
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...) \
        asm volatile ("661:\n\t" oldinstr "\n662:\n" \
                ".section .altinstructions,\"a\"\n" \
                " .align 4\n" \
                " .long 661b\n"			/* label */ \
                " .long 663f\n"			/* new instruction */ \
                " .byte %c0\n"			/* feature bit */ \
                " .byte 662b-661b\n"		/* sourcelen */ \
                " .byte 664f-663f\n"		/* replacementlen */ \
                ".previous\n" \
                ".section .altinstr_replacement,\"ax\"\n" \
                "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
                ".previous" :: "i" (feature), ##input)
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
 * nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO-store-capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)
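
/*
 * Illustrative sketch (not part of the original header): the classic use of
 * the smp_*() barriers is publishing data behind a ready flag.  Variable
 * names are hypothetical.
 *
 * <programlisting>
 *	CPU 0 (writer)			CPU 1 (reader)
 *
 *	data = 42;			while (!flag)
 *	smp_wmb();				;
 *	flag = 1;			smp_rmb();
 *					use(data);
 * </programlisting>
 */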
/* interrupt control.. */
#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")

/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shut down the processor */
#define halt()			__asm__ __volatile__("hlt": : :"memory")

/* Interrupts are disabled when IF (bit 9 of EFLAGS) is clear. */
#define irqs_disabled() \
({ \
        unsigned long flags; \
        local_save_flags(flags); \
        !(flags & (1<<9)); \
})

/* For spinlocks etc */
#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
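
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for a short section that must run with interrupts off on the local CPU.
 * The flags variable name is conventional, not mandated.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		(IF cleared, old EFLAGS kept in flags)
 *	... touch per-CPU or device state ...
 *	local_irq_restore(flags);	(previous interrupt state restored)
 */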
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif	/* __ASM_SYSTEM_H */