system.h

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
 * Saving eflags is important: it not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
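
/*
 * Usage sketch (illustrative, not part of this header): the scheduler is
 * the intended caller.  Passing "prev" again as the third argument makes
 * the macro rewrite it so that, once we are resumed on the new task's
 * stack, it names the task that was running immediately before us:
 *
 *	switch_to(prev, next, prev);
 *	-- from here on, "prev" is the task we switched away from
 */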
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
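
/*
 * Usage sketch (illustrative): both macros take the bare register name,
 * not a string, because of the #seg stringification above.  The
 * context-switch code uses them roughly like this to carry %gs across
 * tasks:
 *
 *	savesegment(gs, prev->gs);
 *	loadsegment(gs, next->gs);
 *
 * If the new selector faults, the .fixup path in loadsegment() silently
 * loads the null segment instead of oopsing.
 */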
static inline void native_clts(void)
{
	asm volatile ("clts");
}

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm("1: movl %%cr4, %0		\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		".long 1b,2b		\n"
		".previous		\n"
		: "=r" (val): "0" (0));
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())
#endif	/* CONFIG_PARAVIRT */

/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())

#endif	/* __KERNEL__ */
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D"(ptr),
		  "b"(low),
		  "c"(high)
		: "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}

#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
	(__builtin_constant_p(value) ? \
	__set_64bit_constant(ptr, value) : \
	__set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
	(__builtin_constant_p(value) ? \
	__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
	__set_64bit(ptr, ll_low(value), ll_high(value)) )

#endif
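
/*
 * Usage sketch (illustrative, hypothetical variable): on 32-bit x86 a
 * plain 64-bit store compiles to two 32-bit moves, so a concurrent
 * reader could observe a torn, half-written value.  set_64bit()
 * publishes all 64 bits in a single atomic step via cmpxchg8b:
 *
 *	static unsigned long long shared_stamp;	-- hypothetical
 *	set_64bit(&shared_stamp, 0x123456789abcdef0ULL);
 */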
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * although strictly speaking the constraints are invalid: *ptr is really
 * an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}
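
/*
 * Usage sketch (illustrative, hypothetical lock variable): because xchg
 * is implicitly locked, it suffices for a crude test-and-set lock.  The
 * real kernel spinlocks are more refined, but the idea is:
 *
 *	static volatile unsigned long my_lock;	-- hypothetical, 0 = free
 *
 *	while (xchg(&my_lock, 1) != 0)
 *		;			-- spin until we observed it free
 *	-- critical section
 *	my_lock = 0;			-- release
 */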
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define sync_cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define cmpxchg_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
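
/*
 * Usage sketch (illustrative, hypothetical counter): the canonical
 * cmpxchg pattern is a read-modify-write retry loop.  The update only
 * takes effect if nobody changed the word in between; otherwise the
 * returned old value differs and we try again:
 *
 *	static unsigned long hits;		-- hypothetical
 *	unsigned long old, new;
 *	do {
 *		old = hits;
 *		new = old + 1;
 *	} while (cmpxchg(&hits, old, new) != old);
 */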
/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

static inline unsigned long __cmpxchg_local(volatile void *ptr,
			unsigned long old, unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * When building a kernel capable of running on an 80386 it may be
 * necessary to emulate cmpxchg, since the 80386 lacks the instruction.
 * For that purpose we declare a function for each of the sizes we
 * support.
 */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
				  (unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				    (unsigned long)(n), sizeof(*(ptr)));\
	__ret;								\
})
#define cmpxchg_local(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
				  (unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				    (unsigned long)(n), sizeof(*(ptr)));\
	__ret;								\
})
#endif
#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
			unsigned long long old, unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__("cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
#define cmpxchg64_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
#endif
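
/*
 * Usage sketch (illustrative, hypothetical variable): cmpxchg64() is the
 * 64-bit analogue of the retry loop above, letting 32-bit code update a
 * 64-bit quantity atomically via cmpxchg8b:
 *
 *	static unsigned long long bytes_seen;	-- hypothetical
 *	unsigned long long old, new;
 *	do {
 *		old = bytes_seen;	-- a torn read here merely
 *		new = old + 1;		--  causes a harmless retry
 *	} while (cmpxchg64(&bytes_seen, old, new) != old);
 */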
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb()
 * ceases to be a nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should already be ordered. But keep a full barrier for now.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no out-of-order store capable CPUs for now that
   do SSE, but allow for it already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
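
/*
 * Usage sketch (illustrative, hypothetical flag/data pair): the smp_*
 * barriers pair up across CPUs.  A writer publishes its data before
 * setting a flag; the reader checks the flag before consuming the data:
 *
 *	CPU 0				CPU 1
 *
 *	data = 42;			while (!flag)
 *	smp_wmb();				;
 *	flag = 1;			smp_rmb();
 *					use(data);	-- sees 42
 *
 * On UP kernels these compile down to plain compiler barriers.
 */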
#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif	/* __ASM_SYSTEM_H */