  1. /* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
  2. * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
  3. *
  4. * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  5. */
  6. #ifndef _SPARC64_SPITFIRE_H
  7. #define _SPARC64_SPITFIRE_H
  8. #include <asm/asi.h>
  9. /* The following register addresses are accessible via ASI_DMMU
  10. * and ASI_IMMU, that is there is a distinct and unique copy of
  11. * each these registers for each TLB.
  12. */
  13. #define TSB_TAG_TARGET 0x0000000000000000 /* All chips */
  14. #define TLB_SFSR 0x0000000000000018 /* All chips */
  15. #define TSB_REG 0x0000000000000028 /* All chips */
  16. #define TLB_TAG_ACCESS 0x0000000000000030 /* All chips */
  17. #define VIRT_WATCHPOINT 0x0000000000000038 /* All chips */
  18. #define PHYS_WATCHPOINT 0x0000000000000040 /* All chips */
  19. #define TSB_EXTENSION_P 0x0000000000000048 /* Ultra-III and later */
  20. #define TSB_EXTENSION_S 0x0000000000000050 /* Ultra-III and later, D-TLB only */
  21. #define TSB_EXTENSION_N 0x0000000000000058 /* Ultra-III and later */
  22. #define TLB_TAG_ACCESS_EXT 0x0000000000000060 /* Ultra-III+ and later */
  23. /* These registers only exist as one entity, and are accessed
  24. * via ASI_DMMU only.
  25. */
  26. #define PRIMARY_CONTEXT 0x0000000000000008
  27. #define SECONDARY_CONTEXT 0x0000000000000010
  28. #define DMMU_SFAR 0x0000000000000020
  29. #define VIRT_WATCHPOINT 0x0000000000000038
  30. #define PHYS_WATCHPOINT 0x0000000000000040
  31. #define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1)
  32. #define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1)
  33. #define L1DCACHE_SIZE 0x4000
  34. #ifndef __ASSEMBLY__
  35. enum ultra_tlb_layout {
  36. spitfire = 0,
  37. cheetah = 1,
  38. cheetah_plus = 2,
  39. };
  40. extern enum ultra_tlb_layout tlb_type;
  41. extern int cheetah_pcache_forced_on;
  42. extern void cheetah_enable_pcache(void);
/* Highest locked TLB entry index for the running chip: Spitfire has a
 * 64-entry TLB; Cheetah and later use the 16-entry locked TLB value.
 */
#define sparc64_highest_locked_tlbent() \
	(tlb_type == spitfire ? \
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)
  47. static __inline__ unsigned long spitfire_get_isfsr(void)
  48. {
  49. unsigned long ret;
  50. __asm__ __volatile__("ldxa [%1] %2, %0"
  51. : "=r" (ret)
  52. : "r" (TLB_SFSR), "i" (ASI_IMMU));
  53. return ret;
  54. }
  55. static __inline__ unsigned long spitfire_get_dsfsr(void)
  56. {
  57. unsigned long ret;
  58. __asm__ __volatile__("ldxa [%1] %2, %0"
  59. : "=r" (ret)
  60. : "r" (TLB_SFSR), "i" (ASI_DMMU));
  61. return ret;
  62. }
  63. static __inline__ unsigned long spitfire_get_sfar(void)
  64. {
  65. unsigned long ret;
  66. __asm__ __volatile__("ldxa [%1] %2, %0"
  67. : "=r" (ret)
  68. : "r" (DMMU_SFAR), "i" (ASI_DMMU));
  69. return ret;
  70. }
/* Write @sfsr to the I-MMU SFSR register (TLB_SFSR via ASI_IMMU);
 * the membar #Sync ensures the store has completed before returning.
 */
static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
}
/* Write @sfsr to the D-MMU SFSR register (TLB_SFSR via ASI_DMMU);
 * the membar #Sync ensures the store has completed before returning.
 */
static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
}
/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
	/* Second membar carries a "memory" clobber, so it also acts as
	 * a compiler barrier after the tag write.
	 */
	__asm__ __volatile__ ("membar #Sync" : : : "memory");
}
/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline,
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
  109. static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
  110. {
  111. unsigned long data;
  112. __asm__ __volatile__("ldxa [%1] %2, %0"
  113. : "=r" (data)
  114. : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
  115. /* Clear TTE diag bits. */
  116. data &= ~0x0003fe0000000000UL;
  117. return data;
  118. }
  119. static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
  120. {
  121. unsigned long tag;
  122. __asm__ __volatile__("ldxa [%1] %2, %0"
  123. : "=r" (tag)
  124. : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
  125. return tag;
  126. }
/* Write @data as the TTE data of D-TLB entry @entry
 * (diagnostic VA = entry << 3) via ASI_DTLB_DATA_ACCESS.
 */
static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_DTLB_DATA_ACCESS));
}
  135. static __inline__ unsigned long spitfire_get_itlb_data(int entry)
  136. {
  137. unsigned long data;
  138. __asm__ __volatile__("ldxa [%1] %2, %0"
  139. : "=r" (data)
  140. : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
  141. /* Clear TTE diag bits. */
  142. data &= ~0x0003fe0000000000UL;
  143. return data;
  144. }
  145. static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
  146. {
  147. unsigned long tag;
  148. __asm__ __volatile__("ldxa [%1] %2, %0"
  149. : "=r" (tag)
  150. : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
  151. return tag;
  152. }
/* Write @data as the TTE data of I-TLB entry @entry
 * (diagnostic VA = entry << 3) via ASI_ITLB_DATA_ACCESS.
 */
static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
/* Spitfire hardware assisted TLB flushes. */

/* Context level flushes.  The demap VA encodes the operation: 0x40
 * selects "demap context" on the primary context, 0x50 on the
 * secondary, 0x60 on the nucleus context.
 */
static __inline__ void spitfire_flush_dtlb_primary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x40), "i" (ASI_DMMU_DEMAP));
}
/* Demap all I-TLB entries for the primary context (demap VA 0x40). */
static __inline__ void spitfire_flush_itlb_primary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x40), "i" (ASI_IMMU_DEMAP));
}
/* Demap all D-TLB entries for the secondary context (demap VA 0x50). */
static __inline__ void spitfire_flush_dtlb_secondary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x50), "i" (ASI_DMMU_DEMAP));
}
/* Demap all I-TLB entries for the secondary context (demap VA 0x50). */
static __inline__ void spitfire_flush_itlb_secondary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x50), "i" (ASI_IMMU_DEMAP));
}
/* Demap all D-TLB entries for the nucleus context (demap VA 0x60). */
static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x60), "i" (ASI_DMMU_DEMAP));
}
/* Demap all I-TLB entries for the nucleus context (demap VA 0x60). */
static __inline__ void spitfire_flush_itlb_nucleus_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x60), "i" (ASI_IMMU_DEMAP));
}
/* Page level flushes.  Low demap-VA bits select the context:
 * 0x00 primary, 0x10 secondary, 0x20 nucleus.
 */

/* Demap the D-TLB entry mapping @page in the primary context. */
static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page), "i" (ASI_DMMU_DEMAP));
}
/* Demap the I-TLB entry mapping @page in the primary context. */
static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page), "i" (ASI_IMMU_DEMAP));
}
/* Demap the D-TLB entry mapping @page in the secondary context (0x10). */
static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
}
/* Demap the I-TLB entry mapping @page in the secondary context (0x10). */
static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
}
/* Demap the D-TLB entry mapping @page in the nucleus context (0x20). */
static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}
/* Demap the I-TLB entry mapping @page in the nucleus context (0x20). */
static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}
/* Cheetah has "all non-locked" tlb flushes.  Demap VA 0x80 selects
 * the demap-all operation.
 */
static __inline__ void cheetah_flush_dtlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}
/* Flush all non-locked I-TLB entries (demap-all, VA 0x80). */
static __inline__ void cheetah_flush_itlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}
/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
 * the problem for me. -DaveM
 */

/* Read the TTE data of @entry in the locked (fully associative) D-TLB;
 * TLB selector 0 goes in VA bits 16+, entry index in bits 15:3.  The
 * first ldxa discards its result into %g0 to work around the
 * bogus-data erratum described above.
 */
static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}
/* Read the TTE data of @entry in the locked I-TLB (selector 0); the
 * load is done twice to work around the Cheetah erratum where the
 * first ASI_ITLB_DATA_ACCESS load can return bogus data.
 */
static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}
  297. static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
  298. {
  299. unsigned long tag;
  300. __asm__ __volatile__("ldxa [%1] %2, %0"
  301. : "=r" (tag)
  302. : "r" ((0 << 16) | (entry << 3)),
  303. "i" (ASI_DTLB_TAG_READ));
  304. return tag;
  305. }
  306. static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
  307. {
  308. unsigned long tag;
  309. __asm__ __volatile__("ldxa [%1] %2, %0"
  310. : "=r" (tag)
  311. : "r" ((0 << 16) | (entry << 3)),
  312. "i" (ASI_ITLB_TAG_READ));
  313. return tag;
  314. }
/* Write @data as the TTE data of @entry in the locked D-TLB (selector 0). */
static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}
/* Write @data as the TTE data of @entry in the locked I-TLB (selector 0). */
static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
/* Read the TTE data of @entry in D-TLB number @tlb; the TLB selector
 * goes in VA bits 16+, the entry index in bits 15:3.  The load is done
 * twice to work around the bogus-data erratum on the first
 * ASI_DTLB_DATA_ACCESS load.
 */
static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}
  342. static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
  343. {
  344. unsigned long tag;
  345. __asm__ __volatile__("ldxa [%1] %2, %0"
  346. : "=r" (tag)
  347. : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
  348. return tag;
  349. }
/* Write @data as the TTE data of @entry in D-TLB number @tlb. */
static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}
/* Read the TTE data of @entry in the non-locked I-TLB (selector 2);
 * double load works around the bogus-data erratum on the first
 * ASI_ITLB_DATA_ACCESS load.
 */
static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}
  369. static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
  370. {
  371. unsigned long tag;
  372. __asm__ __volatile__("ldxa [%1] %2, %0"
  373. : "=r" (tag)
  374. : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
  375. return tag;
  376. }
/* Write @data as the TTE data of @entry in the non-locked I-TLB (selector 2). */
static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
  385. #endif /* !(__ASSEMBLY__) */
  386. #endif /* !(_SPARC64_SPITFIRE_H) */