/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000	/* All chips */
#define TLB_SFSR		0x0000000000000018	/* All chips */
#define TSB_REG			0x0000000000000028	/* All chips */
#define TLB_TAG_ACCESS		0x0000000000000030	/* All chips */
#define VIRT_WATCHPOINT		0x0000000000000038	/* All chips */
#define PHYS_WATCHPOINT		0x0000000000000040	/* All chips */
#define TSB_EXTENSION_P		0x0000000000000048	/* Ultra-III and later */
#define TSB_EXTENSION_S		0x0000000000000050	/* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N		0x0000000000000058	/* Ultra-III and later */
#define TLB_TAG_ACCESS_EXT	0x0000000000000060	/* Ultra-III+ and later */

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040

#define SPITFIRE_HIGHEST_LOCKED_TLBENT	(64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

#define L1DCACHE_SIZE		0x4000

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
	spitfire = 0,
	cheetah = 1,
	cheetah_plus = 2,
};

extern enum ultra_tlb_layout tlb_type;

extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);

#define sparc64_highest_locked_tlbent()	\
	(tlb_type == spitfire ? \
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)

static __inline__ unsigned long spitfire_get_isfsr(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (TLB_SFSR), "i" (ASI_IMMU));
	return ret;
}

static __inline__ unsigned long spitfire_get_dsfsr(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (TLB_SFSR), "i" (ASI_DMMU));
	return ret;
}

static __inline__ unsigned long spitfire_get_sfar(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (ret)
			     : "r" (DMMU_SFAR), "i" (ASI_DMMU));
	return ret;
}

static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
}

static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
}

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but for stale instructions to still be in the pipeline;
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
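
/* A minimal usage sketch, not part of the original header: invalidate an
 * I-cache line and then issue a flush instruction so stale instructions
 * cannot linger in the pipeline, as described above.  The helper name is
 * hypothetical; since any address works for the flush, the line address
 * is simply reused here.
 */
static __inline__ void example_spitfire_invalidate_icache_line(unsigned long addr)
{
	spitfire_put_icache_tag(addr, 0x0);
	__asm__ __volatile__("flush %0" : : "r" (addr) : "memory");
}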

static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
	return tag;
}

static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long spitfire_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
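
/* A minimal usage sketch, not part of the original header: walk the
 * 64-entry Spitfire D-TLB with the diagnostic accessors above and count
 * entries whose valid bit is set.  Treating bit 63 of the TTE data as
 * the valid bit is an assumption, and the helper name is hypothetical.
 */
static __inline__ int example_spitfire_count_valid_dtlb_entries(void)
{
	int entry, count = 0;

	for (entry = 0; entry <= SPITFIRE_HIGHEST_LOCKED_TLBENT; entry++) {
		if (spitfire_get_dtlb_data(entry) & (1UL << 63))
			count++;
	}
	return count;
}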

/* Spitfire hardware assisted TLB flushes. */

/* Context level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x40), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x40), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x50), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x50), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x60), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_context(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x60), "i" (ASI_IMMU_DEMAP));
}
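
/* A minimal usage sketch, not part of the original header: wipe every
 * primary-context translation from both TLBs, e.g. after the
 * PRIMARY_CONTEXT register has been repointed at a new context.  The
 * helper name is hypothetical.
 */
static __inline__ void example_spitfire_flush_primary_context(void)
{
	spitfire_flush_dtlb_primary_context();
	spitfire_flush_itlb_primary_context();
}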

/* Page level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}
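
/* A minimal usage sketch, not part of the original header: drop a single
 * nucleus-context (kernel) mapping from both TLBs.  The address is assumed
 * to be page aligned so it does not disturb the low demap-type bits OR-ed
 * in by the helpers above; the function name is hypothetical.
 */
static __inline__ void example_spitfire_flush_kernel_page(unsigned long page)
{
	spitfire_flush_dtlb_nucleus_page(page);
	spitfire_flush_itlb_nucleus_page(page);
}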

/* Cheetah has "all non-locked" tlb flushes. */
static __inline__ void cheetah_flush_dtlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void cheetah_flush_itlb_all(void)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}
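
/* A minimal usage sketch, not part of the original header: throw away every
 * non-locked translation on a Cheetah chip by combining the two flushes
 * above.  Locked entries survive, per the comment above; the helper name is
 * hypothetical.
 */
static __inline__ void example_cheetah_flush_all_tlbs(void)
{
	cheetah_flush_dtlb_all();
	cheetah_flush_itlb_all();
}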

/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 */

static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_TAG_READ));

	return tag;
}

static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_TAG_READ));

	return tag;
}

static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_TAG_READ));
	return tag;
}

static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
			     "ldxa [%1] %2, %0"
			     : "=r" (data)
			     : "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* No outputs */
			     : "r" (data), "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}
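
/* A minimal usage sketch, not part of the original header: walk the large
 * 2-way, 512-entry data TLB described above and count valid entries.  The
 * TLB index of 2 (mirroring the (2 << 16) used by the I-TLB accessors), the
 * bit-63 valid bit, and the helper name are assumptions made for
 * illustration only.
 */
static __inline__ int example_cheetah_count_valid_dtlb_entries(void)
{
	int entry, count = 0;

	for (entry = 0; entry < 512; entry++) {
		if (cheetah_get_dtlb_data(entry, 2) & (1UL << 63))
			count++;
	}
	return count;
}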

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_SPITFIRE_H) */