  1. /* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
  2. * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
  3. *
  4. * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  5. */
  6. #ifndef _SPARC64_SPITFIRE_H
  7. #define _SPARC64_SPITFIRE_H
  8. #include <asm/asi.h>
  9. /* The following register addresses are accessible via ASI_DMMU
  10. * and ASI_IMMU, that is there is a distinct and unique copy of
  11. * each these registers for each TLB.
  12. */
  13. #define TSB_TAG_TARGET 0x0000000000000000 /* All chips */
  14. #define TLB_SFSR 0x0000000000000018 /* All chips */
  15. #define TSB_REG 0x0000000000000028 /* All chips */
  16. #define TLB_TAG_ACCESS 0x0000000000000030 /* All chips */
  17. #define VIRT_WATCHPOINT 0x0000000000000038 /* All chips */
  18. #define PHYS_WATCHPOINT 0x0000000000000040 /* All chips */
  19. #define TSB_EXTENSION_P 0x0000000000000048 /* Ultra-III and later */
  20. #define TSB_EXTENSION_S 0x0000000000000050 /* Ultra-III and later, D-TLB only */
  21. #define TSB_EXTENSION_N 0x0000000000000058 /* Ultra-III and later */
  22. #define TLB_TAG_ACCESS_EXT 0x0000000000000060 /* Ultra-III+ and later */
  23. /* These registers only exist as one entity, and are accessed
  24. * via ASI_DMMU only.
  25. */
  26. #define PRIMARY_CONTEXT 0x0000000000000008
  27. #define SECONDARY_CONTEXT 0x0000000000000010
  28. #define DMMU_SFAR 0x0000000000000020
  29. #define VIRT_WATCHPOINT 0x0000000000000038
  30. #define PHYS_WATCHPOINT 0x0000000000000040
  31. #define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1)
  32. #define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1)
  33. #define L1DCACHE_SIZE 0x4000
  34. #ifndef __ASSEMBLY__
  35. enum ultra_tlb_layout {
  36. spitfire = 0,
  37. cheetah = 1,
  38. cheetah_plus = 2,
  39. };
  40. extern enum ultra_tlb_layout tlb_type;
  41. #define sparc64_highest_locked_tlbent() \
  42. (tlb_type == spitfire ? \
  43. SPITFIRE_HIGHEST_LOCKED_TLBENT : \
  44. CHEETAH_HIGHEST_LOCKED_TLBENT)
  45. static __inline__ unsigned long spitfire_get_isfsr(void)
  46. {
  47. unsigned long ret;
  48. __asm__ __volatile__("ldxa [%1] %2, %0"
  49. : "=r" (ret)
  50. : "r" (TLB_SFSR), "i" (ASI_IMMU));
  51. return ret;
  52. }
  53. static __inline__ unsigned long spitfire_get_dsfsr(void)
  54. {
  55. unsigned long ret;
  56. __asm__ __volatile__("ldxa [%1] %2, %0"
  57. : "=r" (ret)
  58. : "r" (TLB_SFSR), "i" (ASI_DMMU));
  59. return ret;
  60. }
  61. static __inline__ unsigned long spitfire_get_sfar(void)
  62. {
  63. unsigned long ret;
  64. __asm__ __volatile__("ldxa [%1] %2, %0"
  65. : "=r" (ret)
  66. : "r" (DMMU_SFAR), "i" (ASI_DMMU));
  67. return ret;
  68. }
  69. static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
  70. {
  71. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  72. "membar #Sync"
  73. : /* no outputs */
  74. : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
  75. }
  76. static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
  77. {
  78. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  79. "membar #Sync"
  80. : /* no outputs */
  81. : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
  82. }
  83. /* The data cache is write through, so this just invalidates the
  84. * specified line.
  85. */
  86. static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
  87. {
  88. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  89. "membar #Sync"
  90. : /* No outputs */
  91. : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
  92. __asm__ __volatile__ ("membar #Sync" : : : "memory");
  93. }
  94. /* The instruction cache lines are flushed with this, but note that
  95. * this does not flush the pipeline. It is possible for a line to
  96. * get flushed but stale instructions to still be in the pipeline,
  97. * a flush instruction (to any address) is sufficient to handle
  98. * this issue after the line is invalidated.
  99. */
  100. static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
  101. {
  102. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  103. "membar #Sync"
  104. : /* No outputs */
  105. : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
  106. }
  107. static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
  108. {
  109. unsigned long data;
  110. __asm__ __volatile__("ldxa [%1] %2, %0"
  111. : "=r" (data)
  112. : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
  113. /* Clear TTE diag bits. */
  114. data &= ~0x0003fe0000000000UL;
  115. return data;
  116. }
  117. static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
  118. {
  119. unsigned long tag;
  120. __asm__ __volatile__("ldxa [%1] %2, %0"
  121. : "=r" (tag)
  122. : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
  123. return tag;
  124. }
  125. static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
  126. {
  127. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  128. "membar #Sync"
  129. : /* No outputs */
  130. : "r" (data), "r" (entry << 3),
  131. "i" (ASI_DTLB_DATA_ACCESS));
  132. }
  133. static __inline__ unsigned long spitfire_get_itlb_data(int entry)
  134. {
  135. unsigned long data;
  136. __asm__ __volatile__("ldxa [%1] %2, %0"
  137. : "=r" (data)
  138. : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
  139. /* Clear TTE diag bits. */
  140. data &= ~0x0003fe0000000000UL;
  141. return data;
  142. }
  143. static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
  144. {
  145. unsigned long tag;
  146. __asm__ __volatile__("ldxa [%1] %2, %0"
  147. : "=r" (tag)
  148. : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
  149. return tag;
  150. }
  151. static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
  152. {
  153. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  154. "membar #Sync"
  155. : /* No outputs */
  156. : "r" (data), "r" (entry << 3),
  157. "i" (ASI_ITLB_DATA_ACCESS));
  158. }
  159. /* Spitfire hardware assisted TLB flushes. */
  160. /* Context level flushes. */
  161. static __inline__ void spitfire_flush_dtlb_primary_context(void)
  162. {
  163. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  164. "membar #Sync"
  165. : /* No outputs */
  166. : "r" (0x40), "i" (ASI_DMMU_DEMAP));
  167. }
  168. static __inline__ void spitfire_flush_itlb_primary_context(void)
  169. {
  170. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  171. "membar #Sync"
  172. : /* No outputs */
  173. : "r" (0x40), "i" (ASI_IMMU_DEMAP));
  174. }
  175. static __inline__ void spitfire_flush_dtlb_secondary_context(void)
  176. {
  177. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  178. "membar #Sync"
  179. : /* No outputs */
  180. : "r" (0x50), "i" (ASI_DMMU_DEMAP));
  181. }
  182. static __inline__ void spitfire_flush_itlb_secondary_context(void)
  183. {
  184. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  185. "membar #Sync"
  186. : /* No outputs */
  187. : "r" (0x50), "i" (ASI_IMMU_DEMAP));
  188. }
  189. static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
  190. {
  191. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  192. "membar #Sync"
  193. : /* No outputs */
  194. : "r" (0x60), "i" (ASI_DMMU_DEMAP));
  195. }
  196. static __inline__ void spitfire_flush_itlb_nucleus_context(void)
  197. {
  198. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  199. "membar #Sync"
  200. : /* No outputs */
  201. : "r" (0x60), "i" (ASI_IMMU_DEMAP));
  202. }
  203. /* Page level flushes. */
  204. static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
  205. {
  206. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  207. "membar #Sync"
  208. : /* No outputs */
  209. : "r" (page), "i" (ASI_DMMU_DEMAP));
  210. }
  211. static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
  212. {
  213. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  214. "membar #Sync"
  215. : /* No outputs */
  216. : "r" (page), "i" (ASI_IMMU_DEMAP));
  217. }
  218. static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
  219. {
  220. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  221. "membar #Sync"
  222. : /* No outputs */
  223. : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
  224. }
  225. static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
  226. {
  227. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  228. "membar #Sync"
  229. : /* No outputs */
  230. : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
  231. }
  232. static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
  233. {
  234. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  235. "membar #Sync"
  236. : /* No outputs */
  237. : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
  238. }
  239. static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
  240. {
  241. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  242. "membar #Sync"
  243. : /* No outputs */
  244. : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
  245. }
  246. /* Cheetah has "all non-locked" tlb flushes. */
  247. static __inline__ void cheetah_flush_dtlb_all(void)
  248. {
  249. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  250. "membar #Sync"
  251. : /* No outputs */
  252. : "r" (0x80), "i" (ASI_DMMU_DEMAP));
  253. }
  254. static __inline__ void cheetah_flush_itlb_all(void)
  255. {
  256. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  257. "membar #Sync"
  258. : /* No outputs */
  259. : "r" (0x80), "i" (ASI_IMMU_DEMAP));
  260. }
  261. /* Cheetah has a 4-tlb layout so direct access is a bit different.
  262. * The first two TLBs are fully assosciative, hold 16 entries, and are
  263. * used only for locked and >8K sized translations. One exists for
  264. * data accesses and one for instruction accesses.
  265. *
  266. * The third TLB is for data accesses to 8K non-locked translations, is
  267. * 2 way assosciative, and holds 512 entries. The fourth TLB is for
  268. * instruction accesses to 8K non-locked translations, is 2 way
  269. * assosciative, and holds 128 entries.
  270. *
  271. * Cheetah has some bug where bogus data can be returned from
  272. * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
  273. * the problem for me. -DaveM
  274. */
  275. static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
  276. {
  277. unsigned long data;
  278. __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
  279. "ldxa [%1] %2, %0"
  280. : "=r" (data)
  281. : "r" ((0 << 16) | (entry << 3)),
  282. "i" (ASI_DTLB_DATA_ACCESS));
  283. return data;
  284. }
  285. static __inline__ unsigned long cheetah_get_litlb_data(int entry)
  286. {
  287. unsigned long data;
  288. __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
  289. "ldxa [%1] %2, %0"
  290. : "=r" (data)
  291. : "r" ((0 << 16) | (entry << 3)),
  292. "i" (ASI_ITLB_DATA_ACCESS));
  293. return data;
  294. }
  295. static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
  296. {
  297. unsigned long tag;
  298. __asm__ __volatile__("ldxa [%1] %2, %0"
  299. : "=r" (tag)
  300. : "r" ((0 << 16) | (entry << 3)),
  301. "i" (ASI_DTLB_TAG_READ));
  302. return tag;
  303. }
  304. static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
  305. {
  306. unsigned long tag;
  307. __asm__ __volatile__("ldxa [%1] %2, %0"
  308. : "=r" (tag)
  309. : "r" ((0 << 16) | (entry << 3)),
  310. "i" (ASI_ITLB_TAG_READ));
  311. return tag;
  312. }
  313. static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
  314. {
  315. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  316. "membar #Sync"
  317. : /* No outputs */
  318. : "r" (data),
  319. "r" ((0 << 16) | (entry << 3)),
  320. "i" (ASI_DTLB_DATA_ACCESS));
  321. }
  322. static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
  323. {
  324. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  325. "membar #Sync"
  326. : /* No outputs */
  327. : "r" (data),
  328. "r" ((0 << 16) | (entry << 3)),
  329. "i" (ASI_ITLB_DATA_ACCESS));
  330. }
  331. static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
  332. {
  333. unsigned long data;
  334. __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
  335. "ldxa [%1] %2, %0"
  336. : "=r" (data)
  337. : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));
  338. return data;
  339. }
  340. static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
  341. {
  342. unsigned long tag;
  343. __asm__ __volatile__("ldxa [%1] %2, %0"
  344. : "=r" (tag)
  345. : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
  346. return tag;
  347. }
  348. static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
  349. {
  350. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  351. "membar #Sync"
  352. : /* No outputs */
  353. : "r" (data),
  354. "r" ((tlb << 16) | (entry << 3)),
  355. "i" (ASI_DTLB_DATA_ACCESS));
  356. }
  357. static __inline__ unsigned long cheetah_get_itlb_data(int entry)
  358. {
  359. unsigned long data;
  360. __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
  361. "ldxa [%1] %2, %0"
  362. : "=r" (data)
  363. : "r" ((2 << 16) | (entry << 3)),
  364. "i" (ASI_ITLB_DATA_ACCESS));
  365. return data;
  366. }
  367. static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
  368. {
  369. unsigned long tag;
  370. __asm__ __volatile__("ldxa [%1] %2, %0"
  371. : "=r" (tag)
  372. : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
  373. return tag;
  374. }
  375. static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
  376. {
  377. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  378. "membar #Sync"
  379. : /* No outputs */
  380. : "r" (data), "r" ((2 << 16) | (entry << 3)),
  381. "i" (ASI_ITLB_DATA_ACCESS));
  382. }
  383. #endif /* !(__ASSEMBLY__) */
  384. #endif /* !(_SPARC64_SPITFIRE_H) */