tlb.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545
  1. /*
  2. * arch/xtensa/mm/mmu.c
  3. *
  4. * Logic that manipulates the Xtensa MMU. Derived from MIPS.
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file "COPYING" in the main directory of this archive
  8. * for more details.
  9. *
  10. * Copyright (C) 2001 - 2003 Tensilica Inc.
  11. *
  12. * Joe Taylor
  13. * Chris Zankel <chris@zankel.net>
  14. * Marc Gauthier
  15. */
  16. #include <linux/mm.h>
  17. #include <asm/processor.h>
  18. #include <asm/mmu_context.h>
  19. #include <asm/tlbflush.h>
  20. #include <asm/system.h>
  21. #include <asm/cacheflush.h>
  22. static inline void __flush_itlb_all (void)
  23. {
  24. int way, index;
  25. for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
  26. for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
  27. int entry = way + (index << PAGE_SHIFT);
  28. invalidate_itlb_entry_no_isync (entry);
  29. }
  30. }
  31. asm volatile ("isync\n");
  32. }
  33. static inline void __flush_dtlb_all (void)
  34. {
  35. int way, index;
  36. for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
  37. for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
  38. int entry = way + (index << PAGE_SHIFT);
  39. invalidate_dtlb_entry_no_isync (entry);
  40. }
  41. }
  42. asm volatile ("isync\n");
  43. }
/* Invalidate the entire instruction and data TLBs (auto-refill ways). */
void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}
  49. /* If mm is current, we simply assign the current task a new ASID, thus,
  50. * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, thus, when that user mapping is swapped in,
  52. * a new context will be assigned to it.
  53. */
  54. void flush_tlb_mm(struct mm_struct *mm)
  55. {
  56. #if 0
  57. printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
  58. #endif
  59. if (mm == current->active_mm) {
  60. int flags;
  61. local_save_flags(flags);
  62. get_new_mmu_context(mm, asid_cache);
  63. set_rasid_register(ASID_INSERT(mm->context));
  64. local_irq_restore(flags);
  65. }
  66. else
  67. mm->context = 0;
  68. }
  69. void flush_tlb_range (struct vm_area_struct *vma,
  70. unsigned long start, unsigned long end)
  71. {
  72. struct mm_struct *mm = vma->vm_mm;
  73. unsigned long flags;
  74. if (mm->context == NO_CONTEXT)
  75. return;
  76. #if 0
  77. printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
  78. (unsigned long)mm->context, start, end);
  79. #endif
  80. local_save_flags(flags);
  81. if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
  82. int oldpid = get_rasid_register();
  83. set_rasid_register (ASID_INSERT(mm->context));
  84. start &= PAGE_MASK;
  85. if (vma->vm_flags & VM_EXEC)
  86. while(start < end) {
  87. invalidate_itlb_mapping(start);
  88. invalidate_dtlb_mapping(start);
  89. start += PAGE_SIZE;
  90. }
  91. else
  92. while(start < end) {
  93. invalidate_dtlb_mapping(start);
  94. start += PAGE_SIZE;
  95. }
  96. set_rasid_register(oldpid);
  97. } else {
  98. get_new_mmu_context(mm, asid_cache);
  99. if (mm == current->active_mm)
  100. set_rasid_register(ASID_INSERT(mm->context));
  101. }
  102. local_irq_restore(flags);
  103. }
  104. void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
  105. {
  106. struct mm_struct* mm = vma->vm_mm;
  107. unsigned long flags;
  108. int oldpid;
  109. #if 0
  110. printk("[tlbpage<%02lx,%08lx>]\n",
  111. (unsigned long)mm->context, page);
  112. #endif
  113. if(mm->context == NO_CONTEXT)
  114. return;
  115. local_save_flags(flags);
  116. oldpid = get_rasid_register();
  117. if (vma->vm_flags & VM_EXEC)
  118. invalidate_itlb_mapping(page);
  119. invalidate_dtlb_mapping(page);
  120. set_rasid_register(oldpid);
  121. local_irq_restore(flags);
  122. #if 0
  123. flush_tlb_all();
  124. return;
  125. #endif
  126. }
  127. #ifdef DEBUG_TLB
/* Selector values for the 'type' argument of the dump helpers below:
 * 0 = instruction TLB, 1 = data TLB. */
#define USE_ITLB 0
#define USE_DTLB 1

/* Static configuration of one TLB way, filled from the core's
 * XCHAL_*TLB_WAY*_SET parameters (see the tables below). */
struct way_config_t {
	int indicies;		/* number of entries (indices) in this way */
	int indicies_log2;	/* log2 of the entry count */
	int pgsz_log2;		/* log2 of the way's minimum page size */
	int arf;		/* non-zero if the way is auto-refill */
};
/* Per-way configuration of the instruction TLB, expanded from the
 * core-specific XCHAL_ITLB_WAY*_SET parameters. */
static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
{
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
	}
};

/* Per-way configuration of the data TLB, expanded from the
 * core-specific XCHAL_DTLB_WAY*_SET parameters. */
static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
{
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
	}
};
  227. /* Total number of entries: */
  228. #define ITLB_TOTAL_ENTRIES \
  229. XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
  230. XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
  231. XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
  232. XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
  233. XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
  234. XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
  235. XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES)
  236. #define DTLB_TOTAL_ENTRIES \
  237. XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
  238. XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
  239. XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
  240. XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
  241. XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
  242. XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
  243. XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
  244. XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
  245. XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
  246. XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES)
/* Snapshot of a single TLB entry, gathered by dump_tlb() so the whole
 * table can be sorted before printing. */
typedef struct {
	unsigned va;		/* effective virtual address (index bits folded in) */
	unsigned pa;		/* physical address, masked to the page frame */
	unsigned char asid;	/* address-space ID; 0 marks an invalid entry */
	unsigned char ca;	/* cache-attribute bits of the translation */
	unsigned char way;	/* TLB way the entry came from */
	unsigned char index;	/* index within that way */
	unsigned char pgsz_log2; /* 0 .. 32 */
	unsigned char type; /* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;
  257. /* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
  258. int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
  259. {
  260. if (a->asid < b->asid) return -1;
  261. if (a->asid > b->asid) return 1;
  262. if (a->va < b->va) return -1;
  263. if (a->va > b->va) return 1;
  264. if (a->pa < b->pa) return -1;
  265. if (a->pa > b->pa) return 1;
  266. if (a->ca < b->ca) return -1;
  267. if (a->ca > b->ca) return 1;
  268. if (a->way < b->way) return -1;
  269. if (a->way > b->way) return 1;
  270. if (a->index < b->index) return -1;
  271. if (a->index > b->index) return 1;
  272. return 0;
  273. }
  274. void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
  275. {
  276. int i, j;
  277. /* Simple O(n*n) sort: */
  278. for (i = 0; i < n-1; i++)
  279. for (j = i+1; j < n; j++)
  280. if (cmp_tlb_dump_info(t+i, t+j) > 0) {
  281. tlb_dump_entry_t tmp = t[i];
  282. t[i] = t[j];
  283. t[j] = tmp;
  284. }
  285. }
/* Scratch buffers used by dump_tlbs2() to gather entries before sorting. */
static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
  288. static inline char *way_type (int type)
  289. {
  290. return type ? "autorefill" : "non-autorefill";
  291. }
/* Pretty-print one TLB entry of the given way/index.
 *
 * 'virtual' and 'translation' are the raw values read from the TLB;
 * they are split into VPN, ASID, PPN and cache-attribute fields using
 * the way's page-size and index widths from way_info. */
void print_entry (struct way_config_t *way_info,
		  unsigned int way,
		  unsigned int index,
		  unsigned int virtual,
		  unsigned int translation)
{
	char valid_chr;
	unsigned int va, pa, asid, ca;

	/* Mask off the index and page-offset bits to isolate the VPN. */
	va = virtual &
		~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
	asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
	pa = translation & ~((1 << way_info->pgsz_log2) - 1);
	ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
	/* ASID 0 marks an invalid entry. */
	valid_chr = asid ? 'V' : 'I';

	/* Compute and incorporate the effect of the index bits on the
	 * va. It's more useful for kernel debugging, since we always
	 * want to know the effective va anyway. */
	va += index << way_info->pgsz_log2;

	printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
		way, index, valid_chr, va, pa, asid, ca);
}
  313. void print_itlb_entry (struct way_config_t *way_info, int way, int index)
  314. {
  315. print_entry (way_info, way, index,
  316. read_itlb_virtual (way + (index << way_info->pgsz_log2)),
  317. read_itlb_translation (way + (index << way_info->pgsz_log2)));
  318. }
  319. void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
  320. {
  321. print_entry (way_info, way, index,
  322. read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
  323. read_dtlb_translation (way + (index << way_info->pgsz_log2)));
  324. }
/* Unsorted, way-by-way dump of the instruction TLB.
 * NOTE(review): the "MinPageSize" column actually prints pgsz_log2
 * (the log2 of the minimum page size), not the size itself. */
void dump_itlb (void)
{
	int way, index;

	printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);

	for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
			way, itlb[way].indicies,
			itlb[way].pgsz_log2, way_type(itlb[way].arf));
		for (index = 0; index < itlb[way].indicies; index++) {
			print_itlb_entry(&itlb[way], way, index);
		}
	}
}
/* Unsorted, way-by-way dump of the data TLB.
 * NOTE(review): as with dump_itlb(), "MinPageSize" prints pgsz_log2
 * (the log2 of the minimum page size), not the size itself. */
void dump_dtlb (void)
{
	int way, index;

	printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);

	for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
			way, dtlb[way].indicies,
			dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
		for (index = 0; index < dtlb[way].indicies; index++) {
			print_dtlb_entry(&dtlb[way], way, index);
		}
	}
}
/* Snapshot every entry of one TLB into tinfo, sort by ASID and
 * effective VA, and print the result.
 *
 * type selects the TLB (0=ITLB, 1=DTLB); config describes its 'ways'
 * ways; tinfo must hold at least 'entries' elements.
 * NOTE(review): show_invalid is currently unused because the filter
 * below is compiled out (#if 0). */
void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
	       int entries, int ways, int type, int show_invalid)
{
	tlb_dump_entry_t *e = tinfo;
	int way, i;

	/* Gather all info: */
	for (way = 0; way < ways; way++) {
		struct way_config_t *cfg = config + way;
		for (i = 0; i < cfg->indicies; i++) {
			/* Way number plus index bits forms the TLB probe value. */
			unsigned wayindex = way + (i << cfg->pgsz_log2);
			unsigned vv = (type ? read_dtlb_virtual (wayindex)
					    : read_itlb_virtual (wayindex));
			unsigned pp = (type ? read_dtlb_translation (wayindex)
					    : read_itlb_translation (wayindex));

			/* Compute and incorporate the effect of the index bits on the
			 * va. It's more useful for kernel debugging, since we always
			 * want to know the effective va anyway. */
			e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
			e->va += (i << cfg->pgsz_log2);
			e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
			e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
			e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
			e->way = way;
			e->index = i;
			e->pgsz_log2 = cfg->pgsz_log2;
			e->type = type;
			e++;
		}
	}
#if 1
	/* Sort by ASID and VADDR: */
	sort_tlb_dump_info (tinfo, entries);
#endif

	/* Display all sorted info: */
	printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
	for (e = tinfo, i = 0; i < entries; i++, e++) {
#if 0
		if (e->asid == 0 && !show_invalid)
			continue;
#endif
		/* The page size is printed as 2^(pgsz_log2 % 10) with a
		 * ' '/k/M/G suffix taken from pgsz_log2 / 10. */
		printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
			(e->type ? 'D' : 'I'), e->way, e->index,
			e->asid, e->va, e->pa, e->ca,
			(1 << (e->pgsz_log2 % 10)),
			" kMG"[e->pgsz_log2 / 10]
			);
	}
}
/* Gather-and-sort dump of both TLBs.  showinv requests invalid
 * entries as well (currently ignored inside dump_tlb -- its filter
 * is compiled out). */
void dump_tlbs2 (int showinv)
{
	dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
	dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
}

/* Sorted dump of both TLBs, including invalid entries. */
void dump_all_tlbs (void)
{
	dump_tlbs2 (1);
}

/* Sorted dump of both TLBs, valid entries only. */
void dump_valid_tlbs (void)
{
	dump_tlbs2 (0);
}

/* Unsorted, way-by-way dump of both TLBs. */
void dump_tlbs (void)
{
	dump_itlb();
	dump_dtlb();
}
/* Dump cache tags of the instruction (dcache=0) or data (dcache=1)
 * cache, using the lict/ldct cache-test instructions.
 *
 * idx selects a single cache line (idx >= 0) or all lines (idx < 0);
 * the tag of every way is printed for each selected line. */
void dump_cache_tag(int dcache, int idx)
{
	int w, i, s, e;
	unsigned long tag, index;
	unsigned long num_lines, num_ways, cache_size, line_size;

	num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
	cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
	line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;

	/* NOTE(review): despite the name, num_lines is the number of
	 * BYTES per way (cache_size / num_ways); the loops below step by
	 * line_size, so iteration is still per line. */
	num_lines = cache_size / num_ways;

	s = 0; e = num_lines;

	/* Restrict to one line if a specific index was requested. */
	if (idx >= 0)
		e = (s = idx * line_size) + 1;

	for (i = s; i < e; i+= line_size) {
		printk("\nline %#08x:", i);
		for (w = 0; w < num_ways; w++) {
			index = w * num_lines + i;
			if (dcache)
				__asm__ __volatile__("ldct %0, %1\n\t"
						: "=a"(tag) : "a"(index));
			else
				__asm__ __volatile__("lict %0, %1\n\t"
						: "=a"(tag) : "a"(index));
			printk(" %#010lx", tag);
		}
	}
	printk ("\n");
}
/* Dump the raw data words of one icache line index across all ways,
 * using the licw cache-test instruction. */
void dump_icache(int index)
{
	unsigned long data, addr;
	int w, i;

	const unsigned long num_ways = XCHAL_ICACHE_WAYS;
	const unsigned long cache_size = XCHAL_ICACHE_SIZE;
	const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
	/* Here num_lines really is a line count (bytes/way/line_size),
	 * unlike the same-named variable in dump_cache_tag(). */
	const unsigned long num_lines = cache_size / num_ways / line_size;

	for (w = 0; w < num_ways; w++) {
		printk ("\nWay %d", w);
		for (i = 0; i < line_size; i+= 4) {
			/* NOTE(review): the way stride here is num_lines (a
			 * line count), while dump_cache_tag() strides ways by
			 * bytes-per-way -- confirm which is correct for licw
			 * addressing. */
			addr = w * num_lines + index * line_size + i;
			__asm__ __volatile__("licw %0, %1\n\t"
					: "=a"(data) : "a"(addr));
			printk(" %#010lx", data);
		}
	}
	printk ("\n");
}
/* Dump all tag lines of both the instruction and data caches. */
void dump_cache_tags(void)
{
	printk("Instruction cache\n");
	dump_cache_tag(0, -1);
	printk("Data cache\n");
	dump_cache_tag(1, -1);
}
  470. #endif