tlb-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
        "nop; nop; nop; nop; nop; nop;\n\t" \
        ".set reorder\n\t")

void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /*
                 * Make sure all entries differ.  If they're not different
                 * MIPS32 will take revenge ...
                 */
                write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
}

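/*
 * Flushing a whole mm does not need to touch the TLB at all: dropping
 * the mm's context forces a new ASID to be allocated on its next
 * activation, so stale entries tagged with the old ASID can never
 * match again.
 */
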
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0)
                drop_mmu_context(mm, cpu);
}

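/*
 * Each TLB entry maps an even/odd pair of pages under a single VPN2,
 * which is why the range loops below halve the page count, round
 * start/end to a double-page boundary and step by PAGE_SIZE << 1.
 * Ranges spanning more than half the TLB are cheaper to handle by
 * invalidating the whole context.
 */
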
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                BARRIER;
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(CKSEG0 +
                                                 (idx << (PAGE_SHIFT + 1)));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                local_irq_restore(flags);
        }
}

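/*
 * The kernel-range variant walks the same way, but writes EntryHi
 * without an ASID (the entries of interest are global) and falls back
 * to local_flush_tlb_all() for oversized ranges.
 */
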
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        BARRIER;
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        local_irq_restore(flags);
}

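/*
 * Single-page flush: probe for the page under the mm's ASID and, on a
 * hit, retire the entry by pointing it at a unique CKSEG0 VPN2 with
 * both EntryLo registers zeroed (i.e. invalid).
 */
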
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                BARRIER;
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                local_irq_restore(flags);
        }
}

/*
 * This one is only used for pages with the global bit set, so we don't
 * care much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        page &= (PAGE_MASK << 1);
        oldpid = read_c0_entryhi();
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed to
 * work around it.
 */

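/*
 * __update_tlb() refills one even/odd pair from the page tables: it
 * probes for the VPN2 under the current ASID, loads EntryLo0/EntryLo1
 * from the two adjacent PTEs (the >> 6 shift converts the software PTE
 * layout into the hardware PFN-plus-flags format on 32-bit kernels),
 * then overwrites the matching entry, or writes a random one on a miss.
 */
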
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        pid = read_c0_entryhi() & ASID_MASK;

        local_irq_save(flags);
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        write_c0_entrylo0(ptep->pte_high);
        ptep++;
        write_c0_entrylo1(ptep->pte_high);
#else
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
        write_c0_entryhi(address | pid);
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(pid);
        local_irq_restore(flags);
}

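/*
 * The disabled variant below appears to be kept as a reference for the
 * R4k end-of-page workaround mentioned above; note that, unlike
 * __update_tlb(), it neither rewrites EntryHi before the TLB write nor
 * restores the previous EntryHi afterwards.
 */
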
#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct *vma,
                                       unsigned long address, pte_t pte)
{
        unsigned long flags;
        unsigned int asid;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx;

        local_irq_save(flags);
        address &= (PAGE_MASK << 1);
        asid = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(address | asid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        local_irq_restore(flags);
}
#endif

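/*
 * Wired entries occupy the low TLB indices and are never selected for
 * random replacement, so a mapping installed here stays resident for
 * the lifetime of the system.
 */
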
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        BARRIER;
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        BARRIER;
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */
static int temp_tlb_entry __initdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
out:
        local_irq_restore(flags);
        return ret;
}

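/*
 * On MIPS32/MIPS64 CPUs the TLB geometry is read from CP0 registers:
 * Config.MT (bits 9:7) gives the MMU type, and Config1.MMUSize
 * (bits 30:25) holds the number of TLB entries minus one, hence the
 * ((reg >> 25) & 0x3f) + 1 below.
 */
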
static void __init probe_tlb(unsigned long config)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int reg;

        /*
         * If this isn't a MIPS32 / MIPS64 compliant CPU the Config 1
         * register is not supported and we assume R4k style.  CPU probing
         * has already figured out the number of TLB entries.
         */
        if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
                return;

        reg = read_c0_config1();
        if (!((config >> 7) & 3))
                panic("No TLB present");

        c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

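/*
 * temp_tlb_entry is initialised to the highest TLB index here and
 * add_temporary_entry() then allocates downwards from the top, so
 * temporary boot-time mappings cannot collide with the wired entries
 * at the bottom until the two meet.
 */
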
void __init tlb_init(void)
{
        unsigned int config = read_c0_config();

        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set for 4kB pages.
         */
        probe_tlb(config);
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        temp_tlb_entry = current_cpu_data.tlbsize - 1;
        local_flush_tlb_all();

        build_tlb_refill_handler();
}