/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus need to load it with ANY valid value before invoking
 *   the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways causing
 * severe Thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBs which have
 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though the 3rd will knock out one of the
 * prev two), the u-D-TLB and u-I-TLB will have what is required to accomplish
 * memcpy.
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the assoc
 * of uTLBs by not invalidating always but only when absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
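
/*
 * Quick reference of the commands described above, as they come up in
 * this file:
 *   TLBWrite   - commit PD0/PD1 to the J-TLB slot selected by INDEX, and
 *                flush the uTLBs as a side effect
 *   TLBWriteNI - same commit, but leaves the uTLBs intact (used by the
 *                asm miss fast-path per the note above; deliberately NOT
 *                used by tlb_entry_insert() below)
 *   TLBIVUTLB  - invalidate only the uTLBs (see utlb_invalidate())
 */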

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}
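
/*
 * Note on the Probe result (as inferred from the callers below): on a hit,
 * TLBINDEX holds the matching entry's index, ready for a follow-up erase or
 * TLBWrite; on a miss the TLB_LKUP_ERR bit is set, with the special
 * TLB_DUP_ERR value flagging a duplicate PD matching in multiple ways.
 */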

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					 vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates the uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where the uTLB flush
	 * would fail when a prior probe for J-TLB (both totally unrelated)
	 * returned lkup err - because the entry didn't exist in MMU.
	 * The workaround was to set the Index reg with some valid value,
	 * prior to flush. This was fixed in MMU v3, hence not needed any more
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not, write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif
}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with the existing location. This will cause the Write CMD to
	 * over-write the existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */
noinline void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned int entry;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < mmu->num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm is called during fork, exit, munmap etc, multiple times
	 * as well. Only for fork( ) do we need to move the parent to a new
	 * MMU ctxt, all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also the new get_new_mmu_context() implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entries have to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
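	/*
	 * Illustration (assuming 4 KB pages, so the cutoff is 128 KB):
	 * flushing a 64 KB region is done page by page (16 probe/erase
	 * passes), whereas a 1 MB region simply rolls the mm over to a
	 * new ASID via local_flush_tlb_mm().
	 */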
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for single PAGE
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0, pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg
	 *  (valid for curr)
	 * -it completes the lazy write to the SASID reg (again valid for
	 *  curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to the MMU reg.
	 * -Fixing the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);

	address &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
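
	/*
	 * For instance, a user (non-GLOBAL) PTE with only r+w set ends up
	 * granting Kr Kw Ur Uw (no execute in either mode), while a GLOBAL
	 * kernel PTE with r+w+x maps to Kr Kw Kx with the U bits left clear.
	 */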

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *      flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *             since icache doesn't snoop dcache on ARC, any dirty
	 *             K-mapping of a code page needs to be wback+inv so that
	 *             icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *             so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

/* Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1,
			     sets:4, ways:4, ver:8;
#endif
	} *mmu3;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz = PAGE_SIZE;
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz = 512 << mmu3->pg_sz;
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	}

	mmu->num_tlb = mmu->sets * mmu->ways;
}
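
/*
 * Worked example (hypothetical BCR field values): an MMU v3 BCR reporting
 * sets=7, ways=2, pg_sz=3 decodes to 128 sets x 4 ways = 512 J-TLB entries
 * with a 4 KB page size (512 << 3).
 */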

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;

	n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
		       p_mmu->ver, TO_KB(p_mmu->pg_sz));

	n += scnprintf(buf + n, len - n,
		       "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
		       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");

	return buf;
}

void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * For efficiency's sake, the kernel is built at compile time for one
	 * MMU ver. This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
	 * hardware doesn't understand cmds such as WriteNI, or IVUTLB.
	 * On the other hand, Linux built for V1 if run on MMU V2 will do
	 * un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs.
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz != PAGE_SIZE)
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *              ---------------------   -----------
 *              |way0|way1|way2|way3|   |way0|way1|
 *              ---------------------   -----------
 * [set0]       |  0 |  1 |  2 |  3 |   |  0 |  1 |
 * [set1]       |  4 |  5 |  6 |  7 |   |  2 |  3 |
 * ~            ~                   ~   ~         ~
 * [set127]     | 508| 509| 510| 511|   | 254| 255|
 *              ---------------------   -----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
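
/*
 * e.g. with a 4-way MMU, SET_WAY_TO_IDX(mmu, 2, 1) yields linear index
 * 2 * 4 + 1 = 9, consistent with the layout in the table above.
 */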

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of a lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1;	/* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int pd0[mmu->ways], pd1[mmu->ways];

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @"
							"[%d:%d]/[%d:%d]\n",
							set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
							way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n. This is
					 * critical to our optimised loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	     (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif
  659. #endif