/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <asm/arcregs.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
int asid_cache = FIRST_ASID;

/*
 * ASID to mm struct mapping. We have one extra entry corresponding to
 * NO_ASID, to save us a compare when clearing the mm entry for an old ASID;
 * see get_new_mmu_context() (asm-arc/mmu_context.h).
 */
struct mm_struct *asid_mm_map[NUM_ASID + 1];
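
/*
 * Illustrative sketch only (hypothetical; the real logic lives in
 * get_new_mmu_context() in asm-arc/mmu_context.h): with the spare slot,
 * retiring an mm's old ASID can be an unconditional store,
 *
 *	asid_mm_map[mm->context.asid] = NULL;
 *
 * which stays in bounds even when mm->context.asid == NO_ASID, saving
 * the compare the comment above refers to.
 */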

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int idx, asid_or_sasid;
	unsigned long pd0_flags;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * - the ASID for the TLB entry is fetched from the MMU ASID reg
	 *   (valid for the current task)
	 * - it completes the lazy write to the SASID reg (again valid for
	 *   the current task)
	 *
	 * Removing the assumption involves
	 * - using vma->vm_mm->context.{ASID,SASID} as opposed to the MMU regs
	 * - fixing the TLB paranoid debug code to not trigger false negatives
	 * - more importantly, it makes this handler inconsistent with the
	 *   fast-path TLB refill handler, which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land
	 * here:
	 * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *    Here the VM wants to pre-install a TLB entry for the user stack
	 *    while current->mm still points to the pre-execve mm (hence the
	 *    condition). However the stack vaddr is soon relocated (due to
	 *    randomization) and move_page_tables() then tries to undo that
	 *    TLB entry. Thus not creating a TLB entry here is no worse.
	 *
	 * 2. ptrace(POKETEXT) causes a CoW - the debugger (current) inserting
	 *    a breakpoint in the debugged task. Not creating a TLB entry now
	 *    is not performance critical.
	 *
	 * Neither case justifies the code churn of removing the assumption.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(vma->vm_mm->context.asid, address);

	address &= PAGE_MASK;

	/* update this PTE's credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB entry Flags (in PD0) from PTE Flags */
#if (CONFIG_ARC_MMU_VER <= 2)
	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
#else
	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
#endif

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);

	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
	write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));

	/* First verify if an entry for this vaddr+ASID already exists */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/*
	 * If not already present, get a free slot from the MMU.
	 * Otherwise, the Probe will have located the entry and set the INDEX
	 * reg with the existing location, causing the Write cmd to overwrite
	 * the existing entry with the new PD0 and PD1.
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/*
	 * Commit the Entry to MMU.
	 * It doesn't sound safe to use the TLBWriteNI cmd here,
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);

	local_irq_restore(flags);
}

/*
 * Arch hook called by core VM at the end of handle_mm_fault(), when a new
 * PTE is entered in the page tables or an existing one is modified.
 * We aggressively pre-install a TLB entry.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
		      pte_t *ptep)
{
	create_tlb(vma, vaddress, ptep);
}

/*
 * Read the MMU Build Configuration Register, decode it and save it into
 * the cpuinfo structure for later use.
 * No validation is done here; simply read/convert the BCR.
 */
void __init read_decode_mmu_bcr(void)
{
	unsigned int tmp;
	struct bcr_mmu_1_2 *mmu2;	/* encoded MMU2 attr */
	struct bcr_mmu_3 *mmu3;		/* encoded MMU3 attr */
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz = PAGE_SIZE;
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz = 512 << mmu3->pg_sz;
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	}

	mmu->num_tlb = mmu->sets * mmu->ways;
}
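
/*
 * For reference, a sketch of the BCR decode structs consumed above. The
 * authoritative definitions live in asm/arcregs.h; the bitfield layout
 * below (little-endian bit order) is an illustrative assumption and may
 * not match a given tree exactly:
 *
 *	struct bcr_mmu_1_2 {
 *		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
 *	};
 *
 *	struct bcr_mmu_3 {
 *		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1,
 *			     sets:4, ways:4, ver:8;
 *	};
 */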

void __init arc_mmu_init(void)
{
	/*
	 * ASID mgmt data structures are compile time init:
	 * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
	 */

	local_flush_tlb_all();

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first:
 *
 *             ---------------------    -----------
 *             |way0|way1|way2|way3|    |way0|way1|
 *             ---------------------    -----------
 * [set0]      |  0 |  1 |  2 |  3 |    |  0 |  1 |
 * [set1]      |  4 |  5 |  6 |  7 |    |  2 |  3 |
 *             ~                   ~    ~         ~
 * [set127]    | 508| 509| 510| 511|    | 254| 255|
 *             ---------------------    -----------
 *
 * For normal operations we don't (must not) care how the above works, since
 * the MMU cmd getIndex(vaddr) abstracts that out.
 * However, for walking the WAYS of a SET, we need to know this.
 */
#define SET_WAY_TO_IDX(mmu, set, way)	((set) * mmu->ways + (way))
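
/*
 * Worked example (values illustrative): on a 4-way MMU, [set1:way2] maps
 * to linear index 1 * 4 + 2 = 6, matching the column-first table above.
 */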

/*
 * Handling of Duplicate PD (TLB entry) in MMU.
 * - Could be due to buggy customer tapeouts or obscure kernel bugs
 * - MMU complains not at the time of duplicate PD installation, but at the
 *   time of a lookup matching multiple ways
 * - Ideally these should never happen, but if they do, work around it by
 *   deleting the duplicate one
 * - Knob to be verbose about it (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1;	/* Be silent about it or complain (default) */
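
/*
 * A possible debugfs hookup for the knob above, per the TODO (a sketch
 * under assumptions, not wired into this file; the file name and its
 * placement in the debugfs root are hypothetical, and it would need
 * <linux/debugfs.h>):
 *
 *	static int __init dup_pd_debugfs_init(void)
 *	{
 *		debugfs_create_u32("dup_pd_verbose", 0644, NULL,
 *				   (u32 *)&dup_pd_verbose);
 *		return 0;
 *	}
 *	device_initcall(dup_pd_debugfs_init);
 */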

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned int pd0[4], pd1[4];	/* assume max 4 ways */
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @"
							"[%d:%d]/[%d:%d]\n",
							set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
							way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n. This is
					 * critical to our optimised loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 * - Called from Low-Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low-level ASM TLB handler calls this if it finds that the HW and SW ASIDs
 * don't match.
 */
void print_asid_mismatch(int is_fast_path)
{
	int pid_sw, pid_hw;

	pid_sw = current->active_mm->context.asid;
	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;

	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
{
	unsigned int pid_hw;

	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;

	if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
		print_asid_mismatch(0);
}

#endif