tlb_32.c

/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c":
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash != 0) {
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
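
/*
 * Note (illustrative, not in the original source): __pa(ptep) & PAGE_MASK
 * is the page-aligned physical address of the page-table page holding this
 * PTE, i.e. the same physical page the pmd value points at, which is what
 * flush_hash_pages() needs in order to locate the Linux PTE and tear down
 * the matching hardware HPTE.  For example, assuming PAGE_OFFSET = 0xc0000000
 * and 4 KB pages, a ptep at 0xc0345678 maps to physical 0x00345678, which
 * masks down to 0x00345000.
 */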

/*
 * Called by ptep_set_access_flags, must flush on CPUs for which the
 * DSI handler can't just "fixup" the TLB on a write fault
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
	if (Hash != 0)
		return;
	_tlbie(addr);
}

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash == 0) {
		/*
		 * 603 needs to flush the whole TLB here since
		 * it doesn't use a hash table.
		 */
		_tlbia();
	}
}

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux page tables are concerned, flush it too.
 *    -- Cort
 */
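
/*
 * Illustrative sketch (not part of the original file): generic mm code is
 * expected to update the Linux page tables first and only then invalidate
 * stale translations through one of the entry points above.  The function
 * name below is a placeholder for illustration only.
 */
#if 0
static void example_unmap_and_flush(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	/* ... clear or modify the PTEs covering [start, end) here ... */
	flush_tlb_range(vma, start, end);	/* then drop stale TLB/hash entries */
}
#endif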

/*
 * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
 * the cache operations on the bus.  Hence we need to use an IPI
 * to get the other CPU(s) to invalidate their TLBs.
 */
#ifdef CONFIG_SMP_750
#define FINISH_FLUSH	smp_send_tlb_invalidate(0)
#else
#define FINISH_FLUSH	do { } while (0)
#endif
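
/*
 * Note (descriptive, not in the original source): every flush entry point
 * below ends with FINISH_FLUSH, so on CONFIG_SMP_750 the other CPU(s) are
 * sent an IPI asking them to invalidate their own TLBs; in all other
 * configurations the macro compiles away to nothing.
 */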

/*
 * Flush the TLB and hash-table entries covering the range [start, end)
 * in the given address space, one page-table page (PMD entry) at a time.
 */
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;		/* round start down to a page boundary */
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;	/* last byte of the last page in the range */
	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
	for (;;) {
		/* flush no further than the end of the current PMD's coverage */
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}
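
/*
 * Worked example (illustrative, assuming 4 KB pages and 4 MB per PGDIR
 * entry): for start = 0x10000234 and end = 0x10002010, start rounds down
 * to 0x10000000 and end rounds up to 0x10002fff; the whole range then
 * lives under a single PMD, so one flush_hash_pages() call covers
 * count = ((0x10002fff - 0x10000000) >> 12) + 1 = 3 pages.
 */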

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
	FINISH_FLUSH;
}

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;

	if (Hash == 0) {
		_tlbia();
		return;
	}

	/*
	 * It is safe to go down the mm's list of vmas when called
	 * from dup_mmap, holding mmap_sem.  It would also be safe from
	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
	 * but it seems dup_mmap is the only SMP case which gets here.
	 */
	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
	FINISH_FLUSH;
}

/*
 * Flush the single page at vmaddr: kernel addresses are looked up in
 * init_mm, user addresses in the vma's address space.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
	FINISH_FLUSH;
}

/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
	FINISH_FLUSH;
}