/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

/* One batch of pending TLB flushes per CPU. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
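
/* Flush every virtual address queued in this CPU's batch.  On SMP
 * the flush is broadcast via smp_flush_tlb_pending(); on UP it is
 * done locally with __flush_tlb_pending(), using the hardware
 * context number of the batched mm.
 */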
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}
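
/* Queue one (mm, vaddr) pair for a deferred TLB flush.  The low bit
 * of the page-aligned address records whether the old PTE was
 * executable, so the eventual flush can demap the I-TLB entry as
 * well.  Dirty file-backed pages that can alias in the D-cache are
 * flushed from the D-cache before the address is queued.
 */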
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		/* Bit 13 is the only virtual index bit above the 8K
		 * page offset, so kernel and user mappings that differ
		 * in it can alias in the D-cache.
		 */
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	if (mp->fullmm)
		return;

	nr = mp->tlb_nr;

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
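
/* Queue flushes for the VPTE (linear page table) mappings that cover
 * the user range [start, end), so that stale TLB entries for the page
 * tables themselves are invalidated along with the rest of the batch.
 */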
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->fullmm)
		return;

	/* If start is greater than end, that is a real problem. */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}