/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
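
/* TLB flushes are batched per CPU: tlb_batch_add() queues up to
 * TLB_BATCH_NR virtual addresses in the local mmu_gather, and
 * flush_tlb_pending() shoots the whole batch down in one operation.
 * The hardware flush is only issued while the mm still owns a valid
 * context; otherwise the queued entries are simply discarded.
 */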
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}
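
/* Queue one user virtual address for a deferred TLB flush.  Bit 0 of
 * the queued address records whether the old mapping was executable,
 * so the eventual flush can take out the I-TLB entry as well.  If the
 * original PTE was dirty and maps a page-cache page, the D-cache is
 * flushed first when the kernel and user mappings may alias.  Nothing
 * is queued while the batch is frozen (mp->tlb_frozen) for a full-mm
 * flush.
 */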
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
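		/* With 8K base pages, bit 13 is the lowest virtual-index
		 * bit above the page offset: if the kernel and user
		 * mappings differ there, they may alias in the virtually
		 * indexed D-cache, so flush it before the translation
		 * goes away.
		 */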
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	if (mp->tlb_frozen)
		return;

	nr = mp->tlb_nr;

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
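
/* When a range of page tables is torn down, the TLB entries covering
 * the corresponding part of the virtual linear page table (VPTE) area
 * must go as well; queue them through the same per-CPU batch.
 */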
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->tlb_frozen)
		return;

	/* If start is greater than end, that is a real problem.  */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;
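
	/* Each 8-byte PTE in the linear page table maps one page, so a
	 * virtual address va corresponds to the VPTE area address
	 * vpte_base + (va >> (PAGE_SHIFT - 3)), i.e. vpte_base plus
	 * (va / PAGE_SIZE) * sizeof(pte_t).
	 */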
	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}