  1. /* arch/sparc64/mm/tlb.c
  2. *
  3. * Copyright (C) 2004 David S. Miller <davem@redhat.com>
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/init.h>
  7. #include <linux/percpu.h>
  8. #include <linux/mm.h>
  9. #include <linux/swap.h>
  10. #include <linux/preempt.h>
  11. #include <asm/pgtable.h>
  12. #include <asm/pgalloc.h>
  13. #include <asm/tlbflush.h>
  14. #include <asm/cacheflush.h>
  15. #include <asm/mmu_context.h>
  16. #include <asm/tlb.h>
/* Heavily inspired by the ppc64 code. */

/* Per-cpu TLB flush batch: pending virtual addresses are queued here
 * (by tlb_batch_add) and flushed in one shot by flush_tlb_pending. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
  19. void flush_tlb_pending(void)
  20. {
  21. struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
  22. preempt_disable();
  23. if (mp->tlb_nr) {
  24. flush_tsb_user(mp);
  25. if (CTX_VALID(mp->mm->context)) {
  26. #ifdef CONFIG_SMP
  27. smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
  28. &mp->vaddrs[0]);
  29. #else
  30. __flush_tlb_pending(CTX_HWBITS(mp->mm->context),
  31. mp->tlb_nr, &mp->vaddrs[0]);
  32. #endif
  33. }
  34. mp->tlb_nr = 0;
  35. }
  36. preempt_enable();
  37. }
/* Queue a TLB flush for @vaddr in @mm after its PTE was changed from
 * @orig, performing any D-cache alias flush needed first.
 *
 * @ptep is unused here; it is part of the interface callers expect.
 *
 * NOTE(review): callers presumably hold the page table lock with
 * preemption disabled, making the bare __get_cpu_var() safe — confirm
 * against the call sites (not visible in this file).
 */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	/* Queue page-aligned addresses; bit 0 of the queued vaddr is
	 * set when the old mapping was executable (consumed by the
	 * flush side, e.g. flush_tsb_user — TODO confirm).
	 */
	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	/* If the old PTE was dirty and this chip has virtually-indexed
	 * D-caches (i.e. not a hypervisor/sun4v system), a writable
	 * kernel alias of the page may be stale.  Flush the D-cache
	 * when the user and kernel mappings land in different alias
	 * colors.
	 */
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		/* No struct page — nothing to alias-flush. */
		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		/* Bit 13 (the 8K page-size bit) selects the D-cache
		 * alias color; only differing colors need a flush.
		 */
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* Full-mm teardown flushes everything at once elsewhere; no
	 * point queueing individual pages.
	 */
	if (mp->fullmm)
		return;

	nr = mp->tlb_nr;

	/* The batch holds addresses for a single mm.  Switching mms
	 * forces a flush of whatever is pending first.
	 */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}