/* (scrape artifacts removed: web-viewer file-size label and line-number run) */
  1. /* arch/sparc64/mm/tlb.c
  2. *
  3. * Copyright (C) 2004 David S. Miller <davem@redhat.com>
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/init.h>
  7. #include <linux/percpu.h>
  8. #include <linux/mm.h>
  9. #include <linux/swap.h>
  10. #include <linux/preempt.h>
  11. #include <asm/pgtable.h>
  12. #include <asm/pgalloc.h>
  13. #include <asm/tlbflush.h>
  14. #include <asm/cacheflush.h>
  15. #include <asm/mmu_context.h>
  16. #include <asm/tlb.h>
/* Heavily inspired by the ppc64 code. */

/* Per-CPU TLB flush batch: modified user virtual addresses are queued
 * here and flushed in bulk by flush_tlb_pending(). */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* Drain this CPU's mmu_gather batch, flushing every queued address.
 *
 * get_cpu_var() pins us to the CPU (disables preemption) so the batch
 * cannot change under us; put_cpu_var() drops the pin on exit.  If the
 * batch is empty this is just the get/put pair.
 */
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		/* Scrub matching TSB entries before touching the TLB
		 * (see flush_tsb_user()). */
		flush_tsb_user(mp);

		/* Only hit the hardware TLB if the mm still holds a
		 * valid hardware context. */
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}

	put_cpu_var(mmu_gathers);
}
/* Queue one virtual address for a deferred TLB flush.
 *
 * @mm:    address space the PTE belongs to
 * @vaddr: user virtual address whose translation changed
 * @ptep:  pointer to the PTE (unused in this function)
 * @orig:  previous PTE value; its exec/dirty bits drive the cache logic
 *
 * Uses __get_cpu_var() (no preemption pin here) — presumably the caller
 * already runs with the per-CPU batch pinned; TODO confirm against the
 * tlb_remove_* call sites.  The batch is flushed eagerly when a
 * different mm shows up or when TLB_BATCH_NR entries accumulate.
 */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	/* Encode the executable bit of the old PTE into bit 0 of the
	 * queued address (PAGE_MASK just cleared it), presumably so the
	 * flush code can tell I-TLB entries apart — verify in the
	 * __flush_tlb_pending implementation. */
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	/* On non-hypervisor (pre-sun4v) chips, a dirty page that is about
	 * to lose its mapping may still have stale D-cache lines under a
	 * different virtual color; flush them before the translation goes
	 * away. */
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		/* Only flush when the kernel and user addresses differ in
		 * bit 13 — the D-cache alias color bit for 8K pages,
		 * presumably; anti-aliased accesses hit the same lines. */
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* A full-mm teardown flushes the whole context elsewhere, so
	 * per-page batching would be wasted work. */
	if (mp->fullmm)
		return;

	nr = mp->tlb_nr;

	/* The batch holds addresses for exactly one mm; switching mms
	 * forces a drain first. */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}