/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <asm/pgalloc.h>
/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	unsigned long		range_start;
	unsigned long		range_end;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
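
/*
 * One gather state per CPU: tlb_gather_mmu() takes this CPU's
 * mmu_gathers instance via get_cpu_var(), which also disables
 * preemption, so the unmap path stays on one CPU until
 * tlb_finish_mmu() drops the reference with put_cpu_var().
 */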

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	return tlb;
}
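
/*
 * Note that range_start/range_end are deliberately left uninitialized
 * here: the !fullmm path resets them in tlb_start_vma() before any
 * tlb_remove_tlb_entry() call, and the fullmm path never reads them.
 */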

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
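
/*
 * A sketch of how the generic unmap path is expected to drive this
 * API (the actual caller lives in mm/memory.c, not in this header):
 *
 *	tlb = tlb_gather_mmu(mm, 0);
 *	for each vma in [start, end):
 *		tlb_start_vma(tlb, vma);
 *		for each present pte:
 *			tlb_remove_tlb_entry(tlb, ptep, addr);
 *			tlb_remove_page(tlb, page);
 *		tlb_end_vma(tlb, vma);
 *	tlb_finish_mmu(tlb, start, end);
 *
 * With full_mm_flush = 1 (process exit), the per-vma range tracking
 * is skipped and tlb_finish_mmu() issues one flush_tlb_mm() instead.
 */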

/*
 * Memorize the range for the TLB flush: grow [range_start, range_end)
 * to cover each PTE removed, so tlb_end_vma() can flush just that span.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
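
/*
 * Example (hypothetical addresses, 4K pages): after tlb_start_vma()
 * resets the range to the empty sentinel [TASK_SIZE, 0), removing
 * PTEs at 0x8000 and 0xb000 leaves range_start = 0x8000 and
 * range_end = 0xc000, so tlb_end_vma() flushes 0x8000-0xc000 rather
 * than the whole TLB.
 */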

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->range_end > 0)
		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
}
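
/*
 * The range_end > 0 guard matters: if no PTEs were removed between
 * tlb_start_vma() and tlb_end_vma(), the range is still the empty
 * sentinel [TASK_SIZE, 0) and no flush is issued at all.
 */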

/*
 * No deferred batching on ARM: pages and page tables are freed
 * immediately rather than being queued on the gather.
 */
#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif