
/*
 *  linux/include/asm-arm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;            /* mm being torn down */
        unsigned int            freed;          /* pages freed so far */
        unsigned int            fullmm;         /* non-zero for a full-mm flush */
        unsigned int            flushes;        /* flush statistics (not used in this file) */
        unsigned int            avoided_flushes;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        /* per-CPU gather: the caller must keep us on this CPU until finish */
        int cpu = smp_processor_id();
        struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);

        tlb->mm = mm;
        tlb->freed = 0;
        tlb->fullmm = full_mm_flush;

        return tlb;
}
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = tlb->mm;
        unsigned long freed = tlb->freed;
        int rss = get_mm_counter(mm, rss);

        /* clamp so the rss counter cannot go negative */
        if (rss < freed)
                freed = rss;
        add_mm_counter(mm, rss, -freed);

        if (tlb->fullmm)
                flush_tlb_mm(mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();
}
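
/*
 * Illustrative sketch, not part of the original header: the full-MM
 * teardown path (e.g. process exit).  The function name and arguments
 * are hypothetical; only the tlb_* calls are defined by this file.
 * With full_mm_flush set, the single flush_tlb_mm() in tlb_finish_mmu()
 * replaces many single-entry invalidates -- the StrongARM observation
 * noted at the top of this file.
 */
#if 0   /* example only */
static void example_exit_mmap(struct mm_struct *mm)
{
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 1);

        /* ... unmap each vma, handing every freed page to
           tlb_remove_page() ... */

        tlb_finish_mmu(tlb, 0, TASK_SIZE);      /* one whole-TLB flush */
}
#endif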
static inline unsigned int tlb_is_full_mm(struct mmu_gather *tlb)
{
        return tlb->fullmm;
}

#define tlb_remove_tlb_entry(tlb,ptep,address)  do { } while (0)
/*
 * For tlb vma handling, we can optimise these away entirely when we
 * are doing a full MM flush.  When we're doing a munmap, the vmas are
 * adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
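
/*
 * Illustrative sketch, not part of the original header: the munmap
 * path.  The function name is hypothetical.  Because fullmm is zero
 * here, tlb_start_vma()/tlb_end_vma() flush only the range covered by
 * the (already adjusted) vma rather than the whole cache or TLB.
 */
#if 0   /* example only */
static void example_unmap_region(struct mm_struct *mm,
                                 struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end)
{
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

        tlb_start_vma(tlb, vma);        /* flush_cache_range() on the vma */
        /* ... clear the page table entries, passing each freed page
           to tlb_remove_page() ... */
        tlb_end_vma(tlb, vma);          /* flush_tlb_range() on the vma */
        tlb_finish_mmu(tlb, start, end);
}
#endif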
#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
#define pte_free_tlb(tlb,ptep)          pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp)          pmd_free(pmdp)
#define tlb_migrate_finish(mm)          do { } while (0)

#endif