tlbflush.h

#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
# define flush_tlb_all()		smp_flush_tlb_all()
#else
# define flush_tlb_all()		local_flush_tlb_all()
# define smp_flush_tlb_cpumask(m)	local_flush_tlb_all()
#endif
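
/*
 * Note (not in the original source): on SMP, smp_flush_tlb_all() is
 * expected to run local_flush_tlb_all() on every CPU via an IPI
 * broadcast (see arch/ia64/kernel/smp.c); the UP build simply aliases
 * everything to the local flush, so even the cpumask variant degrades
 * to a full local flush there.
 */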
static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	/*
	 * The caller has already zeroed mm->context, so activate_context()
	 * allocates a fresh context number and reloads the region registers,
	 * leaving any stale translations tagged with the old context
	 * unreachable.
	 */
	if (mm == current->active_mm)
		activate_context(mm);
}
/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to
 * write-protect the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/* Mark the old context number for purging before it gets recycled. */
	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
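
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * after a pass that rewrites every PTE of an address space, e.g.
 * write-protecting them for copy-on-write during fork(), the whole
 * context is invalidated in one shot rather than page by page:
 *
 *	write_protect_all_ptes(mm);	// hypothetical helper
 *	flush_tlb_mm(mm);		// drop all entries tagged with mm's context
 */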
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		/* ptc.l encodes the purge size as log2(size) in bits 7:2. */
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		/* Not the live address space: just force a new context later. */
		vma->vm_mm->context = 0;
#endif
}
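
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * after updating a single user PTE, only that page's translation needs
 * purging:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte_wrprotect(old_pte));
 *	flush_tlb_page(vma, addr);
 */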
/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated.  The virtual page table is now flushed via the normal
	 * gather/flush interface (see tlb.h).
	 */
}
/*
 * Flush the local TLB.  Invoked from another cpu using an IPI.
 */
#ifdef CONFIG_SMP
void smp_local_flush_tlb(void);
#else
#define smp_local_flush_tlb()
#endif
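
/*
 * Note (assumption, not from this header): the SMP implementation lives
 * in arch/ia64/kernel/smp.c, where the IPI handler on the target CPU is
 * expected to amount to a local_flush_tlb_all() call.
 */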
#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me: flushes everything, not just [start, end) */

#endif /* _ASM_IA64_TLBFLUSH_H */