tlbflush.h

#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines.  These can be very expensive, so
 * try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
#endif

static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of
 * fork() and exit().  fork() ends up here because the copy-on-write
 * mechanism needs to write-protect the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/*
	 * Mark the old context as needing a TLB flush before it can be
	 * reused, and force allocation of a fresh context the next time
	 * this mm is activated:
	 */
	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
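
/*
 * Usage sketch (illustrative only, not compiled): the fork() path
 * write-protects the parent's PTEs for copy-on-write and then calls
 * flush_tlb_mm() so the parent cannot keep writing through stale,
 * still-writable TLB entries.  This is a simplified assumption of the
 * dup_mmap() pattern in kernel/fork.c, not a verbatim copy; "new_mm"
 * is a hypothetical name:
 */
#if 0
	for (vma = current->mm->mmap; vma; vma = vma->vm_next)
		copy_page_range(new_mm, current->mm, vma);	/* write-protects COW PTEs */
	flush_tlb_mm(current->mm);	/* discard the parent's stale translations */
#endif
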
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular TLB flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		/* purge the translation from the local TLB: */
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		/* mm isn't active: invalidating its context is enough */
		vma->vm_mm->context = 0;
#endif
}
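
/*
 * Usage sketch (illustrative only, not compiled): a caller that
 * downgrades a single user PTE to read-only and then discards the stale
 * translation.  The helper name example_wrprotect_page() is hypothetical;
 * set_pte(), pte_wrprotect(), and flush_tlb_page() are the real
 * interfaces (pte_t and friends come from <asm/pgtable.h>):
 */
#if 0
static void
example_wrprotect_page (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, pte_wrprotect(*ptep));	/* make the page read-only */
	flush_tlb_page(vma, addr);		/* drop the cached, possibly writable, entry */
}
#endif
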
/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated.  The virtual page table is now flushed via the normal
	 * gather/flush interface (see tlb.h).
	 */
}

#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
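
/*
 * Usage sketch (illustrative only, not compiled): after kernel page-table
 * entries for a virtual range are torn down (e.g. on a vfree()-style
 * path), the stale kernel translations must be discarded.  On ia64 this
 * currently degrades to a full flush, per the XXX above.
 * unmap_example_area() is a hypothetical helper:
 */
#if 0
static void
unmap_example_area (unsigned long start, unsigned long end)
{
	/* ... clear the kernel PTEs covering [start, end) ... */
	flush_tlb_kernel_range(start, end);	/* currently flushes everything */
}
#endif
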
#endif /* _ASM_IA64_TLBFLUSH_H */