tlbflush.h

#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
#endif
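
/*
 * Finish a deferred flush of MM on the local CPU: if MM is the address
 * space currently active here, reactivate it so that a fresh context
 * (region ID) is installed in place of the one that was just invalidated.
 */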
static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;
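
	/*
	 * Mark the old context as needing a flush before it can be recycled,
	 * then drop it so that a new context is allocated the next time this
	 * mm is activated.
	 */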
	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
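
/*
 * Flush the TLB entries covering the user address range [start, end) of VMA;
 * implemented out of line in the ia64 mm code.
 */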
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
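	/*
	 * The page can be present in the local TLB only if VMA's address space
	 * is the one running on this CPU; in that case purge just that page
	 * with ptc.l.  Otherwise, invalidate the context so a new one is
	 * allocated the next time the mm is activated.
	 */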
	if (vma->vm_mm == current->active_mm)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		vma->vm_mm->context = 0;
#endif
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h).
	 */
}
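
/*
 * Until a finer-grained implementation exists, a kernel-range flush simply
 * purges the entire TLB.
 */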
#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */

#endif /* _ASM_IA64_TLBFLUSH_H */