tlbflush.h

#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>

extern void flush_tlb_all(void);
/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes
 * (although it will probably work 99% of the time). HP-UX
 * processes are free to play with the space ids and save them
 * over long periods of time, etc., so we would have to preserve the
 * space and just flush the entire TLB. We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);		/* Should never happen */

#ifdef CONFIG_SMP
	flush_tlb_all();
#else
	if (mm) {
		/* Rather than purging entries one at a time, retire the
		 * old space id and allocate a fresh one: stale TLB
		 * entries tagged with the old sid can no longer match. */
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}
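/*
 * A minimal usage sketch (not part of the original header; the helper
 * below is hypothetical): on the UP path, "flushing" an mm is really a
 * space-id swap, so the call costs the same no matter how many pages
 * the mm had mapped.
 */
#if 0
static void example_retire_mm(struct mm_struct *mm)
{
	/* ... tear down the user mappings of mm ... */
	flush_tlb_mm(mm);	/* UP: recycle the space id; SMP: full flush */
}
#endif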
/* parisc has nothing extra to purge when page tables go away */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */
	mb();
	mtsp(vma->vm_mm->context, 1);	/* select the mm's space id */
	purge_tlb_start();
	pdtlb(addr);			/* purge the data TLB entry */
	pitlb(addr);			/* purge the instruction TLB entry */
	purge_tlb_end();
}
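/*
 * Hedged sketch (mine, not from this header): a typical caller flushes a
 * single page right after changing its pte, e.g. when write-protecting
 * it. example_wrprotect_page() is hypothetical, and set_pte() is assumed
 * to take (ptep, pteval) as in kernels of this era.
 */
#if 0
static void example_wrprotect_page(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, pte_wrprotect(*ptep));	/* update the pte first */
	flush_tlb_page(vma, addr);		/* then drop the stale entry */
}
#endif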
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)	/* XXX arbitrary cutoff, should be tuned */
		flush_tlb_all();
	else {
		mtsp(vma->vm_mm->context, 1);
		if (split_tlb) {
			/* Separate I- and D-TLBs: purge both per page */
			purge_tlb_start();
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
			purge_tlb_end();
		} else {
			/* Combined TLB: purging the data side suffices */
			purge_tlb_start();
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
			purge_tlb_end();
		}
	}
}
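/*
 * Worked example (an assumption of mine, not original text): with 4 KB
 * pages, a 1 MB range gives npages = 256, below the 512-page cutoff, so
 * it is purged page by page; a 4 MB range gives npages = 1024 and falls
 * back to flush_tlb_all(). example_unmap_range() is hypothetical.
 */
#if 0
static void example_unmap_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	/* ... clear the ptes covering [start, end) ... */
	flush_tlb_range(vma, start, end);
}
#endif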
/* Flushing a kernel-space range just flushes the whole TLB */
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

#endif