/* include/asm-ppc64/tlbflush.h */
  1. #ifndef _PPC64_TLBFLUSH_H
  2. #define _PPC64_TLBFLUSH_H
  3. /*
  4. * TLB flushing:
  5. *
  6. * - flush_tlb_mm(mm) flushes the specified mm context TLB's
  7. * - flush_tlb_page(vma, vmaddr) flushes one page
  8. * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
  9. * - flush_tlb_range(vma, start, end) flushes a range of pages
  10. * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  11. * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  12. */
  13. #include <linux/percpu.h>
  14. #include <asm/page.h>
  15. #define PPC64_TLB_BATCH_NR 192
  16. struct mm_struct;
  17. struct ppc64_tlb_batch {
  18. unsigned long index;
  19. unsigned long context;
  20. struct mm_struct *mm;
  21. pte_t pte[PPC64_TLB_BATCH_NR];
  22. unsigned long addr[PPC64_TLB_BATCH_NR];
  23. unsigned long vaddr[PPC64_TLB_BATCH_NR];
  24. };
  25. DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  26. extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
  27. static inline void flush_tlb_pending(void)
  28. {
  29. struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
  30. if (batch->index)
  31. __flush_tlb_pending(batch);
  32. put_cpu_var(ppc64_tlb_batch);
  33. }
  34. #define flush_tlb_mm(mm) flush_tlb_pending()
  35. #define flush_tlb_page(vma, addr) flush_tlb_pending()
  36. #define flush_tlb_page_nohash(vma, addr) do { } while (0)
  37. #define flush_tlb_range(vma, start, end) \
  38. do { (void)(start); flush_tlb_pending(); } while (0)
  39. #define flush_tlb_kernel_range(start, end) flush_tlb_pending()
  40. #define flush_tlb_pgtables(mm, start, end) do { } while (0)
  41. extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
  42. int local);
  43. void flush_hash_range(unsigned long context, unsigned long number, int local);
  44. #endif /* _PPC64_TLBFLUSH_H */