tlbflush.h
#ifndef _PPC64_TLBFLUSH_H
#define _PPC64_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
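
/*
 * Illustrative sketch (not part of this header): generic mm code calls the
 * flush_tlb_* hooks defined below after it has modified page tables.  The
 * function example_flush_after_unmap() is hypothetical and only shows the
 * intended calling convention; on ppc64 both calls simply drain the per-cpu
 * batch built up elsewhere by the arch PTE update code.
 *
 *	static void example_flush_after_unmap(struct vm_area_struct *vma,
 *					      unsigned long start,
 *					      unsigned long end)
 *	{
 *		if (end - start == PAGE_SIZE)
 *			flush_tlb_page(vma, start);
 *		else
 *			flush_tlb_range(vma, start, end);
 *	}
 */
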
#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct mm_struct;

struct ppc64_tlb_batch {
	unsigned long index;
	struct mm_struct *mm;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

static inline void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
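
/*
 * Illustrative sketch (not part of this header): a hypothetical producer
 * showing how the ppc64_tlb_batch fields fit together.  The batch is filled
 * by the arch PTE update code and drained either when it fills up or when
 * one of the flush_tlb_* hooks below runs.  The helper example_batch_add()
 * and its exact logic are an assumption for illustration, not the kernel's
 * actual implementation; it assumes __flush_tlb_pending() resets
 * batch->index back to zero.
 *
 *	static void example_batch_add(struct mm_struct *mm,
 *				      unsigned long vaddr, pte_t pte)
 *	{
 *		struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 *
 *		batch->mm = mm;
 *		batch->pte[batch->index] = pte;
 *		batch->vaddr[batch->index] = vaddr;
 *		if (++batch->index == PPC64_TLB_BATCH_NR)
 *			__flush_tlb_pending(batch);
 *		put_cpu_var(ppc64_tlb_batch);
 *	}
 */
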
#define flush_tlb_mm(mm)			flush_tlb_pending()
#define flush_tlb_page(vma, addr)		flush_tlb_pending()
#define flush_tlb_page_nohash(vma, addr)	do { } while (0)
#define flush_tlb_range(vma, start, end) \
		do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

extern void flush_hash_page(unsigned long va, pte_t pte, int local);
extern void flush_hash_range(unsigned long number, int local);

#endif /* _PPC64_TLBFLUSH_H */