/*
 * include/asm-ppc64/tlbflush.h
 */
#ifndef _PPC64_TLBFLUSH_H
#define _PPC64_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

#include <linux/percpu.h>
#include <asm/page.h>
  15. #define PPC64_TLB_BATCH_NR 192
  16. struct mm_struct;
  17. struct ppc64_tlb_batch {
  18. unsigned long index;
  19. struct mm_struct *mm;
  20. pte_t pte[PPC64_TLB_BATCH_NR];
  21. unsigned long vaddr[PPC64_TLB_BATCH_NR];
  22. unsigned int large;
  23. };
  24. DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  25. extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
  26. static inline void flush_tlb_pending(void)
  27. {
  28. struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
  29. if (batch->index)
  30. __flush_tlb_pending(batch);
  31. put_cpu_var(ppc64_tlb_batch);
  32. }
  33. #define flush_tlb_mm(mm) flush_tlb_pending()
  34. #define flush_tlb_page(vma, addr) flush_tlb_pending()
  35. #define flush_tlb_page_nohash(vma, addr) do { } while (0)
  36. #define flush_tlb_range(vma, start, end) \
  37. do { (void)(start); flush_tlb_pending(); } while (0)
  38. #define flush_tlb_kernel_range(start, end) flush_tlb_pending()
  39. #define flush_tlb_pgtables(mm, start, end) do { } while (0)
  40. extern void flush_hash_page(unsigned long va, pte_t pte, int local);
  41. void flush_hash_range(unsigned long number, int local);
  42. #endif /* _PPC64_TLBFLUSH_H */