tlb.h

/*
 * TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <asm/pgtable.h>
#endif
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
#endif

struct mmu_gather;
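
/*
 * No per-VMA work is needed when an mmu_gather starts or ends: on 32-bit
 * hash-MMU kernels the hash-table entry is flushed per PTE via
 * __tlb_remove_tlb_entry() below, and everything else is deferred to
 * tlb_flush(), so both hooks can be no-ops.
 */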
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)
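
/*
 * Pick a tlb_flush() implementation: processors without the standard hash
 * MMU (!CONFIG_PPC_STD_MMU) simply flush the whole mm, 64-bit hash-MMU
 * kernels flush any pending invalidation batch inline below, and 32-bit
 * hash-MMU kernels supply an out-of-line tlb_flush().
 */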
#if !defined(CONFIG_PPC_STD_MMU)

#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)

#elif defined(__powerpc64__)

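/*
 * pte_free_finish() completes the deferred freeing of page-table pages
 * that pte_free() batched up; it runs after the pending invalidations
 * have been flushed.
 */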
extern void pte_free_finish(void);

static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	pte_free_finish();
}

#else

extern void tlb_flush(struct mmu_gather *tlb);

#endif

/* Get the generic bits... */
#include <asm-generic/tlb.h>
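
/*
 * On 32-bit hash-MMU kernels a Linux PTE with _PAGE_HASHPTE set has a
 * matching entry in the hardware hash table that must be invalidated as
 * the PTE is torn down; on all other configurations there is no per-PTE
 * work to do here.
 */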
#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)

#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)

#else
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
}

#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */