/*
 * TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <asm/pgtable.h>
#endif
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
#endif

#include <linux/pagemap.h>

struct mmu_gather;
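
/*
 * No per-VMA work is needed while tearing down mappings: invalidation is
 * driven per PTE via __tlb_remove_tlb_entry() and by the final tlb_flush(),
 * so the VMA-level hooks are empty.
 */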
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)
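
/*
 * tlb_flush() is what finally gets rid of stale translations once the page
 * tables have been torn down.  Three cases:
 *   - no hash table (!CONFIG_PPC_STD_MMU): flush the whole mm,
 *   - 64-bit hash MMU: flush whatever is still queued in the per-cpu
 *     invalidation batch,
 *   - 32-bit hash MMU: out-of-line implementation in the mm code.
 */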
#if !defined(CONFIG_PPC_STD_MMU)

#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)

#elif defined(__powerpc64__)

extern void pte_free_finish(void);

static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);
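
	/* pte_free_finish() completes the deferred freeing of page-table
	 * pages that was queued up elsewhere in the powerpc mm code while
	 * the tear-down was in progress.
	 */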
	pte_free_finish();
}

#else

extern void tlb_flush(struct mmu_gather *tlb);

#endif

/* Get the generic bits... */
#include <asm-generic/tlb.h>
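
/*
 * On 32-bit hash-MMU processors a linux PTE may be backed by an entry in the
 * hash table; that entry must be evicted when the PTE is torn down, which is
 * what __tlb_remove_tlb_entry() does below.  Everywhere else the per-PTE
 * hook is a no-op.
 */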
#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)

#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)

#else
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
}

#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */