/*
 * mmu_context.h — powerpc MMU context management helpers.
 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line; only the prototypes
 * live here.  The out-of-line implementations are platform specific.
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
/*
 * Book3S 64-bit needs no explicit context-management init from this hook
 * (empty stub); every other platform provides an out-of-line
 * mmu_context_init().
 */
#ifdef CONFIG_PPC_BOOK3S_64
static inline void mmu_context_init(void) { }
#else
extern void mmu_context_init(void);
#endif
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c.
 *
 * Records that @next has run on this CPU, publishes next->pgd wherever
 * the platform caches it, then performs the hardware context switch.
 * Note the PGD bookkeeping is done even when prev == next, before the
 * early return below.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/* We must stop all altivec streams before changing the HW
	 * context ("dssall" stops all data streams).
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub architectures: hash 64-bit uses SLB or segment tables
	 * depending on CPU features, everything else goes out of line.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif
}
/* Nothing to do on powerpc when an mm is deactivated */
#define deactivate_mm(tsk,mm)			do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
  71. static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
  72. {
  73. unsigned long flags;
  74. local_irq_save(flags);
  75. switch_mm(prev, next, current);
  76. local_irq_restore(flags);
  77. }
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA;
	 * clear it while lazy — presumably so no stale user PGD is
	 * cached during kernel-only execution (TODO confirm).
	 */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */