#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>
/*
 * Most of the context management is out of line.
 */
extern void mmu_context_init(void);
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
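
/*
 * Illustrative sketch (an assumption about the callers, not part of this
 * header): the generic mm code is expected to pair init_new_context() and
 * destroy_context() across an mm's lifetime, roughly:
 *
 *	err = init_new_context(tsk, mm);	// on fork/exec: set up
 *	if (err)				// per-mm MMU context state
 *		goto out_free;
 *	...
 *	destroy_context(mm);			// on final teardown of the mm
 */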

/*
 * switch_mm is the entry point called from the architecture-independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/* We must stop all AltiVec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub-architectures.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif
}
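
/*
 * Illustrative sketch (assumed caller, not part of this header): the
 * scheduler's context_switch() in kernel/sched.c calls switch_mm() with
 * interrupts already disabled, along these lines:
 *
 *	struct mm_struct *oldmm = prev->active_mm;
 *
 *	if (next->mm)				// user task: real mm switch
 *		switch_mm(oldmm, next->mm, next);
 *	else					// kernel thread: keep oldmm
 *		enter_lazy_tlb(oldmm, next);
 */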
#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}
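
/*
 * Illustrative sketch (assumed caller, not part of this header): exec_mmap()
 * in fs/exec.c installs the new image's mm and then uses activate_mm() so
 * this CPU starts translating through the new page tables:
 *
 *	task_lock(tsk);
 *	tsk->mm = mm;				// publish the new mm
 *	tsk->active_mm = mm;
 *	activate_mm(old_active_mm, mm);		// switch the HW context
 *	task_unlock(tsk);
 */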

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */