mmu_context.h

#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#ifndef CONFIG_PPC64
#include <asm-ppc/mmu_context.h>
#else

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/cputable.h>

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * When entering a kernel thread there is no valid user segment, so
 * mark paca->pgdir NULL so that an SLB miss on a user address faults.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
#ifdef CONFIG_PPC_64K_PAGES
	get_paca()->pgdir = NULL;
#endif /* CONFIG_PPC_64K_PAGES */
}
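
/*
 * Caller sketch, for orientation only (the real code lives in the
 * architecture-independent scheduler, kernel/sched.c): lazy TLB mode
 * is entered when switching to a task with no mm of its own, i.e. a
 * kernel thread. context_switch() does roughly:
 *
 *	if (!next->mm) {
 *		next->active_mm = oldmm;
 *		atomic_inc(&oldmm->mm_count);
 *		enter_lazy_tlb(oldmm, next);
 *	}
 */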
#define NO_CONTEXT	0
#define MAX_CONTEXT	(0x100000-1)
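
/*
 * A context number is allocated per mm by init_new_context() below and
 * feeds into VSID generation for that address space. 0 is reserved as
 * NO_CONTEXT, leaving 2^20 - 1 usable context IDs; see the hash MMU
 * code for the authoritative VSID details.
 */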
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
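
/*
 * Lifecycle sketch (generic code, not defined here): a context is set
 * up when an mm is created, e.g. dup_mm() in kernel/fork.c calls
 * init_new_context(tsk, mm) after mm_init(), and destroy_context(mm)
 * runs from __mmdrop() when the last reference to the mm goes away.
 * Treat the exact call sites as illustrative; they are in the
 * architecture-independent code.
 */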
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
		cpu_set(smp_processor_id(), next->cpu_vm_mask);

	/* No need to flush userspace segments if the mm doesn't change */
#ifdef CONFIG_PPC_64K_PAGES
	if (prev == next && get_paca()->pgdir == next->pgd)
		return;
#else
	if (prev == next)
		return;
#endif /* CONFIG_PPC_64K_PAGES */
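
	/*
	 * dssall ("data stream stop all") shuts down any active VMX
	 * software-prefetch streams; stream state is not preserved
	 * across a context switch, so stop them before switching
	 * segments.
	 */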
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
}
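
/*
 * Caller sketch (for orientation; the real code is in kernel/sched.c):
 * when the next task has an mm, context_switch() hands over the
 * address space with roughly
 *
 *	switch_mm(oldmm, mm, next);
 *
 * where next is the task being switched to and mm is its mm_struct.
 */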
#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}
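
/*
 * Usage sketch: exec is the main caller. exec_mmap() in fs/exec.c
 * installs the new mm and then activates it, roughly:
 *
 *	task_lock(tsk);
 *	active_mm = tsk->active_mm;
 *	tsk->mm = mm;
 *	tsk->active_mm = mm;
 *	activate_mm(active_mm, mm);
 *	task_unlock(tsk);
 *
 * Interrupts are disabled inside activate_mm() itself, so the caller
 * need not do so.
 */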
#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */