  1. /*
  2. * include/asm-s390/mmu_context.h
  3. *
  4. * S390 version
  5. *
  6. * Derived from "include/asm-i386/mmu_context.h"
  7. */
  8. #ifndef __S390_MMU_CONTEXT_H
  9. #define __S390_MMU_CONTEXT_H
  10. #include <asm/pgalloc.h>
  11. #include <asm/uaccess.h>
  12. #include <asm-generic/mm_hooks.h>
  13. static inline int init_new_context(struct task_struct *tsk,
  14. struct mm_struct *mm)
  15. {
  16. mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
  17. #ifdef CONFIG_64BIT
  18. mm->context.asce_bits |= _ASCE_TYPE_REGION3;
  19. #endif
  20. if (current->mm->context.alloc_pgste) {
  21. /*
  22. * alloc_pgste indicates, that any NEW context will be created
  23. * with extended page tables. The old context is unchanged. The
  24. * page table allocation and the page table operations will
  25. * look at has_pgste to distinguish normal and extended page
  26. * tables. The only way to create extended page tables is to
  27. * set alloc_pgste and then create a new context (e.g. dup_mm).
  28. * The page table allocation is called after init_new_context
  29. * and if has_pgste is set, it will create extended page
  30. * tables.
  31. */
  32. mm->context.noexec = 0;
  33. mm->context.has_pgste = 1;
  34. mm->context.alloc_pgste = 1;
  35. } else {
  36. mm->context.noexec = s390_noexec;
  37. mm->context.has_pgste = 0;
  38. mm->context.alloc_pgste = 0;
  39. }
  40. mm->context.asce_limit = STACK_TOP_MAX;
  41. crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
  42. return 0;
  43. }
  44. #define destroy_context(mm) do { } while (0)
  45. #ifndef __s390x__
  46. #define LCTL_OPCODE "lctl"
  47. #else
  48. #define LCTL_OPCODE "lctlg"
  49. #endif
  50. static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
  51. {
  52. pgd_t *pgd = mm->pgd;
  53. S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
  54. if (switch_amode) {
  55. /* Load primary space page table origin. */
  56. pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
  57. S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
  58. asm volatile(LCTL_OPCODE" 1,1,%0\n"
  59. : : "m" (S390_lowcore.user_exec_asce) );
  60. } else
  61. /* Load home space page table origin. */
  62. asm volatile(LCTL_OPCODE" 13,13,%0"
  63. : : "m" (S390_lowcore.user_asce) );
  64. set_fs(current->thread.mm_segment);
  65. }
  66. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  67. struct task_struct *tsk)
  68. {
  69. cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
  70. update_mm(next, tsk);
  71. }
  72. #define enter_lazy_tlb(mm,tsk) do { } while (0)
  73. #define deactivate_mm(tsk,mm) do { } while (0)
  74. static inline void activate_mm(struct mm_struct *prev,
  75. struct mm_struct *next)
  76. {
  77. switch_mm(prev, next, current);
  78. }
  79. #endif /* __S390_MMU_CONTEXT_H */