mmu_context.h

/*
 * include/asm-s390/mmu_context.h
 *
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
        if (current->mm->context.alloc_pgste) {
                /*
                 * alloc_pgste indicates that any NEW context will be created
                 * with extended page tables. The old context is unchanged. The
                 * page table allocation and the page table operations will
                 * look at has_pgste to distinguish normal and extended page
                 * tables. The only way to create extended page tables is to
                 * set alloc_pgste and then create a new context (e.g. dup_mm).
                 * The page table allocation is called after init_new_context
                 * and if has_pgste is set, it will create extended page
                 * tables.
                 */
                mm->context.noexec = 0;
                mm->context.has_pgste = 1;
                mm->context.alloc_pgste = 1;
        } else {
                mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
                mm->context.has_pgste = 0;
                mm->context.alloc_pgste = 0;
        }
        mm->context.asce_limit = STACK_TOP_MAX;
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
}
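/*
 * As the comment above notes, the only way to obtain extended page
 * tables is to set alloc_pgste on the current context and then create
 * a new mm so that init_new_context() sees the flag. A minimal sketch
 * of that sequence (error handling of a real caller omitted):
 *
 *	current->mm->context.alloc_pgste = 1;
 *	new_mm = dup_mm(current);	// runs init_new_context(), which
 *					// sets has_pgste for the new mm
 *	current->mm->context.alloc_pgste = 0;
 *
 * This is the pattern used when a task is switched over to extended
 * page tables for KVM (cf. s390_enable_sie()).
 */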
#define destroy_context(mm)		do { } while (0)

#ifndef __s390x__
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
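/*
 * LCTL_OPCODE selects the control-register load instruction for the
 * build: "lctl" loads the 32-bit control registers of a 31-bit kernel,
 * "lctlg" the full 64-bit control registers under __s390x__. Both take
 * a first/last register pair and a memory operand, so a use such as
 *
 *	asm volatile(LCTL_OPCODE" 1,1,%0" : : "m" (asce));
 *
 * reloads exactly control register 1 from the given ASCE word on
 * either build (asce is just an illustrative operand here).
 */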
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
        pgd_t *pgd = mm->pgd;

        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
        if (user_mode != HOME_SPACE_MODE) {
                /* Load primary space page table origin. */
                pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
                S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
                asm volatile(LCTL_OPCODE" 1,1,%0\n"
                             : : "m" (S390_lowcore.user_exec_asce) );
        } else
                /* Load home space page table origin. */
                asm volatile(LCTL_OPCODE" 13,13,%0"
                             : : "m" (S390_lowcore.user_asce) );
        set_fs(current->thread.mm_segment);
}
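/*
 * update_mm() publishes the new address-space-control element (ASCE):
 * on s390, control register 1 holds the primary space ASCE and control
 * register 13 the home space ASCE, which is why the inline assembly
 * above reloads CR1 or CR13 depending on the address space user
 * processes execute in (user_mode). The final set_fs() re-establishes
 * the task's uaccess segment on top of the new page tables.
 */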
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        update_mm(next, tsk);
        atomic_dec(&prev->context.attach_count);
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
        atomic_inc(&next->context.attach_count);
        /* Check for TLBs not flushed yet */
        if (next->context.flush_mm)
                __tlb_flush_mm(next);
}
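/*
 * attach_count counts the CPUs that currently have the mm attached;
 * the flush code can use it to defer a TLB flush for a detached mm by
 * setting context.flush_mm instead of flushing, and switch_mm() above
 * completes such a pending flush before user space runs. A sketch of
 * the requesting side, with a hypothetical helper name:
 *
 *	static inline void tlb_flush_mm_deferred(struct mm_struct *mm)
 *	{
 *		if (atomic_read(&mm->context.attach_count) == 0)
 *			mm->context.flush_mm = 1;  // switch_mm() flushes
 *		else
 *			__tlb_flush_mm(mm);        // mm is live, flush now
 *	}
 */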
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
}

#endif /* __S390_MMU_CONTEXT_H */