/*
 * include/asm-s390/mmu_context.h
 *
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>
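
/*
 * Set up the MMU context of a freshly created mm: reset the attach
 * count and pending-flush state, choose normal vs. extended (PGSTE)
 * page tables, and build the ASCE (address-space control element)
 * bits that update_mm() below loads into a control register.
 */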
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	if (current->mm && current->mm->context.alloc_pgste) {
		/*
		 * alloc_pgste indicates that any NEW context will be created
		 * with extended page tables. The old context is unchanged. The
		 * page table allocation and the page table operations will
		 * look at has_pgste to distinguish normal and extended page
		 * tables. The only way to create extended page tables is to
		 * set alloc_pgste and then create a new context (e.g. dup_mm).
		 * The page table allocation is called after init_new_context
		 * and if has_pgste is set, it will create extended page
		 * tables.
		 */
		mm->context.has_pgste = 1;
		mm->context.alloc_pgste = 1;
	} else {
		mm->context.has_pgste = 0;
		mm->context.alloc_pgste = 0;
	}
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}
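
/* Nothing mm-specific to tear down when an address space goes away. */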
#define destroy_context(mm)		do { } while (0)
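
/*
 * 31-bit kernels load the 32-bit control registers with "lctl";
 * 64-bit kernels need "lctlg" to load the full 64-bit registers.
 */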
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
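
/*
 * Make mm the current address space: publish its ASCE in the lowcore
 * and load it into CR1 (primary space) or CR13 (home space), depending
 * on the kernel's addressing-mode setup.
 */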
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	if (user_mode != HOME_SPACE_MODE) {
		/* Load primary space page table origin. */
		asm volatile(LCTL_OPCODE" 1,1,%0\n"
			     : : "m" (S390_lowcore.user_asce) );
	} else {
		/* Load home space page table origin. */
		asm volatile(LCTL_OPCODE" 13,13,%0"
			     : : "m" (S390_lowcore.user_asce) );
	}
	set_fs(current->thread.mm_segment);
}
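
/*
 * Context switch to a different mm: mark this CPU in the new mm's
 * cpumask, install its ASCE, rebalance the attach counts, and run any
 * TLB flush that is still pending for the new mm.
 */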
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	update_mm(next, tsk);
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	atomic_inc(&next->context.attach_count);
	/* Check for TLBs not flushed yet */
	if (next->context.flush_mm)
		__tlb_flush_mm(next);
}
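
/* No lazy-TLB bookkeeping is needed on s390; these hooks are no-ops. */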
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)
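
/* Activating a new mm (e.g. at exec) is just a switch for the current task. */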
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

#endif /* __S390_MMU_CONTEXT_H */