/* MN10300 MMU context management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-m32r/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * This implements an algorithm that assigns TLB PIDs to processes, giving
 * each process selective access to the TLB and thus reducing the number of
 * TLB flushes required.
 *
 * Note, however, that the M32R algorithm is technically broken as it does not
 * handle version wrap-around, and could, theoretically, have a problem with a
 * very long lived program that sleeps long enough for the version number to
 * wrap all the way around so that its TLB mappings appear valid once again.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

#define MMU_CONTEXT_TLBPID_MASK    0x000000ffUL  /* TLB PID in the low byte */
#define MMU_CONTEXT_VERSION_MASK   0xffffff00UL  /* allocation version in the upper bytes */
#define MMU_CONTEXT_FIRST_VERSION  0x00000100UL  /* first valid version of a context */
#define MMU_NO_CONTEXT             0x00000000UL  /* no context allocated yet */
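/*
 * Purely for illustration: the context word packs both fields defined above.
 * The low byte holds the per-CPU TLB PID and the upper 24 bits hold the
 * allocation version, so a value such as 0x00000305 would denote TLB PID 0x05
 * handed out under version 0x300, and it only remains current while this
 * CPU's mmu_context_cache[] entry still carries version 0x300.
 */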
extern unsigned long mmu_context_cache[NR_CPUS];

#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])

#define enter_lazy_tlb(mm, tsk) do {} while (0)
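/*
 * Record (or test and record) that an mm has been run on a CPU.  On SMP this
 * is tracked in cpu_vm_mask; on UP there is only one CPU, so cpu_ran_vm() is
 * a no-op and cpu_maybe_ran_vm() simply reports that the mm has already run
 * here.
 */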
#ifdef CONFIG_SMP
#define cpu_ran_vm(cpu, task) \
        cpu_set((cpu), (task)->cpu_vm_mask)
#define cpu_maybe_ran_vm(cpu, task) \
        cpu_test_and_set((cpu), (task)->cpu_vm_mask)
#else
#define cpu_ran_vm(cpu, task)       do {} while (0)
#define cpu_maybe_ran_vm(cpu, task) true
#endif /* CONFIG_SMP */
/*
 * allocate an MMU context
 */
static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
{
        unsigned long *pmc = &mmu_context_cache[smp_processor_id()];
        unsigned long mc = ++(*pmc);

        if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
                /* we exhausted the TLB PIDs of this version on this CPU, so
                 * we flush this CPU's TLB in its entirety and start a new
                 * cycle */
                flush_tlb_all();

                /* fix the TLB version if needed (we avoid version #0 so as to
                 * distinguish MMU_NO_CONTEXT) */
                if (!mc)
                        *pmc = mc = MMU_CONTEXT_FIRST_VERSION;
        }
        mm_context(mm) = mc;
        return mc;
}
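/*
 * Worked example: if this CPU's mmu_context_cache entry stands at 0x000001ff,
 * the increment above takes it to 0x00000200; the PID byte has wrapped to
 * zero, so the entire TLB is flushed and version 0x200 begins.  Should the
 * full 32-bit counter ever wrap to zero, it is bumped to
 * MMU_CONTEXT_FIRST_VERSION so that a live context can never be confused with
 * MMU_NO_CONTEXT.
 */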
/*
 * get an MMU context if one is needed
 */
static inline unsigned long get_mmu_context(struct mm_struct *mm)
{
        unsigned long mc = MMU_NO_CONTEXT, cache;

        if (mm) {
                cache = mmu_context_cache[smp_processor_id()];
                mc = mm_context(mm);

                /* if we have an old version of the context, replace it */
                if ((mc ^ cache) & MMU_CONTEXT_VERSION_MASK)
                        mc = allocate_mmu_context(mm);
        }
        return mc;
}
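/*
 * The XOR in get_mmu_context() is the staleness test: it is non-zero exactly
 * when the version bits recorded in the mm differ from the version currently
 * held in this CPU's mmu_context_cache entry, i.e. when the TLB has been
 * flushed in its entirety (or the mm has never run on this CPU) since the
 * context was handed out, in which case a fresh context is allocated.
 */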
/*
 * initialise the context related info for a new mm_struct instance
 */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        int num_cpus = NR_CPUS, i;

        for (i = 0; i < num_cpus; i++)
                mm->context.tlbpid[i] = MMU_NO_CONTEXT;
        return 0;
}
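/*
 * Because live versions never use zero (see MMU_CONTEXT_FIRST_VERSION), a
 * slot initialised to MMU_NO_CONTEXT always looks stale to get_mmu_context(),
 * so the first activation of a new mm on any CPU allocates a real TLB PID.
 */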
/*
 * destroy context related info for an mm_struct that is about to be put to
 * rest
 */
#define destroy_context(mm) do { } while (0)

/*
 * after we have set current->mm to a new value, this activates the context
 * for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm, int cpu)
{
        PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}
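/*
 * Only the 8-bit PID is programmed into the PIDR register; the version bits
 * are purely software bookkeeping and are never seen by the hardware.
 */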
/*
 * change between virtual memory sets
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        if (prev != next) {
                cpu_ran_vm(cpu, next);
                activate_context(next, cpu);
                PTBR = (unsigned long) next->pgd;
        } else if (!cpu_maybe_ran_vm(cpu, next)) {
                activate_context(next, cpu);
        }
}
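/*
 * Two cases are handled above: switching to a different mm records that this
 * CPU has run it, loads its (possibly freshly allocated) TLB PID into PIDR
 * and points PTBR at the new page tables, whereas "switching" to the same mm
 * only reloads the PID if this CPU has not run the mm before, which keeps the
 * common same-mm case cheap.
 */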
#define deactivate_mm(tsk, mm) do {} while (0)
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)

#endif /* _ASM_MMU_CONTEXT_H */