mmu_context.h

/*
 * File:         include/asm-blackfin/mmu_context.h
 * Based on:
 * Author:
 *
 * Created:
 * Description:
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/gfp.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
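
/*
 * A single block of L1 scratchpad SRAM is shared as the stack area by all
 * tasks that keep their stack in L1; nr_l1stack_tasks counts its users and
 * current_l1_stack_save points at the save area of the task that currently
 * owns the block.
 */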
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void *);
extern void *l1sram_alloc_max(void *);
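
/* No lazy-TLB bookkeeping is needed here; this hook is a no-op. */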
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve(). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
        unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);

        mm->context.page_rwx_mask = (unsigned long *)p;
        memset(mm->context.page_rwx_mask, 0,
               page_mask_nelts * 3 * sizeof(long));
#endif
        return 0;
}
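
/* Drop one reference on the shared L1 stack area; free it with the last user. */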
static inline void free_l1stack(void)
{
        nr_l1stack_tasks--;
        if (nr_l1stack_tasks == 0)
                l1sram_free(l1_stack_base);
}
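
/*
 * Tear down the per-mm context: let go of the L1 stack save area, free any
 * SRAM allocations still on the mm's sram_list, and (with CONFIG_MPU) free
 * the page permission masks.
 */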
static inline void destroy_context(struct mm_struct *mm)
{
        struct sram_list_struct *tmp;

        if (current_l1_stack_save == mm->context.l1_stack_save)
                current_l1_stack_save = NULL;
        if (mm->context.l1_stack_save)
                free_l1stack();

        while ((tmp = mm->context.sram_list)) {
                mm->context.sram_list = tmp->next;
                sram_free(tmp->addr);
                kfree(tmp);
        }
#ifdef CONFIG_MPU
        if (current_rwx_mask == mm->context.page_rwx_mask)
                current_rwx_mask = NULL;
        free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
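
/*
 * Reserve the shared L1 stack area for another task.  The area is grabbed
 * from L1 scratchpad SRAM when the first task asks for it; returns the
 * usable length, or 0 if the area is unavailable or shorter than 'length'.
 */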
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
        if (nr_l1stack_tasks == 0) {
                l1_stack_base = l1sram_alloc_max(&l1_stack_len);
                if (!l1_stack_base)
                        return 0;
        }

        if (l1_stack_len < length) {
                if (nr_l1stack_tasks == 0)
                        l1sram_free(l1_stack_base);
                return 0;
        }
        *stack_base = (unsigned long)l1_stack_base;
        nr_l1stack_tasks++;
        return l1_stack_len;
}
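
/*
 * Make 'sp_base' this mm's L1 stack save area: spill the current owner's
 * stack contents to its own save area, then load this mm's saved stack
 * image into L1.
 */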
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
        if (current_l1_stack_save)
                memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
        mm->context.l1_stack_save = current_l1_stack_save = (void *)sp_base;
        memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
        return 1;
}

#define deactivate_mm(tsk,mm)   do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)
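
/*
 * On a context switch, point the MPU data CPLBs at the incoming mm's
 * permission masks and, if the incoming task keeps its stack in L1, swap
 * the L1 stack contents via the per-mm save areas.
 */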
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
                             struct task_struct *tsk)
{
        if (prev_mm == next_mm)
                return;
#ifdef CONFIG_MPU
        if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
                flush_switched_cplbs();
                set_mask_dcplbs(next_mm->context.page_rwx_mask);
        }
#endif

        /* L1 stack switching. */
        if (!next_mm->context.l1_stack_save)
                return;
        if (next_mm->context.l1_stack_save == current_l1_stack_save)
                return;
        if (current_l1_stack_save) {
                memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
        }
        current_l1_stack_save = next_mm->context.l1_stack_save;
        memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
}

#ifdef CONFIG_MPU
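/*
 * Record the VM_MAYREAD/VM_MAYWRITE/VM_MAYEXEC permissions of one 4 KB page
 * in the mm's three packed bitmaps (read, write, execute), one bit per page.
 */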
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
                                unsigned long flags)
{
        unsigned long *mask = mm->context.page_rwx_mask;
        unsigned long page = addr >> 12;
        unsigned long idx = page >> 5;
        unsigned long bit = 1 << (page & 31);

        if (flags & VM_MAYREAD)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
        mask += page_mask_nelts;
        if (flags & VM_MAYWRITE)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
        mask += page_mask_nelts;
        if (flags & VM_MAYEXEC)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
}
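
/* If this mm's masks are the ones currently loaded, reprogram the data CPLBs. */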
static inline void update_protections(struct mm_struct *mm)
{
        if (mm->context.page_rwx_mask == current_rwx_mask) {
                flush_switched_cplbs();
                set_mask_dcplbs(mm->context.page_rwx_mask);
        }
}
#endif /* CONFIG_MPU */

#endif /* __BLACKFIN_MMU_CONTEXT_H__ */