mmu_context.h

/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);
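
/*
 * Tasks that use an L1 stack all share one block of L1 SRAM: the first user
 * grabs the largest free chunk via l1sram_alloc_max(), later users only bump
 * nr_l1stack_tasks, and the block is handed back to the SRAM allocator once
 * the last user goes away.
 */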
static inline void free_l1stack(void)
{
        nr_l1stack_tasks--;
        if (nr_l1stack_tasks == 0)
                l1sram_free(l1_stack_base);
}

static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
        if (nr_l1stack_tasks == 0) {
                l1_stack_base = l1sram_alloc_max(&l1_stack_len);
                if (!l1_stack_base)
                        return 0;
        }

        if (l1_stack_len < length) {
                if (nr_l1stack_tasks == 0)
                        l1sram_free(l1_stack_base);
                return 0;
        }
        *stack_base = (unsigned long)l1_stack_base;
        nr_l1stack_tasks++;
        return l1_stack_len;
}
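
/*
 * Make @sp_base the save area backing the L1 stack of @mm: spill the current
 * owner's live L1 contents back to its own save area, then fill L1 from the
 * new save area so the incoming task sees its own stack data.
 */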
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
        if (current_l1_stack_save)
                memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
        mm->context.l1_stack_save = current_l1_stack_save = (void *)sp_base;
        memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
        return 1;
}

#define deactivate_mm(tsk,mm)   do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)
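
/*
 * Core of the context switch.  With CONFIG_MPU, if the outgoing mm's
 * protection masks are the ones currently programmed on this CPU, the
 * switched CPLB entries are flushed and the data CPLB masks are pointed at
 * the incoming mm.  With CONFIG_APP_STACK_L1, the L1 stack contents are
 * swapped: the old owner's stack is saved to its save area and the new
 * owner's save area is copied into L1.
 */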
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
                               struct task_struct *tsk)
{
#ifdef CONFIG_MPU
        unsigned int cpu = smp_processor_id();
#endif
        if (prev_mm == next_mm)
                return;
#ifdef CONFIG_MPU
        if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
                flush_switched_cplbs(cpu);
                set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
        }
#endif

#ifdef CONFIG_APP_STACK_L1
        /* L1 stack switching. */
        if (!next_mm->context.l1_stack_save)
                return;
        if (next_mm->context.l1_stack_save == current_l1_stack_save)
                return;
        if (current_l1_stack_save) {
                memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
        }
        current_l1_stack_save = next_mm->context.l1_stack_save;
        memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}
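
/*
 * When the interrupt pipeline (I-pipe) is compiled in, the mm switch is
 * bracketed by the conditional hard-IRQ save/restore helpers; without
 * CONFIG_IPIPE these expand to no-ops and the usual context-switch locking
 * suffices.
 */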
#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)   flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags) hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)   do { (void)(flags); } while (0)
#define unlock_mm_switch(flags) do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */

#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned long flags;

        lock_mm_switch(flags);
        __switch_mm(prev, next, tsk);
        unlock_mm_switch(flags);
}
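
/*
 * Record the access rights of one 4KB page in the mm's software protection
 * masks.  The masks are three consecutive bitmaps of page_mask_nelts words
 * each (read, then write, then execute), one bit per page.  Pages in the
 * async (external) memory banks are folded in right after the end of SDRAM
 * so they get mask slots without reserving bits for the whole address hole.
 * Example: for addr 0x3000 with VM_READ | VM_WRITE, page = 3, idx = 0,
 * bit = 1 << 3; the bit is set in the read and write maps and cleared in
 * the execute map.
 */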
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
                                unsigned long flags)
{
        unsigned long *mask = mm->context.page_rwx_mask;
        unsigned long page;
        unsigned long idx;
        unsigned long bit;

        if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
                page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
        else
                page = addr >> 12;
        idx = page >> 5;
        bit = 1 << (page & 31);

        if (flags & VM_READ)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
        mask += page_mask_nelts;
        if (flags & VM_WRITE)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
        mask += page_mask_nelts;
        if (flags & VM_EXEC)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
}
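
/*
 * Called once the masks have been updated.  If this mm's masks are the ones
 * currently loaded on this CPU, flush the switched CPLB entries and reload
 * the data CPLB masks so the new permissions take effect right away.
 */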
static inline void update_protections(struct mm_struct *mm)
{
        unsigned int cpu = smp_processor_id();

        if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
                flush_switched_cplbs(cpu);
                set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
        }
}
#else /* !CONFIG_MPU */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        __switch_mm(prev, next, tsk);
}
#endif
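
/*
 * Blackfin has no conventional MMU/TLB (protection is done with software
 * managed CPLBs), so there is nothing to set up for lazy TLB switching.
 */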
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve(). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
        unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);

        if (!p)
                return -ENOMEM;
        mm->context.page_rwx_mask = (unsigned long *)p;
        memset(mm->context.page_rwx_mask, 0,
               page_mask_nelts * 3 * sizeof(long));
#endif
        return 0;
}
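
/*
 * Tear down the architecture-specific parts of an mm: detach and release the
 * L1 stack if this mm owned one, return any per-process SRAM allocations on
 * context.sram_list, and free the MPU protection masks (clearing the per-CPU
 * pointer if they are still the active set on this CPU).
 */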
static inline void destroy_context(struct mm_struct *mm)
{
        struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
        unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
        if (current_l1_stack_save == mm->context.l1_stack_save)
                current_l1_stack_save = NULL;
        if (mm->context.l1_stack_save)
                free_l1stack();
#endif

        while ((tmp = mm->context.sram_list)) {
                mm->context.sram_list = tmp->next;
                sram_free(tmp->addr);
                kfree(tmp);
        }
#ifdef CONFIG_MPU
        if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
                current_rwx_mask[cpu] = NULL;
        free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
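
/*
 * Hooks for kernels carrying the interrupt pipeline (I-pipe) patch; they
 * expand to the same conditional hard-IRQ save/restore helpers used by
 * lock_mm_switch()/unlock_mm_switch() above.
 */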
#define ipipe_mm_switch_protect(flags) \
        flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags) \
        hard_local_irq_restore_cond(flags)

#endif /* __BLACKFIN_MMU_CONTEXT_H__ */