mmu_context.h

/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/gfp.h>
#include <linux/sched.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);
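
/*
 * Drop one reference to the shared L1 stack region; the SRAM backing it
 * is handed back to the allocator once the last task using it goes away.
 */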
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0)
		l1sram_free(l1_stack_base);
}
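
/*
 * Reserve the L1 stack region for another task.  The first user grabs the
 * largest free chunk of scratch SRAM; later users just take an extra
 * reference.  Returns the usable length, or 0 if the region could not be
 * allocated or is too small for the requested stack.
 */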
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}
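
/*
 * Make sp_base the live L1 stack: save the previous owner's contents back
 * to its shadow buffer, then copy the new owner's shadow into L1 SRAM.
 */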
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)
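
/*
 * Switch address-space state on a context switch.  With CONFIG_MPU this
 * flushes the switched CPLBs and installs the next mm's protection masks
 * when the outgoing mm's masks are the ones currently loaded; with
 * CONFIG_APP_STACK_L1 it swaps the application's L1 stack contents in and
 * out of the scratch SRAM bank.
 */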
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			     struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching. */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}
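
/*
 * Record the R/W/X permissions of one page in the mm's MPU bitmaps.
 * page_rwx_mask holds three consecutive bitmaps (read, write, execute),
 * each page_mask_nelts words long, with one bit per 4 KiB page.
 */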
#ifdef CONFIG_MPU
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page = addr >> 12;
	unsigned long idx = page >> 5;
	unsigned long bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}
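
/*
 * Push updated protection masks to the hardware if this mm is the one
 * currently loaded in this CPU's data CPLBs.
 */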
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#endif
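
/* No per-CPU lazy-TLB bookkeeping is needed on this architecture. */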
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve(). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}
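
/*
 * Tear down a context: detach any live L1 stack shadow, release the SRAM
 * allocations tracked on the mm, and free the MPU protection masks.
 */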
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = 0;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

#endif