mmu_context.h

/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);
/* Drop one user of the shared L1 stack area; release the SRAM once no
 * tasks are using it any more. */
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0) {
		l1sram_free(l1_stack_base);
		l1_stack_base = NULL;
		l1_stack_len = 0;
	}
}
/* Reserve the shared L1 stack area, grabbing the largest free L1 SRAM
 * block on first use.  Returns the usable length, or 0 if the area is
 * smaller than the requested length. */
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}
/* Make sp_base the live owner of the L1 stack: save the previous owner's
 * contents back to its backing copy, then copy the new owner's backing
 * copy into L1. */
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}
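/*
 * Illustrative sketch (added for readability, not part of the original
 * header): how a caller might combine the two helpers above to give a
 * task an L1 SRAM stack.  The surrounding code and the origin of
 * "sp_base" are assumptions; the real caller lives elsewhere in the
 * Blackfin tree.
 *
 *	unsigned long l1_base;
 *	unsigned long l1_len = alloc_l1stack(stack_size, &l1_base);
 *
 *	if (l1_len) {
 *		// Point the task's stack into [l1_base, l1_base + l1_len),
 *		// keep a backing copy at sp_base in regular memory, and
 *		// load that copy into L1:
 *		activate_l1stack(mm, sp_base);
 *	}
 */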
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next)	switch_mm(prev, next, NULL)
/* Switch the MPU protection masks and the L1 stack contents (where those
 * features are configured) from prev_mm to next_mm. */
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			       struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching. */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}
#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags)	hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */
#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;
	lock_mm_switch(flags);
	__switch_mm(prev, next, tsk);
	unlock_mm_switch(flags);
}
/* Record the VM_READ/VM_WRITE/VM_EXEC permissions of one page in the
 * mm's page_rwx_mask bitmaps.  The async banks are folded in directly
 * after the end of RAM so one contiguous bitmap covers both regions. */
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page;
	unsigned long idx;
	unsigned long bit;

	if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
	else
		page = addr >> 12;
	idx = page >> 5;
	bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}
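/*
 * Worked example for the index math above (illustrative addition, not in
 * the original file): with 4 KiB pages, an address of 0x00403000 gives
 * page = 0x00403000 >> 12 = 1027, idx = 1027 >> 5 = 32 and
 * bit = 1 << (1027 & 31) = 1 << 3, so bit 3 of word 32 is set or cleared
 * in each bitmap.  The read, write and execute bitmaps are stored back to
 * back, each page_mask_nelts words long, which is why the mask pointer is
 * advanced by page_mask_nelts between the VM_READ, VM_WRITE and VM_EXEC
 * updates.
 */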
/* If this mm's protection masks are the ones currently loaded on this
 * CPU, flush the switched CPLBs and reload the data CPLB masks so the
 * new permissions take effect immediately. */
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#else /* !CONFIG_MPU */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	__switch_mm(prev, next, tsk);
}
#endif
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/* Called when creating a new context during fork() or execve(). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}
/* Tear down a context: detach any L1 stack it owned, free its SRAM
 * allocations, and release the MPU protection masks. */
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = 0;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
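/*
 * Note added for readability (not in the original header): the generic
 * kernel drives these hooks roughly as follows -- init_new_context() when
 * an mm is created at fork()/execve() time, switch_mm()/activate_mm() on
 * every context switch to a different mm, and destroy_context() once the
 * last reference to the mm is dropped.
 */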
#define ipipe_mm_switch_protect(flags)		\
	flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags)	\
	hard_local_irq_restore_cond(flags)

#endif