/*
 *  linux/include/asm-arm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifndef CONFIG_MMU

#define tlb_flush(tlb)	((void) tlb)

#else /* CONFIG_MMU */

#include <asm/glue.h>

#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 */
#undef _TLB
#undef MULTI_TLB

#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#ifdef CONFIG_CPU_TLB_V7
# define v7wbi_possible_flags	v6wbi_tlb_flags
# define v7wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};
/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags
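/*
 * Illustrative sketch of how the dispatch above resolves (not part of the
 * interface defined by this header).  With a single TLB type configured,
 * for example CONFIG_CPU_TLB_V4WBI only, __glue() pastes the _TLB prefix
 * onto the method name, so
 *
 *	local_flush_tlb_range(vma, start, end)
 *
 * expands to a direct call to v4wbi_flush_user_tlb_range(start, end, vma),
 * implemented in arch/arm/mm/tlb-v4wbi.S.  When several TLB types are built
 * into one kernel, MULTI_TLB is defined and the same call goes indirectly
 * through the cpu_tlb.flush_user_range function pointer selected when the
 * processor is probed at boot.
 */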
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(mm,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- mm	- mm_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr	- virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
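/*
 * Illustrative usage (an assumption about typical callers, not a contract
 * made by this header): generic memory-management code invalidates the TLB
 * after it has changed the page tables, for example
 *
 *	set_pte_at(mm, addr, ptep, pteval);
 *	flush_tlb_page(vma, addr);		single user page
 *
 * or, for a batch of pages,
 *
 *	flush_tlb_range(vma, start, end);	user address range
 *	flush_tlb_kernel_range(start, end);	kernel mappings (vmalloc etc.)
 *
 * The flush must follow the page table update so that no CPU can refill a
 * TLB entry from the stale translation.
 */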
/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal given the compiler constraints for the multi-CPU
 * case.  We could do better for the multi-CPU case if the compiler
 * implemented the "%?" method, but this has been discontinued due to too
 * many people getting it wrong.
 */
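/*
 * Sketch of the effect (illustrative, assuming a kernel built with only
 * CONFIG_CPU_TLB_V4WBI): possible_tlb_flags and always_tlb_flags both
 * reduce to v4wbi_tlb_flags at compile time, so in the functions below
 *
 *	if (tlb_flag(TLB_WB))		always set    - test removed, code kept
 *		dsb();
 *	if (tlb_flag(TLB_V3_FULL))	never possible - whole branch removed
 *		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
 *
 * and the read of __cpu_tlb_flags is optimised away entirely.  Only in the
 * MULTI_TLB case does the generated code test the flags at run time.
 */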
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 v4wb_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 v4wb_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)
#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
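/*
 * Illustrative use (an assumption about typical callers, not defined by
 * this header): page table setup code cleans each newly written PMD entry
 * out to RAM so that the hardware table walker sees it, for example when
 * mapping a section:
 *
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	*pmd = __pmd(phys | prot);
 *	flush_pmd_entry(pmd);
 *
 * clean_pmd_entry() is the counterpart for tearing entries down, where the
 * write buffer does not need to be drained.
 */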
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_WB))
		dsb();
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd"
			: : "r" (pmd) : "cc");
}

#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
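/*
 * A note on the split above (a sketch of the intent, not something defined
 * by this header): on uniprocessor builds each flush_tlb_*() call is simply
 * the local operation, e.g.
 *
 *	flush_tlb_page(vma, addr)   ->   local_flush_tlb_page(vma, addr)
 *
 * whereas on SMP the out-of-line functions declared here must also cause
 * the other CPUs to perform the corresponding local flush (in kernels of
 * this vintage that broadcast lives in arch/arm/kernel/smp.c).
 */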
/*
 * if PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);

#endif

#endif /* CONFIG_MMU */

#endif