  1. /*
  2. * linux/arch/arm/mm/mmu.c
  3. *
  4. * Copyright (C) 1995-2005 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/module.h>
  11. #include <linux/kernel.h>
  12. #include <linux/errno.h>
  13. #include <linux/init.h>
  14. #include <linux/mman.h>
  15. #include <linux/nodemask.h>
  16. #include <linux/memblock.h>
  17. #include <linux/fs.h>
  18. #include <linux/vmalloc.h>
  19. #include <linux/sizes.h>
  20. #include <asm/cp15.h>
  21. #include <asm/cputype.h>
  22. #include <asm/sections.h>
  23. #include <asm/cachetype.h>
  24. #include <asm/setup.h>
  25. #include <asm/smp_plat.h>
  26. #include <asm/tlb.h>
  27. #include <asm/highmem.h>
  28. #include <asm/system_info.h>
  29. #include <asm/traps.h>
  30. #include <asm/mach/arch.h>
  31. #include <asm/mach/map.h>
  32. #include "mm.h"
  33. /*
  34. * empty_zero_page is a special page that is used for
  35. * zero-initialized data and COW.
  36. */
  37. struct page *empty_zero_page;
  38. EXPORT_SYMBOL(empty_zero_page);
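/*
 * Illustrative note (an assumption about code outside this file): the
 * architecture exposes this page through the ZERO_PAGE() helper, roughly
 *
 *	extern struct page *empty_zero_page;
 *	#define ZERO_PAGE(vaddr)	(empty_zero_page)
 *
 * so a read fault on a never-written anonymous page can map this single
 * zero-filled page copy-on-write instead of allocating real memory.
 */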
  39. /*
  40. * The pmd table for the upper-most set of pages.
  41. */
  42. pmd_t *top_pmd;
  43. #define CPOLICY_UNCACHED 0
  44. #define CPOLICY_BUFFERED 1
  45. #define CPOLICY_WRITETHROUGH 2
  46. #define CPOLICY_WRITEBACK 3
  47. #define CPOLICY_WRITEALLOC 4
  48. static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
  49. static unsigned int ecc_mask __initdata = 0;
  50. pgprot_t pgprot_user;
  51. pgprot_t pgprot_kernel;
  52. EXPORT_SYMBOL(pgprot_user);
  53. EXPORT_SYMBOL(pgprot_kernel);
  54. struct cachepolicy {
  55. const char policy[16];
  56. unsigned int cr_mask;
  57. pmdval_t pmd;
  58. pteval_t pte;
  59. };
  60. static struct cachepolicy cache_policies[] __initdata = {
  61. {
  62. .policy = "uncached",
  63. .cr_mask = CR_W|CR_C,
  64. .pmd = PMD_SECT_UNCACHED,
  65. .pte = L_PTE_MT_UNCACHED,
  66. }, {
  67. .policy = "buffered",
  68. .cr_mask = CR_C,
  69. .pmd = PMD_SECT_BUFFERED,
  70. .pte = L_PTE_MT_BUFFERABLE,
  71. }, {
  72. .policy = "writethrough",
  73. .cr_mask = 0,
  74. .pmd = PMD_SECT_WT,
  75. .pte = L_PTE_MT_WRITETHROUGH,
  76. }, {
  77. .policy = "writeback",
  78. .cr_mask = 0,
  79. .pmd = PMD_SECT_WB,
  80. .pte = L_PTE_MT_WRITEBACK,
  81. }, {
  82. .policy = "writealloc",
  83. .cr_mask = 0,
  84. .pmd = PMD_SECT_WBWA,
  85. .pte = L_PTE_MT_WRITEALLOC,
  86. }
  87. };
  88. /*
  89. * These are useful for identifying cache coherency
  90. * problems by allowing the cache or the cache and
  91. * writebuffer to be turned off. (Note: the write
  92. * buffer should not be on and the cache off).
  93. */
  94. static int __init early_cachepolicy(char *p)
  95. {
  96. int i;
  97. for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
  98. int len = strlen(cache_policies[i].policy);
  99. if (memcmp(p, cache_policies[i].policy, len) == 0) {
  100. cachepolicy = i;
  101. cr_alignment &= ~cache_policies[i].cr_mask;
  102. cr_no_alignment &= ~cache_policies[i].cr_mask;
  103. break;
  104. }
  105. }
  106. if (i == ARRAY_SIZE(cache_policies))
  107. printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
  108. /*
  109. * This restriction is partly to do with the way we boot; it is
  110. * unpredictable to have memory mapped using two different sets of
111. * memory attributes (shared, type, and cache attribs). We cannot
112. * change these attributes once the initial assembly has set up the
  113. * page tables.
  114. */
  115. if (cpu_architecture() >= CPU_ARCH_ARMv6) {
  116. printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
  117. cachepolicy = CPOLICY_WRITEBACK;
  118. }
  119. flush_cache_all();
  120. set_cr(cr_alignment);
  121. return 0;
  122. }
  123. early_param("cachepolicy", early_cachepolicy);
  124. static int __init early_nocache(char *__unused)
  125. {
  126. char *p = "buffered";
  127. printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
  128. early_cachepolicy(p);
  129. return 0;
  130. }
  131. early_param("nocache", early_nocache);
  132. static int __init early_nowrite(char *__unused)
  133. {
  134. char *p = "uncached";
  135. printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
  136. early_cachepolicy(p);
  137. return 0;
  138. }
  139. early_param("nowb", early_nowrite);
  140. #ifndef CONFIG_ARM_LPAE
  141. static int __init early_ecc(char *p)
  142. {
  143. if (memcmp(p, "on", 2) == 0)
  144. ecc_mask = PMD_PROTECTION;
  145. else if (memcmp(p, "off", 3) == 0)
  146. ecc_mask = 0;
  147. return 0;
  148. }
  149. early_param("ecc", early_ecc);
  150. #endif
  151. static int __init noalign_setup(char *__unused)
  152. {
  153. cr_alignment &= ~CR_A;
  154. cr_no_alignment &= ~CR_A;
  155. set_cr(cr_alignment);
  156. return 1;
  157. }
  158. __setup("noalign", noalign_setup);
  159. #ifndef CONFIG_SMP
  160. void adjust_cr(unsigned long mask, unsigned long set)
  161. {
  162. unsigned long flags;
  163. mask &= ~CR_A;
  164. set &= mask;
  165. local_irq_save(flags);
  166. cr_no_alignment = (cr_no_alignment & ~mask) | set;
  167. cr_alignment = (cr_alignment & ~mask) | set;
  168. set_cr((get_cr() & ~mask) | set);
  169. local_irq_restore(flags);
  170. }
  171. #endif
  172. #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
  173. #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
  174. static struct mem_type mem_types[] = {
  175. [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
  176. .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
  177. L_PTE_SHARED,
  178. .prot_l1 = PMD_TYPE_TABLE,
  179. .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
  180. .domain = DOMAIN_IO,
  181. },
  182. [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
  183. .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
  184. .prot_l1 = PMD_TYPE_TABLE,
  185. .prot_sect = PROT_SECT_DEVICE,
  186. .domain = DOMAIN_IO,
  187. },
  188. [MT_DEVICE_CACHED] = { /* ioremap_cached */
  189. .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
  190. .prot_l1 = PMD_TYPE_TABLE,
  191. .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
  192. .domain = DOMAIN_IO,
  193. },
  194. [MT_DEVICE_WC] = { /* ioremap_wc */
  195. .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
  196. .prot_l1 = PMD_TYPE_TABLE,
  197. .prot_sect = PROT_SECT_DEVICE,
  198. .domain = DOMAIN_IO,
  199. },
  200. [MT_UNCACHED] = {
  201. .prot_pte = PROT_PTE_DEVICE,
  202. .prot_l1 = PMD_TYPE_TABLE,
  203. .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
  204. .domain = DOMAIN_IO,
  205. },
  206. [MT_CACHECLEAN] = {
  207. .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
  208. .domain = DOMAIN_KERNEL,
  209. },
  210. #ifndef CONFIG_ARM_LPAE
  211. [MT_MINICLEAN] = {
  212. .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
  213. .domain = DOMAIN_KERNEL,
  214. },
  215. #endif
  216. [MT_LOW_VECTORS] = {
  217. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
  218. L_PTE_RDONLY,
  219. .prot_l1 = PMD_TYPE_TABLE,
  220. .domain = DOMAIN_USER,
  221. },
  222. [MT_HIGH_VECTORS] = {
  223. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
  224. L_PTE_USER | L_PTE_RDONLY,
  225. .prot_l1 = PMD_TYPE_TABLE,
  226. .domain = DOMAIN_USER,
  227. },
  228. [MT_MEMORY] = {
  229. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
  230. .prot_l1 = PMD_TYPE_TABLE,
  231. .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
  232. .domain = DOMAIN_KERNEL,
  233. },
  234. [MT_ROM] = {
  235. .prot_sect = PMD_TYPE_SECT,
  236. .domain = DOMAIN_KERNEL,
  237. },
  238. [MT_MEMORY_NONCACHED] = {
  239. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
  240. L_PTE_MT_BUFFERABLE,
  241. .prot_l1 = PMD_TYPE_TABLE,
  242. .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
  243. .domain = DOMAIN_KERNEL,
  244. },
  245. [MT_MEMORY_DTCM] = {
  246. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
  247. L_PTE_XN,
  248. .prot_l1 = PMD_TYPE_TABLE,
  249. .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
  250. .domain = DOMAIN_KERNEL,
  251. },
  252. [MT_MEMORY_ITCM] = {
  253. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
  254. .prot_l1 = PMD_TYPE_TABLE,
  255. .domain = DOMAIN_KERNEL,
  256. },
  257. [MT_MEMORY_SO] = {
  258. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
  259. L_PTE_MT_UNCACHED,
  260. .prot_l1 = PMD_TYPE_TABLE,
  261. .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
  262. PMD_SECT_UNCACHED | PMD_SECT_XN,
  263. .domain = DOMAIN_KERNEL,
  264. },
  265. [MT_MEMORY_DMA_READY] = {
  266. .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
  267. .prot_l1 = PMD_TYPE_TABLE,
  268. .domain = DOMAIN_KERNEL,
  269. },
  270. };
  271. const struct mem_type *get_mem_type(unsigned int type)
  272. {
  273. return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
  274. }
  275. EXPORT_SYMBOL(get_mem_type);
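/*
 * Illustrative sketch (assumed caller, not from this file): mapping code in
 * other files can look a type up and build page protections from its fields,
 * for example an ioremap-style helper:
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE_WC);
 *	pgprot_t prot;
 *
 *	if (!mt)
 *		return NULL;			// unknown type, fail gracefully
 *	prot = __pgprot(mt->prot_pte);		// pte bits for this mapping
 *
 * Returning NULL for an out-of-range index is what lets such callers bail
 * out instead of indexing past the table.
 */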
  276. /*
  277. * Adjust the PMD section entries according to the CPU in use.
  278. */
  279. static void __init build_mem_type_table(void)
  280. {
  281. struct cachepolicy *cp;
  282. unsigned int cr = get_cr();
  283. pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
  284. int cpu_arch = cpu_architecture();
  285. int i;
  286. if (cpu_arch < CPU_ARCH_ARMv6) {
  287. #if defined(CONFIG_CPU_DCACHE_DISABLE)
  288. if (cachepolicy > CPOLICY_BUFFERED)
  289. cachepolicy = CPOLICY_BUFFERED;
  290. #elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
  291. if (cachepolicy > CPOLICY_WRITETHROUGH)
  292. cachepolicy = CPOLICY_WRITETHROUGH;
  293. #endif
  294. }
  295. if (cpu_arch < CPU_ARCH_ARMv5) {
  296. if (cachepolicy >= CPOLICY_WRITEALLOC)
  297. cachepolicy = CPOLICY_WRITEBACK;
  298. ecc_mask = 0;
  299. }
  300. if (is_smp())
  301. cachepolicy = CPOLICY_WRITEALLOC;
  302. /*
  303. * Strip out features not present on earlier architectures.
  304. * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
  305. * without extended page tables don't have the 'Shared' bit.
  306. */
  307. if (cpu_arch < CPU_ARCH_ARMv5)
  308. for (i = 0; i < ARRAY_SIZE(mem_types); i++)
  309. mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
  310. if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
  311. for (i = 0; i < ARRAY_SIZE(mem_types); i++)
  312. mem_types[i].prot_sect &= ~PMD_SECT_S;
  313. /*
314. * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
  315. * "update-able on write" bit on ARM610). However, Xscale and
  316. * Xscale3 require this bit to be cleared.
  317. */
  318. if (cpu_is_xscale() || cpu_is_xsc3()) {
  319. for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
  320. mem_types[i].prot_sect &= ~PMD_BIT4;
  321. mem_types[i].prot_l1 &= ~PMD_BIT4;
  322. }
  323. } else if (cpu_arch < CPU_ARCH_ARMv6) {
  324. for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
  325. if (mem_types[i].prot_l1)
  326. mem_types[i].prot_l1 |= PMD_BIT4;
  327. if (mem_types[i].prot_sect)
  328. mem_types[i].prot_sect |= PMD_BIT4;
  329. }
  330. }
  331. /*
  332. * Mark the device areas according to the CPU/architecture.
  333. */
  334. if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
  335. if (!cpu_is_xsc3()) {
  336. /*
  337. * Mark device regions on ARMv6+ as execute-never
  338. * to prevent speculative instruction fetches.
  339. */
  340. mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
  341. mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
  342. mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
  343. mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
  344. }
  345. if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
  346. /*
  347. * For ARMv7 with TEX remapping,
  348. * - shared device is SXCB=1100
  349. * - nonshared device is SXCB=0100
  350. * - write combine device mem is SXCB=0001
  351. * (Uncached Normal memory)
  352. */
  353. mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
  354. mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
  355. mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
  356. } else if (cpu_is_xsc3()) {
  357. /*
  358. * For Xscale3,
  359. * - shared device is TEXCB=00101
  360. * - nonshared device is TEXCB=01000
  361. * - write combine device mem is TEXCB=00100
  362. * (Inner/Outer Uncacheable in xsc3 parlance)
  363. */
  364. mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
  365. mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
  366. mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
  367. } else {
  368. /*
  369. * For ARMv6 and ARMv7 without TEX remapping,
  370. * - shared device is TEXCB=00001
  371. * - nonshared device is TEXCB=01000
  372. * - write combine device mem is TEXCB=00100
  373. * (Uncached Normal in ARMv6 parlance).
  374. */
  375. mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
  376. mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
  377. mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
  378. }
  379. } else {
  380. /*
  381. * On others, write combining is "Uncached/Buffered"
  382. */
  383. mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
  384. }
  385. /*
  386. * Now deal with the memory-type mappings
  387. */
  388. cp = &cache_policies[cachepolicy];
  389. vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
  390. /*
  391. * ARMv6 and above have extended page tables.
  392. */
  393. if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
  394. #ifndef CONFIG_ARM_LPAE
  395. /*
  396. * Mark cache clean areas and XIP ROM read only
  397. * from SVC mode and no access from userspace.
  398. */
  399. mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
  400. mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
  401. mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
  402. #endif
  403. if (is_smp()) {
  404. /*
  405. * Mark memory with the "shared" attribute
  406. * for SMP systems
  407. */
  408. user_pgprot |= L_PTE_SHARED;
  409. kern_pgprot |= L_PTE_SHARED;
  410. vecs_pgprot |= L_PTE_SHARED;
  411. mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
  412. mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
  413. mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
  414. mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
  415. mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
  416. mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
  417. mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
  418. mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
  419. mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
  420. }
  421. }
  422. /*
  423. * Non-cacheable Normal - intended for memory areas that must
  424. * not cause dirty cache line writebacks when used
  425. */
  426. if (cpu_arch >= CPU_ARCH_ARMv6) {
  427. if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
  428. /* Non-cacheable Normal is XCB = 001 */
  429. mem_types[MT_MEMORY_NONCACHED].prot_sect |=
  430. PMD_SECT_BUFFERED;
  431. } else {
  432. /* For both ARMv6 and non-TEX-remapping ARMv7 */
  433. mem_types[MT_MEMORY_NONCACHED].prot_sect |=
  434. PMD_SECT_TEX(1);
  435. }
  436. } else {
  437. mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
  438. }
  439. #ifdef CONFIG_ARM_LPAE
  440. /*
  441. * Do not generate access flag faults for the kernel mappings.
  442. */
  443. for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
  444. mem_types[i].prot_pte |= PTE_EXT_AF;
  445. if (mem_types[i].prot_sect)
  446. mem_types[i].prot_sect |= PMD_SECT_AF;
  447. }
  448. kern_pgprot |= PTE_EXT_AF;
  449. vecs_pgprot |= PTE_EXT_AF;
  450. #endif
  451. for (i = 0; i < 16; i++) {
  452. unsigned long v = pgprot_val(protection_map[i]);
  453. protection_map[i] = __pgprot(v | user_pgprot);
  454. }
  455. mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
  456. mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
  457. pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
  458. pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
  459. L_PTE_DIRTY | kern_pgprot);
  460. mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
  461. mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
  462. mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
  463. mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
  464. mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
  465. mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
  466. mem_types[MT_ROM].prot_sect |= cp->pmd;
  467. switch (cp->pmd) {
  468. case PMD_SECT_WT:
  469. mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
  470. break;
  471. case PMD_SECT_WB:
  472. case PMD_SECT_WBWA:
  473. mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
  474. break;
  475. }
  476. printk("Memory policy: ECC %sabled, Data cache %s\n",
  477. ecc_mask ? "en" : "dis", cp->policy);
  478. for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
  479. struct mem_type *t = &mem_types[i];
  480. if (t->prot_l1)
  481. t->prot_l1 |= PMD_DOMAIN(t->domain);
  482. if (t->prot_sect)
  483. t->prot_sect |= PMD_DOMAIN(t->domain);
  484. }
  485. }
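/*
 * Worked example (assumed configuration, illustration only): on an ARMv7 SMP
 * system is_smp() forces the writealloc policy, so after this function runs
 * MT_MEMORY sections carry roughly
 *
 *	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S | PMD_SECT_WBWA
 *		| PMD_DOMAIN(DOMAIN_KERNEL)
 *
 * and pgprot_kernel is L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 * L_PTE_MT_WRITEALLOC | L_PTE_SHARED. The exact values also depend on the
 * "ecc=" parameter and on the CR_XP/CR_TRE bits in the control register.
 */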
  486. #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
  487. pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  488. unsigned long size, pgprot_t vma_prot)
  489. {
  490. if (!pfn_valid(pfn))
  491. return pgprot_noncached(vma_prot);
  492. else if (file->f_flags & O_SYNC)
  493. return pgprot_writecombine(vma_prot);
  494. return vma_prot;
  495. }
  496. EXPORT_SYMBOL(phys_mem_access_prot);
  497. #endif
  498. #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
  499. static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
  500. {
  501. void *ptr = __va(memblock_alloc(sz, align));
  502. memset(ptr, 0, sz);
  503. return ptr;
  504. }
  505. static void __init *early_alloc(unsigned long sz)
  506. {
  507. return early_alloc_aligned(sz, sz);
  508. }
  509. static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
  510. {
  511. if (pmd_none(*pmd)) {
  512. pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
  513. __pmd_populate(pmd, __pa(pte), prot);
  514. }
  515. BUG_ON(pmd_bad(*pmd));
  516. return pte_offset_kernel(pmd, addr);
  517. }
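/*
 * Background note (not in the original comments): on classic 2-level ARM,
 * PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE is a single 4KB page holding the 512
 * "Linux" ptes followed by the 512 hardware pte entries (two 256-entry L2
 * tables back to back), so one early_alloc() provides both views and
 * __pmd_populate() points the two hardware L1 entries at the hardware half.
 */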
  518. static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
  519. unsigned long end, unsigned long pfn,
  520. const struct mem_type *type)
  521. {
  522. pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
  523. do {
  524. set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
  525. pfn++;
  526. } while (pte++, addr += PAGE_SIZE, addr != end);
  527. }
  528. static void __init alloc_init_section(pud_t *pud, unsigned long addr,
  529. unsigned long end, phys_addr_t phys,
  530. const struct mem_type *type)
  531. {
  532. pmd_t *pmd = pmd_offset(pud, addr);
  533. /*
  534. * Try a section mapping - end, addr and phys must all be aligned
  535. * to a section boundary. Note that PMDs refer to the individual
  536. * L1 entries, whereas PGDs refer to a group of L1 entries making
  537. * up one logical pointer to an L2 table.
  538. */
  539. if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
  540. pmd_t *p = pmd;
  541. #ifndef CONFIG_ARM_LPAE
  542. if (addr & SECTION_SIZE)
  543. pmd++;
  544. #endif
  545. do {
  546. *pmd = __pmd(phys | type->prot_sect);
  547. phys += SECTION_SIZE;
  548. } while (pmd++, addr += SECTION_SIZE, addr != end);
  549. flush_pmd_entry(p);
  550. } else {
  551. /*
  552. * No need to loop; pte's aren't interested in the
  553. * individual L1 entries.
  554. */
  555. alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
  556. }
  557. }
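/*
 * Worked example (assumed addresses): a create_mapping() request for 3MB at
 * virtual 0xf0000000 backed by physical 0x40000000 is section-aligned on all
 * counts, so the loop above writes three 1MB section entries straight into
 * the L1 table. A 16KB request at the same addresses is not, so it falls
 * through to alloc_init_pte(), which allocates an L2 table and fills four
 * individual ptes instead.
 */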
  558. static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
  559. unsigned long end, unsigned long phys, const struct mem_type *type)
  560. {
  561. pud_t *pud = pud_offset(pgd, addr);
  562. unsigned long next;
  563. do {
  564. next = pud_addr_end(addr, end);
  565. alloc_init_section(pud, addr, next, phys, type);
  566. phys += next - addr;
  567. } while (pud++, addr = next, addr != end);
  568. }
  569. #ifndef CONFIG_ARM_LPAE
  570. static void __init create_36bit_mapping(struct map_desc *md,
  571. const struct mem_type *type)
  572. {
  573. unsigned long addr, length, end;
  574. phys_addr_t phys;
  575. pgd_t *pgd;
  576. addr = md->virtual;
  577. phys = __pfn_to_phys(md->pfn);
  578. length = PAGE_ALIGN(md->length);
  579. if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
  580. printk(KERN_ERR "MM: CPU does not support supersection "
  581. "mapping for 0x%08llx at 0x%08lx\n",
  582. (long long)__pfn_to_phys((u64)md->pfn), addr);
  583. return;
  584. }
  585. /* N.B. ARMv6 supersections are only defined to work with domain 0.
  586. * Since domain assignments can in fact be arbitrary, the
587. * 'domain == 0' check below is required to ensure that ARMv6
  588. * supersections are only allocated for domain 0 regardless
  589. * of the actual domain assignments in use.
  590. */
  591. if (type->domain) {
  592. printk(KERN_ERR "MM: invalid domain in supersection "
  593. "mapping for 0x%08llx at 0x%08lx\n",
  594. (long long)__pfn_to_phys((u64)md->pfn), addr);
  595. return;
  596. }
  597. if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
  598. printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
  599. " at 0x%08lx invalid alignment\n",
  600. (long long)__pfn_to_phys((u64)md->pfn), addr);
  601. return;
  602. }
  603. /*
  604. * Shift bits [35:32] of address into bits [23:20] of PMD
  605. * (See ARMv6 spec).
  606. */
  607. phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
  608. pgd = pgd_offset_k(addr);
  609. end = addr + length;
  610. do {
  611. pud_t *pud = pud_offset(pgd, addr);
  612. pmd_t *pmd = pmd_offset(pud, addr);
  613. int i;
  614. for (i = 0; i < 16; i++)
  615. *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
  616. addr += SUPERSECTION_SIZE;
  617. phys += SUPERSECTION_SIZE;
  618. pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
  619. } while (addr != end);
  620. }
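/*
 * Worked example (assumed values): with md->pfn = 0x100000 the physical
 * address is 0x1_0000_0000, i.e. bit 32 set. The shift above moves address
 * bits [35:32] (here 0x1) into bits [23:20] of the section descriptor, and
 * the loop then writes 16 identical supersection entries for each 16MB
 * mapped, as the ARMv6 architecture requires.
 */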
  621. #endif /* !CONFIG_ARM_LPAE */
  622. /*
  623. * Create the page directory entries and any necessary
  624. * page tables for the mapping specified by `md'. We
  625. * are able to cope here with varying sizes and address
  626. * offsets, and we take full advantage of sections and
  627. * supersections.
  628. */
  629. static void __init create_mapping(struct map_desc *md)
  630. {
  631. unsigned long addr, length, end;
  632. phys_addr_t phys;
  633. const struct mem_type *type;
  634. pgd_t *pgd;
  635. if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
  636. printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
  637. " at 0x%08lx in user region\n",
  638. (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
  639. return;
  640. }
  641. if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
  642. md->virtual >= PAGE_OFFSET &&
  643. (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
  644. printk(KERN_WARNING "BUG: mapping for 0x%08llx"
  645. " at 0x%08lx out of vmalloc space\n",
  646. (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
  647. }
  648. type = &mem_types[md->type];
  649. #ifndef CONFIG_ARM_LPAE
  650. /*
  651. * Catch 36-bit addresses
  652. */
  653. if (md->pfn >= 0x100000) {
  654. create_36bit_mapping(md, type);
  655. return;
  656. }
  657. #endif
  658. addr = md->virtual & PAGE_MASK;
  659. phys = __pfn_to_phys(md->pfn);
  660. length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
  661. if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
  662. printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
  663. "be mapped using pages, ignoring.\n",
  664. (long long)__pfn_to_phys(md->pfn), addr);
  665. return;
  666. }
  667. pgd = pgd_offset_k(addr);
  668. end = addr + length;
  669. do {
  670. unsigned long next = pgd_addr_end(addr, end);
  671. alloc_init_pud(pgd, addr, next, phys, type);
  672. phys += next - addr;
  673. addr = next;
  674. } while (pgd++, addr != end);
  675. }
  676. /*
  677. * Create the architecture specific mappings
  678. */
  679. void __init iotable_init(struct map_desc *io_desc, int nr)
  680. {
  681. struct map_desc *md;
  682. struct vm_struct *vm;
  683. if (!nr)
  684. return;
  685. vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
  686. for (md = io_desc; nr; md++, nr--) {
  687. create_mapping(md);
  688. vm->addr = (void *)(md->virtual & PAGE_MASK);
  689. vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
  690. vm->phys_addr = __pfn_to_phys(md->pfn);
  691. vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
  692. vm->flags |= VM_ARM_MTYPE(md->type);
  693. vm->caller = iotable_init;
  694. vm_area_add_early(vm++);
  695. }
  696. }
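/*
 * Illustrative sketch (assumed board code, not from this file - the names
 * and addresses are made up): a machine's ->map_io() callback typically
 * hands this function a static table:
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init board_map_io(void)
 *	{
 *		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *	}
 */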
  697. #ifndef CONFIG_ARM_LPAE
  698. /*
  699. * The Linux PMD is made of two consecutive section entries covering 2MB
  700. * (see definition in include/asm/pgtable-2level.h). However a call to
  701. * create_mapping() may optimize static mappings by using individual
  702. * 1MB section mappings. This leaves the actual PMD potentially half
  703. * initialized if the top or bottom section entry isn't used, leaving it
  704. * open to problems if a subsequent ioremap() or vmalloc() tries to use
  705. * the virtual space left free by that unused section entry.
  706. *
  707. * Let's avoid the issue by inserting dummy vm entries covering the unused
  708. * PMD halves once the static mappings are in place.
  709. */
  710. static void __init pmd_empty_section_gap(unsigned long addr)
  711. {
  712. struct vm_struct *vm;
  713. vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
  714. vm->addr = (void *)addr;
  715. vm->size = SECTION_SIZE;
  716. vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
  717. vm->caller = pmd_empty_section_gap;
  718. vm_area_add_early(vm);
  719. }
  720. static void __init fill_pmd_gaps(void)
  721. {
  722. struct vm_struct *vm;
  723. unsigned long addr, next = 0;
  724. pmd_t *pmd;
  725. /* we're still single threaded hence no lock needed here */
  726. for (vm = vmlist; vm; vm = vm->next) {
  727. if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
  728. continue;
  729. addr = (unsigned long)vm->addr;
  730. if (addr < next)
  731. continue;
  732. /*
  733. * Check if this vm starts on an odd section boundary.
  734. * If so and the first section entry for this PMD is free
  735. * then we block the corresponding virtual address.
  736. */
  737. if ((addr & ~PMD_MASK) == SECTION_SIZE) {
  738. pmd = pmd_off_k(addr);
  739. if (pmd_none(*pmd))
  740. pmd_empty_section_gap(addr & PMD_MASK);
  741. }
  742. /*
  743. * Then check if this vm ends on an odd section boundary.
  744. * If so and the second section entry for this PMD is empty
  745. * then we block the corresponding virtual address.
  746. */
  747. addr += vm->size;
  748. if ((addr & ~PMD_MASK) == SECTION_SIZE) {
  749. pmd = pmd_off_k(addr) + 1;
  750. if (pmd_none(*pmd))
  751. pmd_empty_section_gap(addr);
  752. }
  753. /* no need to look at any vm entry until we hit the next PMD */
  754. next = (addr + PMD_SIZE - 1) & PMD_MASK;
  755. }
  756. }
  757. #else
  758. #define fill_pmd_gaps() do { } while (0)
  759. #endif
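/*
 * Worked example (assumed addresses): a single 1MB static mapping at
 * 0xfee00000 uses only the first section entry of the 2MB PMD covering
 * 0xfee00000-0xff000000. fill_pmd_gaps() then inserts a dummy vm entry at
 * 0xfef00000, so a later ioremap() or vmalloc() cannot be placed in the
 * unused half of that half-initialized PMD.
 */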
  760. static void * __initdata vmalloc_min =
  761. (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
  762. /*
  763. * vmalloc=size forces the vmalloc area to be exactly 'size'
  764. * bytes. This can be used to increase (or decrease) the vmalloc
  765. * area - the default is 240m.
  766. */
  767. static int __init early_vmalloc(char *arg)
  768. {
  769. unsigned long vmalloc_reserve = memparse(arg, NULL);
  770. if (vmalloc_reserve < SZ_16M) {
  771. vmalloc_reserve = SZ_16M;
  772. printk(KERN_WARNING
  773. "vmalloc area too small, limiting to %luMB\n",
  774. vmalloc_reserve >> 20);
  775. }
  776. if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
  777. vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
  778. printk(KERN_WARNING
  779. "vmalloc area is too big, limiting to %luMB\n",
  780. vmalloc_reserve >> 20);
  781. }
  782. vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
  783. return 0;
  784. }
  785. early_param("vmalloc", early_vmalloc);
  786. phys_addr_t arm_lowmem_limit __initdata = 0;
  787. void __init sanity_check_meminfo(void)
  788. {
  789. int i, j, highmem = 0;
  790. for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
  791. struct membank *bank = &meminfo.bank[j];
  792. *bank = meminfo.bank[i];
  793. if (bank->start > ULONG_MAX)
  794. highmem = 1;
  795. #ifdef CONFIG_HIGHMEM
  796. if (__va(bank->start) >= vmalloc_min ||
  797. __va(bank->start) < (void *)PAGE_OFFSET)
  798. highmem = 1;
  799. bank->highmem = highmem;
  800. /*
  801. * Split those memory banks which are partially overlapping
  802. * the vmalloc area greatly simplifying things later.
  803. */
  804. if (!highmem && __va(bank->start) < vmalloc_min &&
  805. bank->size > vmalloc_min - __va(bank->start)) {
  806. if (meminfo.nr_banks >= NR_BANKS) {
  807. printk(KERN_CRIT "NR_BANKS too low, "
  808. "ignoring high memory\n");
  809. } else {
  810. memmove(bank + 1, bank,
  811. (meminfo.nr_banks - i) * sizeof(*bank));
  812. meminfo.nr_banks++;
  813. i++;
  814. bank[1].size -= vmalloc_min - __va(bank->start);
  815. bank[1].start = __pa(vmalloc_min - 1) + 1;
  816. bank[1].highmem = highmem = 1;
  817. j++;
  818. }
  819. bank->size = vmalloc_min - __va(bank->start);
  820. }
  821. #else
  822. bank->highmem = highmem;
  823. /*
  824. * Highmem banks not allowed with !CONFIG_HIGHMEM.
  825. */
  826. if (highmem) {
  827. printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
  828. "(!CONFIG_HIGHMEM).\n",
  829. (unsigned long long)bank->start,
  830. (unsigned long long)bank->start + bank->size - 1);
  831. continue;
  832. }
  833. /*
  834. * Check whether this memory bank would entirely overlap
  835. * the vmalloc area.
  836. */
  837. if (__va(bank->start) >= vmalloc_min ||
  838. __va(bank->start) < (void *)PAGE_OFFSET) {
  839. printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
  840. "(vmalloc region overlap).\n",
  841. (unsigned long long)bank->start,
  842. (unsigned long long)bank->start + bank->size - 1);
  843. continue;
  844. }
  845. /*
  846. * Check whether this memory bank would partially overlap
  847. * the vmalloc area.
  848. */
  849. if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
  850. __va(bank->start + bank->size - 1) <= __va(bank->start)) {
  851. unsigned long newsize = vmalloc_min - __va(bank->start);
  852. printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
  853. "to -%.8llx (vmalloc region overlap).\n",
  854. (unsigned long long)bank->start,
  855. (unsigned long long)bank->start + bank->size - 1,
  856. (unsigned long long)bank->start + newsize - 1);
  857. bank->size = newsize;
  858. }
  859. #endif
  860. if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
  861. arm_lowmem_limit = bank->start + bank->size;
  862. j++;
  863. }
  864. #ifdef CONFIG_HIGHMEM
  865. if (highmem) {
  866. const char *reason = NULL;
  867. if (cache_is_vipt_aliasing()) {
  868. /*
  869. * Interactions between kmap and other mappings
  870. * make highmem support with aliasing VIPT caches
  871. * rather difficult.
  872. */
  873. reason = "with VIPT aliasing cache";
  874. }
  875. if (reason) {
  876. printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
  877. reason);
  878. while (j > 0 && meminfo.bank[j - 1].highmem)
  879. j--;
  880. }
  881. }
  882. #endif
  883. meminfo.nr_banks = j;
  884. high_memory = __va(arm_lowmem_limit - 1) + 1;
  885. memblock_set_current_limit(arm_lowmem_limit);
  886. }
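/*
 * Worked example (assumed values, CONFIG_HIGHMEM=y): with PAGE_OFFSET at
 * 0xc0000000 and the default vmalloc_min of 0xef800000, at most 760MB of RAM
 * can be mapped as lowmem. A single 1GB bank is therefore split by the code
 * above into a 760MB lowmem bank and a 264MB highmem bank, and
 * arm_lowmem_limit ends up at the physical address of the split point.
 */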
  887. static inline void prepare_page_table(void)
  888. {
  889. unsigned long addr;
  890. phys_addr_t end;
  891. /*
  892. * Clear out all the mappings below the kernel image.
  893. */
  894. for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
  895. pmd_clear(pmd_off_k(addr));
  896. #ifdef CONFIG_XIP_KERNEL
  897. /* The XIP kernel is mapped in the module area -- skip over it */
  898. addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
  899. #endif
  900. for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
  901. pmd_clear(pmd_off_k(addr));
  902. /*
  903. * Find the end of the first block of lowmem.
  904. */
  905. end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
  906. if (end >= arm_lowmem_limit)
  907. end = arm_lowmem_limit;
  908. /*
  909. * Clear out all the kernel space mappings, except for the first
  910. * memory bank, up to the vmalloc region.
  911. */
  912. for (addr = __phys_to_virt(end);
  913. addr < VMALLOC_START; addr += PMD_SIZE)
  914. pmd_clear(pmd_off_k(addr));
  915. }
  916. #ifdef CONFIG_ARM_LPAE
  917. /* the first page is reserved for pgd */
  918. #define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
  919. PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
  920. #else
  921. #define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
  922. #endif
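/*
 * For the classic 2-level configuration this works out to 16KB (assuming the
 * usual PTRS_PER_PGD of 2048 and an 8-byte pgd_t), i.e. the four pages the
 * boot assembly placed at swapper_pg_dir.
 */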
  923. /*
  924. * Reserve the special regions of memory
  925. */
  926. void __init arm_mm_memblock_reserve(void)
  927. {
  928. /*
  929. * Reserve the page tables. These are already in use,
  930. * and can only be in node 0.
  931. */
  932. memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
  933. #ifdef CONFIG_SA1111
  934. /*
  935. * Because of the SA1111 DMA bug, we want to preserve our
  936. * precious DMA-able memory...
  937. */
  938. memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
  939. #endif
  940. }
  941. /*
  942. * Set up the device mappings. Since we clear out the page tables for all
  943. * mappings above VMALLOC_START, we will remove any debug device mappings.
  944. * This means you have to be careful how you debug this function, or any
  945. * called function. This means you can't use any function or debugging
  946. * method which may touch any device, otherwise the kernel _will_ crash.
  947. */
  948. static void __init devicemaps_init(struct machine_desc *mdesc)
  949. {
  950. struct map_desc map;
  951. unsigned long addr;
  952. void *vectors;
  953. /*
  954. * Allocate the vector page early.
  955. */
  956. vectors = early_alloc(PAGE_SIZE);
  957. early_trap_init(vectors);
  958. for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
  959. pmd_clear(pmd_off_k(addr));
  960. /*
  961. * Map the kernel if it is XIP.
962. * It is always first in the module area.
  963. */
  964. #ifdef CONFIG_XIP_KERNEL
  965. map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
  966. map.virtual = MODULES_VADDR;
  967. map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
  968. map.type = MT_ROM;
  969. create_mapping(&map);
  970. #endif
  971. /*
  972. * Map the cache flushing regions.
  973. */
  974. #ifdef FLUSH_BASE
  975. map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
  976. map.virtual = FLUSH_BASE;
  977. map.length = SZ_1M;
  978. map.type = MT_CACHECLEAN;
  979. create_mapping(&map);
  980. #endif
  981. #ifdef FLUSH_BASE_MINICACHE
  982. map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
  983. map.virtual = FLUSH_BASE_MINICACHE;
  984. map.length = SZ_1M;
  985. map.type = MT_MINICLEAN;
  986. create_mapping(&map);
  987. #endif
  988. /*
  989. * Create a mapping for the machine vectors at the high-vectors
  990. * location (0xffff0000). If we aren't using high-vectors, also
  991. * create a mapping at the low-vectors virtual address.
  992. */
  993. map.pfn = __phys_to_pfn(virt_to_phys(vectors));
  994. map.virtual = 0xffff0000;
  995. map.length = PAGE_SIZE;
  996. map.type = MT_HIGH_VECTORS;
  997. create_mapping(&map);
  998. if (!vectors_high()) {
  999. map.virtual = 0;
  1000. map.type = MT_LOW_VECTORS;
  1001. create_mapping(&map);
  1002. }
  1003. /*
  1004. * Ask the machine support to map in the statically mapped devices.
  1005. */
  1006. if (mdesc->map_io)
  1007. mdesc->map_io();
  1008. fill_pmd_gaps();
  1009. /*
  1010. * Finally flush the caches and tlb to ensure that we're in a
  1011. * consistent state wrt the writebuffer. This also ensures that
  1012. * any write-allocated cache lines in the vector page are written
  1013. * back. After this point, we can start to touch devices again.
  1014. */
  1015. local_flush_tlb_all();
  1016. flush_cache_all();
  1017. }
  1018. static void __init kmap_init(void)
  1019. {
  1020. #ifdef CONFIG_HIGHMEM
  1021. pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
  1022. PKMAP_BASE, _PAGE_KERNEL_TABLE);
  1023. #endif
  1024. }
  1025. static void __init map_lowmem(void)
  1026. {
  1027. struct memblock_region *reg;
  1028. /* Map all the lowmem memory banks. */
  1029. for_each_memblock(memory, reg) {
  1030. phys_addr_t start = reg->base;
  1031. phys_addr_t end = start + reg->size;
  1032. struct map_desc map;
  1033. if (end > arm_lowmem_limit)
  1034. end = arm_lowmem_limit;
  1035. if (start >= end)
  1036. break;
  1037. map.pfn = __phys_to_pfn(start);
  1038. map.virtual = __phys_to_virt(start);
  1039. map.length = end - start;
  1040. map.type = MT_MEMORY;
  1041. create_mapping(&map);
  1042. }
  1043. }
  1044. /*
  1045. * paging_init() sets up the page tables, initialises the zone memory
  1046. * maps, and sets up the zero page, bad page and bad page tables.
  1047. */
  1048. void __init paging_init(struct machine_desc *mdesc)
  1049. {
  1050. void *zero_page;
  1051. memblock_set_current_limit(arm_lowmem_limit);
  1052. build_mem_type_table();
  1053. prepare_page_table();
  1054. map_lowmem();
  1055. dma_contiguous_remap();
  1056. devicemaps_init(mdesc);
  1057. kmap_init();
  1058. top_pmd = pmd_off_k(0xffff0000);
  1059. /* allocate the zero page. */
  1060. zero_page = early_alloc(PAGE_SIZE);
  1061. bootmem_init();
  1062. empty_zero_page = virt_to_page(zero_page);
  1063. __flush_dcache_page(NULL, empty_zero_page);
  1064. }