
/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
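
/*
 * The policy is selected on the kernel command line, e.g.
 * "cachepolicy=writethrough".  On ARMv6 and later the handler above
 * forces the policy back to writeback, so in practice the option is
 * only useful on earlier CPUs when chasing cache coherency problems.
 */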

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
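
/*
 * A rough usage sketch (not from this file): mapping code looks up the
 * attributes for a given memory type before building the entries, e.g.
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE);
 *	if (mt)
 *		... use mt->prot_pte / mt->prot_sect / mt->domain ...
 *
 * create_mapping() below indexes mem_types[] directly in the same way.
 */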

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}
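
/*
 * Once build_mem_type_table() has run, pgprot_user, pgprot_kernel and the
 * mem_types[] entries carry the final attribute and domain bits; the
 * mapping code below consumes them as-is rather than re-deriving any
 * CPU-specific details.
 */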

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
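
/*
 * Note on the allocation size above: with the classic 2-level tables each
 * PTE page holds the "Linux" pte arrays followed by the hardware pte
 * tables (see arch/arm/include/asm/pgtable-2level.h), which is why
 * PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE is allocated rather than a bare
 * table's worth of entries.
 */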

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
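
/*
 * Typical caller: a machine's ->map_io() hook passes a static table of
 * map_desc entries.  A minimal sketch (the addresses below are made up
 * for illustration, not taken from any real platform):
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init board_map_io(void)
 *	{
 *		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *	}
 */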

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}
#else
#define fill_pmd_gaps() do { } while (0)
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
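
/*
 * Example: booting with "vmalloc=256M" moves vmalloc_min down so that
 * 256MB of address space (subject to the clamps above) is kept for
 * vmalloc/ioremap instead of the default 240MB.
 */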

phys_addr_t arm_lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
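
/*
 * For the classic (non-LPAE) 2-level tables this works out to the usual
 * 16KB swapper_pg_dir: PTRS_PER_PGD entries, each of which is a pair of
 * 32-bit section entries.  (Exact values depend on the pgtable headers.)
 */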

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	fill_pmd_gaps();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(arm_lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}