/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#include "mm.h"
/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};
#ifdef CONFIG_CPU_CP15

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;
	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif
#else /* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warning("noalign kernel parameter not supported without cp15\n");
	return 1;
}
__setup("noalign", noalign_setup);

#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
};
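
/*
 * Return the mem_type descriptor for an MT_* index, or NULL if the
 * index is out of range.
 */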
const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
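
/*
 * Boot-time allocators: grab 'sz' bytes from memblock at the requested
 * alignment and return a zeroed lowmem virtual pointer.  Only usable
 * before the normal page allocator is up.
 */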
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}
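
/*
 * Make sure the PMD entry covering 'addr' points at an L2 page table,
 * allocating one from the early allocator if necessary, and return the
 * PTE slot for 'addr'.
 */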
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
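
/*
 * Fill in individual PTEs for [addr, end), starting at page frame 'pfn',
 * using the page-level protection bits of the given memory type.
 */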
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}
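
/*
 * Walk the PUD entries covering [addr, end) and hand each range to
 * alloc_init_section().  The PUD level is folded on ARM, so this
 * effectively iterates over the PGD entry passed in.
 */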
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
#ifndef CONFIG_ARM_LPAE
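/*
 * Map a region whose physical address needs more than 32 bits by using
 * 16MB supersections, which carry bits [35:32] of the physical address
 * in the section descriptor.  Only valid on ARMv6+ or XSC3 and for
 * domain 0 mappings.
 */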
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
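
/*
 * Register a vm_struct covering [addr, addr + size) early so that the
 * vmalloc/ioremap allocators keep away from this virtual range.
 */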
void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	vm_area_add_early(vm);
}
#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif
#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
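/*
 * Reserve a 2MB window at PCI_IO_VIRT_BASE for the fixed PCI i/o
 * mapping, unless a static mapping already covers that address.
 */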
static void __init pci_reserve_io(void)
{
	struct vm_struct *vm;
	unsigned long addr;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		addr = (unsigned long)vm->addr;
		addr &= ~(SZ_2M - 1);
		if (addr == PCI_IO_VIRT_BASE)
			return;
	}
	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LL
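/*
 * Map a single MT_DEVICE page for the low-level debug UART, using the
 * physical/virtual addresses reported by debug_ll_addr(), so DEBUG_LL
 * output keeps working after the boot-time mappings are rebuilt.
 */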
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	create_mapping(&map);
}
#endif
static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
phys_addr_t arm_lowmem_limit __initdata = 0;
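
/*
 * Walk the boot-time memory banks: mark or split off anything above the
 * lowmem/vmalloc boundary as highmem (or drop it if CONFIG_HIGHMEM is
 * not set), and record the resulting lowmem ceiling in arm_lowmem_limit.
 */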
void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}
#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}
/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	fill_pmd_gaps();

	/* Reserve fixed i/o space in VMALLOC region */
	pci_reserve_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}
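
/*
 * With CONFIG_HIGHMEM, pre-allocate the page table backing the
 * persistent kernel map (PKMAP) window used by kmap().
 */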
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
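
/*
 * Create the kernel's linear mapping: each memblock region below
 * arm_lowmem_limit is mapped at its __phys_to_virt() address with
 * MT_MEMORY attributes, using section mappings where alignment allows.
 */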
static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(arm_lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}