mmu.c

/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

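/*
 * Example usage (illustrative, not part of mmu.c): the handlers above are
 * driven by the kernel command line.  For instance, booting with
 *
 *	cachepolicy=writethrough ecc=on noalign
 *
 * selects the write-through cache policy, sets the ECC protection bit in
 * first-level descriptors (non-LPAE only), and clears the alignment fault
 * bit in the control register.
 */
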
#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;
	set &= mask;

	local_irq_save(flags);
	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;
	set_cr((get_cr() & ~mask) | set);
	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain		= DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_BUFFERABLE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_UNCACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				  PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

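/*
 * Illustrative sketch (not part of mmu.c): how a caller elsewhere in the
 * ARM mm code might turn a memory-type index into page protection bits
 * via get_mem_type().  The helper name below is hypothetical.
 */
static inline pgprot_t example_prot_for_type(unsigned int mtype)
{
	const struct mem_type *type = get_mem_type(mtype);

	/* Fall back to strongly-ordered device memory for unknown indices. */
	if (!type)
		type = get_mem_type(MT_DEVICE);
	return __pgprot(type->prot_pte);
}
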
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif
		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B. ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);

		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}

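/*
 * Illustrative sketch (not part of mmu.c): a platform's ->map_io hook
 * usually describes its static device windows with a map_desc array and
 * hands it to iotable_init().  All names and addresses below are made up.
 */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,			/* hypothetical virtual base */
		.pfn		= __phys_to_pfn(0x10000000),	/* hypothetical physical base */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
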
static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

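/*
 * Example usage (illustrative): booting with "vmalloc=256M" asks for a
 * 256MB vmalloc area, moving the lowmem/vmalloc split 16MB lower than the
 * 240MB default.  early_vmalloc() clamps requests below 16MB and caps them
 * so that at least 32MB of lowmem remains above PAGE_OFFSET.
 */
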
phys_addr_t arm_lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area, greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}

#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(arm_lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}