/*
 * init_32.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

static __init void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}
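
/*
 * Note on the allocator above: alloc_low_page() is a plain bump
 * allocator over the physical window [table_start, table_top) that
 * find_early_table_space() reserves further down in this file;
 * table_end is the bump pointer. It is only used before the bootmem
 * allocator is up (after_init_bootmem == 0), and the window sits in
 * memory already mapped 1:1 by the boot page tables, so dereferencing
 * __va() of the new frame is safe here.
 */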
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        unsigned long phys;

        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page(&phys);
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                        (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else {
                        unsigned long phys;
                        page_table = (pte_t *)alloc_low_page(&phys);
                }

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
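
/*
 * In this file, page_table_range_init() backs the two virtual ranges
 * that need page-table structure but no eager mappings: the fixmap
 * range (see early_ioremap_page_table_range_init() below, whose
 * entries are filled in later by set_fixmap()) and, with
 * CONFIG_HIGHMEM, the PKMAP_BASE window used by kmap() (see
 * permanent_kmaps_init()).
 */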
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        /*
         * The first iteration sets up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration sets up the appropriate attributes
         * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB application note
         * which says:
         *
         *     "Software should not write to a paging-structure entry in
         *      a way that would change, for any linear address, both the
         *      page size and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * Update the direct mapping page count only in the
                 * first iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * Local global TLB flush, which flushes the previous
                 * mappings present in both the small and large page TLBs.
                 */
                __flush_tlb_all();

                /*
                 * The second iteration sets the actual desired PTE
                 * attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
}
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because
 * that area contains BIOS code and data regions used by X and dosemu
 * and similar apps. Access has to be given to non-kernel-RAM areas as
 * well; these contain the PCI mmio resources as well as potential
 * bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        /* pages 0..255 cover the first megabyte */
        if (pagenr < 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}
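
/*
 * Worked example: page 0x9F (physical 0x9F000, inside the legacy
 * BIOS/VGA megabyte) passes the first test; a page backing a PCI BAR,
 * say at 0xE0000000, fails page_is_ram() and passes the second; an
 * ordinary RAM page above 1 MB fails both tests and is refused.
 */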
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define permanent_kmaps_init(pgd_base)         do { } while (0)
# define set_highmem_pages_init()               do { } while (0)
#endif /* CONFIG_HIGHMEM */
void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}
/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}
#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */
void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                /* a present entry (bit 0) pointing at the zero page */
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}
int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                /* CPUID 0x80000001 EDX bit 20 is the NX feature flag */
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif
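
/*
 * NX is only reachable here with PAE page tables: the classic 32-bit
 * two-level format has no spare bit for execute-disable, while the
 * 64-bit PAE entries carry it in the top bit (_PAGE_NX). set_nx()
 * first checks that CPUID leaf 0x80000001 exists, then turns the
 * feature on via the NX-enable bit in the EFER MSR before advertising
 * _PAGE_NX in __supported_pte_mask.
 */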
/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);
/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB highmem pages "
                                "available, ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning only 4GB will be used. "
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE) {
                                printk(KERN_ERR "highmem size %uMB results in "
                                        "smaller than 64MB lowmem, ignoring it.\n",
                                        pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                " kernel!\n");
#endif
        }
}
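
/*
 * For a sense of scale (assuming the default 3G/1G split, i.e.
 * PAGE_OFFSET = 0xC0000000, and the 128 MB __VMALLOC_RESERVE defined
 * at the top of this file): MAXMEM works out to roughly 896 MB, so a
 * box with 2 GB of RAM ends up with max_low_pfn at about 896 MB and
 * the remaining ~1.1 GB managed as highmem.
 */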
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                          unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}
void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;

        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

        after_init_bootmem = 1;
}
static void __init find_early_table_space(unsigned long end, int use_pse)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += PAGE_ALIGN(ptes * sizeof(pte_t));

        /* for fixmap */
        tables += PAGE_SIZE * 2;

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                     tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}
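
/*
 * Rough arithmetic behind the sizing above (non-PAE, so 4-byte
 * entries, and use_pse clear): mapping end = 1 GB needs
 * 1 GB / 4 KB = 262144 PTEs, i.e. about 1 MB of PTE pages, plus a
 * page each for the (folded) PUD and PMD levels and two pages for
 * the fixmap. With use_pse set, only the partial large page at the
 * end needs PTEs, so the total collapses to a handful of pages.
 */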
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long start_pfn, end_pfn;
        unsigned long big_page_start;

#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        int use_pse = 0;
#else
        int use_pse = cpu_has_pse;
#endif
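
        /*
         * The range [start, end) is mapped in up to three passes below:
         * a head mapped with 4 KB pages (both to cover the first
         * 2/4 MB, see the MTRR note further down, and to reach the
         * first large page boundary), a large-page-aligned middle
         * mapped according to use_pse, and an unaligned tail mapped
         * with 4 KB pages again.
         */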
        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        big_page_start = PMD_SIZE;

        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
        } else {
                /* head is not big page aligned? */
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

        /* big page range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
                                             use_pse);

        /* tail is not big page aligned? */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        kernel_physical_mapping_init(pgd_base, start_pfn,
                                                     end_pfn, 0);
        }

        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        start_periodic_check_for_corruption();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,
                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END                       > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                     > VMALLOC_END);
        BUG_ON((unsigned long)high_memory        > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        save_pg_dir();
        zap_low_mappings();
}
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"  /* read the read-only fixmap page */
                "1:     movb %1, %0     \n"  /* write it back: faults if WP works */
                "       xorl %2, %2     \n"  /* reached only without a fault: flag = 0 */
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)                     /* flag starts as 1 = WP works */
                :"memory");

        return flag;
}
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark it not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above; now that
         * we are going to free part of that, we need to make it
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}
void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}