init_32.c
/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
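
/* Default amount of kernel address space reserved for vmalloc: 128 MB. */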
unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                if (!page_table) {
                        page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                }

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, allocating them wherever they are
 * missing in the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
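
/*
 * Treat everything from PAGE_OFFSET up to __init_end as kernel text.
 * This is conservative - the range spans kernel data as well - which
 * errs on the side of keeping the whole early image executable.
 */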
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m = 0, pages_4k = 0;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;

                for (pmd_idx = 0;
                     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         *
                         * Don't use a large page for the first 2/4MB of memory
                         * because there are often fixed size MTRRs in there
                         * and overlapping MTRRs into large pages can cause
                         * slowdowns.
                         */
                        if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;
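
                                /*
                                 * addr2 is the last byte covered by this
                                 * large page; if either end of the range
                                 * overlaps kernel text, map the whole
                                 * page executable.
                                 */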
                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                max_pfn_mapped = pfn;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        for (pte_ofs = 0;
                             pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                set_pte(pte, pfn_pte(pfn, prot));
                        }
                        max_pfn_mapped = pfn;
                }
        }
        update_page_count(PG_LEVEL_2M, pages_2m);
        update_page_count(PG_LEVEL_4K, pages_4k);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well, as these
 * contain the PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

#ifdef CONFIG_HIGHMEM
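/*
 * kmap_pte caches the pte backing the first fixmap kmap slot; the
 * kmap_atomic() code indexes from this cached entry instead of walking
 * the page tables on every mapping.
 */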
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
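
/*
 * Release one highmem page to the buddy allocator: clear its reserved
 * bit, reset its refcount to one and free it, bumping the highmem page
 * counter.
 */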
static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);

        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init() do { } while (0)
# define permanent_kmaps_init(pgd_base) do { } while (0)
# define set_highmem_pages_init() do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
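
        /*
         * Let the paravirt backend account for 'base' as a live
         * pagetable page (a no-op on bare hardware).
         */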
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
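                /*
                 * In PAE mode, install a present entry that points at
                 * the all-zero page: _PAGE_PRESENT (the "1") plus
                 * empty_zero_page yields an empty but valid pmd table.
                 */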
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);
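
/*
 * Probe for NX support: CPUID leaf 0x80000001 must exist, and EDX
 * bit 20 advertises the No-Execute feature. If present (and not
 * disabled on the command line), enable it in the EFER MSR.
 */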
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
        pagetable_init();

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
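
        /*
         * Register the lowmem direct mapping and the vmalloc area
         * with /proc/kcore:
         */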
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START > VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        cpa_init();
        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
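/*
 * On 32-bit, hot-added memory lies above the direct mapping, so new
 * pages are handed to ZONE_HIGHMEM of the target node.
 */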
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
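/*
 * How the test works: 'flag' starts out as 1. We read a byte from the
 * read-only FIX_WP_TEST page and try to write it straight back. If WP
 * is honoured in supervisor mode, the write faults and the exception
 * table entry resumes at label 2, skipping the xorl - flag stays 1.
 * If the write silently succeeds, the xorl clears flag to 0.
 */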
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif
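
/*
 * Interface shared with the 64-bit side; on 32-bit this is simply a
 * straight wrapper around reserve_bootmem().
 */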
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}