init_32.c

/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry.  In non-PAE builds the middle
 * layer is folded, so this effectively returns the pgd entry
 * itself.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
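
/*
 * For orientation: without PAE, i386 uses a two-level layout with
 * 1024-entry tables (PGDIR_SHIFT == 22), so PAGE_OFFSET (0xC0000000)
 * falls in pgd slot 0xC0000000 >> 22 == 768 and the pmd level is
 * folded away.  With PAE there are three levels (PGDIR_SHIFT == 30,
 * 512-entry pmd and pte tables), and the same address falls in pgd
 * slot 3.
 */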

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		/*
		 * With DEBUG_PAGEALLOC the kernel is mapped with 4k
		 * ptes, so many more page tables are needed; prefer
		 * ordinary bootmem here and fall back to low pages.
		 */
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
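
/*
 * Illustrative sketch (not called anywhere; valid only while bootmem
 * is still up, and the name is ours, not the kernel's): the two
 * helpers above compose into "give me the pte slot for this kernel
 * virtual address, allocating intermediate tables as needed".
 */
static inline pte_t *example_boot_pte_for(unsigned long vaddr)
{
	pgd_t *pgd = swapper_pg_dir + pgd_index(vaddr);
	pmd_t *pmd = one_md_table_init(pgd) + pmd_index(vaddr);

	one_page_table_init(pmd);		/* ensure the pte page exists */
	return pte_offset_kernel(pmd, vaddr);
}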

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (cpu_has_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
}
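
/*
 * Worked example (a sketch, not referenced by the boot path): how many
 * large-page pmd entries the loop above installs when PSE is available.
 * Each PSE mapping covers PTRS_PER_PTE small pages - 4MB without PAE,
 * 2MB with PAE - so 896MB of lowmem (max_low_pfn == 0x38000) takes
 * 0x38000 / 1024 == 224 entries without PAE, 448 with PAE.
 */
static inline unsigned long example_nr_large_pages(void)
{
	return (max_low_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
}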

/*
 * pfns 0x70000-0x7003f are the 256kB of physical memory starting at
 * 1792MB that the Pentium Pro RAM erratum makes unusable:
 */
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
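
/*
 * Note for readers: kmap_pte and kmap_prot are consumed by the atomic
 * kmap code, and pkmap_page_table by kmap() itself (mm/highmem.c),
 * which install ptes directly into the tables cached above rather
 * than walking the page tables on every mapping.
 */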

static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;

	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming single node and all memory that
 * has been added dynamically that would be
 * onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have a mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the
	 * zero page, if they haven't already been set up.
	 *
	 * In a normal native boot, we'll be running on a
	 * pagetable rooted in swapper_pg_dir, but not in PAE
	 * mode, so this will end up clobbering the mappings
	 * for the lower 24Mbytes of the address space,
	 * without affecting the kernel address space.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		set_pgd(&base[i],
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

	/* Make sure kernel address space is empty so that a pagetable
	   will be allocated for it. */
	memset(&base[USER_PTRS_PER_PGD], 0,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}
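
/*
 * To make the PAE identity-mapping trick above concrete: with PAE and
 * the default 3G/1G split, USER_PTRS_PER_PGD is 3 and pgd slot 3 is
 * the single 1GB entry mapping the kernel window at PAGE_OFFSET.
 * Copying it into slot 0 aliases the same physical low memory at
 * virtual address 0, which is exactly what a real-mode AP needs while
 * it switches to paging.
 */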

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE).  The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
 * Swap suspend & friends need this for resume because things like
 * the intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_HIBERNATION && !CONFIG_ACPI */
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif
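
/*
 * Sketch (illustrative only, and the helper name is ours): set_nx()
 * keys off CPUID leaf 0x80000001, where EDX bit 20 advertises NX, and
 * then sets EFER.NX, bit 11 of the EFER MSR.  Re-reading the MSR shows
 * whether the bit actually latched:
 */
#ifdef CONFIG_X86_PAE
static inline int example_nx_latched(void)
{
	unsigned int l, h;

	rdmsr(MSR_EFER, l, h);
	return !!(l & EFER_NX);		/* EFER_NX == 1 << 11 */
}
#endif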

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode.  It isn't supported on
 * 386s and also on some strange 486s (NexGen etc.).  All 586+ CPUs are
 * OK.  This used to involve black magic jumps to work around some
 * nasty CPU bugs, but fortunately the switch to using exceptions got
 * rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle.  SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work.  We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					      PTRS_PER_PMD*sizeof(pmd_t),
					      PTRS_PER_PMD*sizeof(pmd_t),
					      SLAB_PANIC,
					      pmd_ctor);
	}
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		".section __ex_table, \"a\"\n"
		"	.align 4	\n"
		"	.long 1b, 2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}
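
/*
 * How the test above works: %2 (flag) starts out as 1.  The movb at
 * label 1 stores back to the read-only FIX_WP_TEST page; if the CPU
 * honours WP in supervisor mode that store faults, the __ex_table
 * fixup resumes execution at label 2, the xorl is skipped and flag
 * stays 1.  If the store silently succeeds, the xorl clears flag to 0.
 */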

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
			size >> 10);

#ifdef CONFIG_CPA_DEBUG
		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
			start, start+size);
		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

		printk(KERN_INFO "Testing CPA: write protecting again\n");
		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
	}
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of that, we need to make it
	 * writable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif