
/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                if (pmd_table != pmd_offset(pud, 0))
                        BUG();
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
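
/*
 * For reference (a sketch, not relied on by the code above): the two
 * layouts one_md_table_init() must cope with on i386 are roughly:
 *
 *      non-PAE: 1024 pgd entries, each covering 4MB; the pud and pmd
 *               levels are folded, so pmd_offset(pud, 0) simply hands
 *               back the pgd entry itself.
 *      PAE:     4 pgd (PDPT) entries, each pointing to a pmd page of
 *               512 entries that cover 2MB apiece.
 */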

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                if (!page_table)
                        page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
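
/*
 * For reference: on i386, _PAGE_TABLE used above expands to
 * _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY,
 * i.e. the pmd entry itself is fully permissive and the real protection
 * is enforced by the individual pte entries.
 */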

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end,
                                         pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
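
/*
 * Worked example for the index arithmetic above (non-PAE, where
 * PGDIR_SHIFT == 22 and the pmd level is folded): for
 * start == PAGE_OFFSET == 0xc0000000,
 *
 *      pgd_idx = 0xc0000000 >> 22 = 768
 *      pmd_idx = 0
 *
 * so the walk begins at pgd slot 768 and allocates one pte page per
 * 4MB (PMD_SIZE) step until vaddr reaches end.
 */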

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0;
                     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise
                           create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

                                address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;
                                if (is_kernel_text(address) ||
                                    is_kernel_text(address2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                for (pte_ofs = 0;
                                     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                                     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        pgprot_t prot = PAGE_KERNEL;

                                        if (is_kernel_text(address))
                                                prot = PAGE_KERNEL_EXEC;

                                        set_pte(pte, pfn_pte(pfn, prot));
                                }
                        }
                }
        }
}
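
/*
 * Sizing note for the loop above: pfn += PTRS_PER_PTE advances by
 * exactly one pmd entry, which is what a single large page covers -
 * 4MB (1024 x 4kB) without PAE, 2MB (512 x 4kB) with it. address2 is
 * the last byte of that large page, so the mapping is made executable
 * whenever either end of it falls inside the kernel text.
 */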

/* Pages in pfn range 0x70000-0x7003f (physical 0x70000000-0x7003ffff)
   hit the Pentium Pro RAM erratum and must stay reserved. */
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}

int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        for (i = 0; i < e820.nr_map; i++) {

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640->1Mb area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
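
/*
 * The rounding in page_is_ram() is deliberately conservative: the start
 * of an E820_RAM range is rounded up to a whole page and the end is
 * truncated, so a pfn only counts as RAM if its page lies entirely
 * inside a usable region. E.g. for an entry with addr = 0x1000 and
 * size = 0x1800, addr becomes pfn 1 and end becomes pfn 2, so the
 * partial page at 0x2000-0x27ff is not treated as RAM.
 */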

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;
        return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node: all memory that has been added dynamically
 * and is onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;

        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
                /*
                 * Holes under sparsemem might not have mem_map[]:
                 */
                if (pfn_valid(pfn))
                        add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        }
        totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        int i;

        /*
         * Init entries of the first-level page table to the
         * zero page, if they haven't already been set up.
         *
         * In a normal native boot, we'll be running on a
         * pagetable rooted in swapper_pg_dir, but not in PAE
         * mode, so this will end up clobbering the mappings
         * for the lower 24Mbytes of the address space,
         * without affecting the kernel address space.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
                set_pgd(&base[i],
                        __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

        /* Make sure kernel address space is empty so that a pagetable
           will be allocated for it. */
        memset(&base[USER_PTRS_PER_PGD], 0,
               KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
        paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE). The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        unsigned long vaddr, end;
        pgd_t *pgd_base = swapper_pg_dir;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        bt_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        bt_ioremap_reset();

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}
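
/*
 * Fixmap sizing in pagetable_init(), for reference: fixmap slots grow
 * downwards from FIXADDR_TOP, so __fix_to_virt(__end_of_fixed_addresses - 1)
 * is the lowest fixmap address. Masking it with PMD_MASK and rounding
 * FIXADDR_TOP up to a pmd boundary spans every pmd a fixmap slot can
 * land in; only the pte pages are allocated here, while the actual
 * mappings are installed later through set_fixmap().
 */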

#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}

int nx_enabled = 0;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE
static int disable_nx __initdata = 0;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else if (!strcmp(str, "off")) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        } else
                return -EINVAL;

        return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
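
/*
 * What set_nx() probes, for reference: CPUID leaf 0x80000001 EDX bit 20
 * (v[3] above) advertises Execute Disable. The feature only takes
 * effect once EFER.NXE is set through the EFER MSR and the pagetables
 * are in PAE format, where bit 63 of each 64-bit pte is the NX bit.
 */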

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;
        int level;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr, &level);
        BUG_ON(!pte);

        if (!pte_exec(*pte))
                ret = 0;

        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        pte_update_defer(&init_mm, vaddr, pte);
        __flush_tlb_all();
out:
        return ret;
}
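
/*
 * Bit arithmetic in set_kernel_exec(), spelled out: _PAGE_BIT_NX is 63
 * and a PAE pte is stored as two 32-bit halves, so the NX bit sits at
 * bit 63 - 32 == 31 of pte_high. After flipping it directly, the
 * pte_update_defer() + __flush_tlb_all() pair keeps any hypervisor
 * shadow pagetables and the TLB in sync.
 */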
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

#if 1 /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
               "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
               FIXADDR_START, FIXADDR_TOP,
               (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
               (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
               VMALLOC_START, VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,
               (unsigned long)__va(0), (unsigned long)high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
               (unsigned long)&__init_begin, (unsigned long)&__init_end,
               ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
               (unsigned long)&_etext, (unsigned long)&_edata,
               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
               (unsigned long)&_text, (unsigned long)&_etext,
               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START > VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif

struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
        if (PTRS_PER_PMD > 1)
                pmd_cache = kmem_cache_create("pmd",
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              SLAB_PANIC,
                                              pmd_ctor);
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"
                "       xorl %2,%2      \n"
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
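
/*
 * How the asm above works, for reference: flag starts out as 1 (the
 * "2" (1) input constraint). The write at label 1 targets the
 * read-only FIX_WP_TEST mapping; if the CPU honours WP in supervisor
 * mode it faults, the __ex_table entry sends the fault handler
 * straight to label 2, and flag stays 1. If the write silently
 * succeeds (386-class behaviour), the xorl clears flag to 0.
 */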

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() <= 1)
#endif
        {
                change_page_attr(virt_to_page(start),
                                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
                printk("Write protecting the kernel text: %luk\n", size >> 10);

#ifdef CONFIG_CPA_DEBUG
                global_flush_tlb();

                printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
                change_page_attr(virt_to_page(start), size>>PAGE_SHIFT,
                                 PAGE_KERNEL_EXEC);
                global_flush_tlb();

                printk("Testing CPA: write protecting again\n");
                change_page_attr(virt_to_page(start), size>>PAGE_SHIFT,
                                 PAGE_KERNEL_RX);
                global_flush_tlb();
#endif
        }
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        change_page_attr(virt_to_page(start),
                         size >> PAGE_SHIFT, PAGE_KERNEL_RO);
        printk("Write protecting the kernel read-only data: %luk\n",
               size >> 10);

        /*
         * change_page_attr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();

#ifdef CONFIG_CPA_DEBUG
        printk("Testing CPA: undo %lx-%lx\n", start, start + size);
        change_page_attr(virt_to_page(start), size >> PAGE_SHIFT,
                         PAGE_KERNEL);
        global_flush_tlb();

        printk("Testing CPA: write protecting again\n");
        change_page_attr(virt_to_page(start), size >> PAGE_SHIFT,
                         PAGE_KERNEL_RO);
        global_flush_tlb();
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
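
/*
 * Poison note: POISON_FREE_INITMEM is 0xcc - the x86 int3 opcode - so
 * any stale jump into already-freed __init code traps immediately
 * instead of executing leftover bytes.
 */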

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif