/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                if (pmd_table != pmd_offset(pud, 0))
                        BUG();
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                if (!page_table)
                        page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without re-checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end,
                                         pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
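
/*
 * Report whether an address lies in the range occupied by the kernel
 * image (PAGE_OFFSET up to __init_end); kernel_physical_mapping_init()
 * maps such addresses executable.
 */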
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0;
                     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise
                           create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

                                address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;
                                if (is_kernel_text(address) ||
                                    is_kernel_text(address2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                for (pte_ofs = 0;
                                     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                                     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        pgprot_t prot = PAGE_KERNEL;

                                        if (is_kernel_text(address))
                                                prot = PAGE_KERNEL_EXEC;

                                        set_pte(pte, pfn_pte(pfn, prot));
                                }
                        }
                }
        }
}
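
/*
 * Pfns 0x70000-0x7003f (physical 0x70000000-0x7003ffff) are dangerous
 * on Pentium Pro CPUs with the RAM bug detected by ppro_with_ram_bug();
 * add_one_highpage_init() keeps such pages reserved there.
 */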
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}
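
/*
 * Walk the e820 map and report whether the given pfn lies inside a
 * region of usable RAM.
 */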
int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        for (i = 0; i < e820.nr_map; i++) {

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640K->1MB area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
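
/*
 * Create the page tables covering the permanent kmap window
 * (PKMAP_BASE .. PKMAP_BASE + LAST_PKMAP pages) and cache a pointer to
 * its first pte in pkmap_page_table.
 */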
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
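
/*
 * Hand a freshly discovered highmem page to the buddy allocator and
 * account for it in totalhigh_pages.
 */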
static void __meminit free_new_highpage(struct page *page)
{
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}
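
/*
 * At boot time, free a highmem page unless it is not actually RAM or
 * would trigger the Pentium Pro bug; otherwise keep it reserved.
 */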
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}
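
/*
 * The hotplug path additionally has to bump the RAM accounting
 * (totalram_pages, num_physpages, max_mapnr) for the new page.
 */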
static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;
        return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node, and that all memory added dynamically
 * and onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
                /*
                 * Holes under sparsemem might not have a mem_map[]:
                 */
                if (pfn_valid(pfn))
                        add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        }
        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        int i;

        /*
         * Init entries of the first-level page table to the
         * zero page, if they haven't already been set up.
         *
         * In a normal native boot, we'll be running on a
         * pagetable rooted in swapper_pg_dir, but not in PAE
         * mode, so this will end up clobbering the mappings
         * for the lower 24Mbytes of the address space,
         * without affecting the kernel address space.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
                set_pgd(&base[i],
                        __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

        /* Make sure kernel address space is empty so that a pagetable
           will be allocated for it. */
        memset(&base[USER_PTRS_PER_PGD], 0,
               KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
        paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs them when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE). The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        unsigned long vaddr, end;
        pgd_t *pgd_base = swapper_pg_dir;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}

int nx_enabled = 0;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata = 0;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else if (!strcmp(str, "off")) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        } else
                return -EINVAL;

        return 0;
}
early_param("noexec", noexec_setup);
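
/*
 * Probe for the NX feature: CPUID extended function 0x80000001 must
 * exist and report NX in EDX bit 20. Unless "noexec=off" was given,
 * enable it in EFER and allow _PAGE_NX in ptes.
 */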
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr);
        BUG_ON(!pte);

        if (!pte_exec_kernel(*pte))
                ret = 0;

        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        pte_update_defer(&init_mm, vaddr, pte);
        __flush_tlb_all();
out:
        return ret;
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;
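
/*
 * mem_init() releases all bootmem to the buddy allocator, frees the
 * highmem pages, counts reserved pages, and prints the resulting
 * virtual memory layout with a few sanity checks on it.
 */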
void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

#if 1 /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
               "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
               FIXADDR_START, FIXADDR_TOP,
               (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
               (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
               VMALLOC_START, VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,
               (unsigned long)__va(0), (unsigned long)high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
               (unsigned long)&__init_begin, (unsigned long)&__init_end,
               ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
               (unsigned long)&_etext, (unsigned long)&_edata,
               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
               (unsigned long)&_text, (unsigned long)&_etext,
               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START > VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
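/*
 * Hot-added memory on i386 always goes into ZONE_HIGHMEM of the
 * target node; just register the new pfn range with that zone.
 */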
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif

struct kmem_cache *pmd_cache;
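
/*
 * With PAE (PTRS_PER_PMD > 1) pmd pages come from their own slab
 * cache; with !PAE the pmd level is folded and no cache is needed.
 */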
void __init pgtable_cache_init(void)
{
        if (PTRS_PER_PMD > 1)
                pmd_cache = kmem_cache_create("pmd",
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              SLAB_PANIC,
                                              pmd_ctor);
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
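/*
 * The test writes a byte back to the read-only FIX_WP_TEST page. If WP
 * is honoured the write faults and the exception fixup jumps past the
 * xorl, leaving the flag set to 1; otherwise the write silently
 * succeeds and the flag is cleared to 0.
 */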
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"
                "       xorl %2,%2      \n"
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
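/*
 * Write-protect the kernel text (unless kprobes or SMP alternatives
 * still need to patch it) and the read-only data section, then flush
 * the TLBs so the new protections take effect.
 */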
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() <= 1)
#endif
        {
                change_page_attr(virt_to_page(start),
                                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
                printk("Write protecting the kernel text: %luk\n", size >> 10);
        }
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        change_page_attr(virt_to_page(start),
                         size >> PAGE_SHIFT, PAGE_KERNEL_RO);
        printk("Write protecting the kernel read-only data: %luk\n",
               size >> 10);

        /*
         * change_page_attr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif
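
/*
 * Give a range of already-mapped init pages back to the buddy
 * allocator, poisoning them first to catch late users.
 */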
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif