/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		if (pmd_table != pmd_offset(pud, 0))
			BUG();
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

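/*
 * Does this virtual address fall inside the kernel image, i.e. between
 * PAGE_OFFSET and the end of the .init sections? Used below to decide
 * which mappings must remain executable.
 */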
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables.
			 */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE
					+ PAGE_OFFSET + PAGE_SIZE - 1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));

				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0;
				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}

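/*
 * Pages in the pfn range 0x70000-0x7003F (physical addresses from
 * 0x70000000) trip the memory erratum on the Pentium Pro steppings
 * detected by ppro_with_ram_bug(); such pages are kept reserved below.
 */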
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

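/*
 * Report whether a given page frame is usable RAM, consulting the EFI
 * memory map when running on EFI firmware and the BIOS e820 map otherwise.
 */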
int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {
		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640KB->1MB area. We need a
		 * sanity check here.
		 */
		addr = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

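/*
 * Allocate the page tables covering the permanent kmap window
 * (PKMAP_BASE .. PKMAP_BASE + LAST_PKMAP pages) and remember its pte
 * page in pkmap_page_table for kmap()/kunmap() to use.
 */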
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

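/* Hand one highmem page over to the page allocator and account for it. */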
static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case. Assume a single node;
 * all memory that has been added dynamically and is onlined
 * here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
#define kmap_init()				do { } while (0)
#define permanent_kmaps_init(pgd_base)		do { } while (0)
#define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */

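/*
 * Runtime copies of the kernel page-protection bits; pagetable_init()
 * ORs in _PAGE_GLOBAL when the CPU supports global pages (PGE).
 */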
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the
	 * zero page, if they haven't already been set up.
	 *
	 * In a normal native boot, we'll be running on a
	 * pagetable rooted in swapper_pg_dir, but not in PAE
	 * mode, so this will end up clobbering the mappings
	 * for the lower 24Mbytes of the address space,
	 * without affecting the kernel address space.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		set_pgd(&base[i],
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

	/*
	 * Make sure kernel address space is empty so that a
	 * pagetable will be allocated for it.
	 */
	memset(&base[USER_PTRS_PER_PGD], 0,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE). The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd_base = swapper_pg_dir;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;

	return 0;
}
early_param("noexec", noexec_setup);

int nx_enabled = 0;

#ifdef CONFIG_X86_PAE
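/*
 * Probe CPUID level 0x80000001 for the NX feature bit and, unless the
 * user passed "noexec=off", set EFER.NX and allow _PAGE_NX in ptes.
 */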
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	pte_update_defer(&init_mm, vaddr, pte);
	__flush_tlb_all();
out:
	return ret;
}
#endif /* CONFIG_X86_PAE */

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

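/*
 * mem_init() releases all of low memory to the page allocator, accounts
 * highmem, prints the kernel's virtual memory layout, and runs the WP test.
 */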
void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
	       FIXADDR_START, FIXADDR_TOP,
	       (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
	       PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
	       (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
	       VMALLOC_START, VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,
	       (unsigned long)__va(0), (unsigned long)high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
	       (unsigned long)&_etext, (unsigned long)&_edata,
	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
	       (unsigned long)&_text, (unsigned long)&_etext,
	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
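/*
 * On i386, hot-added memory is always onlined into ZONE_HIGHMEM;
 * removing memory is not supported and simply returns -EINVAL.
 */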
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif

struct kmem_cache *pmd_cache;

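/*
 * In PAE mode each pmd level is a full page allocated from this slab
 * cache; in non-PAE mode the pmd is folded and no cache is needed.
 */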
void __init pgtable_cache_init(void)
{
	size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);

	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					      PTRS_PER_PMD*sizeof(pmd_t),
					      PTRS_PER_PMD*sizeof(pmd_t),
					      SLAB_PANIC,
					      pmd_ctor,
					      NULL);
		if (!SHARED_KERNEL_PMD) {
			/*
			 * If we're in PAE mode and have a non-shared
			 * kernel pmd, then the pgd size must be a
			 * page size. This is because the pgd_list
			 * links through the page structure, so there
			 * can only be one pgd per page for this to
			 * work.
			 */
			pgd_size = PAGE_SIZE;
		}
	}
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		: "=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		  "=q" (tmp_reg),
		  "=r" (flag)
		: "2" (1)
		: "memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
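/*
 * Write-protect the kernel: remap .text read-only+executable and the
 * read-only data sections read-only, then flush the TLBs globally.
 */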
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		change_page_attr(virt_to_page(start),
				 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
		printk("Write protecting the kernel text: %luk\n", size >> 10);
	}

	start += size;
	size = (unsigned long)__end_rodata - start;
	change_page_attr(virt_to_page(start),
			 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
	printk("Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

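/*
 * Return a range of init memory to the page allocator: un-reserve each
 * page, poison it to catch stale references, and free it.
 */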
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif