init_32.c

/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
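
/* Reserve 128 MB of the kernel address space for vmalloc by default. */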
unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;
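
/*
 * Bump allocator for early page-table pages: hands out the next zeroed
 * page from the range reserved by find_early_table_space(), for use
 * before the bootmem allocator is up.
 */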
static __init void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	unsigned long phys;
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_init_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page(&phys);
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else {
			unsigned long phys;
			page_table = (pte_t *)alloc_low_page(&phys);
		}

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
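
/* Treat everything from PAGE_OFFSET up to __init_end as kernel text. */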
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
						unsigned long start_pfn,
						unsigned long end_pfn,
						int use_pse)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m = 0, pages_4k = 0;

	if (!cpu_has_pse)
		use_pse = 0;

	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	update_page_count(PG_LEVEL_2M, pages_2m);
	update_page_count(PG_LEVEL_4K, pages_4k);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
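
/* Walk pgd -> pud -> pmd down to the kernel pte that maps vaddr. */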
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
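
/* Hand one highmem page over to the buddy allocator and account for it. */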
static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};
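
/*
 * Clamp an active memory region to the requested pfn window and free
 * every valid page inside it as highmem.
 */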
static int __init add_highpages_work_fn(unsigned long start_pfn,
					 unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);

	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init()		do { } while (0)
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
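		/* Under PAE, keep the entry present but point it at the zero page. */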
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);
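
/*
 * Enable NX if the CPU advertises it (CPUID leaf 0x80000001, EDX bit 20)
 * and it was not disabled with "noexec=off": set EFER.NX and allow
 * _PAGE_NX in page table entries.
 */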
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	/* max_low_pfn is 0, we already have early_res support */

	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		if (highmem_pages == -1)
			highmem_pages = max_pfn - MAXMEM_PFN;
		if (highmem_pages + MAXMEM_PFN < max_pfn)
			max_pfn = MAXMEM_PFN + highmem_pages;
		if (highmem_pages + MAXMEM_PFN > max_pfn) {
			printk(KERN_WARNING "only %luMB highmem pages "
				"available, ignoring highmem size of %uMB.\n",
				pages_to_mb(max_pfn - MAXMEM_PFN),
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning: only %ldMB will be used.\n",
					MAXMEM>>20);
		if (max_pfn > MAX_NONPAE_PFN)
			printk(KERN_WARNING
				"Use a HIGHMEM64G enabled kernel.\n");
		else
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
		max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
		if (max_pfn > MAX_NONPAE_PFN) {
			max_pfn = MAX_NONPAE_PFN;
			printk(KERN_WARNING "Warning: only 4GB will be used. "
				"Use a HIGHMEM64G enabled kernel.\n");
		}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
	} else {
		if (highmem_pages == -1)
			highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
		if (highmem_pages >= max_pfn) {
			printk(KERN_ERR "highmem size specified (%uMB) is "
				"bigger than pages available (%luMB)!\n",
				pages_to_mb(highmem_pages),
				pages_to_mb(max_pfn));
			highmem_pages = 0;
		}
		if (highmem_pages) {
			if (max_low_pfn - highmem_pages <
			    64*1024*1024/PAGE_SIZE) {
				printk(KERN_ERR "highmem size %uMB results in "
					"smaller than 64MB lowmem, ignoring it.\n",
					pages_to_mb(highmem_pages));
				highmem_pages = 0;
			}
			max_low_pfn -= highmem_pages;
		}
#else
		if (highmem_pages)
			printk(KERN_ERR "ignoring highmem size on non-highmem"
					" kernel!\n");
#endif
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
				  unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memory_present(0, 0, highend_pfn);
	e820_register_active_regions(0, 0, highend_pfn);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memory_present(0, 0, max_low_pfn);
	e820_register_active_regions(0, 0, max_low_pfn);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
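
/*
 * Report each zone's top pfn (DMA, NORMAL and, if configured, HIGHMEM)
 * to the core VM.
 */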
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
	int i;
	unsigned long bootmap_size, bootmap;

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
				 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: %08lx - %08lx\n",
		 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
	printk(KERN_INFO "  bootmap %08lx - %08lx\n",
		 bootmap, bootmap + bootmap_size);
	for_each_online_node(i)
		free_bootmem_with_active_regions(i, max_low_pfn);
	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

	after_init_bootmem = 1;
}
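
/*
 * Worst-case sizing for the early page-table reservation needed to
 * direct-map memory up to 'end': room for the pud, pmd and pte pages,
 * plus two extra pages for the fixmap, carved out of e820 RAM.
 */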
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = PAGE_ALIGN(puds * sizeof(pud_t));

	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

	if (cpu_has_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		extra += PMD_SIZE;
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += PAGE_ALIGN(ptes * sizeof(pte_t));

	/* for fixmap */
	tables += PAGE_SIZE * 2;

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x7000;
	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables>>PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}
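
/*
 * Set up the kernel direct mapping for [start, end). The range is
 * covered in up to three pieces: a head below the first big page,
 * always in 4k pages; a PMD-aligned middle, in big pages when PSE is
 * available; and an unaligned tail, again in 4k pages.
 */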
unsigned long __init_refok init_memory_mapping(unsigned long start,
						unsigned long end)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long start_pfn, end_pfn;
	unsigned long big_page_start;

	/*
	 * Find space for the kernel direct mapping tables.
	 */
	if (!after_init_bootmem)
		find_early_table_space(end);

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	big_page_start = PMD_SIZE;

	if (start < big_page_start) {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
	} else {
		/* head is not big-page aligned */
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
	}
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

	/* big page range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
						cpu_has_pse);

	/* tail is not big-page aligned */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			kernel_physical_mapping_init(pgd_base, start_pfn,
							 end_pfn, 0);
	}

	early_ioremap_page_table_range_init(pgd_base);

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	if (!after_init_bootmem)
		reserve_early(table_start << PAGE_SHIFT,
				 table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_init_bootmem)
		early_memtest(start, end);

	return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s. All 586+ CPUs are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;
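
/*
 * Late memory init: release all bootmem pages to the buddy allocator,
 * free the highmem pages, register the /proc/kcore ranges and print
 * the virtual memory layout.
 */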
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();
	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
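/*
 * The flag starts at 1. The write back through the read-only
 * FIX_WP_TEST mapping faults when the CPU honours WP, and the fixup
 * at 2: skips the xorl, leaving the flag set; if the write silently
 * succeeds, the xorl clears it.
 */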
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
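
/* Nothing arch-specific to do on 32-bit; defer to the core bootmem reservation. */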
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}