init_32.c

/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;
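
/*
 * Hand out one zeroed page from the window that was reserved for
 * early page tables (e820_table_start..e820_table_top). Only usable
 * before the bootmem allocator takes over.
 */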
static __init void *alloc_low_page(void)
{
        unsigned long pfn = e820_table_end++;
        void *adr;

        if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        clear_page(adr);
        return adr;
}
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. In non-PAE compilation mode this
 * just returns the pgd entry itself, since the middle layer is
 * folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                        (pte_t *)alloc_bootmem_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
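
/*
 * Make sure the pmd page covering @vaddr exists in swapper_pg_dir,
 * then return a pointer to the pmd entry for @vaddr.
 */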
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);

        return one_page_table_init(pmd) + pte_idx;
}
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
            && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
                || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
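
/*
 * Deliberately broad: everything from PAGE_OFFSET up to __init_end is
 * treated as kernel text, so the mappings covering the kernel image
 * (init code included) are created executable.
 */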
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * The first iteration sets up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration sets up the appropriate attributes
         * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note, which says:
         *
         *     "Software should not write to a paging-structure entry in a
         *      way that would change, for any linear address, both the
         *      page size and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;
repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}
pte_t *kmap_pte;
pgprot_t kmap_prot;
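
/*
 * Walk the kernel page tables and return the pte that maps @vaddr:
 */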
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                                                       vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
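/*
 * Build the page-table structure for the persistent kmap area and
 * cache the page table backing it in pkmap_page_table:
 */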
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
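
/*
 * Hand one highmem page over to the buddy allocator:
 */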
static void __init add_one_highpage_init(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_highpages_with_active_regions(int nid,
                unsigned long start_pfn, unsigned long end_pfn)
{
        struct range *range;
        int nr_range;
        int i;

        nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);

        for (i = 0; i < nr_range; i++) {
                struct page *page;
                int node_pfn;

                for (node_pfn = range[i].start; node_pfn < range[i].end;
                     node_pfn++) {
                        if (!pfn_valid(node_pfn))
                                continue;
                        page = pfn_to_page(node_pfn);
                        add_one_highpage_init(page);
                }
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}
void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}
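
/*
 * NX, GLOBAL and IOMAP start out masked off here; they are switched
 * on later, once the CPU is known to support them.
 */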
pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}
#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */
        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
                                int acpi, int k8)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memblock_x86_register_active_regions(0, 0, highend_pfn);
        sparse_memory_present_with_active_regions(0);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memblock_x86_register_active_regions(0, 0, max_low_pfn);
        sparse_memory_present_with_active_regions(0);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
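
/*
 * Zone ceilings: ZONE_DMA ends at MAX_DMA_ADDRESS, ZONE_NORMAL covers
 * the rest of lowmem, and ZONE_HIGHMEM takes everything above it:
 */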
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

        after_bootmem = 1;
}
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                totalhigh_pages << (PAGE_SHIFT-10));

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,
                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END                       > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START                     >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE       > FIXADDR_START);
        BUG_ON(VMALLOC_END                             > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                           >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory              > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
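/* On 32-bit, hot-added memory always lands in ZONE_HIGHMEM: */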
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;
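
        /*
         * Write a byte back to a read-only fixmap page. If WP is
         * honoured the write faults and the exception fixup jumps to
         * label 2, leaving flag at its initial value of 1; if the
         * write goes through, the xorl clears flag to 0.
         */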
        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
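
/* Set once mark_rodata_ro() has write-protected the kernel text: */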
int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif