init_32.c

/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

static __init void *alloc_low_page(void)
{
	unsigned long pfn;
	void *adr;

	if ((pgt_buf_end + 1) >= pgt_buf_top) {
		unsigned long ret;

		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_page: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_page: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE);
		pfn = ret >> PAGE_SHIFT;
	} else
		pfn = pgt_buf_end++;

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);
	return adr;
}
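
/*
 * alloc_low_page() draws from two pools: while the pgt_buf window
 * reserved by earlier boot code still has room, it simply bumps
 * pgt_buf_end; once that window is exhausted it falls back to memblock,
 * searching only between min_pfn_mapped and max_pfn_mapped so the fresh
 * page is reachable through the direct mapping that __va() assumes.
 */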

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
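
/*
 * Note on the PUD level: on 32-bit x86 the pud is folded into the pgd,
 * so pud_offset(pgd, 0) just passes the pgd through and pmd_offset()
 * extracts the pmd from it; the BUG_ON above relies on that folding.
 */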

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
		|| (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}
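
/*
 * The final BUG_ON above encodes the linearity guarantee that
 * page_table_range_init() relies on: outside the kmap fixmap window,
 * consecutive pte pages must be physically consecutive, i.e. each new
 * pte page sits exactly PTRS_PER_PTE entries after the previous one.
 */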

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory, so
 * we can cache the location of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL..) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}
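
/*
 * Coverage arithmetic for the PSE path above: one pmd entry maps
 * PTRS_PER_PTE small pages, i.e. 512 * 4 KiB = 2 MiB with PAE and
 * 1024 * 4 KiB = 4 MiB without. "pfn &= PMD_MASK >> PAGE_SHIFT" rounds
 * the frame number down to that boundary, and addr2 probes the last
 * byte of the candidate large page, so kernel text anywhere inside it
 * keeps executable permissions.
 */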

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_highpages_with_active_regions(int nid,
		unsigned long start_pfn, unsigned long end_pfn)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, nid, &start, &end, NULL) {
		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
					    start_pfn, end_pfn);
		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
					      start_pfn, end_pfn);
		for ( ; pfn < e_pfn; pfn++)
			if (pfn_valid(pfn))
				add_one_highpage_init(pfn_to_page(pfn));
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"

/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"

/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}
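
/*
 * Worked example (figures are illustrative and depend on the configured
 * user/kernel split and vmalloc reserve): with the usual 3G/1G split,
 * MAXMEM_PFN corresponds to roughly 896 MB of lowmem, i.e. the 1 GB of
 * kernel address space minus the vmalloc/fixmap area. On a 2 GB box,
 * highmem_pfn_init() therefore keeps ~896 MB in lowmem and pushes the
 * remainder into highmem.
 */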

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */
	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

	after_bootmem = 1;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
	"Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
	"This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages has to
	 * be done before free_all_bootmem(). Memblock uses free low memory
	 * for temporary data (see find_range_array()) and for this purpose
	 * can use pages that were already passed to the buddy allocator,
	 * hence marked as not accessible in the page tables when compiled
	 * with CONFIG_DEBUG_PAGEALLOC. Otherwise the order of initialization
	 * is not important here.
	 */
	set_highmem_pages_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		totalhigh_pages << (PAGE_SHIFT-10));

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP
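
	/*
	 * The temporary #defines above substitute pessimistic compile-time
	 * constants for __FIXADDR_TOP and high_memory, whose real values
	 * are only known at runtime; this lets the BUILD_BUG_ON()s catch
	 * layout inconsistencies at build time. The BUG_ON()s below then
	 * repeat the checks with the live values.
	 */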
#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}
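
/*
 * How the test above works: "flag" enters the asm as 1. The first movb
 * reads a byte from the read-only FIX_WP_TEST page, and the movb at
 * label 1 tries to write it back. If the CPU honours WP in supervisor
 * mode the write faults and the exception table entry resumes at label
 * 2 with flag still 1; if not, the write succeeds and the xorl clears
 * flag. A non-zero return therefore means the WP bit works.
 */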

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This matches the is_kernel_text() upper limit, rounded up to a
	 * large-page boundary since huge pages may be used here:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
}
#endif