/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;
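
/*
 * Hand out zeroed pages from the early page-table buffer
 * (pgt_buf_start..pgt_buf_top), bumping pgt_buf_end as we go.
 * Only usable before the bootmem allocator takes over.
 */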
static __init void *alloc_low_page(void)
{
        unsigned long pfn = pgt_buf_end++;
        void *adr;

        if (pfn >= pgt_buf_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        clear_page(adr);
        return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
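
/*
 * Make sure the pmd (and, for populate_extra_pte, the pte page) covering
 * @vaddr exists in swapper_pg_dir, allocating intermediate tables as
 * needed, and return a pointer to the entry for @vaddr.
 */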
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return one_page_table_init(pmd) + pte_idx;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
            && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
                || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
         * the early code in head_32.S.
         *
         * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
         * as desired for the kernel identity mapping.
         *
         * This two pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;
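
/* Walk the kernel page tables down to the pte that maps @vaddr. */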
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
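/*
 * Allocate the page tables backing the persistent kmap area starting at
 * PKMAP_BASE and cache a pointer to its pte page in pkmap_page_table.
 */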
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
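
/*
 * Release one highmem page to the buddy allocator: clear its reserved
 * bit, reset its refcount, free it and account it in totalhigh_pages.
 */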
static void __init add_one_highpage_init(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_highpages_with_active_regions(int nid,
                unsigned long start_pfn, unsigned long end_pfn)
{
        struct range *range;
        int nr_range;
        int i;

        nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);

        for (i = 0; i < nr_range; i++) {
                struct page *page;
                int node_pfn;

                for (node_pfn = range[i].start; node_pfn < range[i].end;
                     node_pfn++) {
                        if (!pfn_valid(node_pfn))
                                continue;
                        page = pfn_to_page(node_pfn);
                        add_one_highpage_init(page);
                }
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */
        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
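/*
 * Flat (single-node) setup: register the node's memory with memblock and
 * sparsemem, and compute highstart_pfn/highend_pfn, num_physpages and
 * high_memory.
 */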
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memblock_x86_register_active_regions(0, 0, highend_pfn);
        sparse_memory_present_with_active_regions(0);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memblock_x86_register_active_regions(0, 0, max_low_pfn);
        sparse_memory_present_with_active_regions(0);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
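
/*
 * Tell the core VM the highest pfn of each zone; free_area_init_nodes()
 * derives the per-node zone sizes from these limits.
 */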
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

        after_bootmem = 1;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        olpc_dt_build_devicetree();
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                totalhigh_pages << (PAGE_SHIFT-10));

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,
                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE  > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END                        > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START                      >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                            >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
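/* Hot-add @size bytes at @start to node @nid; the pages go to ZONE_HIGHMEM. */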
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"     /* read the read-only test page */
                "1:     movb %1, %0     \n"     /* write it back: faults if WP is honoured */
                "       xorl %2, %2     \n"     /* only reached if the write succeeded */
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)             /* on fault, resume at 2: with flag still 1 */
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly __read_mostly;
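
/*
 * Temporarily restore write access to the kernel text mapping (and flip
 * it back with set_kernel_text_ro() below) - used, e.g., by code that
 * patches the running kernel text.
 */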
void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
        /*
         * When this is called, init has already been executed and released,
         * so everything past _etext should be NX.
         */
        unsigned long start = PFN_ALIGN(_etext);
        /*
         * This matches the is_kernel_text() upper limit, rounded up to a
         * huge-page boundary since huge pages may map this region:
         */
        unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

        if (__supported_pte_mask & _PAGE_NX)
                printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
        set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}
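
/*
 * Make .text and .rodata read-only once init has run, then mark the data
 * NX. With CONFIG_CPA_DEBUG this also exercises the change_page_attr
 * paths by flipping the protections back and forth.
 */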
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        mark_nxdata_nx();
}
#endif