init_32.c

/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

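/*
 * Hand out one zeroed page from the early page-table reservation
 * ([table_start, table_top), located by find_early_table_space()):
 */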
static __init void *alloc_low_page(void)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

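/*
 * Make sure the kernel page tables cover 'vaddr', allocating the pmd
 * page and the pte page on demand:
 */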
void __init populate_extra_pte(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);
        pmd_t *pmd;

        pmd = one_md_table_init(swapper_pg_dir + pgd_idx);
        one_page_table_init(pmd + pmd_idx);
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
            && ((__pa(pte) >> PAGE_SHIFT) < table_start
                || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
                pte_t *newpte;
                int i;

                BUG_ON(after_init_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

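/*
 * Everything from PAGE_OFFSET up to __init_end is treated as kernel
 * text here, so the direct mapping keeps those pages executable:
 */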
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
         * the early code in head_32.S.
         *
         * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
         * as desired for the kernel identity mapping.
         *
         * This two pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well; these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

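/*
 * Walk the kernel page tables and return the pte mapping the given
 * fixmap virtual address:
 */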
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
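/*
 * Build the page tables covering the permanent kmap area starting at
 * PKMAP_BASE and cache its pte page in pkmap_page_table:
 */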
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

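/*
 * Release one highmem page to the buddy allocator and account for it:
 */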
static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

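/*
 * Per-region worker: clip the active region against the range passed
 * in via datax and free every valid highmem pfn in the intersection:
 */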
static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);

        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
static inline void set_highmem_pages_init(void)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

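/*
 * Query CPUID leaf 0x80000001 for the NX capability (EDX bit 20) and,
 * unless the user disabled it with "noexec=off", turn it on in EFER
 * and allow _PAGE_NX in ptes:
 */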
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;

        return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB highmem pages "
                                "available, ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning only 4GB will be used. "
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE) {
                                printk(KERN_ERR "highmem size %uMB results in "
                                        "smaller than 64MB lowmem, ignoring it.\n",
                                        pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                        " kernel!\n");
#endif
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                         unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

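/*
 * Describe the zone layout (DMA, NORMAL and, if configured, HIGHMEM)
 * and let the core mm build the zone data structures:
 */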
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;

        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

        after_init_bootmem = 1;
}

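/*
 * Estimate (worst case) how much memory the kernel direct-mapping
 * page tables will need, including room for the fixmap ptes, and
 * reserve a physically contiguous window for them via e820:
 */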
static void __init find_early_table_space(unsigned long end, int use_pse)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += PAGE_ALIGN(ptes * sizeof(pte_t));

        /* for fixmap */
        tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                        tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

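/*
 * Set up the kernel direct mapping for [start, end) in three pieces:
 * small pages for an unaligned head, large pages (when PSE is usable)
 * for the PMD-aligned middle, small pages again for an unaligned
 * tail. Returns the pfn just past the last page mapped:
 */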
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long start_pfn, end_pfn;
        unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        int use_pse = 0;
#else
        int use_pse = cpu_has_pse;
#endif

        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        big_page_start = PMD_SIZE;

        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
        } else {
                /* head is not big page aligned? */
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                                << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

        /* big page range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
                                             use_pse);

        /* tail is not big page aligned? */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        kernel_physical_mapping_init(pgd_base, start_pfn,
                                                     end_pfn, 0);
        }

        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

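/*
 * Release bootmem to the buddy allocator, account reserved and highmem
 * pages, register /proc/kcore ranges, print the memory summary and the
 * virtual memory layout, then sanity-check that layout:
 */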
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,
                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

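/*
 * Return the pages in [begin, end) to the page allocator - or, with
 * CONFIG_DEBUG_PAGEALLOC, just unmap them so that any stray access to
 * freed init memory faults immediately:
 */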
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}