init_32.c

/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

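/*
 * table_start/table_end/table_top delimit a physically contiguous
 * window, carved out of e820 RAM by find_early_table_space() below,
 * from which alloc_low_page() hands out zeroed page-table pages one
 * at a time until the bootmem allocator takes over.
 */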
static __init void *alloc_low_page(void)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                        (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
            && ((__pa(pte) >> PAGE_SHIFT) < table_start
                || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
                pte_t *newpte;
                int i;

                BUG_ON(after_init_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
         * the early code in head_32.S.
         *
         * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
         * as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
static inline void set_highmem_pages_init(void)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

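/*
 * set_nx() probes CPUID leaf 0x80000001: if the NX feature bit
 * (EDX bit 20) is set and "noexec=off" was not given on the command
 * line, it enables EFER.NX so that _PAGE_NX actually takes effect
 * in the PAE page tables.
 */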
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB highmem pages "
                                "available, ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning only 4GB will be used. "
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE) {
                                printk(KERN_ERR "highmem size %uMB results in "
                                        "smaller than 64MB lowmem, ignoring it.\n",
                                        pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                        " kernel!\n");
#endif
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                         unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

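/*
 * Hand the zone boundaries to the core VM: ZONE_DMA covers the ISA
 * DMA-able pages below MAX_DMA_ADDRESS, ZONE_NORMAL the remaining
 * lowmem, and (when configured) ZONE_HIGHMEM everything above
 * max_low_pfn.
 */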
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;

        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

        after_init_bootmem = 1;
}

static void __init find_early_table_space(unsigned long end, int use_pse)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += PAGE_ALIGN(ptes * sizeof(pte_t));

        /* for fixmap */
        tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                        tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

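/*
 * Set up the kernel's direct mapping of physical range [start, end)
 * at PAGE_OFFSET: a small-page head around the first 2/4MB, a PSE
 * large-page middle where alignment allows, and a small-page tail.
 * Returns end >> PAGE_SHIFT, the first pfn past the mapped range.
 */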
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long start_pfn, end_pfn;
        unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        int use_pse = 0;
#else
        int use_pse = cpu_has_pse;
#endif

        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        big_page_start = PMD_SIZE;

        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
        } else {
                /* head is not big-page aligned? */
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

        /* big page range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
                                             use_pse);

        /* tail is not big-page aligned? */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        kernel_physical_mapping_init(pgd_base, start_pfn,
                                                     end_pfn, 0);
        }

        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,
                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
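/*
 * Memory hot-add entry point: on 32-bit, hot-added memory is placed
 * in ZONE_HIGHMEM of the target node and handed to __add_pages().
 */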
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
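/*
 * "flag" starts out as 1. The store at label 1 writes through the
 * read-only FIX_WP_TEST mapping: if the CPU honours WP in supervisor
 * mode the store faults and the exception table entry resumes at
 * label 2, skipping the xorl, so 1 is returned. If WP is broken the
 * store silently succeeds and the xorl clears the flag.
 */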
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

static int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}