/* pgtable_32.c */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pte_val(pteval))
		set_pte_present(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
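
/*
 * Illustrative sketch, not part of the original file: set_pte_vaddr() is the
 * primitive used to install a single kernel page mapping, for example when a
 * fixmap slot is wired up. The helper below is hypothetical and only shows
 * the intended calling convention: 'idx' names a fixed_addresses slot and
 * 'pfn' is the page frame to map with normal kernel protections.
 */
#if 0	/* example only */
static void example_map_fixmap_slot(enum fixed_addresses idx, unsigned long pfn)
{
	unsigned long vaddr = fix_to_virt(idx);

	/* Install the PTE; set_pte_vaddr() flushes the TLB entry itself. */
	set_pte_vaddr(vaddr, pfn_pte(pfn, PAGE_KERNEL));
}
#endif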

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
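
/*
 * Illustrative sketch, not part of the original file: set_pmd_pfn() maps a
 * whole PMD-sized large page in one step. The hypothetical helper below
 * assumes the caller wants ordinary large-page kernel protections
 * (PAGE_KERNEL_LARGE) and has already satisfied the alignment rules that
 * set_pmd_pfn() checks above.
 */
#if 0	/* example only */
static void example_map_large_page(unsigned long vaddr, unsigned long pfn)
{
	/* vaddr must be PMD-aligned; pfn must be aligned to PTRS_PER_PTE. */
	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL_LARGE);
}
#endif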

unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}
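
/*
 * Illustrative sketch, not part of the original file: a paravirtual guest
 * that wants the very top of the address space for its hypervisor would call
 * reserve_top_address() very early, before any fixmap entry is installed
 * (note the BUG_ON above). The size here is purely hypothetical.
 */
#if 0	/* example only */
	/* Leave a 4 MB hole below the fixmap for the hypervisor. */
	reserve_top_address(4 * 1024 * 1024);
#endif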

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
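
/*
 * Usage example, not part of the original file: booting with "vmalloc=256m"
 * on the kernel command line sets __VMALLOC_RESERVE to 256 MB plus
 * VMALLOC_OFFSET, which correspondingly shrinks the directly mapped lowmem
 * region on 32-bit kernels.
 */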

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
	unsigned long address;

	if (!arg)
		return -EINVAL;

	address = memparse(arg, &arg);
	reserve_top_address(address);
	return 0;
}
early_param("reservetop", parse_reservetop);
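
/*
 * Usage example, not part of the original file: booting with "reservetop=16m"
 * moves __FIXADDR_TOP (and therefore the fixmap) down by 16 MB, leaving that
 * hole at the top of the address space free for a hypervisor.
 */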