pgtable_32.c

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pte_val(pteval))
		set_pte_present(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
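/*
 * Example (illustrative only; pfn_pte() and PAGE_KERNEL are assumed to
 * be available to the caller): a single kernel virtual page could be
 * (re)mapped with something like
 *
 *	set_pte_vaddr(vaddr, pfn_pte(pfn, PAGE_KERNEL));
 *
 * while passing a pte value of zero clears the mapping via pte_clear().
 */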
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
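/*
 * Example (illustrative only; the protection value used here is an
 * assumption): mapping one 2 MB (PAE) large page at a PMD-aligned
 * virtual address could look like
 *
 *	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL_LARGE);
 *
 * with vaddr and pfn satisfying the alignment checks above.
 */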
/*
 * Top of the fixmap area.  The default, 0xfffff000, is the last page
 * below 4 GB; reserve_top_address() can lower it to leave room for a
 * hypervisor at the top of the address space.
 */
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}
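/*
 * Example (illustrative, numbers assumed): reserving the top 64 MB for
 * a hypervisor, reserve_top_address(64 << 20), moves __FIXADDR_TOP down
 * to 0xfc000000 - PAGE_SIZE = 0xfbfff000 and grows the vmalloc
 * reservation by the same 64 MB.
 */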
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
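/*
 * Example (illustrative; the guard size depends on VMALLOC_OFFSET):
 * booting with "vmalloc=256m" sets __VMALLOC_RESERVE to 256 MB plus the
 * guard hole, so the usable vmalloc area still ends up being the
 * requested 256 MB.
 */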
/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
	unsigned long address;

	if (!arg)
		return -EINVAL;

	address = memparse(arg, &arg);
	reserve_top_address(address);
	return 0;
}
early_param("reservetop", parse_reservetop);
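/*
 * Example (illustrative): "reservetop=16m" on the kernel command line
 * is parsed by memparse() into 16 << 20 and handed to
 * reserve_top_address(), which must run before any fixmap entries are
 * set (see the BUG_ON(fixmaps_set > 0) above).
 */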