pgtable.c

/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * Dump a summary of current memory usage to the kernel log, classifying
 * each page as reserved, swap-cached, free or shared.
 */
void show_mem(void)
{
        unsigned long i;
        int free = 0, total = 0, reserved = 0, shared = 0;
        int cached = 0;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();

        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map + i))
                        reserved++;
                else if (PageSwapCache(mem_map + i))
                        cached++;
                else if (!page_count(mem_map + i))
                        free++;
                else
                        shared += page_count(mem_map + i) - 1;
        }

        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d free pages\n", free);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);
}
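
/*
 * For illustration only (not part of the original file): on a small
 * board the resulting log output would look roughly like the lines
 * below -- the numbers here are made up:
 *
 *      Mem-info:
 *      ...
 *      4096 pages of RAM
 *      3100 free pages
 *      512 reserved pages
 *      42 pages shared
 *      0 pages swap cached
 */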

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame.  pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated.  Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
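
/*
 * Illustrative usage (a sketch, not taken from this file): establishing
 * a single PMD-sized kernel mapping.  EXAMPLE_VADDR and example_paddr
 * are hypothetical names; both must be PMD_SIZE-aligned, and
 * PAGE_KERNEL is just one plausible choice of protection:
 *
 *      set_pmd_pfn(EXAMPLE_VADDR, example_paddr >> PAGE_SHIFT,
 *                  PAGE_KERNEL);
 */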

/*
 * Allocate a zeroed page to hold a kernel page table.  __GFP_REPEAT
 * asks the page allocator to retry harder before failing.
 */
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte)
                clear_page(pte);
        return pte;
}

/*
 * Allocate a zeroed page to hold a userspace page table.  With
 * CONFIG_HIGHPTE the page may come from highmem, so it is cleared
 * through a temporary kernel mapping (clear_highpage()).
 */
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
        if (pte)
                clear_highpage(pte);
        return pte;
}
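
/*
 * Note (background on the surrounding kernel, not something this file
 * defines): the generic mm code calls these helpers from
 * __pte_alloc()/__pte_alloc_kernel() and hands the result to
 * pmd_populate()/pmd_populate_kernel(); the matching release helpers,
 * pte_free() and pte_free_kernel(), conventionally live in
 * <asm/pgalloc.h>.
 */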

/*
 * A list of all pgds is needed for the non-PAE case so that pageattr.c
 * can invalidate entries in both cached and uncached pgds; it is not
 * needed for PAE since the kernel pmd is shared.  If PAE did not share
 * the pmd, a similar tactic would be needed there too.  This is
 * essentially codepath-based locking against pageattr.c; it is the one
 * case in which a valid change to the kernel pagetables cannot be
 * lazily synchronised by vmalloc faults (vmalloc faults only work
 * because attached pagetables are never freed).
 *
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points could
 * be used instead.  This scheme was chosen on the basis of manfred's
 * recommendations and because it has no impact on core code whatsoever.
 *
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

/*
 * Link a pgd's page into pgd_list.  The list is threaded through the
 * page structs themselves: page->index holds the next pointer and
 * page_private() holds the address of the previous element's next
 * pointer, giving O(1) unlink.  The caller must hold pgd_lock.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        page->index = (unsigned long) pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long) &page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long) &pgd_list);
}

/*
 * Unlink a pgd's page from pgd_list.  The caller must hold pgd_lock.
 */
static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);

        next = (struct page *) page->index;
        pprev = (struct page **) page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long) pprev);
}
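
/*
 * Illustrative sketch (not part of this file): a walker such as the one
 * in pageattr.c would traverse the list under pgd_lock like so --
 * fix_kernel_entry() is a hypothetical helper:
 *
 *      struct page *page;
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&pgd_lock, flags);
 *      for (page = pgd_list; page;
 *           page = (struct page *) page->index)
 *              fix_kernel_entry((pgd_t *) page_address(page));
 *      spin_unlock_irqrestore(&pgd_lock, flags);
 */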

void pgd_ctor(void *pgd)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1)
                spin_lock_irqsave(&pgd_lock, flags);

        /* copy the kernel portion from the reference page directory */
        memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
               swapper_pg_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        /* with a shared kernel pmd there is no list to maintain */
        if (PTRS_PER_PMD > 1)
                return;

        /* insert under the lock so that a pageattr.c walker never sees
         * a pgd whose kernel entries are still being filled in */
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        quicklist_free(0, pgd_dtor, pgd);
}
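
/*
 * Note on quicklist behaviour (a description of the generic library,
 * offered as background rather than defined by this file):
 * quicklist_alloc() runs the constructor only when it has to get a
 * fresh page from the page allocator; pages recycled from the per-CPU
 * list keep the kernel mappings copied in by an earlier pgd_ctor().
 * quicklist_free() merely parks the page on the per-CPU list; the
 * destructor runs later, when quicklist_trim() actually returns pages
 * to the allocator.
 */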

/* nothing to set up here; pgds are managed through quicklists */
void __init pgtable_cache_init(void)
{
}

/*
 * Trim the pgd quicklist: keep at most 25 cached pages per CPU,
 * releasing at most 16 surplus pages per call and running pgd_dtor on
 * each page actually handed back to the allocator.
 */
void check_pgt_cache(void)
{
        quicklist_trim(0, pgd_dtor, 25, 16);
}