/* pgalloc.c: page directory & page table allocation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
kmem_cache_t *pgd_cache;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (pte)
		clear_page(pte);
	return pte;
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (page) {
		clear_highpage(page);
		/* only flush a successfully allocated page;
		 * flush_dcache_page(NULL) would oops on allocation failure */
		flush_dcache_page(page);
	}
	return page;
}
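
/* Illustrative sketch (editor's addition, not from the original file): with
 * CONFIG_HIGHPTE the pte page may live in highmem, so a caller cannot rely on
 * the kernel's linear mapping and instead maps the page transiently. Roughly,
 * assuming the 2.6-era kmap_atomic() interface:
 */
#if 0
	struct page *ptepage = pte_alloc_one(mm, address);
	pte_t *pte;

	if (ptepage) {
		pte = kmap_atomic(ptepage, KM_USER0);	/* short-lived mapping */
		/* ... install pte entries through 'pte' ... */
		kunmap_atomic(pte, KM_USER0);
	}
#endif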
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
{
	unsigned long *__ste_p = pmdptr->ste;
	int loop;

	if (!pmd) {
		memset(__ste_p, 0, PME_SIZE);
	}
	else {
		BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));

		for (loop = PME_SIZE; loop > 0; loop -= 4) {
			*__ste_p++ = pmd;
			pmd += __frv_PT_SIZE;
		}
	}

	frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1));
}
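
/* Worked example (editor's illustration; the values are hypothetical): if
 * PME_SIZE were 16 bytes, i.e. four 4-byte STEs, __set_pmd(pmdptr, base)
 * would leave:
 *
 *	ste[0] = base
 *	ste[1] = base + __frv_PT_SIZE
 *	ste[2] = base + 2 * __frv_PT_SIZE
 *	ste[3] = base + 3 * __frv_PT_SIZE
 *
 * i.e. a single pmd value fans out across consecutive page-table-sized
 * chunks, one STE per chunk, before the entry is written back through the
 * dcache by frv_dcache_writeback().
 */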
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		pgd_list->private = (unsigned long) &page->index;
	pgd_list = page;
	page->private = (unsigned long) &pgd_list;
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	next = (struct page *) page->index;
	pprev = (struct page **) page->private;
	*pprev = next;
	if (next)
		next->private = (unsigned long) pprev;
}
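
/* Illustrative sketch (editor's addition, not from the original file):
 * pgd_list is an intrusive doubly-linked list threaded through page->index
 * (next element) and page->private (pointer to the previous link). A
 * pageattr.c-style walk over every pgd might look like the following;
 * for_each_pgd_example() is a hypothetical name:
 */
#if 0
static void for_each_pgd_example(void (*fn)(pgd_t *))
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *) page->index)
		fn((pgd_t *) page_address(page));
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#endif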
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,
	       swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
	       (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t));

	/* with PTRS_PER_PMD > 1 the kernel pmd is shared, so there is no
	 * pgd_list to maintain; the lock was never taken in that case, so
	 * returning without unlocking is correct */
	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* the cache constructor (pgd_ctor) has already copied the kernel
	 * entries from swapper_pg_dir, so the object is ready as-is */
	return kmem_cache_alloc(pgd_cache, GFP_KERNEL);
}
void pgd_free(pgd_t *pgd)
{
	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}
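
/* Illustrative sketch (editor's addition, not from the original file):
 * because pgd_ctor() runs when a slab object is constructed, a pgd returned
 * by pgd_alloc() already carries the kernel mappings. A typical lifecycle,
 * as fragmentary caller code:
 */
#if 0
	pgd_t *pgd = pgd_alloc(mm);

	if (!pgd)
		return -ENOMEM;
	/* ... kernel entries already present; populate user entries ... */
	pgd_free(pgd);
#endif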
void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd",
				      PTRS_PER_PGD * sizeof(pgd_t),
				      PTRS_PER_PGD * sizeof(pgd_t),
				      0,
				      pgd_ctor,
				      pgd_dtor);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}