/*
 *  arch/arm/include/asm/pgalloc.h
 *
 *  Copyright (C) 2000-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H

#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
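
/*
 * No cache of preconstructed page tables is kept on this architecture,
 * so check_pgt_cache() has nothing to reclaim and is a no-op.
 */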
#define check_pgt_cache()		do { } while (0)

#ifdef CONFIG_MMU
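
/*
 * First-level descriptor values used when a PMD entry points at a PTE
 * table: the table type bits, PMD_BIT4, and the owning access domain
 * (user or kernel).
 */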
#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

/*
 * Since we have only two-level page tables, these are trivial
 */
#define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd)		do { } while (0)
#define pgd_populate(mm,pmd,pte)	BUG()

extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);

#define pgd_alloc(mm)			get_pgd_slow(mm)
#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)

/*
 * Allocate one PTE table.
 *
 * This actually allocates two hardware PTE tables, but we wrap this up
 * into one table thus:
 *
 *  +------------+
 *  |  h/w pt 0  |
 *  +------------+
 *  |  h/w pt 1  |
 *  +------------+
 *  | Linux pt 0 |
 *  +------------+
 *  | Linux pt 1 |
 *  +------------+
 */
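/*
 * With the usual values for this layout (PTRS_PER_PTE of 512 Linux
 * entries and 256-entry hardware tables, 4 bytes per entry; both are
 * defined outside this file) the four tables exactly fill one 4K page:
 * hardware tables at offsets 0x000 and 0x400, Linux tables at 0x800
 * and 0xc00.
 */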
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	if (pte) {
		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
		pte += PTRS_PER_PTE;
	}

	return pte;
}
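
/*
 * Allocate a PTE table for a user address space.  Same layout as
 * above, but the table is handed back as its struct page so the core
 * mm can treat it as a pagetable page.
 */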
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
	if (pte) {
		void *page = page_address(pte);
		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
		pgtable_page_ctor(pte);
	}

	return pte;
}

/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	if (pte) {
		pte -= PTRS_PER_PTE;
		free_page((unsigned long)pte);
	}
}
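
/*
 * Free a user PTE table previously allocated with pte_alloc_one().
 */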
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
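
/*
 * Each PTE page carries two 256-entry hardware tables, so both PMD
 * entries must be written; the second entry points 256 * sizeof(pte_t)
 * bytes past the first.
 */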
static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
{
	pmdp[0] = __pmd(pmdval);
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
	flush_pmd_entry(pmdp);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * Ensure that we always set both PMD entries.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long pte_ptr = (unsigned long)ptep;

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
}
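
/*
 * As above, but for a user PTE table handed over as its struct page;
 * the entries are tagged with the user access domain.
 */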
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}

#define pmd_pgtable(pmd) pmd_page(pmd)

#endif /* CONFIG_MMU */

#endif