pgalloc.h 2.8 KB
  1. /*
  2. * linux/include/asm-arm/pgalloc.h
  3. *
  4. * Copyright (C) 2000-2001 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #ifndef _ASMARM_PGALLOC_H
  11. #define _ASMARM_PGALLOC_H
  12. #include <asm/processor.h>
  13. #include <asm/cacheflush.h>
  14. #include <asm/tlbflush.h>
/*
 * Since we have only two-level page tables, these are trivial
 *
 * On ARM the hardware uses a two-level translation scheme, so there is
 * no real pmd level: the pmd is folded into the pgd.  Allocating or
 * populating a pmd therefore makes no sense — hitting these paths is a
 * bug.  pmd_alloc_one() returns a poison value ((pmd_t *)2) after
 * BUG()ing so a survivor is still an obviously invalid pointer.
 */
#define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(pmd)			do { } while (0)
#define pgd_populate(mm,pmd,pte)	BUG()

/* Real pgd management lives out of line in arch code. */
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(pgd_t *pgd);

#define pgd_alloc(mm)			get_pgd_slow(mm)
#define pgd_free(pgd)			free_pgd_slow(pgd)

/* No page-table quicklist cache on this architecture. */
#define check_pgt_cache()		do { } while (0)
  26. /*
  27. * Allocate one PTE table.
  28. *
  29. * This actually allocates two hardware PTE tables, but we wrap this up
  30. * into one table thus:
  31. *
  32. * +------------+
  33. * | h/w pt 0 |
  34. * +------------+
  35. * | h/w pt 1 |
  36. * +------------+
  37. * | Linux pt 0 |
  38. * +------------+
  39. * | Linux pt 1 |
  40. * +------------+
  41. */
  42. static inline pte_t *
  43. pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
  44. {
  45. pte_t *pte;
  46. pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
  47. if (pte) {
  48. clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
  49. pte += PTRS_PER_PTE;
  50. }
  51. return pte;
  52. }
  53. static inline struct page *
  54. pte_alloc_one(struct mm_struct *mm, unsigned long addr)
  55. {
  56. struct page *pte;
  57. pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
  58. if (pte) {
  59. void *page = page_address(pte);
  60. clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
  61. }
  62. return pte;
  63. }
  64. /*
  65. * Free one PTE table.
  66. */
  67. static inline void pte_free_kernel(pte_t *pte)
  68. {
  69. if (pte) {
  70. pte -= PTRS_PER_PTE;
  71. free_page((unsigned long)pte);
  72. }
  73. }
  74. static inline void pte_free(struct page *pte)
  75. {
  76. __free_page(pte);
  77. }
  78. /*
  79. * Populate the pmdp entry with a pointer to the pte. This pmd is part
  80. * of the mm address space.
  81. *
  82. * Ensure that we always set both PMD entries.
  83. */
  84. static inline void
  85. pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
  86. {
  87. unsigned long pte_ptr = (unsigned long)ptep;
  88. unsigned long pmdval;
  89. BUG_ON(mm != &init_mm);
  90. /*
  91. * The pmd must be loaded with the physical
  92. * address of the PTE table
  93. */
  94. pte_ptr -= PTRS_PER_PTE * sizeof(void *);
  95. pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
  96. pmdp[0] = __pmd(pmdval);
  97. pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
  98. flush_pmd_entry(pmdp);
  99. }
  100. static inline void
  101. pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
  102. {
  103. unsigned long pmdval;
  104. BUG_ON(mm == &init_mm);
  105. pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
  106. pmdp[0] = __pmd(pmdval);
  107. pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
  108. flush_pmd_entry(pmdp);
  109. }
  110. #endif