hugetlb.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>

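/*
 * No part of the MIPS address space is reserved exclusively for huge
 * pages, so any range is acceptable.
 */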
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}

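/*
 * Reject huge page mappings whose address or length is not aligned to
 * the huge page size, or that would not fit below the top of the
 * task's address space.
 */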
static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr,
                                         unsigned long len)
{
        unsigned long task_size = STACK_TOP;
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;
        if (task_size - len < addr)
                return -EINVAL;
        return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

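/*
 * Huge pages use the normal MIPS page table layout, so the generic
 * page table teardown is sufficient.
 */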
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                          unsigned long addr,
                                          unsigned long end,
                                          unsigned long floor,
                                          unsigned long ceiling)
{
        free_pgd_range(tlb, addr, end, floor, ceiling);
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}

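/*
 * Return the old PTE and reset the entry to point at invalid_pte_table,
 * which is what a cleared huge PTE holds on MIPS (see huge_pte_none()
 * below), rather than zero.
 */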
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
        pte_t clear;
        pte_t pte = *ptep;

        pte_val(clear) = (unsigned long)invalid_pte_table;
        set_pte_at(mm, addr, ptep, clear);
        return pte;
}

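/* Flush the TLB entry for the huge page that contains @addr. */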
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
}

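/*
 * A huge PTE is considered empty when, ignoring the global bit, it is
 * either zero or still points at invalid_pte_table.
 */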
static inline int huge_pte_none(pte_t pte)
{
        unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
        return !val || (val == (unsigned long)invalid_pte_table);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        ptep_set_wrprotect(mm, addr, ptep);
}

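/*
 * Install the updated PTE and, if it actually changed, flush the TLB
 * over the full huge page.
 */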
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr,
                                             pte_t *ptep, pte_t pte,
                                             int dirty)
{
        int changed = !pte_same(*ptep, pte);

        if (changed) {
                set_pte_at(vma->vm_mm, addr, ptep, pte);
                /*
                 * There could be some standard sized pages in there,
                 * get them all.
                 */
                flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
        }

        return changed;
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

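/*
 * No architecture-specific setup or teardown is needed when huge pages
 * are allocated or released on MIPS, so these hooks are empty.
 */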
static inline int arch_prepare_hugepage(struct page *page)
{
        return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#endif /* __ASM_HUGETLB_H */