highmem.h

#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}

#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif /* ARCH_HAS_KMAP */

#endif /* CONFIG_HIGHMEM */
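
/*
 * Usage sketch (illustrative, not part of this header): an atomic kmap
 * runs with pagefaults disabled, so nothing between the map and the unmap
 * may sleep, and unmaps must pair with maps in LIFO order per slot. The
 * names "buffer", "offset" and "len" below are hypothetical.
 *
 *	char *kaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(buffer, kaddr + offset, len);
 *	kunmap_atomic(kaddr, KM_USER0);
 */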

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			     struct vm_area_struct *vma,
			     unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
					   vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
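
/*
 * Sketch of a possible architecture override (hypothetical, not taken from
 * any particular port): an arch whose allocator can hand back pre-zeroed
 * pages could define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and supply:
 *
 *	static inline struct page *
 *	__alloc_zeroed_user_highpage(gfp_t movableflags,
 *				     struct vm_area_struct *vma,
 *				     unsigned long vaddr)
 *	{
 *		return alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags,
 *				      vma, vaddr);
 *	}
 */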

/**
 * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * not be able to move in the future using move_pages() or reclaim. If it
 * is known that the page can move, use alloc_zeroed_user_highpage_movable().
 */
static inline struct page *
alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(0, vma, vaddr);
}

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
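
/*
 * Usage sketch (illustrative, assuming a fault-handler-like context with
 * "vma" and "address" in scope): anonymous pages that the VM may later
 * migrate should come from the movable variant.
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 */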

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Same but also flushes aliased cache contents to RAM.
 *
 * This must be a macro because KM_USER0 and friends aren't defined if
 * !CONFIG_HIGHMEM
 */
#define zero_user_page(page, offset, size, km_type)		\
	do {							\
		void *kaddr;					\
								\
		BUG_ON((offset) + (size) > PAGE_SIZE);		\
								\
		kaddr = kmap_atomic(page, km_type);		\
		memset((char *)kaddr + (offset), 0, (size));	\
		flush_dcache_page(page);			\
		kunmap_atomic(kaddr, (km_type));		\
	} while (0)
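
/*
 * Usage sketch (illustrative): zero the tail of a partially written page,
 * as a filesystem write path might; "from" is a hypothetical byte offset
 * within the page.
 *
 *	zero_user_page(page, from, PAGE_SIZE - from, KM_USER0);
 */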

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user_page(page, offset, size, KM_USER0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible to other CPUs before using the page */
	smp_wmb();
}
#endif
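
/*
 * Usage sketch (illustrative): a copy-on-write-style path that duplicates
 * an existing page into a freshly allocated one; all names here are
 * hypothetical.
 *
 *	new_page = alloc_page_vma(GFP_HIGHUSER | __GFP_MOVABLE, vma, address);
 *	if (new_page)
 *		copy_user_highpage(new_page, old_page, address, vma);
 */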

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */