#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>

/* default is a no-op; architectures with aliasing caches define
 * ARCH_HAS_FLUSH_ANON_PAGE and provide their own implementation */
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct page *page, unsigned long vmaddr)
{
}
#endif

/* default is a no-op; architectures that must flush the kernel mapping
 * of a page define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE */
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);

#else /* CONFIG_HIGHMEM */

/*
 * Without highmem every page is permanently mapped, so kmap() and the
 * kmap_atomic() variants reduce to a page_address() lookup and the
 * unmap operations become no-ops.
 */
static inline unsigned int nr_free_highpages(void) { return 0; }

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#define kmap_atomic(page, idx)		page_address(page)
#define kunmap_atomic(addr, idx)	do { } while (0)
#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#endif /* CONFIG_HIGHMEM */
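
/*
 * Illustrative only (not part of the original header): callers pair
 * kmap_atomic()/kunmap_atomic() around short, non-sleeping accesses to a
 * possibly-highmem page, for example:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memset(kaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(kaddr, KM_USER0);
 *
 * The helpers below follow exactly this pattern; with CONFIG_HIGHMEM unset
 * the map/unmap steps compile away to a plain page_address() lookup.
 */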

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */

static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
static inline struct page *
alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
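
/*
 * Illustrative only (not part of the original header): an anonymous-page
 * fault path might allocate and zero a user page like this, returning an
 * OOM result when allocation fails:
 *
 *	struct page *page = alloc_zeroed_user_highpage(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *
 * Architectures that can zero the page through the user mapping define
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and supply their own version.
 */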

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Same but also flushes aliased cache contents to RAM.
 */
static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
{
	void *kaddr;

	BUG_ON(offset + size > PAGE_SIZE);

	kaddr = kmap_atomic(page, KM_USER0);
	memset((char *)kaddr + offset, 0, size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
}
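
/*
 * Illustrative only (not part of the original header): filesystems use
 * memclear_highpage_flush() to zero the tail of a page beyond the end of
 * valid data, e.g. when truncating:
 *
 *	memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
 *
 * The flush_dcache_page() call inside makes the zeroes visible through any
 * aliased user mapping of the page.
 */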

static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copied data is visible to other CPUs before the page is used */
	smp_wmb();
}

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */