pagemap.h

#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

/*
 * Record an asynchronous writeback error on the mapping so that a later
 * fsync()/sync can report it to userspace.
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
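
/*
 * Illustrative usage sketch (not part of the original header): an async
 * writeback completion path would typically latch a failure like this,
 * so a later fsync() can report it.  example_end_write() is a
 * hypothetical caller, not a real kernel function.
 *
 *	static void example_end_write(struct address_space *mapping, int err)
 *	{
 *		mapping_set_error(mapping, err);
 *	}
 */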
static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
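
/*
 * Illustrative sketch (an assumption, not from this header): a filesystem
 * typically restricts a mapping's allocation mode while initializing an
 * inode, e.g. to forbid recursion back into filesystem code:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * Because the update is non-atomic, it must happen before the mapping is
 * visible to other users, as the comment above warns.
 */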
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);
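
/*
 * Note on reference counting: every successful page_cache_get() (and every
 * find_*/grab_* helper below that returns a page) must be balanced by a
 * page_cache_release(); release_pages() is the batched form for dropping
 * many references at once.
 */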
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) | __GFP_COLD);
}
typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
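
/*
 * Illustrative gang-lookup sketch (an assumption, not from this header):
 * walk a mapping in batches.  example_scan() is hypothetical; it only
 * shows the lookup/release pairing.
 *
 *	static void example_scan(struct address_space *mapping)
 *	{
 *		struct page *pages[16];
 *		pgoff_t index = 0;
 *		unsigned i, nr;
 *
 *		while ((nr = find_get_pages(mapping, index, 16, pages))) {
 *			for (i = 0; i < nr; i++)
 *				index = pages[i]->index + 1;
 *			release_pages(pages, nr, 0);
 *		}
 *	}
 *
 * find_get_pages() takes a reference on each page it returns, so the
 * release_pages() call is what keeps the loop leak-free.
 */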
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
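
/*
 * Illustrative sketch (an assumption): grab_cache_page() returns the page
 * locked and with an elevated refcount, so a caller must undo both:
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *
 *	if (page) {
 *		... modify the page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */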
extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
					  pgoff_t index, filler_t *filler,
					  void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler,
				    void *data);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler,
			    void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
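
/*
 * Illustrative sketch (an assumption): read_cache_page() and thus
 * read_mapping_page() return either a page (with a reference held) or an
 * ERR_PTR() value, so callers check with IS_ERR() before use:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use page contents ...
 *	page_cache_release(page);
 */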
int add_to_page_cache(struct page *page, struct address_space *mapping,
		      pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
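
/*
 * Worked example (assuming 4KB pages, i.e. PAGE_CACHE_SHIFT == 12): a page
 * with page->index == 3 starts at byte offset 3 << 12 == 12288 in the file.
 * The loff_t cast matters: on 32-bit hosts page->index is an unsigned long,
 * and the shift would otherwise overflow for files larger than 4GB.
 */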
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
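
/*
 * Worked example (an assumption, for illustration): for a vma whose file
 * mapping starts at vm_pgoff == 10, a fault at vma->vm_start + 2 * PAGE_SIZE
 * yields pgoff = 2 + 10 == 12, i.e. the thirteenth page of the file.  The
 * final shift is a no-op as long as PAGE_CACHE_SHIFT == PAGE_SHIFT.
 */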
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}
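
/*
 * Illustrative locking pattern (an assumption, not from this header):
 *
 *	lock_page(page);
 *	... page is stable: not under truncation or concurrent IO setup ...
 *	unlock_page(page);
 *
 * TestSetPageLocked() makes the uncontended case a single atomic op; only
 * on contention does the __lock_page() slow path run and sleep.
 */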
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		return __lock_page_killable(page);
	return 0;
}
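
/*
 * Illustrative sketch (an assumption): because lock_page_killable() can
 * fail, a caller must handle the error rather than assume the lock is held,
 * e.g.:
 *
 *	if (lock_page_killable(page)) {
 *		page_cache_release(page);
 *		return -EINTR;
 *	}
 */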
/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
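
/*
 * Illustrative sketch (an assumption): a common pattern before truncating
 * or reusing a page is to drain any in-flight writeback first:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	... page contents are now quiescent ...
 *	unlock_page(page);
 */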
extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
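
/*
 * Illustrative sketch (an assumption, modeled on the generic buffered write
 * path): the user buffer is faulted in *before* the destination page is
 * locked, because a page fault taken while holding that page's lock could
 * deadlock if the buffer is an mmap of the same page:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	page = grab_cache_page(mapping, index);
 *	... kmap + copy_from_user, which should no longer fault ...
 *	unlock_page(page);
 *
 * The fault-in is best-effort; a copy that still faults is handled by the
 * caller's retry logic.
 */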
#endif /* _LINUX_PAGEMAP_H */