#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0,  /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1,  /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2,  /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3,  /* e.g., ramdisk, SHM_LOCK */
};
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
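
/*
 * Illustrative sketch (not part of this header): errors recorded with
 * mapping_set_error() are typically reported and cleared by callers that
 * later wait for writeback, using the same test_and_clear_bit() pattern
 * that filemap_fdatawait() uses:
 *
 *	int ret = 0;
 *
 *	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *		ret = -ENOSPC;
 *	if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *		ret = -EIO;
 */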

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (likely(mapping))
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
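
/*
 * Illustrative sketch (not part of this header): filesystems commonly call
 * this while setting up an inode, before the mapping is in use, to restrict
 * how its pagecache pages may be allocated, e.g. to avoid recursing into
 * the filesystem under memory pressure:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 */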

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);
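
/*
 * Illustrative sketch (not part of this header): a file position is split
 * into a pagecache index and an offset within that page using the macros
 * above, e.g. in the buffered write path:
 *
 *	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 *	unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
 */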

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as was
 * used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return a higher
 * value than expected, and put_page must be able to do the right thing when
 * the page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup side (e.g. find_get_page)
 * has the following pattern:
 *  1. find page in radix tree
 *  2. conditionally increment refcount
 *  3. check the page is still in pagecache (if not, goto 1)
 *
 * The remove side, which cares about the stability of _count (e.g. reclaim),
 * does the following (with tree_lock held for write):
 *  A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 *  B. remove page from pagecache
 *  C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees an elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees a zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed and then the exact
 * same page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON(page_count(page) == 0);
        atomic_inc(&page->_count);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON(PageTail(page));

        return 1;
}
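
/*
 * Illustrative sketch (not part of this header): a lookup-side user of
 * page_cache_get_speculative() follows the 1-2-3 pattern documented above,
 * roughly as find_get_page() does in mm/filemap.c: look up the page under
 * rcu_read_lock(), take a speculative reference, then re-check that the
 * same page is still at that index, retrying otherwise:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		if (unlikely(page !=
 *			     radix_tree_lookup(&mapping->page_tree, index))) {
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */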

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON(page_count(page) == 0);
        atomic_add(count, &page->_count);

#else
        if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                return 0;
#endif
        VM_BUG_ON(PageCompound(page) && page != compound_head(page));

        return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
        return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
        VM_BUG_ON(page_count(page) != 0);
        VM_BUG_ON(count == 0);

        atomic_set(&page->_count, count);
}
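
/*
 * Illustrative sketch (not part of this header): the remove side described
 * above (steps A-C) freezes the refcount under the mapping's tree_lock,
 * roughly as reclaim does when detaching a clean page; "expected_count"
 * stands for the references the caller knows about (the pagecache's plus
 * its own):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, expected_count)) {
 *		spin_unlock_irq(&mapping->tree_lock);
 *		return 0;				(step A bailed out)
 *	}
 *	__remove_from_page_cache(page);			(step B)
 *	spin_unlock_irq(&mapping->tree_lock);
 *	... free the page ...				(step C)
 */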

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                        struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
                                struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
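
/*
 * Illustrative sketch (not part of this header): a typical caller reads one
 * page of a file through its mapping, checks for I/O errors, and maps it
 * before use:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	kaddr = kmap(page);
 *	... use the page contents ...
 *	kunmap(page);
 *	page_cache_release(page);
 */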

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
        __set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
        __clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page_nosync(page);
}
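
/*
 * Illustrative sketch (not part of this header): after sleeping in
 * lock_page() the page may have been truncated from the mapping, so
 * callers commonly re-validate page->mapping once they hold the lock and
 * retry the lookup if it changed:
 *
 *	lock_page(page);
 *	if (page->mapping != mapping) {
 *		unlock_page(page);
 *		page_cache_release(page);
 *		goto repeat;
 *	}
 */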

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->_count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
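
/*
 * Illustrative sketch (not part of this header): a caller that started a
 * read with ->readpage() on a page it had locked (readpage unlocks the page
 * when the read completes) waits for completion and then checks the result,
 * roughly as the read_cache_page() helpers do:
 *
 *	error = mapping->a_ops->readpage(file, page);
 *	if (!error) {
 *		wait_on_page_locked(page);
 *		if (!PageUptodate(page))
 *			error = -EIO;
 *	}
 */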

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        if (unlikely(size == 0))
                return 0;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __get_user(c, end);
        }
        return ret;
}
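
/*
 * Illustrative sketch (not part of this header): the buffered write path
 * pre-faults the source buffer before locking a pagecache page, since
 * faulting the user page while holding the page lock could deadlock:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	... ->write_begin(), copy from buf, ->write_end() ...
 */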

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __set_page_locked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __clear_page_locked(page);
        return error;
}
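
/*
 * Illustrative sketch (not part of this header): the usual way a new page
 * enters the pagecache is to allocate it, insert it locked, and then ask
 * the filesystem to read it, roughly as the read_cache_page() helpers do:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return ERR_PTR(-ENOMEM);
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		page_cache_release(page);
 *		return ERR_PTR(err);
 *	}
 *	err = mapping->a_ops->readpage(file, page);
 */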

#endif /* _LINUX_PAGEMAP_H */