#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
			(__force unsigned long)mask;
}
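
/*
 * Example (a hypothetical sketch, not part of this header): a filesystem
 * that must not recurse into itself via reclaim could clear __GFP_FS from
 * a freshly initialized mapping before the inode is exposed:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 */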
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
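
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_CACHE_SIZE == 4096):
 *
 *	PAGE_CACHE_ALIGN(0x0000) == 0x0000
 *	PAGE_CACHE_ALIGN(0x0001) == 0x1000
 *	PAGE_CACHE_ALIGN(0x1234) == 0x2000
 */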

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0);
}
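
/*
 * Sketch of the usual allocate-and-insert pattern (error handling
 * elided; add_to_page_cache_lru() is declared further down, and the
 * release on failure drops the reference taken at allocation):
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (page) {
 *		if (add_to_page_cache_lru(page, mapping, index,
 *					  mapping_gfp_mask(mapping)))
 *			page_cache_release(page);
 *	}
 */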

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_trylock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
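
/*
 * Usage sketch: the page comes back locked and with an elevated
 * refcount, so a caller must pair every successful grab with both
 * unlock_page() and page_cache_release():
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *	if (page) {
 *		... fill or modify the page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */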

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);
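
/*
 * Sketch of a common read_cache_page() caller pattern (an assumption,
 * not mandated by this header: it uses the mapping's own ->readpage as
 * the filler, which fits many but not all filesystems; the filler may
 * complete asynchronously, hence the wait and uptodate check):
 *
 *	page = read_cache_page(mapping, index,
 *			(filler_t *)mapping->a_ops->readpage, NULL);
 *	if (!IS_ERR(page)) {
 *		wait_on_page_locked(page);
 *		if (!PageUptodate(page)) {
 *			page_cache_release(page);
 *			page = ERR_PTR(-EIO);
 *		}
 *	}
 */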

int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy.  Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.  (A worked example follows the
 * #endif below.)
 *
 * MUST be protected from preemption.
 * Current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}

#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}

#endif
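
/*
 * Worked example (assuming NR_CPUS == 4, so PAGECACHE_ACCT_THRESHOLD is
 * max(16, 8) == 16): a CPU adding pages one at a time only touches the
 * shared atomic when its local count reaches 17, at which point all 17
 * are folded into nr_pagecache and the local count resets to zero.
 * This trades cacheline bouncing on nr_pagecache for a global count
 * that can be off by up to NR_CPUS * PAGECACHE_ACCT_THRESHOLD pages.
 */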

static inline unsigned long get_page_cache_size(void)
{
	int ret = atomic_read(&nr_pagecache);

	/* The approximate accounting above can transiently go negative. */
	if (unlikely(ret < 0))
		ret = 0;
	return ret;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
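
/*
 * Worked example (assuming 4 KiB pages, so PAGE_SHIFT == 12 and
 * PAGE_CACHE_SHIFT == PAGE_SHIFT): a page with ->index == 3 starts at
 * byte offset 3 << 12 == 12288 in its file, and for a vma with
 * vm_start == 0x10000 and vm_pgoff == 5, linear_page_index() maps
 * address 0x12000 to ((0x12000 - 0x10000) >> 12) + 5 == 7.
 */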

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
	might_sleep();
	/* Fast path is an atomic test-and-set; only sleep if already locked. */
	if (TestSetPageLocked(page))
		__lock_page(page);
}
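
/*
 * Sketch of the locking discipline: lock_page() may sleep, so it must
 * not be used in atomic context, and every acquisition must be paired
 * with unlock_page():
 *
 *	lock_page(page);
 *	... inspect or modify page state ...
 *	unlock_page(page);
 */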

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	/* volatile so the compiler cannot optimize the read away */
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}
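
/*
 * Usage sketch (modelled on a generic write path): fault the user
 * buffer in *before* locking the destination pagecache page, so that a
 * later copy_from_user() under the page lock cannot fault on a page we
 * ourselves hold locked:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	page = grab_cache_page(mapping, index);
 *	... copy_from_user() into the kmapped page, then unlock ...
 */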

#endif /* _LINUX_PAGEMAP_H */