pagemap.h 6.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246
  1. #ifndef _LINUX_PAGEMAP_H
  2. #define _LINUX_PAGEMAP_H
  3. /*
  4. * Copyright 1995 Linus Torvalds
  5. */
  6. #include <linux/mm.h>
  7. #include <linux/fs.h>
  8. #include <linux/list.h>
  9. #include <linux/highmem.h>
  10. #include <linux/compiler.h>
  11. #include <asm/uaccess.h>
  12. #include <linux/gfp.h>
  13. /*
  14. * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
  15. * allocation mode flags.
  16. */
  17. #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
  18. #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
  19. static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping)
  20. {
  21. return mapping->flags & __GFP_BITS_MASK;
  22. }
  23. /*
  24. * This is non-atomic. Only to be used before the mapping is activated.
  25. * Probably needs a barrier...
  26. */
  27. static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
  28. {
  29. m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
  30. }
  31. /*
  32. * The page cache can done in larger chunks than
  33. * one page, because it allows for more efficient
  34. * throughput (it can then be mapped into user
  35. * space in smaller chunks for same flexibility).
  36. *
  37. * Or rather, it _will_ be done in larger chunks.
  38. */
  39. #define PAGE_CACHE_SHIFT PAGE_SHIFT
  40. #define PAGE_CACHE_SIZE PAGE_SIZE
  41. #define PAGE_CACHE_MASK PAGE_MASK
  42. #define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
  43. #define page_cache_get(page) get_page(page)
  44. #define page_cache_release(page) put_page(page)
  45. void release_pages(struct page **pages, int nr, int cold);
  46. static inline struct page *page_cache_alloc(struct address_space *x)
  47. {
  48. return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0);
  49. }
  50. static inline struct page *page_cache_alloc_cold(struct address_space *x)
  51. {
  52. return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0);
  53. }
  54. typedef int filler_t(void *, struct page *);
  55. extern struct page * find_get_page(struct address_space *mapping,
  56. unsigned long index);
  57. extern struct page * find_lock_page(struct address_space *mapping,
  58. unsigned long index);
  59. extern struct page * find_trylock_page(struct address_space *mapping,
  60. unsigned long index);
  61. extern struct page * find_or_create_page(struct address_space *mapping,
  62. unsigned long index, unsigned int gfp_mask);
  63. unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
  64. unsigned int nr_pages, struct page **pages);
  65. unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
  66. int tag, unsigned int nr_pages, struct page **pages);
  67. /*
  68. * Returns locked page at given index in given cache, creating it if needed.
  69. */
  70. static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
  71. {
  72. return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
  73. }
  74. extern struct page * grab_cache_page_nowait(struct address_space *mapping,
  75. unsigned long index);
  76. extern struct page * read_cache_page(struct address_space *mapping,
  77. unsigned long index, filler_t *filler,
  78. void *data);
  79. extern int read_cache_pages(struct address_space *mapping,
  80. struct list_head *pages, filler_t *filler, void *data);
  81. int add_to_page_cache(struct page *page, struct address_space *mapping,
  82. unsigned long index, int gfp_mask);
  83. int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
  84. unsigned long index, int gfp_mask);
  85. extern void remove_from_page_cache(struct page *page);
  86. extern void __remove_from_page_cache(struct page *page);
  87. extern atomic_t nr_pagecache;
  88. #ifdef CONFIG_SMP
  89. #define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2)
  90. DECLARE_PER_CPU(long, nr_pagecache_local);
  91. /*
  92. * pagecache_acct implements approximate accounting for pagecache.
  93. * vm_enough_memory() do not need high accuracy. Writers will keep
  94. * an offset in their per-cpu arena and will spill that into the
  95. * global count whenever the absolute value of the local count
  96. * exceeds the counter's threshold.
  97. *
  98. * MUST be protected from preemption.
  99. * current protection is mapping->page_lock.
  100. */
  101. static inline void pagecache_acct(int count)
  102. {
  103. long *local;
  104. local = &__get_cpu_var(nr_pagecache_local);
  105. *local += count;
  106. if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
  107. atomic_add(*local, &nr_pagecache);
  108. *local = 0;
  109. }
  110. }
  111. #else
  112. static inline void pagecache_acct(int count)
  113. {
  114. atomic_add(count, &nr_pagecache);
  115. }
  116. #endif
  117. static inline unsigned long get_page_cache_size(void)
  118. {
  119. int ret = atomic_read(&nr_pagecache);
  120. if (unlikely(ret < 0))
  121. ret = 0;
  122. return ret;
  123. }
  124. /*
  125. * Return byte-offset into filesystem object for page.
  126. */
  127. static inline loff_t page_offset(struct page *page)
  128. {
  129. return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
  130. }
  131. static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
  132. unsigned long address)
  133. {
  134. pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
  135. pgoff += vma->vm_pgoff;
  136. return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  137. }
  138. extern void FASTCALL(__lock_page(struct page *page));
  139. extern void FASTCALL(unlock_page(struct page *page));
  140. static inline void lock_page(struct page *page)
  141. {
  142. might_sleep();
  143. if (TestSetPageLocked(page))
  144. __lock_page(page);
  145. }
  146. /*
  147. * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  148. * Never use this directly!
  149. */
  150. extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));
  151. /*
  152. * Wait for a page to be unlocked.
  153. *
  154. * This must be called with the caller "holding" the page,
  155. * ie with increased "page->count" so that the page won't
  156. * go away during the wait..
  157. */
  158. static inline void wait_on_page_locked(struct page *page)
  159. {
  160. if (PageLocked(page))
  161. wait_on_page_bit(page, PG_locked);
  162. }
  163. /*
  164. * Wait for a page to complete writeback
  165. */
  166. static inline void wait_on_page_writeback(struct page *page)
  167. {
  168. if (PageWriteback(page))
  169. wait_on_page_bit(page, PG_writeback);
  170. }
  171. extern void end_page_writeback(struct page *page);
  172. /*
  173. * Fault a userspace page into pagetables. Return non-zero on a fault.
  174. *
  175. * This assumes that two userspace pages are always sufficient. That's
  176. * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
  177. */
  178. static inline int fault_in_pages_writeable(char __user *uaddr, int size)
  179. {
  180. int ret;
  181. /*
  182. * Writing zeroes into userspace here is OK, because we know that if
  183. * the zero gets there, we'll be overwriting it.
  184. */
  185. ret = __put_user(0, uaddr);
  186. if (ret == 0) {
  187. char __user *end = uaddr + size - 1;
  188. /*
  189. * If the page was already mapped, this will get a cache miss
  190. * for sure, so try to avoid doing it.
  191. */
  192. if (((unsigned long)uaddr & PAGE_MASK) !=
  193. ((unsigned long)end & PAGE_MASK))
  194. ret = __put_user(0, end);
  195. }
  196. return ret;
  197. }
  198. static inline void fault_in_pages_readable(const char __user *uaddr, int size)
  199. {
  200. volatile char c;
  201. int ret;
  202. ret = __get_user(c, uaddr);
  203. if (ret == 0) {
  204. const char __user *end = uaddr + size - 1;
  205. if (((unsigned long)uaddr & PAGE_MASK) !=
  206. ((unsigned long)end & PAGE_MASK))
  207. __get_user(c, end);
  208. }
  209. }
  210. #endif /* _LINUX_PAGEMAP_H */