/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/fs.h>

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * The 1/16 region above the global dirty limit will be put to maximum pauses:
 *
 *	(limit, limit + limit/DIRTY_MAXPAUSE_AREA)
 *
 * In the 1/16 region above the max-pause region, dirty-exceeded bdi's will be
 * put to loops:
 *
 *	(limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
 *
 * Further beyond, all dirtier tasks will enter a loop waiting (possibly for a
 * long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
#define DIRTY_MAXPAUSE_AREA	16
#define DIRTY_PASSGOOD_AREA	8
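
/*
 * Illustrative example only (values assumed for clarity, not taken from the
 * kernel): with thresh = limit = 1600 pages, the regions described above
 * work out to
 *
 *	smooth throttling:	(1600 - 1600/4, 1600)		= (1200, 1600)
 *	maximum pauses:		(1600, 1600 + 1600/16)		= (1600, 1700)
 *	pass-good loops:	(1600 + 1600/16, 1600 + 1600/8)	= (1700, 1800)
 */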

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
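/*
 * Worked example, added for clarity: 4096UL is the chunk size expressed in
 * KB, and shifting right by (PAGE_CACHE_SHIFT - 10) converts KB to
 * page-cache pages.  With 4 KB pages (PAGE_CACHE_SHIFT == 12) this is
 * 4096 >> 2 = 1024 pages, i.e. 4 MB.  PAGE_CACHE_SHIFT is
 * architecture-dependent.
 */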

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	enum writeback_sync_modes sync_mode;
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside
	 * that byterange.  The byte at `end' is included in the writeout
	 * request.
	 */
	loff_t range_start;
	loff_t range_end;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
};
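
/*
 * Illustrative sketch only: one way a caller might set up an on-stack
 * writeback_control for a full-range data-integrity write.  The field
 * values are an example, not a recommendation; unspecified fields are
 * zeroed by the designated initialiser, as required by the comment above.
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 */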

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;

int inode_wait(void *);
void writeback_inodes_sb(struct super_block *);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
int writeback_inodes_sb_if_idle(struct super_block *);
int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
void sync_inodes_sb(struct super_block *);
long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
void wakeup_flusher_threads(long nr_pages);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}

static inline void inode_sync_wait(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
		    TASK_UNINTERRUPTIBLE);
}

/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);

extern unsigned long global_dirty_limit;

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern unsigned long determine_dirtyable_memory(void);

extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);

struct ctl_table;
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
			      unsigned long dirty);

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time);

void page_writeback_init(void);
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied);

static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	balance_dirty_pages_ratelimited_nr(mapping, 1);
}
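
/*
 * Illustrative usage only (a sketch, not a complete write path): a caller
 * that has just dirtied a page belonging to 'mapping' would typically
 * follow up with a ratelimited balance call, e.g.
 *
 *	set_page_dirty(page);
 *	balance_dirty_pages_ratelimited(mapping);
 */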

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
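
/*
 * Illustrative sketch only: a filesystem's ->writepages() could drive
 * write_cache_pages() with its own writepage_t callback.  'my_writepage'
 * and 'my_ctx' are hypothetical names used for this example.
 *
 *	static int my_writepage(struct page *page,
 *				struct writeback_control *wbc, void *data)
 *	{
 *		... write 'page' using 'data' as filesystem context ...
 *		return 0;
 *	}
 *
 *	ret = write_cache_pages(mapping, wbc, my_writepage, my_ctx);
 */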
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void set_page_dirty_balance(struct page *page, int page_mkwrite);
void writeback_set_ratelimit(void);

/* pdflush.c */
extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
				   read-only. */

#endif		/* WRITEBACK_H */