backing-dev.h

/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct list_head list;			/* hangs off the bdi */
	struct backing_dev_info *bdi;		/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;		/* last old data flush */

	struct task_struct *task;		/* writeback task */
	struct list_head b_dirty;		/* dirty inodes */
	struct list_head b_io;			/* parked for writeback */
	struct list_head b_more_io;		/* parked for more writeback */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;	/* default writeback info for this bdi */
	spinlock_t wb_lock;		/* protects update side of wb_list */
	struct list_head wb_list;	/* the flusher threads hanging off this bdi */
	unsigned long wb_mask;		/* bitmask of registered tasks */
	unsigned int wb_cnt;		/* number of registered tasks */

	struct list_head work_list;

	struct device *dev;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
void bdi_start_writeback(struct writeback_control *wbc);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_has_dirty_io(struct backing_dev_info *bdi);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
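
/*
 * Example (illustrative sketch, names made up): a driver or filesystem that
 * embeds its own backing_dev_info would typically initialise and register it
 * with the functions declared above before exposing the device, and tear it
 * down again on removal, roughly along these lines:
 *
 *	err = bdi_init(&mydrv_bdi);
 *	if (err)
 *		return err;
 *	err = bdi_register(&mydrv_bdi, NULL, "mydrv");
 *	if (err) {
 *		bdi_destroy(&mydrv_bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(&mydrv_bdi);
 *	bdi_destroy(&mydrv_bdi);
 */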

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}
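
/*
 * Example (illustrative sketch): the writeback code accounts pages against
 * these counters, e.g. bumping BDI_WRITEBACK when a page is handed to the
 * device and dropping it again when the write-out completes.  From a context
 * where interrupts may be enabled that looks roughly like:
 *
 *	inc_bdi_stat(bdi, BDI_WRITEBACK);	// page goes under writeback
 *	...
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);	// writeback completed
 *
 * The __-prefixed variants skip the local_irq_save()/restore() pair and are
 * only safe where interrupts are already disabled.
 */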

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
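
/*
 * Each CPU may batch up to BDI_STAT_BATCH updates locally before they are
 * folded into the global count (see __add_bdi_stat()), so a cheap bdi_stat()
 * read can be off by up to nr_cpu_ids * BDI_STAT_BATCH on SMP; bdi_stat_sum()
 * folds the per-CPU deltas when an exact value is needed.
 */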

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK macro combines
 * these three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
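
/*
 * Example (illustrative sketch, names made up): an in-memory filesystem whose
 * pages should neither be written back nor counted as dirtyable memory could
 * describe its bdi roughly like this:
 *
 *	static struct backing_dev_info examplefs_backing_dev_info = {
 *		.name		= "examplefs",
 *		.ra_pages	= 0,	// no readahead for RAM-backed pages
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *				  BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP |
 *				  BDI_CAP_EXEC_MAP,
 *	};
 */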

extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
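
/*
 * Example (illustrative sketch): callers that generate background write-out
 * typically poll the congestion state and throttle themselves rather than
 * piling more I/O onto a saturated queue, along these lines:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 20);
 *	// ... now queue the writeback work ...
 */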

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#endif	/* _LINUX_BACKING_DEV_H */