/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040

#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)
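
/*
 * Illustrative sketch (not part of the original header): how the mount
 * option helpers above are typically used by an option parser.  The
 * parse_options() caller and the "opts" string handling shown here are
 * hypothetical; the real parser lives in super.c.
 *
 *	static int parse_options(struct f2fs_sb_info *sbi, char *opts)
 *	{
 *		// enable background GC by default, honour "nodiscard"
 *		set_opt(sbi, BG_GC);
 *		if (strstr(opts, "nodiscard"))
 *			clear_opt(sbi, DISCARD);
 *		return test_opt(sbi, DISCARD) ? 1 : 0;
 *	}
 */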

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

#define CRCPOLY_LE 0xedb88320

static inline __u32 f2fs_crc32(void *buf, size_t len)
{
	unsigned char *p = (unsigned char *)buf;
	__u32 crc = F2FS_SUPER_MAGIC;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
{
	return f2fs_crc32(buf, buf_size) == blk_crc;
}
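
/*
 * Illustrative sketch (not part of the original header): the CRC helpers
 * above seed the CRC with F2FS_SUPER_MAGIC and are used to checksum
 * checkpoint blocks.  A caller roughly along these lines (the
 * checksum_offset field of struct f2fs_checkpoint is assumed from
 * include/linux/f2fs_fs.h) would validate one:
 *
 *	static bool cp_block_valid(struct f2fs_checkpoint *cp)
 *	{
 *		unsigned long crc_offset = le32_to_cpu(cp->checksum_offset);
 *		__u32 crc = *(__u32 *)((unsigned char *)cp + crc_offset);
 *
 *		// everything up to the stored checksum is covered
 *		return f2fs_crc_valid(crc, cp, crc_offset);
 *	}
 */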

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};

#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = nats_in_cursum(rs);
	rs->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = sits_in_cursum(rs);
	rs->n_sits = cpu_to_le16(before + i);
	return before;
}

/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
#define XATTR_NODE_OFFSET	(-1)	/*
					 * store xattrs to one node block per
					 * file keeping -1 as its node offset to
					 * distinguish from index node blocks.
					 */
enum {
	ALLOC_NODE,	/* allocate a new node page if needed */
	LOOKUP_NODE,	/* look up a node without readahead */
	LOOKUP_NODE_RA,	/*
			 * look up a node with readahead called
			 * by get_datablock_ro.
			 */
};

#define F2FS_LINK_MAX		32000	/* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags;		/* use to pass per-file flags */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	struct extent_info ext;		/* in-memory extent cache entry */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}
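
/*
 * Illustrative sketch (not part of the original header): a read path can
 * consult the per-inode extent cache above before walking node blocks.
 * This hypothetical helper shows the intended locking discipline
 * (readers take ext_lock for reading; get_extent_info() above takes it
 * for writing):
 *
 *	static bool lookup_extent_cache(struct inode *inode, pgoff_t index,
 *					block_t *blkaddr)
 *	{
 *		struct extent_info *ext = &F2FS_I(inode)->ext;
 *		bool hit = false;
 *
 *		read_lock(&ext->ext_lock);
 *		if (ext->len && index >= ext->fofs &&
 *				index < ext->fofs + ext->len) {
 *			*blkaddr = ext->blk_addr + (index - ext->fofs);
 *			hit = true;
 *		}
 *		read_unlock(&ext->ext_lock);
 *		return hit;
 *	}
 */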

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node ids */
	struct mutex build_lock;	/* lock for building free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t	data_blkaddr;		/* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
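
/*
 * Illustrative sketch (not part of the original header): the common way
 * callers drive a dnode_of_data.  get_dnode_of_data(), datablock_addr()
 * and f2fs_put_dnode() are declared/defined further down in this header
 * and in node.c; the wrapper itself is hypothetical:
 *
 *	static int lookup_data_blkaddr(struct inode *inode, pgoff_t index,
 *					block_t *blkaddr)
 *	{
 *		struct dnode_of_data dn;
 *		int err;
 *
 *		set_new_dnode(&dn, inode, NULL, NULL, 0);
 *		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *		if (err)
 *			return err;
 *		*blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 *		f2fs_put_dnode(&dn);
 *		return 0;
 *	}
 */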

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */
};

/*
 * For directory operation
 */
#define	NODE_DIR1_BLOCK		(ADDRS_PER_INODE + 1)
#define	NODE_DIR2_BLOCK		(ADDRS_PER_INODE + 2)
#define	NODE_IND1_BLOCK		(ADDRS_PER_INODE + 3)
#define	NODE_IND2_BLOCK		(ADDRS_PER_INODE + 4)
#define	NODE_DIND_BLOCK		(ADDRS_PER_INODE + 5)

/*
 * For superblock
 */

/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * Used as sbi->fs_lock[NR_GLOBAL_LOCKS].
 * The checkpoint procedure blocks all the locks in this fs_lock array.
 * Some FS operations grab free locks, and if there is no free lock,
 * then wait to grab a lock in a round-robin manner.
 */
#define NR_GLOBAL_LOCKS	8

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			with waiting the bio's completion
 * ...			Only can be used with META.
 */
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */
	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
	sector_t last_block_in_bio[NR_PAGE_TYPE];	/* last block number */
	struct rw_semaphore bio_sem;		/* IO semaphore */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct mutex fs_lock[NR_GLOBAL_LOCKS];	/* blocking FS operations */
	struct mutex node_write;		/* locking node writes */
	struct mutex writepages;		/* mutex for writepages() */
	unsigned char next_lock_num;		/* round-robin global locks */
	int por_doing;				/* recovery is doing or not */
	int on_build_free_nids;			/* build_free_nids is doing */

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	struct mutex orphan_inode_mutex;	/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int bg_gc;				/* background gc calls */
	unsigned int n_dirty_dirs;		/* # of dir inodes */
#endif
	unsigned int last_victim[2];		/* last victim segment # */

	spinlock_t stat_lock;			/* lock for stat operations */
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
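
/*
 * Illustrative sketch (not part of the original header): the checkpoint
 * flag helpers above keep cp->ckpt_flags in little-endian form on every
 * update.  The CP_UMOUNT_FLAG bit is assumed here from
 * include/linux/f2fs_fs.h; the wrapper itself is hypothetical:
 *
 *	static void mark_umount_checkpoint(struct f2fs_sb_info *sbi,
 *						bool is_umount)
 *	{
 *		struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 *
 *		if (is_umount)
 *			set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
 *		else
 *			clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
 *	}
 */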

static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
		/*
		 * This is the only time we take multiple fs_lock[]
		 * instances; the order is immaterial since we
		 * always hold cp_mutex, which serializes multiple
		 * such operations.
		 */
		mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
	}
}

static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
{
	int i = 0;
	for (; i < NR_GLOBAL_LOCKS; i++)
		mutex_unlock(&sbi->fs_lock[i]);
}

static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
{
	unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
	int i = 0;

	for (; i < NR_GLOBAL_LOCKS; i++)
		if (mutex_trylock(&sbi->fs_lock[i]))
			return i;

	mutex_lock(&sbi->fs_lock[next_lock]);
	sbi->next_lock_num++;
	return next_lock;
}

static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
{
	if (ilock < 0)
		return;
	BUG_ON(ilock >= NR_GLOBAL_LOCKS);
	mutex_unlock(&sbi->fs_lock[ilock]);
}
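
/*
 * Illustrative sketch (not part of the original header): how an FS
 * operation pairs mutex_lock_op()/mutex_unlock_op() so that a concurrent
 * checkpoint, which takes all NR_GLOBAL_LOCKS slots via mutex_lock_all(),
 * excludes it.  The do_some_fs_op() body is hypothetical:
 *
 *	static int do_some_fs_op(struct f2fs_sb_info *sbi)
 *	{
 *		int ilock = mutex_lock_op(sbi);	// returns the slot we hold
 *		int err = 0;
 *
 *		// ... modify metadata that must not race with checkpoint ...
 *
 *		mutex_unlock_op(sbi, ilock);	// release exactly that slot
 *		return err;
 *	}
 */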

/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	WARN_ON((nid >= NM_I(sbi)->max_nid));
	if (nid >= NM_I(sbi)->max_nid)
		return -EINVAL;
	return 0;
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS	1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	if (F2FS_I(inode)->i_xattr_nid)
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
	else
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < (block_t) count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec *
					(1 << sbi->log_blocks_per_seg);
	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}
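
/*
 * Illustrative worked example (not part of the original header) for
 * get_blocktype_secs() above, assuming the common 2MB-segment geometry:
 * log_blocks_per_seg = 9 (512 blocks per segment) and segs_per_sec = 1,
 * so pages_per_sec = 512.  With 600 dirty pages of the given type:
 *
 *	(600 + 512 - 1) >> 9 = 1111 >> 9 = 2, then 2 / 1 = 2 sections,
 *
 * i.e. the page count is rounded up to whole sections.
 */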

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_block_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
	return &ckpt->sit_nat_version_bitmap + offset;
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * odd numbered checkpoints should be at cp segment 0,
	 * and even numbered ones at cp segment 1
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}
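
/*
 * Illustrative example (not part of the original header): with 512 blocks
 * per segment, if cp_blkaddr is 512 then __start_cp_addr() returns 512
 * for an odd checkpoint_ver (pack #0) and 512 + 512 = 1024 for an even
 * one (pack #1), so the two checkpoint packs alternate between the two
 * segments of the CP area.
 */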

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + (block_t)count;
	sbi->alloc_valid_block_count += (block_t)count;
	valid_node_count = sbi->total_valid_node_count + count;

	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (valid_node_count > sbi->total_node_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks += count;
	sbi->total_valid_node_count = valid_node_count;
	sbi->total_valid_block_count = valid_block_count;
	spin_unlock(&sbi->stat_lock);

	return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	spin_lock(&sbi->stat_lock);

	BUG_ON(sbi->total_valid_block_count < count);
	BUG_ON(sbi->total_valid_node_count < count);
	BUG_ON(inode->i_blocks < count);

	inode->i_blocks -= count;
	sbi->total_valid_node_count -= count;
	sbi->total_valid_block_count -= (block_t)count;

	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_node_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_inode_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);
	return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
		unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	raw_node = F2FS_NODE(node_page);
	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}
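
/*
 * Illustrative note (not part of the original header): unlike the generic
 * kernel bitops, f2fs_{test,set,clear}_bit() above treat bit 0 as the most
 * significant bit of the first byte, matching the on-disk bitmap layout.
 * For example:
 *
 *	char map[2] = { 0, 0 };
 *
 *	f2fs_set_bit(0, map);	// map[0] == 0x80
 *	f2fs_set_bit(9, map);	// map[1] == 0x40
 *	f2fs_test_bit(9, map);	// non-zero
 *	f2fs_clear_bit(0, map);	// map[0] == 0x00 again
 */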

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_UPDATE_DIR,		/* should update inode block for consistency */
	FI_DELAY_IPUT,		/* used for the recovery */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}

static inline int f2fs_readonly(struct super_block *sb)
{
	return sb->s_flags & MS_RDONLY;
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
int update_dent_inode(struct inode *, const struct qstr *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
				inode);
}

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
					block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data *,
					block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
					int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int check_orphan_space(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
int recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	struct mutex stat_lock;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_call_count(si)	((si)->call_count++)

#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif