f2fs.h 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170
  1. /*
  2. * fs/f2fs/f2fs.h
  3. *
  4. * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  5. * http://www.samsung.com/
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #ifndef _LINUX_F2FS_H
  12. #define _LINUX_F2FS_H
  13. #include <linux/types.h>
  14. #include <linux/page-flags.h>
  15. #include <linux/buffer_head.h>
  16. #include <linux/slab.h>
  17. #include <linux/crc32.h>
  18. #include <linux/magic.h>
  19. /*
  20. * For mount options
  21. */
  22. #define F2FS_MOUNT_BG_GC 0x00000001
  23. #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
  24. #define F2FS_MOUNT_DISCARD 0x00000004
  25. #define F2FS_MOUNT_NOHEAP 0x00000008
  26. #define F2FS_MOUNT_XATTR_USER 0x00000010
  27. #define F2FS_MOUNT_POSIX_ACL 0x00000020
  28. #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
  29. #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
  30. #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
  31. #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option)
  32. #define ver_after(a, b) (typecheck(unsigned long long, a) && \
  33. typecheck(unsigned long long, b) && \
  34. ((long long)((a) - (b)) > 0))
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;	/* node id: index into the node address table */

/* in-memory copy of the parsed mount options */
struct f2fs_mount_info {
	unsigned int	opt;	/* F2FS_MOUNT_* bit flags */
};
  43. #define CRCPOLY_LE 0xedb88320
  44. static inline __u32 f2fs_crc32(void *buf, size_t len)
  45. {
  46. unsigned char *p = (unsigned char *)buf;
  47. __u32 crc = F2FS_SUPER_MAGIC;
  48. int i;
  49. while (len--) {
  50. crc ^= *p++;
  51. for (i = 0; i < 8; i++)
  52. crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
  53. }
  54. return crc;
  55. }
  56. static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
  57. {
  58. return f2fs_crc32(buf, buf_size) == blk_crc;
  59. }
/*
 * For checkpoint manager
 */
/* selector for the two version bitmaps stored in the checkpoint */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};
/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};
  83. #define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
  84. #define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
  85. #define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
  86. #define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
  87. #define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
  88. #define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
  89. static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
  90. {
  91. int before = nats_in_cursum(rs);
  92. rs->n_nats = cpu_to_le16(before + i);
  93. return before;
  94. }
  95. static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
  96. {
  97. int before = sits_in_cursum(rs);
  98. rs->n_sits = cpu_to_le16(before + i);
  99. return before;
  100. }
/*
 * ioctl commands
 *
 * f2fs reuses the generic FS_IOC_* flag ioctls rather than defining
 * its own numbers.
 */
#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
#endif
/*
 * For INODE and NODE manager
 */
#define XATTR_NODE_OFFSET (-1)	/*
				 * store xattrs to one node block per
				 * file keeping -1 as its node offset to
				 * distinguish from index node blocks.
				 */
/* lookup modes for node pages */
enum {
	ALLOC_NODE,	/* allocate a new node page if needed */
	LOOKUP_NODE,	/* look up a node without readahead */
	LOOKUP_NODE_RA,	/*
			 * look up a node with readahead called
			 * by get_datablock_ro.
			 */
};

#define F2FS_LINK_MAX 32000	/* maximum link count per file */
/* for in-memory extent cache entry (one contiguous run of data blocks) */
struct extent_info {
	rwlock_t ext_lock;	/* rwlock protecting the three fields below */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
/* f2fs-private part of an in-memory inode; wraps the VFS inode */
struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags;		/* FI_* per-file flag bits, see enum below */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	struct extent_info ext;		/* in-memory extent cache entry */
};
/*
 * Load the on-disk (little-endian) extent @i_ext into the in-memory
 * cache entry @ext, under its write lock.
 */
static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}
/*
 * Store the in-memory extent @ext into the on-disk (little-endian)
 * representation @i_ext; only a read lock is needed since @ext is
 * not modified.
 */
static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}
/* node manager state: NAT cache, free-nid pool and checkpoint bitmap */
struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node id */
	struct mutex build_lock;	/* lock for build free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};
/*
 * this structure is used as one of function parameters.
 * all the information are dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t data_blkaddr;		/*
					 * block address of the entry at
					 * ofs_in_node (NOTE(review): old
					 * comment said "node block", but the
					 * name suggests the data block —
					 * confirm against callers)
					 */
};
  208. static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
  209. struct page *ipage, struct page *npage, nid_t nid)
  210. {
  211. memset(dn, 0, sizeof(*dn));
  212. dn->inode = inode;
  213. dn->inode_page = ipage;
  214. dn->node_page = npage;
  215. dn->nid = nid;
  216. }
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

/* indexes into f2fs_sm_info.curseg_array, hottest first per class */
enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};
/* segment manager state: layout geometry plus SIT/free/dirty/curseg info */
struct f2fs_sm_info {
	struct sit_info	*sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */
};
/*
 * For directory operation
 *
 * Logical block offsets (within a file's block index) of the two direct,
 * two indirect and one double-indirect node pointers that follow the
 * in-inode data addresses.
 */
#define	NODE_DIR1_BLOCK		(ADDRS_PER_INODE + 1)
#define	NODE_DIR2_BLOCK		(ADDRS_PER_INODE + 2)
#define	NODE_IND1_BLOCK		(ADDRS_PER_INODE + 3)
#define	NODE_IND2_BLOCK		(ADDRS_PER_INODE + 4)
#define	NODE_DIND_BLOCK		(ADDRS_PER_INODE + 5)
/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
 * The checkpoint procedure blocks all the locks in this fs_lock array.
 * Some FS operations grab free locks, and if there is no free lock,
 * then wait to grab a lock in a round-robin manner.
 */
#define NR_GLOBAL_LOCKS	8

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			with waiting the bio's completion
 * ...			Only can be used with META.
 */
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};
/* per-mount in-memory superblock state; hangs off sb->s_fs_info */
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */
	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
	sector_t last_block_in_bio[NR_PAGE_TYPE];	/* last block number */
	struct rw_semaphore bio_sem;		/* IO semaphore */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct mutex fs_lock[NR_GLOBAL_LOCKS];	/* blocking FS operations */
	struct mutex node_write;		/* locking node writes */
	struct mutex writepages;		/* mutex for writepages() */
	unsigned char next_lock_num;		/* round-robin global locks */
	int por_doing;				/* recovery is doing or not */
	int on_build_free_nids;			/* build_free_nids is doing */

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	struct mutex orphan_inode_mutex;	/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int bg_gc;				/* background gc calls */
	unsigned int n_dirty_dirs;		/* # of dir inodes */
#endif
	unsigned int last_victim[2];		/* last victim segment # */
	spinlock_t stat_lock;			/* lock for stat operations */
};
  378. /*
  379. * Inline functions
  380. */
  381. static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
  382. {
  383. return container_of(inode, struct f2fs_inode_info, vfs_inode);
  384. }
  385. static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
  386. {
  387. return sb->s_fs_info;
  388. }
  389. static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
  390. {
  391. return (struct f2fs_super_block *)(sbi->raw_super);
  392. }
  393. static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
  394. {
  395. return (struct f2fs_checkpoint *)(sbi->ckpt);
  396. }
  397. static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
  398. {
  399. return (struct f2fs_nm_info *)(sbi->nm_info);
  400. }
  401. static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
  402. {
  403. return (struct f2fs_sm_info *)(sbi->sm_info);
  404. }
  405. static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
  406. {
  407. return (struct sit_info *)(SM_I(sbi)->sit_info);
  408. }
  409. static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
  410. {
  411. return (struct free_segmap_info *)(SM_I(sbi)->free_info);
  412. }
  413. static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
  414. {
  415. return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
  416. }
/* Mark the superblock info dirty so checkpoint-related state is flushed. */
static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

/* Clear the dirty marker set by F2FS_SET_SB_DIRT(). */
static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}
  425. static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
  426. {
  427. unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
  428. return ckpt_flags & f;
  429. }
  430. static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
  431. {
  432. unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
  433. ckpt_flags |= f;
  434. cp->ckpt_flags = cpu_to_le32(ckpt_flags);
  435. }
  436. static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
  437. {
  438. unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
  439. ckpt_flags &= (~f);
  440. cp->ckpt_flags = cpu_to_le32(ckpt_flags);
  441. }
/*
 * Take every global FS lock; used by the checkpoint path to drain all
 * in-flight FS operations (see the fs_lock[] comment above).
 */
static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
		/*
		 * This is the only time we take multiple fs_lock[]
		 * instances; the order is immaterial since we
		 * always hold cp_mutex, which serializes multiple
		 * such operations.
		 */
		mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
	}
}

/* Release all global FS locks taken by mutex_lock_all(). */
static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
{
	int i = 0;
	for (; i < NR_GLOBAL_LOCKS; i++)
		mutex_unlock(&sbi->fs_lock[i]);
}
/*
 * Grab one free global FS lock, trying each in turn; if all are busy,
 * block on the round-robin slot.  Returns the index taken, which the
 * caller must pass back to mutex_unlock_op().
 *
 * NOTE(review): next_lock_num is read and incremented without further
 * serialization — presumably benign since it only biases which slot is
 * waited on; confirm this is intentional.
 */
static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
{
	unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
	int i = 0;

	for (; i < NR_GLOBAL_LOCKS; i++)
		if (mutex_trylock(&sbi->fs_lock[i]))
			return i;

	mutex_lock(&sbi->fs_lock[next_lock]);
	sbi->next_lock_num++;
	return next_lock;
}

/* Release the lock slot returned by mutex_lock_op(); negative = no-op. */
static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
{
	if (ilock < 0)
		return;
	BUG_ON(ilock >= NR_GLOBAL_LOCKS);
	mutex_unlock(&sbi->fs_lock[ilock]);
}
  479. /*
  480. * Check whether the given nid is within node id range.
  481. */
  482. static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
  483. {
  484. WARN_ON((nid >= NM_I(sbi)->max_nid));
  485. if (nid >= NM_I(sbi)->max_nid)
  486. return -EINVAL;
  487. return 0;
  488. }
  489. #define F2FS_DEFAULT_ALLOCATED_BLOCKS 1
  490. /*
  491. * Check whether the inode has blocks or not
  492. */
  493. static inline int F2FS_HAS_BLOCKS(struct inode *inode)
  494. {
  495. if (F2FS_I(inode)->i_xattr_nid)
  496. return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
  497. else
  498. return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
  499. }
/*
 * Reserve @count data blocks for @inode against the user block quota.
 * Returns false (reserving nothing) if the quota would be exceeded;
 * otherwise updates the inode's block count and the global counters.
 * All counters are protected by stat_lock.
 */
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (valid_block_count > sbi->user_block_count) {
		/* over quota: fail without touching any counter */
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}
/*
 * Return @count blocks of @inode to the free pool, under stat_lock.
 * Underflow of either counter is a bug.  Always returns 0.
 */
static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < (block_t) count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return 0;
}
/* Bump the per-type page counter and mark the sb checkpoint-dirty. */
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

/* Track one more dirty dentry page on this directory inode. */
static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

/* Drop the per-type page counter (sb dirtiness is left untouched). */
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

/* Track one fewer dirty dentry page on this directory inode. */
static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

/* Current value of the per-type page counter (see enum count_type). */
static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}
/*
 * Number of whole sections covered by the pages of @block_type, rounding
 * up: pages are converted to segments via log_blocks_per_seg and then to
 * sections via segs_per_sec.
 */
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec *
					(1 << sbi->log_blocks_per_seg);
	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}
  557. static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
  558. {
  559. block_t ret;
  560. spin_lock(&sbi->stat_lock);
  561. ret = sbi->total_valid_block_count;
  562. spin_unlock(&sbi->stat_lock);
  563. return ret;
  564. }
  565. static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
  566. {
  567. struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  568. /* return NAT or SIT bitmap */
  569. if (flag == NAT_BITMAP)
  570. return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
  571. else if (flag == SIT_BITMAP)
  572. return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
  573. return 0;
  574. }
/*
 * Address of the NAT or SIT version bitmap inside the checkpoint.
 * Both bitmaps live in the flexible area starting at
 * sit_nat_version_bitmap — SIT bitmap first, NAT bitmap right after —
 * hence the SIT-size offset applied for NAT_BITMAP.
 */
static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
	return &ckpt->sit_nat_version_bitmap + offset;
}
/*
 * Start block of the checkpoint pack that matches the current
 * checkpoint version.
 */
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * An odd-numbered checkpoint lives in cp segment 0,
	 * an even-numbered one in cp segment 1.
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}
  596. static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
  597. {
  598. return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
  599. }
  600. static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
  601. struct inode *inode,
  602. unsigned int count)
  603. {
  604. block_t valid_block_count;
  605. unsigned int valid_node_count;
  606. spin_lock(&sbi->stat_lock);
  607. valid_block_count = sbi->total_valid_block_count + (block_t)count;
  608. sbi->alloc_valid_block_count += (block_t)count;
  609. valid_node_count = sbi->total_valid_node_count + count;
  610. if (valid_block_count > sbi->user_block_count) {
  611. spin_unlock(&sbi->stat_lock);
  612. return false;
  613. }
  614. if (valid_node_count > sbi->total_node_count) {
  615. spin_unlock(&sbi->stat_lock);
  616. return false;
  617. }
  618. if (inode)
  619. inode->i_blocks += count;
  620. sbi->total_valid_node_count = valid_node_count;
  621. sbi->total_valid_block_count = valid_block_count;
  622. spin_unlock(&sbi->stat_lock);
  623. return true;
  624. }
/*
 * Return @count node blocks of @inode to the free pool, under stat_lock.
 * Underflow of any counter is a bug.
 */
static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	spin_lock(&sbi->stat_lock);

	BUG_ON(sbi->total_valid_block_count < count);
	BUG_ON(sbi->total_valid_node_count < count);
	BUG_ON(inode->i_blocks < count);

	inode->i_blocks -= count;
	sbi->total_valid_node_count -= count;
	sbi->total_valid_block_count -= (block_t)count;

	spin_unlock(&sbi->stat_lock);
}
  638. static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
  639. {
  640. unsigned int ret;
  641. spin_lock(&sbi->stat_lock);
  642. ret = sbi->total_valid_node_count;
  643. spin_unlock(&sbi->stat_lock);
  644. return ret;
  645. }
/*
 * Count one more valid inode; exceeding the total node count is a bug
 * (every inode consumes a node).
 */
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

/* Count one fewer valid inode; underflow is a bug.  Always returns 0. */
static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

/* Snapshot of the valid inode count, read under stat_lock. */
static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;

	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_inode_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}
/*
 * Drop a page-cache reference, optionally unlocking the page first.
 * NULL and ERR_PTR values are tolerated so lookup results can be passed
 * straight through.  Asking to unlock a page that is not locked is a bug.
 */
static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}
/*
 * Release the pages pinned in a dnode_of_data: the node page is unlocked
 * and put; the inode page is put (without unlocking) only when it is a
 * distinct page — node_page and inode_page may alias, and the aliased
 * page must not be put twice.
 */
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}
/*
 * Create a slab cache with SLAB_RECLAIM_ACCOUNT so its memory is
 * accounted as reclaimable; alignment is left at the default (0).
 */
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}
/* a raw node is an inode block iff its footer records nid == ino */
#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

/* Does the node stored in @page hold an inode block? */
static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = (struct f2fs_node *)page_address(page);

	return RAW_IS_INODE(p);
}

/* Block address array of a node: i_addr[] for inodes, addr[] otherwise. */
static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}
  703. static inline block_t datablock_addr(struct page *node_page,
  704. unsigned int offset)
  705. {
  706. struct f2fs_node *raw_node;
  707. __le32 *addr_array;
  708. raw_node = (struct f2fs_node *)page_address(node_page);
  709. addr_array = blkaddr_in_node(raw_node);
  710. return le32_to_cpu(addr_array[offset]);
  711. }
  712. static inline int f2fs_test_bit(unsigned int nr, char *addr)
  713. {
  714. int mask;
  715. addr += (nr >> 3);
  716. mask = 1 << (7 - (nr & 0x07));
  717. return mask & *addr;
  718. }
  719. static inline int f2fs_set_bit(unsigned int nr, char *addr)
  720. {
  721. int mask;
  722. int ret;
  723. addr += (nr >> 3);
  724. mask = 1 << (7 - (nr & 0x07));
  725. ret = mask & *addr;
  726. *addr |= mask;
  727. return ret;
  728. }
  729. static inline int f2fs_clear_bit(unsigned int nr, char *addr)
  730. {
  731. int mask;
  732. int ret;
  733. addr += (nr >> 3);
  734. mask = 1 << (7 - (nr & 0x07));
  735. ret = mask & *addr;
  736. *addr &= ~mask;
  737. return ret;
  738. }
/* bit positions used in f2fs_inode_info->flags (manipulated via set_bit) */
enum {
	FI_NEW_INODE,		/* indicate a newly allocated inode */
	FI_DIRTY_INODE,		/* indicate whether the inode is dirty */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* a pending ACL mode is stored in i_acl_mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_UPDATE_DIR,		/* should update inode block for consistency */
	FI_DELAY_IPUT,		/* used for the recovery path */
};
/* atomically set an FI_* flag on the in-core inode */
static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}
/* test an FI_* flag on the in-core inode; nonzero if set */
static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}
/* atomically clear an FI_* flag on the in-core inode */
static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}
/*
 * Record a pending mode (e.g. computed during ACL handling) in the
 * in-core inode and mark FI_ACL_MODE so a later path can apply it.
 */
static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}
/*
 * Clear FI_ACL_MODE if it is set; returns 1 if it was cleared, 0 otherwise.
 *
 * NOTE(review): the 'flag' parameter is ignored — this helper hardcodes
 * FI_ACL_MODE regardless of what the caller passes.  Presumably every
 * caller passes FI_ACL_MODE; verify call sites before relying on 'flag'.
 */
static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}
/* nonzero when the superblock is mounted read-only */
static inline int f2fs_readonly(struct super_block *sb)
{
	return sb->s_flags & MS_RDONLY;
}
  778. /*
  779. * file.c
  780. */
  781. int f2fs_sync_file(struct file *, loff_t, loff_t, int);
  782. void truncate_data_blocks(struct dnode_of_data *);
  783. void f2fs_truncate(struct inode *);
  784. int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
  785. int f2fs_setattr(struct dentry *, struct iattr *);
  786. int truncate_hole(struct inode *, pgoff_t, pgoff_t);
  787. int truncate_data_blocks_range(struct dnode_of_data *, int);
  788. long f2fs_ioctl(struct file *, unsigned int, unsigned long);
  789. long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
  790. /*
  791. * inode.c
  792. */
  793. void f2fs_set_inode_flags(struct inode *);
  794. struct inode *f2fs_iget(struct super_block *, unsigned long);
  795. void update_inode(struct inode *, struct page *);
  796. int update_inode_page(struct inode *);
  797. int f2fs_write_inode(struct inode *, struct writeback_control *);
  798. void f2fs_evict_inode(struct inode *);
  799. /*
  800. * namei.c
  801. */
  802. struct dentry *f2fs_get_parent(struct dentry *child);
  803. /*
  804. * dir.c
  805. */
  806. struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
  807. struct page **);
  808. struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
  809. ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
  810. void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
  811. struct page *, struct inode *);
  812. int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
  813. void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
  814. int f2fs_make_empty(struct inode *, struct inode *);
  815. bool f2fs_empty_dir(struct inode *);
/* link @inode into the dentry's parent directory under the dentry's name */
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
			inode);
}
  821. /*
  822. * super.c
  823. */
  824. int f2fs_sync_fs(struct super_block *, int);
  825. extern __printf(3, 4)
  826. void f2fs_msg(struct super_block *, const char *, const char *, ...);
  827. /*
  828. * hash.c
  829. */
  830. f2fs_hash_t f2fs_dentry_hash(const char *, size_t);
  831. /*
  832. * node.c
  833. */
  834. struct dnode_of_data;
  835. struct node_info;
  836. int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
  837. void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
  838. int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
  839. int truncate_inode_blocks(struct inode *, pgoff_t);
  840. int remove_inode_page(struct inode *);
  841. struct page *new_inode_page(struct inode *, const struct qstr *);
  842. struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
  843. void ra_node_page(struct f2fs_sb_info *, nid_t);
  844. struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
  845. struct page *get_node_page_ra(struct page *, int);
  846. void sync_inode_page(struct dnode_of_data *);
  847. int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
  848. bool alloc_nid(struct f2fs_sb_info *, nid_t *);
  849. void alloc_nid_done(struct f2fs_sb_info *, nid_t);
  850. void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
  851. void recover_node_page(struct f2fs_sb_info *, struct page *,
  852. struct f2fs_summary *, struct node_info *, block_t);
  853. int recover_inode_page(struct f2fs_sb_info *, struct page *);
  854. int restore_node_summary(struct f2fs_sb_info *, unsigned int,
  855. struct f2fs_summary_block *);
  856. void flush_nat_entries(struct f2fs_sb_info *);
  857. int build_node_manager(struct f2fs_sb_info *);
  858. void destroy_node_manager(struct f2fs_sb_info *);
  859. int __init create_node_manager_caches(void);
  860. void destroy_node_manager_caches(void);
  861. /*
  862. * segment.c
  863. */
  864. void f2fs_balance_fs(struct f2fs_sb_info *);
  865. void invalidate_blocks(struct f2fs_sb_info *, block_t);
  866. void clear_prefree_segments(struct f2fs_sb_info *);
  867. int npages_for_summary_flush(struct f2fs_sb_info *);
  868. void allocate_new_segments(struct f2fs_sb_info *);
  869. struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
  870. struct bio *f2fs_bio_alloc(struct block_device *, int);
  871. void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
  872. void write_meta_page(struct f2fs_sb_info *, struct page *);
  873. void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
  874. block_t, block_t *);
  875. void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
  876. block_t, block_t *);
  877. void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
  878. void recover_data_page(struct f2fs_sb_info *, struct page *,
  879. struct f2fs_summary *, block_t, block_t);
  880. void rewrite_node_page(struct f2fs_sb_info *, struct page *,
  881. struct f2fs_summary *, block_t, block_t);
  882. void write_data_summaries(struct f2fs_sb_info *, block_t);
  883. void write_node_summaries(struct f2fs_sb_info *, block_t);
  884. int lookup_journal_in_cursum(struct f2fs_summary_block *,
  885. int, unsigned int, int);
  886. void flush_sit_entries(struct f2fs_sb_info *);
  887. int build_segment_manager(struct f2fs_sb_info *);
  888. void destroy_segment_manager(struct f2fs_sb_info *);
  889. /*
  890. * checkpoint.c
  891. */
  892. struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
  893. struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
  894. long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
  895. int check_orphan_space(struct f2fs_sb_info *);
  896. void add_orphan_inode(struct f2fs_sb_info *, nid_t);
  897. void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
  898. int recover_orphan_inodes(struct f2fs_sb_info *);
  899. int get_valid_checkpoint(struct f2fs_sb_info *);
  900. void set_dirty_dir_page(struct inode *, struct page *);
  901. void add_dirty_dir_inode(struct inode *);
  902. void remove_dirty_dir_inode(struct inode *);
  903. struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
  904. void sync_dirty_dir_inodes(struct f2fs_sb_info *);
  905. void write_checkpoint(struct f2fs_sb_info *, bool);
  906. void init_orphan_info(struct f2fs_sb_info *);
  907. int __init create_checkpoint_caches(void);
  908. void destroy_checkpoint_caches(void);
  909. /*
  910. * data.c
  911. */
  912. int reserve_new_block(struct dnode_of_data *);
  913. void update_extent_cache(block_t, struct dnode_of_data *);
  914. struct page *find_data_page(struct inode *, pgoff_t, bool);
  915. struct page *get_lock_data_page(struct inode *, pgoff_t);
  916. struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
  917. int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
  918. int do_write_data_page(struct page *);
  919. /*
  920. * gc.c
  921. */
  922. int start_gc_thread(struct f2fs_sb_info *);
  923. void stop_gc_thread(struct f2fs_sb_info *);
  924. block_t start_bidx_of_node(unsigned int);
  925. int f2fs_gc(struct f2fs_sb_info *);
  926. void build_gc_manager(struct f2fs_sb_info *);
  927. int __init create_gc_caches(void);
  928. void destroy_gc_caches(void);
  929. /*
  930. * recovery.c
  931. */
  932. int recover_fsync_data(struct f2fs_sb_info *);
  933. bool space_for_roll_forward(struct f2fs_sb_info *);
  934. /*
  935. * debug.c
  936. */
  937. #ifdef CONFIG_F2FS_STAT_FS
/*
 * Per-filesystem statistics snapshot used by debug.c — presumably
 * allocated/filled by f2fs_build_stats() and torn down by
 * f2fs_destroy_stats() (declared below).
 */
struct f2fs_stat_info {
	struct list_head stat_list;	/* node in the global stat list */
	struct f2fs_sb_info *sbi;	/* the filesystem these stats describe */
	struct mutex stat_lock;		/* serializes access to this struct */
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;		/* extent cache hit/total — TODO confirm */
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;			/* presumably background GC count */
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];	/* one entry per active log type */
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;	/* memory footprint estimates */
};
  964. #define stat_inc_call_count(si) ((si)->call_count++)
  965. #define stat_inc_seg_count(sbi, type) \
  966. do { \
  967. struct f2fs_stat_info *si = sbi->stat_info; \
  968. (si)->tot_segs++; \
  969. if (type == SUM_TYPE_DATA) \
  970. si->data_segs++; \
  971. else \
  972. si->node_segs++; \
  973. } while (0)
  974. #define stat_inc_tot_blk_count(si, blks) \
  975. (si->tot_blks += (blks))
  976. #define stat_inc_data_blk_count(sbi, blks) \
  977. do { \
  978. struct f2fs_stat_info *si = sbi->stat_info; \
  979. stat_inc_tot_blk_count(si, blks); \
  980. si->data_blks += (blks); \
  981. } while (0)
  982. #define stat_inc_node_blk_count(sbi, blks) \
  983. do { \
  984. struct f2fs_stat_info *si = sbi->stat_info; \
  985. stat_inc_tot_blk_count(si, blks); \
  986. si->node_blks += (blks); \
  987. } while (0)
  988. int f2fs_build_stats(struct f2fs_sb_info *);
  989. void f2fs_destroy_stats(struct f2fs_sb_info *);
  990. void __init f2fs_create_root_stats(void);
  991. void f2fs_destroy_root_stats(void);
  992. #else
  993. #define stat_inc_call_count(si)
  994. #define stat_inc_seg_count(si, type)
  995. #define stat_inc_tot_blk_count(si, blks)
  996. #define stat_inc_data_blk_count(si, blks)
  997. #define stat_inc_node_blk_count(sbi, blks)
  998. static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
  999. static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
  1000. static inline void __init f2fs_create_root_stats(void) { }
  1001. static inline void f2fs_destroy_root_stats(void) { }
  1002. #endif
  1003. extern const struct file_operations f2fs_dir_operations;
  1004. extern const struct file_operations f2fs_file_operations;
  1005. extern const struct inode_operations f2fs_file_inode_operations;
  1006. extern const struct address_space_operations f2fs_dblock_aops;
  1007. extern const struct address_space_operations f2fs_node_aops;
  1008. extern const struct address_space_operations f2fs_meta_aops;
  1009. extern const struct inode_operations f2fs_dir_inode_operations;
  1010. extern const struct inode_operations f2fs_symlink_inode_operations;
  1011. extern const struct inode_operations f2fs_special_inode_operations;
  1012. #endif