f2fs.h 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062
  1. /**
  2. * fs/f2fs/f2fs.h
  3. *
  4. * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  5. * http://www.samsung.com/
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #ifndef _LINUX_F2FS_H
  12. #define _LINUX_F2FS_H
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/magic.h>
  20. /*
  21. * For mount options
  22. */
  23. #define F2FS_MOUNT_BG_GC 0x00000001
  24. #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
  25. #define F2FS_MOUNT_DISCARD 0x00000004
  26. #define F2FS_MOUNT_NOHEAP 0x00000008
  27. #define F2FS_MOUNT_XATTR_USER 0x00000010
  28. #define F2FS_MOUNT_POSIX_ACL 0x00000020
  29. #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
  30. #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
  31. #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
  32. #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option)
  33. #define ver_after(a, b) (typecheck(unsigned long long, a) && \
  34. typecheck(unsigned long long, b) && \
  35. ((long long)((a) - (b)) > 0))
/* block_t is 64-bit for on-disk address headroom; nid_t indexes the NAT */
typedef u64 block_t;
typedef u32 nid_t;

/* in-memory copy of the parsed mount options */
struct f2fs_mount_info {
	unsigned int opt;	/* F2FS_MOUNT_* bit flags */
};
  41. static inline __u32 f2fs_crc32(void *buff, size_t len)
  42. {
  43. return crc32_le(F2FS_SUPER_MAGIC, buff, len);
  44. }
  45. static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
  46. {
  47. return f2fs_crc32(buff, buff_size) == blk_crc;
  48. }
/*
 * For checkpoint manager
 */
/* selects which version bitmap stored in the checkpoint is meant */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};
  72. #define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
  73. #define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
  74. #define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
  75. #define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
  76. #define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
  77. #define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
  78. static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
  79. {
  80. int before = nats_in_cursum(rs);
  81. rs->n_nats = cpu_to_le16(before + i);
  82. return before;
  83. }
  84. static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
  85. {
  86. int before = sits_in_cursum(rs);
  87. rs->n_sits = cpu_to_le16(before + i);
  88. return before;
  89. }
/*
 * For INODE and NODE manager
 */
#define XATTR_NODE_OFFSET	(-1)	/*
					 * store xattrs to one node block per
					 * file keeping -1 as its node offset to
					 * distinguish from index node blocks.
					 */
#define RDONLY_NODE		1	/*
					 * specify a read-only mode when getting
					 * a node block. 0 is read-write mode.
					 * used by get_dnode_of_data().
					 */
#define F2FS_LINK_MAX		32000	/* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01

/* f2fs-private part of an in-memory inode; embeds the VFS inode */
struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned int i_current_depth;	/* use only in directory structure */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags;		/* use to pass per-file flags (FI_*) */
	unsigned long long data_version;/* latest version of data for fsync */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	struct extent_info ext;		/* in-memory extent cache entry */
};
/* Copy an on-disk (little-endian) extent into the in-memory cache entry. */
static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}

/* Write the cached extent back into its on-disk (little-endian) form. */
static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}

/* node manager state, one instance per mounted filesystem */
struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t init_scan_nid;		/* the first nid to be scanned */
	nid_t next_scan_nid;		/* the next nid to be scanned */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node id */
	struct mutex build_lock;	/* lock for build free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information are dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t data_blkaddr;		/* NOTE(review): original comment said
					 * "block address of the node block";
					 * the field name suggests the data
					 * block's address — confirm against
					 * get_dnode_of_data() users
					 */
};
  182. static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
  183. struct page *ipage, struct page *npage, nid_t nid)
  184. {
  185. dn->inode = inode;
  186. dn->inode_page = ipage;
  187. dn->node_page = npage;
  188. dn->nid = nid;
  189. dn->inode_page_locked = 0;
  190. }
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

/* index of each active log */
enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};

/* segment manager state, one instance per mounted filesystem */
struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */
};

/*
 * For directory operation
 */
#define NODE_DIR1_BLOCK	(ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK	(ADDRS_PER_INODE + 2)
#define NODE_IND1_BLOCK	(ADDRS_PER_INODE + 3)
#define NODE_IND2_BLOCK	(ADDRS_PER_INODE + 4)
#define NODE_DIND_BLOCK	(ADDRS_PER_INODE + 5)

/*
 * For superblock
 */

/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};
/*
 * FS_LOCK nesting subclasses for the lock validator:
 *
 * The locking order between these classes is
 * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
 * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
 *
 * NOTE(review): the comment above lists NODE_WRITE before NODE_NEW and
 * NODE_TRUNC, but the enum below declares NODE_NEW and NODE_TRUNC
 * first.  mutex_lock_op() uses the enum value as the lockdep subclass,
 * so confirm which ordering is authoritative.
 */
enum lock_type {
	RENAME,		/* for renaming operations */
	DENTRY_OPS,	/* for directory operations */
	DATA_WRITE,	/* for data write */
	DATA_NEW,	/* for data allocation */
	DATA_TRUNC,	/* for data truncate */
	NODE_NEW,	/* for node allocation */
	NODE_TRUNC,	/* for node truncate */
	NODE_WRITE,	/* for node write */
	NR_LOCK_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			with waiting the bio's completion
 * ...			Only can be used with META.
 */
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};
/* per-mount f2fs state, hung off the VFS super_block's s_fs_info */
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */
	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
	sector_t last_block_in_bio[NR_PAGE_TYPE];	/* last block number */
	struct rw_semaphore bio_sem;		/* IO semaphore */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* for checkpoint procedure */
	struct mutex fs_lock[NR_LOCK_TYPE];	/* for blocking FS operations */
	struct mutex write_inode;		/* mutex for write inode */
	struct mutex writepages;		/* mutex for writepages() */
	int por_doing;				/* recovery is doing or not */

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	struct mutex orphan_inode_mutex;	/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */
	unsigned int n_dirty_dirs;		/* # of dir inodes */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread *gc_thread;	/* GC thread */

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	unsigned int last_victim[2];		/* last victim segment # */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int bg_gc;				/* background gc calls */
	spinlock_t stat_lock;			/* lock for stat operations */
};
  358. /*
  359. * Inline functions
  360. */
  361. static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
  362. {
  363. return container_of(inode, struct f2fs_inode_info, vfs_inode);
  364. }
  365. static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
  366. {
  367. return sb->s_fs_info;
  368. }
  369. static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
  370. {
  371. return (struct f2fs_super_block *)(sbi->raw_super);
  372. }
  373. static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
  374. {
  375. return (struct f2fs_checkpoint *)(sbi->ckpt);
  376. }
  377. static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
  378. {
  379. return (struct f2fs_nm_info *)(sbi->nm_info);
  380. }
  381. static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
  382. {
  383. return (struct f2fs_sm_info *)(sbi->sm_info);
  384. }
  385. static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
  386. {
  387. return (struct sit_info *)(SM_I(sbi)->sit_info);
  388. }
  389. static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
  390. {
  391. return (struct free_segmap_info *)(SM_I(sbi)->free_info);
  392. }
  393. static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
  394. {
  395. return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
  396. }
/* Mark the superblock as needing a checkpoint. */
static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

/* Clear the checkpoint-needed flag. */
static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

/*
 * Take one of the global FS operation locks; the lock_type value
 * doubles as the lockdep nesting subclass.
 */
static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_lock_nested(&sbi->fs_lock[t], t);
}

/* Release the FS operation lock taken by mutex_lock_op(). */
static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
	mutex_unlock(&sbi->fs_lock[t]);
}
  413. /*
  414. * Check whether the given nid is within node id range.
  415. */
  416. static inline void check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
  417. {
  418. BUG_ON((nid >= NM_I(sbi)->max_nid));
  419. }
#define F2FS_DEFAULT_ALLOCATED_BLOCKS	1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	/* an xattr node accounts for one additional block */
	if (F2FS_I(inode)->i_xattr_nid)
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
	else
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

/*
 * Charge @count new blocks to @inode.  Returns false, with no counters
 * touched, when user_block_count would be exceeded.
 */
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				struct inode *inode, blkcnt_t count)
{
	block_t valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (valid_block_count > sbi->user_block_count) {
		/* out of space: reject without touching any counter */
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

/*
 * Return @count blocks from @inode to the free pool.  Counter underflow
 * indicates corruption and triggers BUG_ON.  Always returns 0.
 */
static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < (block_t) count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

/* Bump the global page counter and mark the sb checkpoint-dirty. */
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

/* One more dirty dentry page belongs to @inode. */
static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

/* Drop the global page counter of @count_type. */
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

/* One dirty dentry page of @inode has been dropped. */
static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

/* Current value of the global page counter of @count_type. */
static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

/* Snapshot of total_valid_block_count taken under stat_lock. */
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_block_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}
  489. static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
  490. {
  491. struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  492. /* return NAT or SIT bitmap */
  493. if (flag == NAT_BITMAP)
  494. return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
  495. else if (flag == SIT_BITMAP)
  496. return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
  497. return 0;
  498. }
  499. static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
  500. {
  501. struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  502. int offset = (flag == NAT_BITMAP) ? ckpt->sit_ver_bitmap_bytesize : 0;
  503. return &ckpt->sit_nat_version_bitmap + offset;
  504. }
/*
 * Disk address of the checkpoint pack holding the current checkpoint.
 * Two packs alternate starting at cp_blkaddr; the checkpoint version
 * decides which one is live.
 */
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

	start_addr = le64_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * odd numbered checkpoint should be at cp segment 0
	 * and even numbered checkpoint must be at cp segment 1
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}
  519. static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
  520. {
  521. return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
  522. }
  523. static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
  524. struct inode *inode,
  525. unsigned int count)
  526. {
  527. block_t valid_block_count;
  528. unsigned int valid_node_count;
  529. spin_lock(&sbi->stat_lock);
  530. valid_block_count = sbi->total_valid_block_count + (block_t)count;
  531. sbi->alloc_valid_block_count += (block_t)count;
  532. valid_node_count = sbi->total_valid_node_count + count;
  533. if (valid_block_count > sbi->user_block_count) {
  534. spin_unlock(&sbi->stat_lock);
  535. return false;
  536. }
  537. if (valid_node_count > sbi->total_node_count) {
  538. spin_unlock(&sbi->stat_lock);
  539. return false;
  540. }
  541. if (inode)
  542. inode->i_blocks += count;
  543. sbi->total_valid_node_count = valid_node_count;
  544. sbi->total_valid_block_count = valid_block_count;
  545. spin_unlock(&sbi->stat_lock);
  546. return true;
  547. }
/*
 * Give back @count node blocks reserved by inc_valid_node_count().
 * Counter underflow indicates corruption and triggers BUG_ON.
 */
static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < count);
	BUG_ON(sbi->total_valid_node_count < count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_node_count -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
}

/* Snapshot of total_valid_node_count taken under stat_lock. */
static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_node_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

/* Account one more in-use inode; cannot reach total_node_count. */
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

/* Account one released inode.  Always returns 0. */
static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

/* Snapshot of total_valid_inode_count taken under stat_lock. */
static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_inode_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}
  592. static inline void f2fs_put_page(struct page *page, int unlock)
  593. {
  594. if (!page || IS_ERR(page))
  595. return;
  596. if (unlock) {
  597. BUG_ON(!PageLocked(page));
  598. unlock_page(page);
  599. }
  600. page_cache_release(page);
  601. }
/*
 * Release the page references held by a dnode_of_data.  The node page
 * is unlocked on put; the inode page is put without unlocking, and is
 * skipped entirely when it is the same page as the node page.
 */
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

/* Slab-cache creation wrapper with the flags f2fs uses everywhere. */
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
		size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

/* a node block is an inode block when its footer nid equals its ino */
#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

/* Does @page hold an inode block? */
static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = (struct f2fs_node *)page_address(page);
	return RAW_IS_INODE(p);
}

/* Start of the block address array inside an inode or direct node. */
static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}
  626. static inline block_t datablock_addr(struct page *node_page,
  627. unsigned int offset)
  628. {
  629. struct f2fs_node *raw_node;
  630. __le32 *addr_array;
  631. raw_node = (struct f2fs_node *)page_address(node_page);
  632. addr_array = blkaddr_in_node(raw_node);
  633. return le32_to_cpu(addr_array[offset]);
  634. }
  635. static inline int f2fs_test_bit(unsigned int nr, char *addr)
  636. {
  637. int mask;
  638. addr += (nr >> 3);
  639. mask = 1 << (7 - (nr & 0x07));
  640. return mask & *addr;
  641. }
  642. static inline int f2fs_set_bit(unsigned int nr, char *addr)
  643. {
  644. int mask;
  645. int ret;
  646. addr += (nr >> 3);
  647. mask = 1 << (7 - (nr & 0x07));
  648. ret = mask & *addr;
  649. *addr |= mask;
  650. return ret;
  651. }
  652. static inline int f2fs_clear_bit(unsigned int nr, char *addr)
  653. {
  654. int mask;
  655. int ret;
  656. addr += (nr >> 3);
  657. mask = 1 << (7 - (nr & 0x07));
  658. ret = mask & *addr;
  659. *addr &= ~mask;
  660. return ret;
  661. }
/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_NEED_CP,		/* need to do checkpoint during fsync */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
};

/* Atomically set per-inode flag @flag. */
static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}

/* Atomically test per-inode flag @flag. */
static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

/* Atomically clear per-inode flag @flag. */
static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}

/* Stash @mode and flag that an ACL-driven mode change is pending. */
static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}
  687. static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
  688. {
  689. if (is_inode_flag_set(fi, FI_ACL_MODE)) {
  690. clear_inode_flag(fi, FI_ACL_MODE);
  691. return 1;
  692. }
  693. return 0;
  694. }
  695. /*
  696. * file.c
  697. */
  698. int f2fs_sync_file(struct file *, loff_t, loff_t, int);
  699. void truncate_data_blocks(struct dnode_of_data *);
  700. void f2fs_truncate(struct inode *);
  701. int f2fs_setattr(struct dentry *, struct iattr *);
  702. int truncate_hole(struct inode *, pgoff_t, pgoff_t);
  703. long f2fs_ioctl(struct file *, unsigned int, unsigned long);
  704. /*
  705. * inode.c
  706. */
  707. void f2fs_set_inode_flags(struct inode *);
  708. struct inode *f2fs_iget_nowait(struct super_block *, unsigned long);
  709. struct inode *f2fs_iget(struct super_block *, unsigned long);
  710. void update_inode(struct inode *, struct page *);
  711. int f2fs_write_inode(struct inode *, struct writeback_control *);
  712. void f2fs_evict_inode(struct inode *);
  713. /*
  714. * namei.c
  715. */
  716. struct dentry *f2fs_get_parent(struct dentry *child);
/*
 * dir.c -- directory entry lookup and manipulation
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
					struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
			struct page *, struct inode *);
void init_dent_inode(struct dentry *, struct page *);
int f2fs_add_link(struct dentry *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);
/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, int);
/*
 * node.c -- node page and NAT (node address table) management
 */
struct dnode_of_data;
struct node_info;
int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
int new_inode_page(struct inode *, struct dentry *);
struct page *new_node_page(struct dnode_of_data *, unsigned int);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
			struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int create_node_manager_caches(void);
void destroy_node_manager_caches(void);
/*
 * segment.c -- segment allocation, page writeback and summary handling
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, sector_t, int, gfp_t);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
int write_meta_page(struct f2fs_sb_info *, struct page *,
			struct writeback_control *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
			block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
			block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
			struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
			struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
			int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void reset_victim_segmap(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);
/*
 * checkpoint.c -- meta page I/O, orphan inode tracking, checkpointing
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int check_orphan_space(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void block_operations(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool, bool);
void init_orphan_info(struct f2fs_sb_info *);
int create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);
/*
 * data.c -- data page lookup/allocation and the extent cache
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);
/*
 * gc.c -- garbage collection (cleaning) thread and entry points
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int);
int f2fs_gc(struct f2fs_sb_info *, int);
void build_gc_manager(struct f2fs_sb_info *);
int create_gc_caches(void);
void destroy_gc_caches(void);
/*
 * recovery.c -- roll-forward recovery of fsynced data after a crash
 */
void recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);
/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
/*
 * Per-superblock statistics, only built when CONFIG_F2FS_STAT_FS is set.
 * Hot paths update it through the stat_inc_* macros below, which reach
 * the instance via sbi->stat_info; f2fs_build_stats()/f2fs_destroy_stats()
 * manage its lifetime.
 */
struct f2fs_stat_info {
	struct list_head stat_list;	/* link in a global list of stat infos */
	struct f2fs_sb_info *sbi;	/* owning superblock */
	struct mutex stat_lock;		/* NOTE(review): presumably serializes
					 * readers vs. updaters -- confirm in
					 * debug.c */
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;		/* extent cache hit / total counters
					 * -- presumably; see data.c */
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;			/* background GC invocation count */
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;	/* call_count bumped by
					 * stat_inc_call_count() */
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];	/* current segment per log type */
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int segment_count[2];	/* two buckets -- presumably data vs.
					 * node; TODO confirm in debug.c */
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;	/* memory footprint accounting */
};
  874. #define stat_inc_call_count(si) ((si)->call_count++)
  875. #define stat_inc_seg_count(sbi, type) \
  876. do { \
  877. struct f2fs_stat_info *si = sbi->stat_info; \
  878. (si)->tot_segs++; \
  879. if (type == SUM_TYPE_DATA) \
  880. si->data_segs++; \
  881. else \
  882. si->node_segs++; \
  883. } while (0)
  884. #define stat_inc_tot_blk_count(si, blks) \
  885. (si->tot_blks += (blks))
  886. #define stat_inc_data_blk_count(sbi, blks) \
  887. do { \
  888. struct f2fs_stat_info *si = sbi->stat_info; \
  889. stat_inc_tot_blk_count(si, blks); \
  890. si->data_blks += (blks); \
  891. } while (0)
  892. #define stat_inc_node_blk_count(sbi, blks) \
  893. do { \
  894. struct f2fs_stat_info *si = sbi->stat_info; \
  895. stat_inc_tot_blk_count(si, blks); \
  896. si->node_blks += (blks); \
  897. } while (0)
int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void destroy_root_stats(void);
#else
/*
 * CONFIG_F2FS_STAT_FS disabled: the stat hooks compile to nothing.
 * Beware that macro arguments are then never evaluated, so callers must
 * not rely on side effects inside them.  (The dummy parameter names do
 * not all match the enabled variants above -- harmless but untidy.)
 */
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void destroy_root_stats(void) { }
#endif
/* Operation tables; the definitions live in the corresponding .c files. */
extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif