/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_discard,
	Opt_noheap,
	Opt_nouser_xattr,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_err, NULL},
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
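
/*
 * Example (illustrative only, not from the original source): a mount such as
 *
 *	mount -t f2fs -o background_gc=on,discard,active_logs=4 /dev/sdXn /mnt
 *
 * hands the comma-separated string after "-o" to parse_options() below,
 * which matches each token against f2fs_tokens above.
 */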

static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strncmp(name, "on", 2))
				set_opt(sbi, BG_GC);
			else if (!strncmp(name, "off", 3))
				clear_opt(sbi, BG_GC);
			else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
#else
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_dents, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	rwlock_init(&fi->ext.ext_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if (!inode_unhashed(inode) && inode->i_state & I_SYNC)
		return 0;
	return generic_drop_inode(inode);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
	return;
}
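
/*
 * Note: the in-memory inode is freed through an RCU callback (see
 * f2fs_i_callback() below) so that lockless RCU-walk path lookups that may
 * still be inspecting the inode do not race with the slab free.
 */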

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	f2fs_destroy_stats(sbi);
	stop_gc_thread(sbi);

	write_checkpoint(sbi, true);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	brelse(sbi->raw_super_buf);
	kfree(sbi);
}
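
/*
 * A synchronous sync_fs request writes a full checkpoint under gc_mutex;
 * an asynchronous one only calls f2fs_balance_fs(), which may trigger
 * cleaning when free segments run low (behavior of f2fs_balance_fs() as
 * implemented elsewhere, summarized here for context).
 */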

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	trace_f2fs_sync_fs(sb, sync);

	if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
		return 0;

	if (sync) {
		mutex_lock(&sbi->gc_mutex);
		write_checkpoint(sbi, false);
		mutex_unlock(&sbi->gc_mutex);
	} else {
		f2fs_balance_fs(sbi);
	}

	return 0;
}

static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
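
/*
 * statfs: the free space reported to userspace excludes the overprovision
 * segments held back from user data, and f_files/f_ffree are derived from
 * the node/inode counters rather than from block usage.
 */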

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count;
	buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC))
		seq_printf(seq, ",background_gc=%s", "on");
	else
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");

	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so no point in checking GC conditions.
	 */
	if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
		goto skip;

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			f2fs_sync_fs(sb, 1);
		}
	} else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;

restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}

static struct super_operations f2fs_sops = {
	.alloc_inode = f2fs_alloc_inode,
	.drop_inode = f2fs_drop_inode,
	.destroy_inode = f2fs_destroy_inode,
	.write_inode = f2fs_write_inode,
	.dirty_inode = f2fs_dirty_inode,
	.show_options = f2fs_show_options,
	.evict_inode = f2fs_evict_inode,
	.put_super = f2fs_put_super,
	.sync_fs = f2fs_sync_fs,
	.freeze_fs = f2fs_freeze,
	.unfreeze_fs = f2fs_unfreeze,
	.statfs = f2fs_statfs,
	.remount_fs = f2fs_remount,
};

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (ino < F2FS_ROOT_INO(sbi))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
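
/*
 * max_file_size() below mirrors the inode's block index layout: an inode
 * holds ADDRS_PER_INODE data addresses, then two direct node blocks, two
 * indirect node blocks and one double-indirect node block, i.e.
 *
 *	ADDRS_PER_INODE
 *	+ 2 * ADDRS_PER_BLOCK
 *	+ 2 * ADDRS_PER_BLOCK * NIDS_PER_BLOCK
 *	+ ADDRS_PER_BLOCK * NIDS_PER_BLOCK * NIDS_PER_BLOCK
 *
 * data blocks in total, shifted left by the block-size bits to get bytes
 * (a few terabytes with 4KB blocks).
 */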
static loff_t max_file_size(unsigned bits)
{
	loff_t result = ADDRS_PER_INODE;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	result <<= bits;
	return result;
}

static int sanity_check_raw_super(struct super_block *sb,
			struct f2fs_super_block *raw_super)
{
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",
			PAGE_CACHE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return 1;
	}

	if (le32_to_cpu(raw_super->log_sectorsize) !=
					F2FS_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) !=
					F2FS_LOG_SECTORS_PER_BLOCK) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
		return 1;
	}
	return 0;
}
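
/*
 * sanity_check_ckpt() rejects a checkpoint whose metadata segments
 * (checkpoint, SIT, NAT, SSA plus the reserved segments) would cover the
 * whole volume, or which carries CP_ERROR_FLAG from an earlier on-disk
 * error and therefore needs fsck.
 */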
static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (fsmeta >= total)
		return 1;

	if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);
}

static int validate_superblock(struct super_block *sb,
		struct f2fs_super_block **raw_super,
		struct buffer_head **raw_super_buf, sector_t block)
{
	const char *super = (block == 0 ? "first" : "second");

	/* read f2fs raw super block */
	*raw_super_buf = sb_bread(sb, block);
	if (!*raw_super_buf) {
		f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
				super);
		return -EIO;
	}

	*raw_super = (struct f2fs_super_block *)
		((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);

	/* sanity checking of raw super */
	if (!sanity_check_raw_super(sb, *raw_super))
		return 0;

	f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
				"in %s superblock", super);
	return -EINVAL;
}
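
/*
 * Mount path overview: f2fs_fill_super() sets the block size, reads and
 * validates the superblock (falling back to the second copy), applies the
 * default and user-supplied mount options, loads the checkpoint through the
 * meta inode, brings up the segment/node/GC managers, recovers orphan
 * inodes and fsynced data, and finally instantiates the root dentry and the
 * background GC thread.
 */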
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
	if (err) {
		brelse(raw_super_buf);
		/* check secondary superblock when primary failed */
		err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
		if (err)
			goto free_sb_buf;
	}
	sb->s_fs_info = sbi;
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	err = parse_options(sb, (char *)data);
	if (err)
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_GLOBAL_LOCKS; i++)
		mutex_init(&sbi->fs_lock[i]);
	mutex_init(&sbi->node_write);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan nodes, free them */
	err = -EINVAL;
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);
		if (err)
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
	}

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (!(sb->s_flags & MS_RDONLY)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto fail;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto fail;

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
	}

	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static struct file_system_type f2fs_fs_type = {
	.owner = THIS_MODULE,
	.name = "f2fs",
	.mount = f2fs_mount,
	.kill_sb = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), NULL);
	if (f2fs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto fail;
	err = create_gc_caches();
	if (err)
		goto fail;
	err = create_checkpoint_caches();
	if (err)
		goto fail;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto fail;
	f2fs_create_root_stats();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	destroy_checkpoint_caches();
	destroy_gc_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");