inode.c

/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        __u64 blknum = 0;
        int err = 0, ret;
        struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        } else if (err == -EINVAL) {
                                nilfs_error(inode->i_sb, __func__,
                                            "broken bmap (inode=%lu)\n",
                                            inode->i_ino);
                                err = -EIO;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
                                                      to proper value */
        } else if (ret == -ENOENT) {
                /* not found is not error (e.g. hole); must return without
                   the mapped state flag. */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

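/*
 * nilfs_writepages() - writepages method for regular files.
 *
 * For WB_SYNC_ALL writeback this constructs a data sync segment covering
 * the requested byte range; asynchronous writeback is left to the segment
 * constructor, so nothing is done here in that case.
 */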
static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

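/*
 * nilfs_writepage() - writepage method for regular files.
 *
 * Pages are never written back one at a time in NILFS; the page is
 * redirtied and unlocked, and a whole segment is constructed (or a flush
 * requested) instead.
 */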
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

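/*
 * nilfs_set_page_dirty() - set_page_dirty method for regular files.
 *
 * When the page actually becomes dirty, the inode is registered to the
 * list of dirty files with one page's worth of blocks so that the segment
 * constructor picks it up.
 */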
static int nilfs_set_page_dirty(struct page *page)
{
        int ret = __set_page_dirty_buffers(page);

        if (ret) {
                struct inode *inode = page->mapping->host;
                struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
                unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(sbi, inode, nr_dirty);
        }
        return ret;
}

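/*
 * nilfs_write_begin() - write_begin method for regular files.
 *
 * A transaction is opened before the generic block_write_begin() work so
 * that block allocation done through nilfs_get_block() is bracketed by
 * nilfs_transaction_begin()/commit(); the transaction is aborted on error.
 */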
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        *pagep = NULL;
        err = block_write_begin(file, mapping, pos, len, flags, pagep,
                                fsdata, nilfs_get_block);
        if (unlikely(err))
                nilfs_transaction_abort(inode->i_sb);
        return err;
}

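/*
 * nilfs_write_end() - write_end method for regular files.
 *
 * Counts the buffers newly dirtied by this write (those still clean
 * before generic_write_end()), completes the write, registers the inode
 * as dirty, and commits the transaction opened in nilfs_write_begin().
 */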
static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

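/*
 * nilfs_direct_IO() - direct_IO method for regular files.
 *
 * Direct I/O is supported for reads only; a write request returns 0 so
 * that the caller falls back to buffered writes.
 */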
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t size;

        if (rw == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, nilfs_get_block, NULL);
        return size;
}

const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .sync_page              = block_sync_page,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};

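/*
 * nilfs_new_inode() - allocate a new inode on the ifile.
 * @dir: parent directory inode
 * @mode: file mode of the new inode
 *
 * Allocates an in-core inode, creates the corresponding entry in the
 * ifile, initializes owner, timestamps, flags and (for regular files,
 * directories and symlinks) the bmap, and hashes the inode.  Returns the
 * new inode or an ERR_PTR value on failure.
 */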
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
        struct super_block *sb = dir->i_sb;
        struct nilfs_sb_info *sbi = NILFS_SB(sb);
        struct inode *inode;
        struct nilfs_inode_info *ii;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;

        err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */

        atomic_inc(&sbi->s_inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_bmap;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = NILFS_I(dir)->i_flags;
        if (S_ISLNK(mode))
                ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
        if (!S_ISDIR(mode))
                ii->i_flags &= ~NILFS_DIRSYNC_FL;

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        ii->i_cno = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
        insert_inode_hash(inode);

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_acl; /* never occur. When supporting
                                    nilfs_init_acl(), proper cancellation of
                                    above jobs should be considered */

        return inode;

 failed_acl:
 failed_bmap:
        inode->i_nlink = 0;
        iput(inode);  /* raw_inode will be deleted through
                         generic_delete_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
                         called */
 failed:
        return ERR_PTR(err);
}

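/*
 * nilfs_free_inode() - release an inode and its ifile entry.
 *
 * Clears the in-core inode, deletes the corresponding entry from the
 * ifile, and decrements the inode counter.
 */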
void nilfs_free_inode(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        struct nilfs_sb_info *sbi = NILFS_SB(sb);

        clear_inode(inode);
        /* XXX: check error code? Is there any thing I can do? */
        (void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
        atomic_dec(&sbi->s_inodes_count);
}

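/*
 * nilfs_set_inode_flags() - propagate NILFS-specific inode flags to the
 * generic i_flags bits (S_SYNC, S_APPEND, S_IMMUTABLE, S_NOATIME,
 * S_DIRSYNC) and exclude __GFP_FS from page cache allocations.
 */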
void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;

        inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
                            S_DIRSYNC);
        if (flags & NILFS_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & NILFS_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & NILFS_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
        if (flags & NILFS_NOATIME_FL)
#endif
                inode->i_flags |= S_NOATIME;
        if (flags & NILFS_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

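/*
 * nilfs_read_inode_common() - copy fields from an on-disk nilfs_inode
 * into the in-core inode.  Note that NILFS does not store atime; the
 * mtime fields are used for both i_atime and i_mtime.  Returns -EINVAL
 * for a deleted inode (zero link count and mode).
 */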
int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
        inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0 && inode->i_mode == 0)
                return -EINVAL; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        ii->i_cno = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}

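/*
 * __nilfs_read_inode() - read an inode from the ifile and set up its
 * operations.  Looks up the on-disk inode block, fills the in-core inode
 * via nilfs_read_inode_common(), and installs the file, directory,
 * symlink, or special inode operations according to i_mode.
 */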
static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
                              struct inode *inode)
{
        struct nilfs_sb_info *sbi = NILFS_SB(sb);
        struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(dat)->mi_sem);     /* XXX */
        err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        nilfs_set_inode_flags(inode);
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        return err;
}

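/*
 * nilfs_iget() - obtain an inode by number.  Returns a cached inode if it
 * is already in the inode cache; otherwise reads it in with
 * __nilfs_read_inode() and unlocks the new inode.
 */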
struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = iget_locked(sb, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

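/*
 * nilfs_write_inode_common() - copy fields from an in-core inode into its
 * on-disk nilfs_inode.  When @has_bmap is set the bmap is written as
 * well; otherwise the device number is encoded for special inodes.
 */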
void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(inode->i_uid);
        raw_inode->i_gid = cpu_to_le32(inode->i_gid);
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /* When extending inode, nilfs->ns_inode_size should be checked
           for substitutions of appended fields */
}

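/*
 * nilfs_update_inode() - write back an in-core inode into the mapped
 * ifile block @ibh, zeroing the entry first for a freshly created inode
 * and marking the inode entry dirty.
 */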
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct nilfs_sb_info *sbi = NILFS_SB(sb);
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
        set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
                /* XXX: call with has_bmap = 0 is a workaround to avoid
                   deadlock of bmap. This delays update of i_bmap to just
                   before writing */
        nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}

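/*
 * nilfs_truncate_bmap() - remove bmap entries at and beyond block @from.
 * The truncation is done in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS
 * blocks, relaxing segment construction pressure between chunks.
 */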
#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        unsigned long b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
 repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

 failed:
        if (ret == -EINVAL)
                nilfs_error(ii->vfs_inode.i_sb, __func__,
                            "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
        else
                nilfs_warning(ii->vfs_inode.i_sb, __func__,
                              "failed to truncate bmap (ino=%lu, err=%d)",
                              ii->vfs_inode.i_ino, ret);
}

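/*
 * nilfs_truncate() - truncate blocks beyond the new i_size.  Runs inside
 * a transaction: the partial last block is zeroed with
 * block_truncate_page(), the bmap is shrunk, timestamps are updated, and
 * the inode is marked dirty.
 */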
void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But truncate has no return value. */
}

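/*
 * nilfs_delete_inode() - called when the last reference to an unlinked
 * inode is dropped.  Truncates all remaining pages and bmap entries and
 * frees the ifile entry inside a transaction; a bad inode is simply
 * cleared.
 */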
void nilfs_delete_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (unlikely(is_bad_inode(inode))) {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        if (inode->i_data.nrpages)
                truncate_inode_pages(&inode->i_data, 0);

        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        nilfs_free_inode(inode);
        /* nilfs_free_inode() marks inode buffer dirty */
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But delete_inode has no return value. */
}

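/*
 * nilfs_setattr() - setattr method.  Validates the attribute change,
 * applies it inside a transaction, and propagates mode changes to the
 * ACLs; the transaction is aborted if anything fails.
 */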
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;
        err = inode_setattr(inode, iattr);
        if (!err && (iattr->ia_valid & ATTR_MODE))
                err = nilfs_acl_chmod(inode);
        if (likely(!err))
                err = nilfs_transaction_commit(sb);
        else
                nilfs_transaction_abort(sb);

        return err;
}

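/*
 * nilfs_load_inode_block() - get (and cache in ii->i_bh) the buffer head
 * of the ifile block that holds the on-disk inode.  The reference
 * returned in *pbh must be released with brelse() by the caller.
 */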
int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
                           struct buffer_head **pbh)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&sbi->s_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&sbi->s_inode_lock);
                err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
                                                  pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&sbi->s_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&sbi->s_inode_lock);
        return 0;
}

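/*
 * nilfs_inode_dirty() - return whether the inode is currently registered
 * as dirty or busy on the segment constructor's lists.
 */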
int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&sbi->s_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&sbi->s_inode_lock);
        }
        return ret;
}

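/*
 * nilfs_set_file_dirty() - account @nr_dirty newly dirtied blocks and,
 * if the inode is not already queued or being written, put it on the
 * list of dirty files so the segment constructor will pick it up.
 */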
int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
                         unsigned nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&sbi->s_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /* Because this routine may race with nilfs_dispose_list(),
                   we have to check NILFS_I_QUEUED here, too. */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /* This will happen when somebody is freeing
                           this inode. */
                        nilfs_warning(sbi->s_super, __func__,
                                      "cannot get inode (ino=%lu)\n",
                                      inode->i_ino);
                        spin_unlock(&sbi->s_inode_lock);
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           freeing inode */
                }
                list_del(&ii->i_dirty);
                list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&sbi->s_inode_lock);
        return 0;
}

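/*
 * nilfs_mark_inode_dirty() - flush the in-core inode into its ifile block
 * and mark both the block and the ifile dirty so the change is written
 * by the next segment construction.
 */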
int nilfs_mark_inode_dirty(struct inode *inode)
{
        struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(sbi, inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.\n");
                return err;
        }
        nilfs_update_inode(inode, ibh);
        nilfs_mdt_mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(sbi->s_ifile);
        brelse(ibh);
        return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.\n");
                dump_stack();
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        nilfs_mark_inode_dirty(inode);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}