/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
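
/*
 * NILFS writes dirty data through its own segment constructor rather than
 * the generic writeback path, so this only starts a data sync segment for
 * the inode when synchronous writeback (WB_SYNC_ALL) is requested.
 */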
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
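
/*
 * Leave the page to the segment constructor: redirty and unlock it,
 * construct a whole segment for synchronous writeback, and kick the
 * segment daemon for this inode when called from reclaim.
 */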
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
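
/*
 * Dirty the page's buffers and, when the page was newly dirtied, account
 * one block per buffer of the page via nilfs_set_file_dirty().
 */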
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		nilfs_truncate(inode);
	}
}
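
/*
 * nilfs_write_begin()/nilfs_write_end() wrap each buffered write in a
 * NILFS transaction so that the inserted data blocks and the inode update
 * are committed (or aborted) together.
 */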
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
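
/*
 * Direct I/O is handled for reads only; a write request returns 0 here so
 * that the caller falls back to buffered writing.
 */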
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  nilfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			nilfs_write_failed(mapping, end);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
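
/*
 * Create an inode for @dir's checkpoint: allocate an entry in the ifile of
 * the parent's nilfs_root and initialize the in-core inode (owner,
 * generation, inherited flags).
 */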
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
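
/*
 * Propagate the NILFS inode flags (sync, append, immutable, noatime,
 * dirsync) to the VFS i_flags and mask __GFP_FS off the mapping's
 * allocation flags.
 */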
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
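
/*
 * Read the raw inode from its ifile block, fill the in-core inode via
 * nilfs_read_inode_common(), and install the inode/file/address-space
 * operations matching the file type.
 */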
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
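
/*
 * Obtain a GC-dedicated inode identified by (ino, cno); such inodes carry
 * NILFS_I_GCINODE and a checkpoint number so that nilfs_iget_test() keeps
 * them apart from regular inodes while the cleaner works on blocks of
 * older checkpoints.
 */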
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}
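
/*
 * Copy the in-core inode into its slot in the ifile buffer @ibh; the bmap
 * is deliberately not written at this point (has_bmap == 0), see the
 * workaround note in the body.
 */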
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
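
/*
 * Truncate the bmap down to block @from, removing at most
 * NILFS_MAX_TRUNCATE_BLOCKS blocks per pass and relaxing segment lock
 * pressure between passes.
 */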
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}
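
/*
 * Final iput handler: inodes that still have links (or have no root, or
 * are bad) only get their page cache and in-core state cleared; unlinked
 * inodes additionally have their bmap truncated and their ifile entry
 * deleted inside a transaction.
 */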
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}
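
/*
 * Look up (and cache in ii->i_bh) the ifile buffer that holds this inode's
 * on-disk entry, returning it with an extra reference for the caller.
 */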
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}
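
/*
 * Account @nr_dirty newly dirtied blocks and, on the first dirtying of the
 * inode, queue it on the ns_dirty_files list so that the segment
 * constructor will include it.
 */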
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
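
/*
 * Flush the in-core inode into its ifile block and mark both that buffer
 * and the ifile metadata file dirty.
 */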
int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
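
/*
 * Walk the bmap from @start, merging physically contiguous block runs into
 * extents and reporting uncommitted (delayed allocation) ranges with
 * FIEMAP_EXTENT_DELALLOC.
 */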
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}