  1. /*
  2. * linux/fs/ufs/truncate.c
  3. *
  4. * Copyright (C) 1998
  5. * Daniel Pirkl <daniel.pirkl@email.cz>
  6. * Charles University, Faculty of Mathematics and Physics
  7. *
  8. * from
  9. *
  10. * linux/fs/ext2/truncate.c
  11. *
  12. * Copyright (C) 1992, 1993, 1994, 1995
  13. * Remy Card (card@masi.ibp.fr)
  14. * Laboratoire MASI - Institut Blaise Pascal
  15. * Universite Pierre et Marie Curie (Paris VI)
  16. *
  17. * from
  18. *
  19. * linux/fs/minix/truncate.c
  20. *
  21. * Copyright (C) 1991, 1992 Linus Torvalds
  22. *
  23. * Big-endian to little-endian byte-swapping/bitmaps by
  24. * David S. Miller (davem@caip.rutgers.edu), 1995
  25. */
  26. /*
  27. * Real random numbers for secure rm added 94/02/18
  28. * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
  29. */
  30. /*
  31. * Modified to avoid infinite loop on 2006 by
  32. * Evgeniy Dushistov <dushistov@mail.ru>
  33. */
  34. #include <linux/errno.h>
  35. #include <linux/fs.h>
  36. #include <linux/ufs_fs.h>
  37. #include <linux/fcntl.h>
  38. #include <linux/time.h>
  39. #include <linux/stat.h>
  40. #include <linux/string.h>
  41. #include <linux/smp_lock.h>
  42. #include <linux/buffer_head.h>
  43. #include <linux/blkdev.h>
  44. #include <linux/sched.h>
  45. #include "swab.h"
  46. #include "util.h"
  47. /*
  48. * Secure deletion currently doesn't work. It interacts very badly
  49. * with buffers shared with memory mappings, and for that reason
  50. * can't be done in the truncate() routines. It should instead be
  51. * done separately in "release()" before calling the truncate routines
  52. * that will release the actual file blocks.
  53. *
  54. * Linus
  55. */
/*
 * First block / first fragment index that lies entirely beyond i_size
 * (sizes are rounded up to a whole block / fragment).  Both macros
 * expand references to local variables `inode' and `uspi', which must
 * be in scope at every expansion site below.
 */
#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
/*
 * Free the direct data fragments (the first UFS_NDIR_FRAGMENT fragments,
 * addressed straight from the inode) that lie beyond the new i_size.
 *
 * The range [DIRECT_FRAGMENT, i_lastfrag) is split into three parts:
 *   frag1..frag2   - leading partial block, freed fragment-wise
 *   block1..block2 - whole blocks in the middle, freed block-wise
 *   frag3..frag4   - trailing partial block, freed fragment-wise
 *
 * Always returns 0; the int return keeps the signature parallel with the
 * indirect variants, whose results ufs_truncate() ORs into `retry'.
 */
static int ufs_trunc_direct (struct inode * inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	__fs32 * p;
	unsigned frag1, frag2, frag3, frag4, block1, block2;
	unsigned frag_to_free, free_count;
	unsigned i, tmp;
	int retry;

	UFSD("ENTER\n");

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag_to_free = 0;
	free_count = 0;
	retry = 0;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	/* frag2: frag1 rounded up to a block boundary; frag3: frag4 rounded
	 * down to one.  The whole blocks in between are freed in the loop. */
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		/* Everything to free fits inside one block: collapse the
		 * middle and trailing parts into the leading fragment run. */
		frag2 = frag4;
		frag3 = frag4 = 0;
	}
	else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("frag1 %u, frag2 %u, block1 %u, block2 %u, frag3 %u, frag4 %u\n", frag1, frag2, block1, block2, frag3, frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free the leading partial-block run of fragments.
	 */
	p = ufsi->i_u1.i_data + ufs_fragstoblks (frag1);
	tmp = fs32_to_cpu(sb, *p);
	if (!tmp )
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag1 = ufs_fragnum (frag1);
	frag2 = ufs_fragnum (frag2);

	ufs_free_fragments (inode, tmp + frag1, frag2 - frag1);
	mark_inode_dirty(inode);
	frag_to_free = tmp + frag1;

next1:
	/*
	 * Free whole blocks; physically adjacent blocks are coalesced into
	 * a single ufs_free_blocks() call via frag_to_free/free_count.
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufsi->i_u1.i_data + i;
		tmp = fs32_to_cpu(sb, *p);
		if (!tmp)
			continue;

		*p = 0;

		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		} else if (free_count > 0 && frag_to_free == tmp - free_count)
			free_count += uspi->s_fpb;
		else {
			ufs_free_blocks (inode, frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		}
		mark_inode_dirty(inode);
	}

	if (free_count > 0)
		ufs_free_blocks (inode, frag_to_free, free_count);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free the trailing partial-block run of fragments and drop the
	 * pointer to the now-empty block.
	 */
	p = ufsi->i_u1.i_data + ufs_fragstoblks (frag3);
	tmp = fs32_to_cpu(sb, *p);
	if (!tmp )
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);

	*p = 0;

	ufs_free_fragments (inode, tmp, frag4);
	mark_inode_dirty(inode);

next3:

	UFSD("EXIT\n");
	return retry;
}
/*
 * Release the data blocks addressed by one single-indirect block.
 *
 * @offset: file-block number mapped by slot 0 of this indirect block;
 *          only slots at or beyond DIRECT_BLOCK - offset are freed.
 * @p:      pointer to the slot (in the inode or in a parent indirect
 *          block) holding this indirect block's fragment address.
 *
 * Returns 1 when *p changed underneath us while the indirect block was
 * being read (a concurrent writer raced us), telling ufs_truncate() to
 * make another pass; returns 0 otherwise.
 */
static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * ind_ubh;
	__fs32 * ind;
	unsigned indirect_block, i, tmp;
	unsigned frag_to_free, free_count;
	int retry;

	UFSD("ENTER\n");

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag_to_free = 0;
	free_count = 0;
	retry = 0;

	tmp = fs32_to_cpu(sb, *p);
	if (!tmp)
		return 0;	/* hole: nothing mapped here */
	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
	/* ubh_bread() may sleep; if *p changed meanwhile, drop the buffer
	 * and ask the caller to retry the whole truncate pass. */
	if (tmp != fs32_to_cpu(sb, *p)) {
		ubh_brelse (ind_ubh);
		return 1;
	}
	/* Read failure: forget the unreadable indirect block entirely. */
	if (!ind_ubh) {
		*p = 0;
		return 0;
	}

	/* First slot inside this indirect block that lies past i_size. */
	indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
	for (i = indirect_block; i < uspi->s_apb; i++) {
		ind = ubh_get_addr32 (ind_ubh, i);
		tmp = fs32_to_cpu(sb, *ind);
		if (!tmp)
			continue;

		*ind = 0;
		ubh_mark_buffer_dirty(ind_ubh);
		/* Coalesce physically adjacent blocks into one
		 * ufs_free_blocks() call. */
		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		} else if (free_count > 0 && frag_to_free == tmp - free_count)
			free_count += uspi->s_fpb;
		else {
			ufs_free_blocks (inode, frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		}
		mark_inode_dirty(inode);
	}

	if (free_count > 0) {
		ufs_free_blocks (inode, frag_to_free, free_count);
	}
	/* If every slot is now empty, free the indirect block itself and
	 * clear the parent's pointer to it. */
	for (i = 0; i < uspi->s_apb; i++)
		if (*ubh_get_addr32(ind_ubh,i))
			break;
	if (i >= uspi->s_apb) {
		tmp = fs32_to_cpu(sb, *p);
		*p = 0;

		ufs_free_blocks (inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(ind_ubh);
		ind_ubh = NULL;
	}
	/* For synchronous inodes, push the modified indirect block to disk
	 * before returning. */
	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
		ubh_ll_rw_block(SWRITE, ind_ubh);
		ubh_wait_on_buffer (ind_ubh);
	}
	ubh_brelse (ind_ubh);

	UFSD("EXIT\n");

	return retry;
}
/*
 * Release everything reachable through one double-indirect block by
 * delegating each populated slot to ufs_trunc_indirect().
 *
 * @offset: file-block number mapped by slot 0 of this double-indirect
 *          block (each slot covers s_apb further file blocks).
 * @p:      slot holding this double-indirect block's fragment address.
 *
 * Returns non-zero when a concurrent modification was detected (here or
 * in a child ufs_trunc_indirect() call) and the caller must retry.
 */
static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * dind_bh;
	unsigned i, tmp, dindirect_block;
	__fs32 * dind;
	int retry = 0;

	UFSD("ENTER\n");

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	/* First slot of this double-indirect block that lies past i_size. */
	dindirect_block = (DIRECT_BLOCK > offset)
		? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
	retry = 0;

	tmp = fs32_to_cpu(sb, *p);
	if (!tmp)
		return 0;	/* hole: nothing mapped here */
	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
	/* Raced with a concurrent writer while reading: retry the pass. */
	if (tmp != fs32_to_cpu(sb, *p)) {
		ubh_brelse (dind_bh);
		return 1;
	}
	/* Read failure: forget the unreadable double-indirect block. */
	if (!dind_bh) {
		*p = 0;
		return 0;
	}

	for (i = dindirect_block ; i < uspi->s_apb ; i++) {
		dind = ubh_get_addr32 (dind_bh, i);
		tmp = fs32_to_cpu(sb, *dind);
		if (!tmp)
			continue;
		/* Each slot i maps file blocks starting at
		 * offset + (i << s_apbshift). */
		retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
		ubh_mark_buffer_dirty(dind_bh);
	}

	/* If every slot is now empty, free the double-indirect block itself
	 * and clear the parent's pointer. */
	for (i = 0; i < uspi->s_apb; i++)
		if (*ubh_get_addr32 (dind_bh, i))
			break;
	if (i >= uspi->s_apb) {
		tmp = fs32_to_cpu(sb, *p);
		*p = 0;

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(dind_bh);
		dind_bh = NULL;
	}
	/* Synchronous inode: flush the modified block before returning. */
	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
		ubh_ll_rw_block(SWRITE, dind_bh);
		ubh_wait_on_buffer (dind_bh);
	}
	ubh_brelse (dind_bh);

	UFSD("EXIT\n");

	return retry;
}
/*
 * Release everything reachable through the inode's triple-indirect block
 * (slot UFS_TIND_BLOCK of i_data) by delegating each populated slot to
 * ufs_trunc_dindirect().
 *
 * Returns non-zero when a concurrent modification was detected (here or
 * in a child call) and ufs_truncate() must make another pass.
 */
static int ufs_trunc_tindirect (struct inode * inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * tind_bh;
	unsigned tindirect_block, tmp, i;
	__fs32 * tind, * p;
	int retry;

	UFSD("ENTER\n");

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	retry = 0;

	/* First slot of the triple-indirect block that lies past i_size:
	 * file blocks below UFS_NDADDR + s_apb + s_2apb are covered by the
	 * direct/indirect/double-indirect paths. */
	tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
		? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;

	p = ufsi->i_u1.i_data + UFS_TIND_BLOCK;
	if (!(tmp = fs32_to_cpu(sb, *p)))
		return 0;	/* no triple-indirect block allocated */
	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
	/* Raced with a concurrent writer while reading: retry the pass. */
	if (tmp != fs32_to_cpu(sb, *p)) {
		ubh_brelse (tind_bh);
		return 1;
	}
	/* Read failure: forget the unreadable triple-indirect block. */
	if (!tind_bh) {
		*p = 0;
		return 0;
	}

	for (i = tindirect_block ; i < uspi->s_apb ; i++) {
		tind = ubh_get_addr32 (tind_bh, i);
		/* NOTE(review): the offset passed down uses
		 * (i + 1) << s_2apbshift, unlike the i << s_apbshift shape
		 * used by ufs_trunc_dindirect() for its children — looks
		 * inconsistent; confirm against the upstream kernel tree. */
		retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
			uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
		ubh_mark_buffer_dirty(tind_bh);
	}

	/* If every slot is now empty, free the triple-indirect block itself
	 * and clear the inode's pointer. */
	for (i = 0; i < uspi->s_apb; i++)
		if (*ubh_get_addr32 (tind_bh, i))
			break;
	if (i >= uspi->s_apb) {
		tmp = fs32_to_cpu(sb, *p);
		*p = 0;

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(tind_bh);
		tind_bh = NULL;
	}
	/* Synchronous inode: flush the modified block before returning. */
	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
		ubh_ll_rw_block(SWRITE, tind_bh);
		ubh_wait_on_buffer (tind_bh);
	}
	ubh_brelse (tind_bh);

	UFSD("EXIT\n");
	return retry;
}
/*
 * Make sure the fragment containing the last byte of i_size is actually
 * mapped, so that the subsequent block_truncate_page() in ufs_truncate()
 * has a real block to zero the tail of.
 *
 * Returns 0 on success, -EIO if the page cannot be locked, or the error
 * from ufs_getfrag_block().
 */
static int ufs_alloc_lastblock(struct inode *inode)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	unsigned lastfrag, i, end;
	struct page *lastpage;
	struct buffer_head *bh;

	/* Number of fragments needed to hold i_size, rounded up. */
	lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;	/* empty file: nothing to map */

	lastfrag--;	/* index of the fragment holding the last byte */

	/* Lock the page-cache page covering that fragment. */
	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	/* Walk the page's buffer ring to the buffer for `lastfrag'. */
	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	/* create=1: allocate the fragment if it is not mapped yet. */
	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * No need to zero the fragment: if it was mapped over a
		 * hole, it already contains zeroes.
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}
/*
 * Truncate the on-disk blocks of @inode down to the current i_size.
 *
 * @old_i_size: size before the caller shrank i_size; restored if the
 *              last-block allocation fails so the inode stays coherent.
 *
 * Runs the direct/indirect/double/triple passes in a loop under the BKL
 * until no pass reports a concurrent-modification retry.  Returns 0 on
 * success or a negative errno.
 */
int ufs_truncate(struct inode *inode, loff_t old_i_size)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int retry, err = 0;

	UFSD("ENTER\n");

	/* Only regular files, directories and symlinks have data blocks. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* Make sure the fragment holding the new last byte is mapped. */
	err = ufs_alloc_lastblock(inode);

	if (err) {
		i_size_write(inode, old_i_size);
		goto out;
	}

	/* Zero the tail of the last partial block in the page cache. */
	block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);

	lock_kernel();
	while (1) {
		retry = ufs_trunc_direct(inode);
		retry |= ufs_trunc_indirect (inode, UFS_IND_BLOCK,
					(__fs32 *) &ufsi->i_u1.i_data[UFS_IND_BLOCK]);
		retry |= ufs_trunc_dindirect (inode, UFS_IND_BLOCK + uspi->s_apb,
					(__fs32 *) &ufsi->i_u1.i_data[UFS_DIND_BLOCK]);
		retry |= ufs_trunc_tindirect (inode);
		if (!retry)
			break;
		/* A pass raced with a writer: sync if required, kick the
		 * request queue, and yield before retrying (see the
		 * "avoid infinite loop" note in the file header). */
		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
			ufs_sync_inode (inode);
		blk_run_address_space(inode->i_mapping);
		yield();
	}

	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	unlock_kernel();
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}
  400. /*
  401. * We don't define our `inode->i_op->truncate', and call it here,
  402. * because of:
  403. * - there is no way to know old size
  404. * - there is no way inform user about error, if it happens in `truncate'
  405. */
  406. static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
  407. {
  408. struct inode *inode = dentry->d_inode;
  409. unsigned int ia_valid = attr->ia_valid;
  410. int error;
  411. error = inode_change_ok(inode, attr);
  412. if (error)
  413. return error;
  414. if (ia_valid & ATTR_SIZE &&
  415. attr->ia_size != i_size_read(inode)) {
  416. loff_t old_i_size = inode->i_size;
  417. error = vmtruncate(inode, attr->ia_size);
  418. if (error)
  419. return error;
  420. error = ufs_truncate(inode, old_i_size);
  421. if (error)
  422. return error;
  423. }
  424. return inode_setattr(inode, attr);
  425. }
/* Inode operations for UFS regular files: only setattr is overridden,
 * since truncation is driven from ufs_setattr() rather than a
 * ->truncate method (see the comment preceding ufs_setattr above). */
struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};