xfs_lrw.c

/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

#include <linux/capability.h>
#include <linux/writeback.h>

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode */
        loff_t                  pos,    /* offset in file */
        size_t                  count)  /* size of data to zero */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = VFS_I(ip)->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}
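
/*
 * xfs_read
 *
 * Read from a regular file.  Validates the iovec segments, enforces the
 * sector alignment rules for direct I/O, sends a DMAPI read event if one
 * is armed, flushes and invalidates cached pages for direct reads, and
 * then hands the work to generic_file_aio_read() under the shared iolock.
 */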
ssize_t                         /* bytes read, or (-) error */
xfs_read(
        xfs_inode_t             *ip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            segs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        xfs_mount_t             *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        xfs_fsize_t             n;
        unsigned long           seg;

        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (*offset == ip->i_size) {
                                return (0);
                        }
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
                int iolock = XFS_IOLOCK_SHARED;

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
                                        dmflags, &iolock);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        if (unlikely(ioflags & IO_ISDIRECT))
                                mutex_unlock(&inode->i_mutex);
                        return ret;
                }
        }

        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (inode->i_mapping->nrpages)
                        ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
                                                    -1, FI_REMAPF_LOCKED);
                mutex_unlock(&inode->i_mutex);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return ret;
                }
        }

        trace_xfs_file_read(ip, size, *offset, ioflags);

        iocb->ki_pos = *offset;
        ret = generic_file_aio_read(iocb, iovp, segs, *offset);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}
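
/*
 * xfs_splice_read
 *
 * Splice data from a regular file into a pipe.  Takes the shared iolock,
 * sends a DMAPI read event if one is armed, and lets
 * generic_file_splice_read() do the actual work.
 */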
ssize_t
xfs_splice_read(
        xfs_inode_t             *ip,
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_SHARED;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
                                        FILP_DELAY_FLAG(infilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }

        trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}
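
/*
 * xfs_splice_write
 *
 * Splice data from a pipe into a regular file.  Takes the exclusive
 * iolock, sends a DMAPI write event if one is armed, records the size the
 * file may grow to in i_new_size, lets generic_file_splice_write() do the
 * actual work, and then updates the in-core and on-disk file sizes.
 */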
ssize_t
xfs_splice_write(
        xfs_inode_t             *ip,
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        int                     flags,
        int                     ioflags)
{
        xfs_mount_t             *mp = ip->i_mount;
        ssize_t                 ret;
        struct inode            *inode = outfilp->f_mapping->host;
        xfs_fsize_t             isize, new_size;

        XFS_STATS_INC(xs_write_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_EXCL;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
                                        FILP_DELAY_FLAG(outfilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return -error;
                }
        }

        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (ip->i_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                ip->i_new_size = 0;
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        xfs_inode_t     *ip,
        xfs_fsize_t     offset,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = ip->i_mount;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
int                                     /* error (positive) */
xfs_zero_eof(
        xfs_inode_t     *ip,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, offset, isize);
        if (error) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                if (error) {
                        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * This loop handles initializing pages that were
                         * partially initialized by the code below this
                         * loop. It basically zeroes the part of the page
                         * that sits on a hole and sets the page as P_HOLE
                         * and calls remapf if it is a mapped file.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error) {
                        goto out_lock;
                }

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                xfs_ilock(ip, XFS_ILOCK_EXCL);
        }

        return 0;

out_lock:
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
}
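
/*
 * xfs_write
 *
 * Write to a regular file, handling both buffered and direct I/O.
 * Takes the appropriate iolock and i_mutex, sends DMAPI write and
 * nospace events when armed, zeroes any gap between the old EOF and the
 * write offset, clears setuid/setgid bits, and dispatches to
 * generic_file_direct_write() or generic_file_buffered_write().
 */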
ssize_t                         /* bytes written, or (-) error */
xfs_write(
        struct xfs_inode        *xip,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            nsegs,
        loff_t                  *offset,
        int                     ioflags)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        unsigned long           segs = nsegs;
        xfs_mount_t             *mp;
        ssize_t                 ret = 0, error = 0;
        xfs_fsize_t             isize, new_size;
        int                     iolock;
        int                     eventsent = 0;
        size_t                  ocount = 0, count;
        loff_t                  pos;
        int                     need_i_mutex;

        XFS_STATS_INC(xs_write_calls);

        error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
        if (error)
                return error;

        count = ocount;
        pos = *offset;

        if (count == 0)
                return 0;

        mp = xip->i_mount;

        xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;
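
        /*
         * Buffered writes need the exclusive iolock and i_mutex; direct
         * I/O starts out with only the shared iolock.  A direct write
         * that finds cached pages or extends the file upgrades to the
         * exclusive locks below, and a short direct write falls back to
         * buffered I/O by jumping back here with IO_ISDIRECT cleared.
         */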
relock:
        if (ioflags & IO_ISDIRECT) {
                iolock = XFS_IOLOCK_SHARED;
                need_i_mutex = 0;
        } else {
                iolock = XFS_IOLOCK_EXCL;
                need_i_mutex = 1;
                mutex_lock(&inode->i_mutex);
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_mutex;
        }

        if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent)) {
                int dmflags = FILP_DELAY_FLAG(file);

                if (need_i_mutex)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
                                      pos, count, dmflags, &iolock);
                if (error) {
                        goto out_unlock_internal;
                }
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && pos != xip->i_size)
                        goto start;
        }
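
        /*
         * For direct I/O the offset and length must both be aligned to
         * the sector mask of the underlying device (the realtime device
         * for realtime inodes, the data device otherwise).  Flushing any
         * cached pages, or extending the file, requires i_mutex and the
         * exclusive iolock, so upgrade the locks and restart if needed.
         */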
        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(xip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        return XFS_ERROR(-EINVAL);
                }

                if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        iolock = XFS_IOLOCK_EXCL;
                        need_i_mutex = 1;
                        mutex_lock(&inode->i_mutex);
                        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
                        goto start;
                }
        }
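
        /*
         * Record the size this write may extend the file to.  The I/O
         * completion path uses i_new_size when updating the on-disk size;
         * it is cleared again in out_unlock_internal below, where a failed
         * write may also have to trim the on-disk size back to i_size.
         */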
        new_size = pos + count;
        if (new_size > xip->i_size)
                xip->i_new_size = new_size;

        if (likely(!(ioflags & IO_INVIS)))
                file_update_time(file);

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */
        if (pos > xip->i_size) {
                error = xfs_zero_eof(xip, pos, xip->i_size);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        goto out_unlock_internal;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */
        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -file_remove_suid(file);
                if (unlikely(error)) {
                        goto out_unlock_internal;
                }
        }

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        if ((ioflags & IO_ISDIRECT)) {
                if (mapping->nrpages) {
                        WARN_ON(need_i_mutex == 0);
                        error = xfs_flushinval_pages(xip,
                                        (pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (error)
                                goto out_unlock_internal;
                }

                if (need_i_mutex) {
                        /* demote the lock now the cached pages are gone */
                        xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
                        mutex_unlock(&inode->i_mutex);

                        iolock = XFS_IOLOCK_SHARED;
                        need_i_mutex = 0;
                }

                trace_xfs_file_direct_write(xip, count, *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                        goto relock;
                }
        } else {
                int enospc = 0;
                ssize_t ret2 = 0;

write_retry:
                trace_xfs_file_buffered_write(xip, count, *offset, ioflags);
                ret2 = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
                /*
                 * if we just got an ENOSPC, flush the inode now we
                 * aren't holding any page locks and retry *once*
                 */
                if (ret2 == -ENOSPC && !enospc) {
                        error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE);
                        if (error)
                                goto out_unlock_internal;
                        enospc = 1;
                        goto write_retry;
                }

                ret = ret2;
        }

        current->backing_dev_info = NULL;
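
        /*
         * On a failed write (other than -EFAULT) clamp the file position
         * back to the current VFS inode size, then propagate any advance
         * of the position past the XFS in-core size into xip->i_size,
         * taking the ilock only when an update is actually needed.
         */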
        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
                *offset = isize;

        if (*offset > xip->i_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_size)
                        xip->i_size = *offset;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        if (ret == -ENOSPC &&
            DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
                                DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                if (error)
                        goto out_unlock_internal;
                goto start;
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
                loff_t end = pos + ret - 1;
                int error2;

                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);

                error2 = filemap_write_and_wait_range(mapping, pos, end);
                if (!error)
                        error = error2;
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);

                error2 = xfs_fsync(xip);
                if (!error)
                        error = error2;
        }

 out_unlock_internal:
        if (xip->i_new_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                xip->i_new_size = 0;
                /*
                 * If this was a direct or synchronous I/O that failed (such
                 * as ENOSPC) then part of the I/O may have been written to
                 * disk before the error occurred.  In this case the on-disk
                 * file size may have been adjusted beyond the in-memory file
                 * size and now needs to be truncated back.
                 */
                if (xip->i_d.di_size > xip->i_size)
                        xip->i_d.di_size = xip->i_size;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(xip, iolock);
 out_unlock_mutex:
        if (need_i_mutex)
                mutex_unlock(&inode->i_mutex);
        return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
                trace_xfs_bdstrat_shut(bp, _RET_IP_);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }

        xfs_buf_iorequest(bp);
        return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
{
        ASSERT(mp);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return;
        }

        trace_xfs_bdstrat_shut(bp, _RET_IP_);
        xfs_bioerror_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        xfs_mount_t             *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                cmn_err(CE_NOTE,
                        "XFS: %s required on read-only device.", message);
                cmn_err(CE_NOTE,
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;
}