xfs_lrw.c

/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int		tag,
	xfs_inode_t	*ip,
	void		*data,
	size_t		segs,
	loff_t		offset,
	int		ioflags)
{
	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode */
	loff_t			pos,	/* offset in file */
	size_t			count)	/* size of data to zero */
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}
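
/*
 * Example (illustrative only, not part of the original file): a caller
 * that has just extended a file from 5000 to 7000 bytes on a filesystem
 * with 4096-byte blocks could zero the newly exposed range with:
 *
 *	error = xfs_iozero(ip, 5000, 2000);
 *
 * which writes zeroes over bytes 5000..6999 through the page cache so
 * stale on-disk data is never exposed. Within this file that job is done
 * by xfs_zero_last_block() and xfs_zero_eof() below; the call above is a
 * sketch, assuming the caller already holds the locks those routines
 * document.
 */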
ssize_t				/* bytes read, or (-) error */
xfs_read(
	xfs_inode_t		*ip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	xfs_mount_t		*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
		int iolock = XFS_IOLOCK_SHARED;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
					dmflags, &iolock);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages)
			ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
						    -1, FI_REMAPF_LOCKED);
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, ip,
			   (void *)iovp, segs, *offset, ioflags);

	iocb->ki_pos = *offset;
	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
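
/*
 * Worked example of the direct-I/O alignment check above (numbers are
 * illustrative): on a target with 512-byte sectors, bt_smask is 0x1ff,
 * so an O_DIRECT read of 4096 bytes at offset 512 passes because
 * (512 & 0x1ff) == 0 and (4096 & 0x1ff) == 0, while an offset of 100
 * fails the mask test and returns EINVAL -- unless the offset sits
 * exactly at EOF, in which case the read simply returns 0.
 */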
ssize_t
xfs_splice_read(
	xfs_inode_t		*ip,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_SHARED;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
					FILP_DELAY_FLAG(infilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_write(
	xfs_inode_t		*ip,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;
	struct inode		*inode = outfilp->f_mapping->host;
	xfs_fsize_t		isize, new_size;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_EXCL;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
					FILP_DELAY_FLAG(outfilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
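
/*
 * Sketch of the i_new_size handshake used above (and again in
 * xfs_write() below), with illustrative numbers: a splice of 3000 bytes
 * at offset 6000 into a 4096-byte file sets i_new_size = 9000 before the
 * I/O starts, recording how far the file may grow. On success *ppos
 * reaches 9000 and ip->i_size follows it; on a short or failed write the
 * final block resets i_new_size and trims any speculatively grown
 * on-disk size (di_size) back down to the in-core i_size.
 */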
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
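
/*
 * Worked example for the arithmetic above (illustrative values): on a
 * filesystem with 4096-byte blocks and isize = 6144, XFS_B_FSB_OFFSET()
 * gives zero_offset = 2048, so 2048 bytes of the last block lie beyond
 * EOF and zero_len starts as 4096 - 2048 = 2048. If the new write begins
 * at offset = 7000, only bytes 6144..6999 need zeroing, so zero_len is
 * clamped to 7000 - 6144 = 856 before calling xfs_iozero().
 */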
/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
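
/*
 * Worked example of the range calculation above (illustrative values,
 * 4096-byte blocks): extending a file from isize = 10000 with a write at
 * offset = 20480 gives last_fsb = (10000 - 1) / 4096 = 2,
 * start_zero_fsb = roundup(10000 / 4096) = 3, and
 * end_zero_fsb = (20480 - 1) / 4096 = 4, so the loop maps blocks 3..4
 * and zeroes only those that are allocated and written; holes and
 * unwritten extents are skipped since they already read back as zeroes.
 */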
ssize_t				/* bytes written, or (-) error */
xfs_write(
	struct xfs_inode	*xip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	int			iolock;
	int			eventsent = 0;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
	if (error)
		return error;

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	mp = xip->i_mount;

	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

relock:
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
				      pos, count, dmflags, &iolock);
		if (error) {
			goto out_unlock_internal;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && pos != xip->i_size)
			goto start;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(xip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			return XFS_ERROR(-EINVAL);
		}

		if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			iolock = XFS_IOLOCK_EXCL;
			need_i_mutex = 1;
			mutex_lock(&inode->i_mutex);
			xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
			goto start;
		}
	}

	new_size = pos + count;
	if (new_size > xip->i_size)
		xip->i_new_size = new_size;

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */
	if (pos > xip->i_size) {
		error = xfs_zero_eof(xip, pos, xip->i_size);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -file_remove_suid(file);
		if (unlikely(error)) {
			goto out_unlock_internal;
		}
	}

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (mapping->nrpages) {
			WARN_ON(need_i_mutex == 0);
			xfs_inval_cached_trace(xip, pos, -1,
					(pos & PAGE_CACHE_MASK), -1);
			error = xfs_flushinval_pages(xip,
					(pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (error)
				goto out_unlock_internal;
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			need_i_mutex = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_size)
			xip->i_size = *offset;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	if (ret == -ENOSPC &&
	    DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
				DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);
		if (error)
			goto out_unlock_internal;
		goto start;
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		int error2;

		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error2 = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = error2;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);
		error2 = xfs_write_sync_logforce(mp, xip);
		if (!error)
			error = error2;
	}
 out_unlock_internal:
	if (xip->i_new_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		xip->i_new_size = 0;
		/*
		 * If this was a direct or synchronous I/O that failed (such
		 * as ENOSPC) then part of the I/O may have been written to
		 * disk before the error occurred. In this case the on-disk
		 * file size may have been adjusted beyond the in-memory file
		 * size and now needs to be truncated back.
		 */
		if (xip->i_d.di_size > xip->i_size)
			xip->i_d.di_size = xip->i_size;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(xip, iolock);
 out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
	return -error;
}
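
/*
 * Sketch of the direct-to-buffered fallback above, with illustrative
 * numbers: an O_DIRECT write of count = 8192 at pos = 0 that hits a hole
 * after one block may return ret = 4096 from
 * generic_file_direct_write(). Since ret != count, the code records the
 * 4096 bytes already written, advances pos to 4096, shrinks count to
 * 4096, clears IO_ISDIRECT, drops the iolock, and jumps back to relock:
 * so the remainder is retried through the buffered path under i_mutex.
 */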
/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	xfs_bioerror_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}
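
/*
 * Example caller (an illustrative sketch; the operation name is made
 * up): code that must modify the filesystem, such as a grow operation,
 * would gate itself like this before making any changes:
 *
 *	int error = xfs_dev_is_read_only(mp, "grow");
 *	if (error)
 *		return error;
 *
 * Note the positive EROFS return convention used internally by XFS;
 * callers negate such errors at the VFS boundary.
 */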