/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
/*
 * fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int		tag,
	xfs_iocore_t	*io,
	void		*data,
	size_t		segs,
	loff_t		offset,
	int		ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
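	/*
	 * ktrace slots hold void pointers, so the 64-bit quantities here
	 * (the on-disk inode size, the I/O offset and io_new_size) are
	 * logged as separate high and low 32-bit halves.
	 */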
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 * xfs_iozero
 *
 *	xfs_iozero clears the specified range of the supplied buffer,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,		/* inode			*/
	loff_t			pos,		/* offset in file		*/
	size_t			count,		/* size of data to zero		*/
	loff_t			end_size)	/* max file size to set		*/
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	char			*kaddr;
	int			status;

	mapping = ip->i_mapping;
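	/*
	 * Walk the range one page-cache page at a time.  Each pass zeroes
	 * from pos to the end of the page (or the end of the range), going
	 * through the address space's prepare_write/commit_write methods so
	 * that backing blocks are allocated, and read in when only partially
	 * overwritten.
	 */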
	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		kaddr = kmap(page);
		status = mapping->a_ops->prepare_write(NULL, page, offset,
							offset + bytes);
		if (status)
			goto unlock;

		memset((void *) (kaddr + offset), 0, bytes);
		flush_dcache_page(page);
		status = mapping->a_ops->commit_write(NULL, page, offset,
							offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
			if (pos > i_size_read(ip))
				i_size_write(ip, pos < end_size ? pos : end_size);
		}

unlock:
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

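	/*
	 * status is zero or a negative errno at this point; negate it to
	 * return the positive-error convention used internally by XFS.
	 */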
	return (-status);
}

/*
 * xfs_inval_cached_pages
 *
 * This routine is responsible for keeping direct I/O and buffered I/O
 * somewhat coherent.  From here we make sure that we're at least
 * temporarily holding the inode I/O lock exclusively and then call
 * the page cache to flush and invalidate any cached pages.  If there
 * are no cached pages this routine will be very quick.
 */
void
xfs_inval_cached_pages(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	int		write,
	int		relock)
{
	if (VN_CACHED(vp)) {
		xfs_inval_cached_trace(io, offset, -1, ctooff(offtoct(offset)), -1);
		VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED);
	}
}

ssize_t			/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

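	/*
	 * Direct I/O must be aligned to the target device's sector size
	 * (pbr_smask is that alignment mask); a misaligned request that
	 * starts exactly at EOF reads zero bytes, anything else is EINVAL.
	 */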
	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->pbr_smask) ||
		    (size & target->pbr_smask)) {
			if (*offset == ip->i_d.di_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

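	/*
	 * For direct reads, i_sem is taken ahead of the XFS iolock; that
	 * matches the lock order used by the write path below, so a direct
	 * read serializes against buffered writers rather than racing them.
	 */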
	if (unlikely(ioflags & IO_ISDIRECT))
		down(&inode->i_sem);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					FILP_DELAY_FLAG(file), &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_isem;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);

unlock_isem:
	if (unlikely(ioflags & IO_ISDIRECT))
		up(&inode->i_sem);
	return ret;
}

ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (count == 0))
		return 0;

	if (n < count)
		count = n;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
				      *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
		   (void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);

	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_fsize_t	isize,
	xfs_fsize_t	end_size)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		isize_fsb_offset;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
	ASSERT(offset > isize);

	mp = io->io_mount;

	isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (isize_fsb_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL | XFS_EXTSIZE_RD);
	loff = XFS_FSB_TO_B(mp, last_fsb);
	lsize = XFS_FSB_TO_B(mp, 1);

	zero_offset = isize_fsb_offset;
	zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;

	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL | XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
int					/* error (positive) */
xfs_zero_eof(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = LINVFS_GET_IP(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	prev_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_extlen_t	buf_len_fsb;
	xfs_extlen_t	prev_zero_count;
	xfs_mount_t	*mp;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));

	mp = io->io_mount;

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, offset, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
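	/*
	 * Note that for a currently empty file (isize == 0), last_fsb
	 * below becomes the sentinel (xfs_fileoff_t)-1, which can never
	 * equal end_zero_fsb, so the whole range gets zeroed.
	 */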
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	prev_zero_fsb = NULLFILEOFF;
	prev_zero_count = 0;
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			prev_zero_fsb = NULLFILEOFF;
			prev_zero_count = 0;
			start_zero_fsb = imap.br_startoff +
					 imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks in the range requested.
		 * Zero them a single write at a time.  We actually
		 * don't zero the entire range returned if it is
		 * too big and simply loop around to get the rest.
		 * That is not the most efficient thing to do, but it
		 * is simple and this path should not be exercised often.
		 */
		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
					      mp->m_writeio_blocks << 8);
		/*
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);

		error = xfs_iozero(ip, loff, lsize, end_size);
		if (error) {
			goto out_lock;
		}

		prev_zero_fsb = start_zero_fsb;
		prev_zero_count = buf_len_fsb;
		start_zero_fsb = imap.br_startoff + buf_len_fsb;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:
	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	vnode_t			*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_isem = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if (ioflags & IO_ISAIO)
			return XFS_ERROR(-ENOSYS);

		if ((pos & target->pbr_smask) || (count & target->pbr_smask))
			return XFS_ERROR(-EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_isem = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

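	/*
	 * relock is also the re-entry point for a direct write that only
	 * partially succeeded (e.g. hit a hole) and falls back to buffered
	 * I/O: the locks are retaken exclusively with need_isem set again.
	 */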
relock:
	if (need_isem) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		down(&inode->i_sem);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_isem;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_isem)
			dmflags |= DM_FLAGS_ISEM;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count,
				      dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	/*
	 * On Linux, generic_file_write updates the times even if
	 * no data is copied in so long as the write had a size.
	 *
	 * We must update xfs' times since revalidate will overcopy xfs.
	 */
	if (!(ioflags & IO_INVIS)) {
		xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
		inode_update_time(inode, 1);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */
	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
				     isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_isem;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_isem) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			up(&inode->i_sem);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_isem = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_isem = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

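	/*
	 * An ENOSPC result gives a DMAPI-managed filesystem a chance to
	 * make space: drop the locks, send the NOSPACE event (an HSM may
	 * migrate files elsewhere), then retake the locks and retry.
	 */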
	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_unlock_isem;
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		ret = 0;
		goto retry;
	}

	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		/*
		 * If we're treating this as O_DSYNC and we have not updated
		 * the size, force the log.
		 */
		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
		    !(xip->i_update_size)) {
			xfs_inode_log_item_t	*iip = xip->i_itemp;

			/*
			 * If an allocation transaction occurred
			 * without extending the size, then we have to force
			 * the log up the proper point to ensure that the
			 * allocation is permanent.  We can't count on
			 * the fact that buffered writes lock out direct I/O
			 * writes - the direct I/O write could have extended
			 * the size nontransactionally, then finished before
			 * we started.  xfs_write_file will think that the file
			 * didn't grow but the update isn't safe unless the
			 * size change is logged.
			 *
			 * Force the log if we've committed a transaction
			 * against the inode or if someone else has and
			 * the commit record hasn't gone to disk (e.g.
			 * the inode is pinned).  This guarantees that
			 * all changes affecting the inode are permanent
			 * when we return.
			 */
			if (iip && iip->ili_last_lsn) {
				xfs_log_force(mp, iip->ili_last_lsn,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			} else if (xfs_ipincount(xip) > 0) {
				xfs_log_force(mp, (xfs_lsn_t)0,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			}

		} else {
			xfs_trans_t	*tp;

			/*
			 * O_SYNC or O_DSYNC _with_ a size update are handled
			 * the same way.
			 *
			 * If the write was synchronous then we need to make
			 * sure that the inode modification time is permanent.
			 * We'll have updated the timestamp above, so here
			 * we use a synchronous transaction to log the inode.
			 * It's not fast, but it's necessary.
			 *
			 * If this a dsync write and the size got changed
			 * non-transactionally, then we need to ensure that
			 * the size change gets logged in a synchronous
			 * transaction.
			 */
			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
			if ((error = xfs_trans_reserve(tp, 0,
						      XFS_SWRITE_LOG_RES(mp),
						      0, 0, 0))) {
				/* Transaction reserve failed */
				xfs_trans_cancel(tp, 0);
			} else {
				/* Transaction reserve successful */
				xfs_ilock(xip, XFS_ILOCK_EXCL);
				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
				xfs_trans_ihold(tp, xip);
				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
				xfs_trans_set_sync(tp);
				error = xfs_trans_commit(tp, 0, NULL);
				xfs_iunlock(xip, XFS_ILOCK_EXCL);
			}
			if (error)
				goto out_unlock_internal;
		}

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			up(&inode->i_sem);
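		/*
		 * Write back and wait on just the range this call dirtied;
		 * sync_page_range returns 0 or a negative errno, and on
		 * success we return the byte count instead.
		 */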
		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}

out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
out_unlock_isem:
	if (need_isem)
		up(&inode->i_sem);
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

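	/*
	 * The owning mount is recovered from the buffer's third fsprivate
	 * slot, so the filesystem's shutdown state can be checked before
	 * the I/O is issued.
	 */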
	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		pagebuf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}
}

int
xfs_bmap(
	bhv_desc_t	*bdp,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes thru this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 */
		pagebuf_iorequest(bp);
		return 0;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t	*mp,
	char		*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}