xfs_aops.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

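/*
 * Walk the buffer_heads attached to a page and report whether any of
 * them are in the delayed-allocate or unwritten state.
 */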
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

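/*
 * Return the block device backing this inode's data: the realtime
 * device for realtime inodes, the main data device otherwise.
 */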
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
		inode_dio_done(ioend->io_inode);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction was allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction
	 * manually.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN) {
		/*
		 * For buffered I/O we never preallocate a transaction when
		 * doing the unwritten extent conversion, but for direct I/O
		 * we do not know if we are converting an unwritten extent
		 * or not at the point where we preallocate the transaction.
		 */
		if (ioend->io_append_trans) {
			ASSERT(ioend->io_isdirect);

			current_set_flags_nested(
				&ioend->io_append_trans->t_pflags, PF_FSTRANS);
			xfs_trans_cancel(ioend->io_append_trans, 0);
		}

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error) {
			ioend->io_error = -error;
			goto done;
		}
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
		if (error)
			ioend->io_error = -error;
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	xfs_destroy_ioend(ioend);
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O; otherwise the completion routine could be
	 * called too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

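/*
 * Look up the extent mapping covering @offset for writeback.  The inode
 * is locked shared (trylocked only when @nonblocking is set), and for
 * delalloc regions with no real mapping yet we perform the actual
 * allocation via xfs_iomap_write_allocate().
 */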
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

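/*
 * Return non-zero if the given file offset falls inside the cached
 * extent mapping, i.e. the mapping can be reused for this block.
 */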
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

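/*
 * Submit one bio on behalf of an ioend.  Take an extra reference on
 * io_remaining first, so the ioend cannot complete until all of its
 * bios have been submitted and have finished.
 */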
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

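/*
 * Allocate a bio sized for the underlying device and point it at the
 * disk sector this buffer_head maps to.
 */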
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

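/*
 * Mark a mapped, locked buffer as under async writeback and move it to
 * the clean state before its bio is built.
 */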
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

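/*
 * Transition the page itself into the writeback state and unlock it.
 * If none of its buffers will be written, complete the page writeback
 * immediately.
 */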
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, then we can end up with a page that only has some of its
 * buffers marked async write, and I/O completion on those can occur before we
 * mark the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

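/*
 * Translate a file offset into the on-disk sector for this buffer_head
 * using the extent mapping, converting between filesystem blocks and
 * basic (512 byte) blocks.
 */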
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_check_page_type(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable += (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing a
 * transaction.  Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead) {
		/*
		 * Reserve log space if we might write beyond the on-disk
		 * inode size.
		 */
		if (ioend->io_type != IO_UNWRITTEN &&
		    xfs_ioend_is_append(ioend)) {
			err = xfs_setfilesize_trans_alloc(ioend);
			if (err)
				goto error;
		}

		xfs_submit_ioend(wbc, iohead);
	}

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

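/*
 * Core get_blocks implementation shared by the buffered and direct I/O
 * paths.  For reads it only looks up the block mapping; for writes it
 * also allocates - immediately for direct I/O and extent-size-hint
 * files, as a delalloc reservation otherwise.
 */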
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_maxioffset);
	if (offset + size > mp->m_maxioffset)
		size = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here.  If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new so
			 * that we know that it is newly allocated if the write
			 * fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer gets
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We need to preallocate a transaction for a size update
		 * here.  In the case that this write both updates the size
		 * and converts at least one unwritten extent we will cancel
		 * the still clean transaction after the I/O has finished.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size) {
			ret = xfs_setfilesize_trans_alloc(ioend);
			if (ret)
				goto out_destroy_ioend;
			ioend->io_isdirect = 1;
		}

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_trans_cancel;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_trans_cancel:
	if (ioend->io_append_trans) {
		current_set_flags_nested(&ioend->io_append_trans->t_pflags,
					 PF_FSTRANS);
		xfs_trans_cancel(ioend->io_append_trans, 0);
	}
out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 * as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

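/*
 * Walk the buffers over the range of a failed write and punch out any
 * new delalloc reservations the write created, so that no stale
 * delalloc extents are left behind on the inode.
 */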
STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset = pos & PAGE_MASK;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
				   block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks
 * out on failure.  Hence we copy-n-waste it here and call xfs_vm_write_failed()
 * at the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, pos + len, i_size_read(inode));

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * On failure, we only need to kill delalloc blocks beyond EOF because they
 * will never be written.  For blocks within EOF, generic_write_end() zeros them
 * so they are safe to leave alone and be written with all the other valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			truncate_pagecache(inode, to, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
		}
	}
	return ret;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};