xfs_aops.c

  1. /*
  2. * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_bit.h"
  20. #include "xfs_log.h"
  21. #include "xfs_inum.h"
  22. #include "xfs_sb.h"
  23. #include "xfs_ag.h"
  24. #include "xfs_trans.h"
  25. #include "xfs_mount.h"
  26. #include "xfs_bmap_btree.h"
  27. #include "xfs_dinode.h"
  28. #include "xfs_inode.h"
  29. #include "xfs_alloc.h"
  30. #include "xfs_error.h"
  31. #include "xfs_rw.h"
  32. #include "xfs_iomap.h"
  33. #include "xfs_vnodeops.h"
  34. #include "xfs_trace.h"
  35. #include "xfs_bmap.h"
  36. #include <linux/gfp.h>
  37. #include <linux/mpage.h>
  38. #include <linux/pagevec.h>
  39. #include <linux/writeback.h>
  40. /*
  41. * Types of I/O for bmap clustering and I/O completion tracking.
  42. */
  43. enum {
  44. IO_READ, /* mapping for a read */
  45. IO_DELAY, /* mapping covers delalloc region */
  46. IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
  47. IO_NEW /* just allocated */
  48. };
  49. /*
  50. * Prime number of hash buckets since address is used as the key.
  51. */
  52. #define NVSYNC 37
  53. #define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
  54. static wait_queue_head_t xfs_ioend_wq[NVSYNC];
  55. void __init
  56. xfs_ioend_init(void)
  57. {
  58. int i;
  59. for (i = 0; i < NVSYNC; i++)
  60. init_waitqueue_head(&xfs_ioend_wq[i]);
  61. }
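/*
 * Wait for all in-flight ioends on this inode to drain, i.e. for
 * i_iocount to reach zero. The wait queue is taken from a small hash
 * keyed on the inode address, see to_ioend_wq() above.
 */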
  62. void
  63. xfs_ioend_wait(
  64. xfs_inode_t *ip)
  65. {
  66. wait_queue_head_t *wq = to_ioend_wq(ip);
  67. wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
  68. }
  69. STATIC void
  70. xfs_ioend_wake(
  71. xfs_inode_t *ip)
  72. {
  73. if (atomic_dec_and_test(&ip->i_iocount))
  74. wake_up(to_ioend_wq(ip));
  75. }
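/*
 * Walk the buffer_heads attached to the page and report whether any of
 * them are delalloc or unwritten, for the writepage and releasepage
 * paths below.
 */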
  76. void
  77. xfs_count_page_state(
  78. struct page *page,
  79. int *delalloc,
  80. int *unwritten)
  81. {
  82. struct buffer_head *bh, *head;
  83. *delalloc = *unwritten = 0;
  84. bh = head = page_buffers(page);
  85. do {
  86. if (buffer_unwritten(bh))
  87. (*unwritten) = 1;
  88. else if (buffer_delay(bh))
  89. (*delalloc) = 1;
  90. } while ((bh = bh->b_this_page) != head);
  91. }
  92. STATIC struct block_device *
  93. xfs_find_bdev_for_inode(
  94. struct inode *inode)
  95. {
  96. struct xfs_inode *ip = XFS_I(inode);
  97. struct xfs_mount *mp = ip->i_mount;
  98. if (XFS_IS_REALTIME_INODE(ip))
  99. return mp->m_rtdev_targp->bt_bdev;
  100. else
  101. return mp->m_ddev_targp->bt_bdev;
  102. }
  103. /*
  104. * We're now finished for good with this ioend structure.
  105. * Update the page state via the associated buffer_heads,
  106. * release holds on the inode and bio, and finally free
  107. * up memory. Do not use the ioend after this.
  108. */
  109. STATIC void
  110. xfs_destroy_ioend(
  111. xfs_ioend_t *ioend)
  112. {
  113. struct buffer_head *bh, *next;
  114. struct xfs_inode *ip = XFS_I(ioend->io_inode);
  115. for (bh = ioend->io_buffer_head; bh; bh = next) {
  116. next = bh->b_private;
  117. bh->b_end_io(bh, !ioend->io_error);
  118. }
  119. /*
  120. * Volume managers supporting multiple paths can send back ENODEV
  121. * when the final path disappears. In this case continuing to fill
  122. * the page cache with dirty data which cannot be written out is
  123. * evil, so prevent that.
  124. */
  125. if (unlikely(ioend->io_error == -ENODEV)) {
  126. xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
  127. __FILE__, __LINE__);
  128. }
  129. xfs_ioend_wake(ip);
  130. mempool_free(ioend, xfs_ioend_pool);
  131. }
  132. /*
  133. * If the end of the current ioend is beyond the current EOF,
  134. * return the new EOF value, otherwise zero.
  135. */
  136. STATIC xfs_fsize_t
  137. xfs_ioend_new_eof(
  138. xfs_ioend_t *ioend)
  139. {
  140. xfs_inode_t *ip = XFS_I(ioend->io_inode);
  141. xfs_fsize_t isize;
  142. xfs_fsize_t bsize;
  143. bsize = ioend->io_offset + ioend->io_size;
  144. isize = MAX(ip->i_size, ip->i_new_size);
  145. isize = MIN(isize, bsize);
  146. return isize > ip->i_d.di_size ? isize : 0;
  147. }
  148. /*
  149. * Update on-disk file size now that data has been written to disk. The
  150. * current in-memory file size is i_size. If a write is beyond EOF, i_new_size
  151. * will be the intended file size until i_size is updated. If this write does
  152. * not extend all the way to the valid file size then restrict this update to
  153. * the end of the write.
  154. *
  155. * This function does not block as blocking on the inode lock in IO completion
  156. * can lead to IO completion order dependency deadlocks. If it can't get the
  157. * inode ilock it will return EAGAIN. Callers must handle this.
  158. */
  159. STATIC int
  160. xfs_setfilesize(
  161. xfs_ioend_t *ioend)
  162. {
  163. xfs_inode_t *ip = XFS_I(ioend->io_inode);
  164. xfs_fsize_t isize;
  165. ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
  166. ASSERT(ioend->io_type != IO_READ);
  167. if (unlikely(ioend->io_error))
  168. return 0;
  169. if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
  170. return EAGAIN;
  171. isize = xfs_ioend_new_eof(ioend);
  172. if (isize) {
  173. ip->i_d.di_size = isize;
  174. xfs_mark_inode_dirty(ip);
  175. }
  176. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  177. return 0;
  178. }
  179. /*
  180. * Schedule IO completion handling on the final put of an ioend.
  181. */
  182. STATIC void
  183. xfs_finish_ioend(
  184. struct xfs_ioend *ioend)
  185. {
  186. if (atomic_dec_and_test(&ioend->io_remaining)) {
  187. if (ioend->io_type == IO_UNWRITTEN)
  188. queue_work(xfsconvertd_workqueue, &ioend->io_work);
  189. else
  190. queue_work(xfsdatad_workqueue, &ioend->io_work);
  191. }
  192. }
  193. /*
  194. * IO write completion.
  195. */
  196. STATIC void
  197. xfs_end_io(
  198. struct work_struct *work)
  199. {
  200. xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
  201. struct xfs_inode *ip = XFS_I(ioend->io_inode);
  202. int error = 0;
  203. /*
  204. * For unwritten extents we need to issue transactions to convert a
  205. * range to normal written extents after the data I/O has finished.
  206. */
  207. if (ioend->io_type == IO_UNWRITTEN &&
  208. likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
  209. error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
  210. ioend->io_size);
  211. if (error)
  212. ioend->io_error = error;
  213. }
  214. /*
  215. * We might have to update the on-disk file size after extending
  216. * writes.
  217. */
  218. if (ioend->io_type != IO_READ) {
  219. error = xfs_setfilesize(ioend);
  220. ASSERT(!error || error == EAGAIN);
  221. }
  222. /*
  223. * If we didn't complete processing of the ioend, requeue it to the
  224. * tail of the workqueue for another attempt later. Otherwise destroy
  225. * it.
  226. */
  227. if (error == EAGAIN) {
  228. atomic_inc(&ioend->io_remaining);
  229. xfs_finish_ioend(ioend);
  230. /* ensure we don't spin on blocked ioends */
  231. delay(1);
  232. } else {
  233. if (ioend->io_iocb)
  234. aio_complete(ioend->io_iocb, ioend->io_result, 0);
  235. xfs_destroy_ioend(ioend);
  236. }
  237. }
  238. /*
  239. * Call IO completion handling in caller context on the final put of an ioend.
  240. */
  241. STATIC void
  242. xfs_finish_ioend_sync(
  243. struct xfs_ioend *ioend)
  244. {
  245. if (atomic_dec_and_test(&ioend->io_remaining))
  246. xfs_end_io(&ioend->io_work);
  247. }
  248. /*
  249. * Allocate and initialise an IO completion structure.
  250. * We need to track unwritten extent write completion here initially.
  251. * We'll need to extend this for updating the ondisk inode size later
  252. * (vs. incore size).
  253. */
  254. STATIC xfs_ioend_t *
  255. xfs_alloc_ioend(
  256. struct inode *inode,
  257. unsigned int type)
  258. {
  259. xfs_ioend_t *ioend;
  260. ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
  261. /*
  262. * Set the count to 1 initially, which will prevent an I/O
  263. * completion callback from happening before we have started
  264. * all the I/O, and hence from calling the completion routine too early.
  265. */
  266. atomic_set(&ioend->io_remaining, 1);
  267. ioend->io_error = 0;
  268. ioend->io_list = NULL;
  269. ioend->io_type = type;
  270. ioend->io_inode = inode;
  271. ioend->io_buffer_head = NULL;
  272. ioend->io_buffer_tail = NULL;
  273. atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
  274. ioend->io_offset = 0;
  275. ioend->io_size = 0;
  276. ioend->io_iocb = NULL;
  277. ioend->io_result = 0;
  278. INIT_WORK(&ioend->io_work, xfs_end_io);
  279. return ioend;
  280. }
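/*
 * Thin wrapper around xfs_iomap(): map count bytes at offset and return
 * the result in imap, negating the return value to match the
 * negative-error convention used by the callers in this file.
 */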
  281. STATIC int
  282. xfs_map_blocks(
  283. struct inode *inode,
  284. loff_t offset,
  285. ssize_t count,
  286. struct xfs_bmbt_irec *imap,
  287. int flags)
  288. {
  289. int nmaps = 1;
  290. int new = 0;
  291. return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
  292. }
  293. STATIC int
  294. xfs_imap_valid(
  295. struct inode *inode,
  296. struct xfs_bmbt_irec *imap,
  297. xfs_off_t offset)
  298. {
  299. offset >>= inode->i_blkbits;
  300. return offset >= imap->br_startoff &&
  301. offset < imap->br_startoff + imap->br_blockcount;
  302. }
  303. /*
  304. * BIO completion handler for buffered IO.
  305. */
  306. STATIC void
  307. xfs_end_bio(
  308. struct bio *bio,
  309. int error)
  310. {
  311. xfs_ioend_t *ioend = bio->bi_private;
  312. ASSERT(atomic_read(&bio->bi_cnt) >= 1);
  313. ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
  314. /* Toss bio and pass work off to an xfsdatad thread */
  315. bio->bi_private = NULL;
  316. bio->bi_end_io = NULL;
  317. bio_put(bio);
  318. xfs_finish_ioend(ioend);
  319. }
  320. STATIC void
  321. xfs_submit_ioend_bio(
  322. struct writeback_control *wbc,
  323. xfs_ioend_t *ioend,
  324. struct bio *bio)
  325. {
  326. atomic_inc(&ioend->io_remaining);
  327. bio->bi_private = ioend;
  328. bio->bi_end_io = xfs_end_bio;
  329. /*
  330. * If the I/O is beyond EOF we mark the inode dirty immediately
  331. * but don't update the inode size until I/O completion.
  332. */
  333. if (xfs_ioend_new_eof(ioend))
  334. xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
  335. submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
  336. WRITE_SYNC_PLUG : WRITE, bio);
  337. }
  338. STATIC struct bio *
  339. xfs_alloc_ioend_bio(
  340. struct buffer_head *bh)
  341. {
  342. int nvecs = bio_get_nr_vecs(bh->b_bdev);
  343. struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
  344. ASSERT(bio->bi_private == NULL);
  345. bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
  346. bio->bi_bdev = bh->b_bdev;
  347. return bio;
  348. }
  349. STATIC void
  350. xfs_start_buffer_writeback(
  351. struct buffer_head *bh)
  352. {
  353. ASSERT(buffer_mapped(bh));
  354. ASSERT(buffer_locked(bh));
  355. ASSERT(!buffer_delay(bh));
  356. ASSERT(!buffer_unwritten(bh));
  357. mark_buffer_async_write(bh);
  358. set_buffer_uptodate(bh);
  359. clear_buffer_dirty(bh);
  360. }
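/*
 * Move the page into writeback state and unlock it. If no buffers on
 * the page are actually going to be written, finish writeback on the
 * page immediately.
 */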
  361. STATIC void
  362. xfs_start_page_writeback(
  363. struct page *page,
  364. int clear_dirty,
  365. int buffers)
  366. {
  367. ASSERT(PageLocked(page));
  368. ASSERT(!PageWriteback(page));
  369. if (clear_dirty)
  370. clear_page_dirty_for_io(page);
  371. set_page_writeback(page);
  372. unlock_page(page);
  373. /* If no buffers on the page are to be written, finish it here */
  374. if (!buffers)
  375. end_page_writeback(page);
  376. }
  377. static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  378. {
  379. return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
  380. }
  381. /*
  382. * Submit all of the bios for all of the ioends we have saved up, covering the
  383. * initial writepage page and also any probed pages.
  384. *
  385. * Because we may have multiple ioends spanning a page, we need to start
  386. * writeback on all the buffers before we submit them for I/O. If we mark the
  387. * buffers as we go, then we can end up with a page that only has buffers
  388. * marked async write, and I/O completion can occur before we mark the other
  389. * buffers async write.
  390. *
  391. * The end result of this is that we trip a bug in end_page_writeback() because
  392. * we call it twice for the one page as the code in end_buffer_async_write()
  393. * assumes that all buffers on the page are started at the same time.
  394. *
  395. * The fix is two passes across the ioend list - one to start writeback on the
  396. * buffer_heads, and then submit them for I/O on the second pass.
  397. */
  398. STATIC void
  399. xfs_submit_ioend(
  400. struct writeback_control *wbc,
  401. xfs_ioend_t *ioend)
  402. {
  403. xfs_ioend_t *head = ioend;
  404. xfs_ioend_t *next;
  405. struct buffer_head *bh;
  406. struct bio *bio;
  407. sector_t lastblock = 0;
  408. /* Pass 1 - start writeback */
  409. do {
  410. next = ioend->io_list;
  411. for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
  412. xfs_start_buffer_writeback(bh);
  413. } while ((ioend = next) != NULL);
  414. /* Pass 2 - submit I/O */
  415. ioend = head;
  416. do {
  417. next = ioend->io_list;
  418. bio = NULL;
  419. for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
  420. if (!bio) {
  421. retry:
  422. bio = xfs_alloc_ioend_bio(bh);
  423. } else if (bh->b_blocknr != lastblock + 1) {
  424. xfs_submit_ioend_bio(wbc, ioend, bio);
  425. goto retry;
  426. }
  427. if (bio_add_buffer(bio, bh) != bh->b_size) {
  428. xfs_submit_ioend_bio(wbc, ioend, bio);
  429. goto retry;
  430. }
  431. lastblock = bh->b_blocknr;
  432. }
  433. if (bio)
  434. xfs_submit_ioend_bio(wbc, ioend, bio);
  435. xfs_finish_ioend(ioend);
  436. } while ((ioend = next) != NULL);
  437. }
  438. /*
  439. * Cancel submission of all buffer_heads so far in this ioend.
  440. * Toss the ioend too. Only ever called for the initial page
  441. * in a writepage request, so only ever one page.
  442. */
  443. STATIC void
  444. xfs_cancel_ioend(
  445. xfs_ioend_t *ioend)
  446. {
  447. xfs_ioend_t *next;
  448. struct buffer_head *bh, *next_bh;
  449. do {
  450. next = ioend->io_list;
  451. bh = ioend->io_buffer_head;
  452. do {
  453. next_bh = bh->b_private;
  454. clear_buffer_async_write(bh);
  455. unlock_buffer(bh);
  456. } while ((bh = next_bh) != NULL);
  457. xfs_ioend_wake(XFS_I(ioend->io_inode));
  458. mempool_free(ioend, xfs_ioend_pool);
  459. } while ((ioend = next) != NULL);
  460. }
  461. /*
  462. * Test to see if we've been building up a completion structure for
  463. * earlier buffers -- if so, we try to append to this ioend if we
  464. * can, otherwise we finish off any current ioend and start another.
  465. * The ioend to add further buffers to is passed back via *result.
  466. */
  467. STATIC void
  468. xfs_add_to_ioend(
  469. struct inode *inode,
  470. struct buffer_head *bh,
  471. xfs_off_t offset,
  472. unsigned int type,
  473. xfs_ioend_t **result,
  474. int need_ioend)
  475. {
  476. xfs_ioend_t *ioend = *result;
  477. if (!ioend || need_ioend || type != ioend->io_type) {
  478. xfs_ioend_t *previous = *result;
  479. ioend = xfs_alloc_ioend(inode, type);
  480. ioend->io_offset = offset;
  481. ioend->io_buffer_head = bh;
  482. ioend->io_buffer_tail = bh;
  483. if (previous)
  484. previous->io_list = ioend;
  485. *result = ioend;
  486. } else {
  487. ioend->io_buffer_tail->b_private = bh;
  488. ioend->io_buffer_tail = bh;
  489. }
  490. bh->b_private = NULL;
  491. ioend->io_size += bh->b_size;
  492. }
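/*
 * Set b_blocknr for the buffer from the extent mapping: convert the
 * extent's start daddr from 512-byte basic blocks to filesystem blocks
 * and add the block offset of the given file offset within the extent.
 * xfs_fsb_to_db() handles realtime versus data device addressing.
 */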
  493. STATIC void
  494. xfs_map_buffer(
  495. struct inode *inode,
  496. struct buffer_head *bh,
  497. struct xfs_bmbt_irec *imap,
  498. xfs_off_t offset)
  499. {
  500. sector_t bn;
  501. struct xfs_mount *m = XFS_I(inode)->i_mount;
  502. xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
  503. xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
  504. ASSERT(imap->br_startblock != HOLESTARTBLOCK);
  505. ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
  506. bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
  507. ((offset - iomap_offset) >> inode->i_blkbits);
  508. ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
  509. bh->b_blocknr = bn;
  510. set_buffer_mapped(bh);
  511. }
  512. STATIC void
  513. xfs_map_at_offset(
  514. struct inode *inode,
  515. struct buffer_head *bh,
  516. struct xfs_bmbt_irec *imap,
  517. xfs_off_t offset)
  518. {
  519. ASSERT(imap->br_startblock != HOLESTARTBLOCK);
  520. ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
  521. lock_buffer(bh);
  522. xfs_map_buffer(inode, bh, imap, offset);
  523. bh->b_bdev = xfs_find_bdev_for_inode(inode);
  524. set_buffer_mapped(bh);
  525. clear_buffer_delay(bh);
  526. clear_buffer_unwritten(bh);
  527. }
  528. /*
  529. * Look for a page at index that is suitable for clustering.
  530. */
  531. STATIC unsigned int
  532. xfs_probe_page(
  533. struct page *page,
  534. unsigned int pg_offset)
  535. {
  536. struct buffer_head *bh, *head;
  537. int ret = 0;
  538. if (PageWriteback(page))
  539. return 0;
  540. if (!PageDirty(page))
  541. return 0;
  542. if (!page->mapping)
  543. return 0;
  544. if (!page_has_buffers(page))
  545. return 0;
  546. bh = head = page_buffers(page);
  547. do {
  548. if (!buffer_uptodate(bh))
  549. break;
  550. if (!buffer_mapped(bh))
  551. break;
  552. ret += bh->b_size;
  553. if (ret >= pg_offset)
  554. break;
  555. } while ((bh = bh->b_this_page) != head);
  556. return ret;
  557. }
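/*
 * Starting at bh in startpage, count the contiguous range of
 * up-to-date, mapped buffers in this page, then probe forward into
 * following dirty pages (capped at 64) to size a clustered mapping
 * request.
 */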
  558. STATIC size_t
  559. xfs_probe_cluster(
  560. struct inode *inode,
  561. struct page *startpage,
  562. struct buffer_head *bh,
  563. struct buffer_head *head)
  564. {
  565. struct pagevec pvec;
  566. pgoff_t tindex, tlast, tloff;
  567. size_t total = 0;
  568. int done = 0, i;
  569. /* First sum forwards in this page */
  570. do {
  571. if (!buffer_uptodate(bh) || !buffer_mapped(bh))
  572. return total;
  573. total += bh->b_size;
  574. } while ((bh = bh->b_this_page) != head);
  575. /* if we reached the end of the page, sum forwards in following pages */
  576. tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
  577. tindex = startpage->index + 1;
  578. /* Prune this back to avoid pathological behavior */
  579. tloff = min(tlast, startpage->index + 64);
  580. pagevec_init(&pvec, 0);
  581. while (!done && tindex <= tloff) {
  582. unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
  583. if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
  584. break;
  585. for (i = 0; i < pagevec_count(&pvec); i++) {
  586. struct page *page = pvec.pages[i];
  587. size_t pg_offset, pg_len = 0;
  588. if (tindex == tlast) {
  589. pg_offset =
  590. i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
  591. if (!pg_offset) {
  592. done = 1;
  593. break;
  594. }
  595. } else
  596. pg_offset = PAGE_CACHE_SIZE;
  597. if (page->index == tindex && trylock_page(page)) {
  598. pg_len = xfs_probe_page(page, pg_offset);
  599. unlock_page(page);
  600. }
  601. if (!pg_len) {
  602. done = 1;
  603. break;
  604. }
  605. total += pg_len;
  606. tindex++;
  607. }
  608. pagevec_release(&pvec);
  609. cond_resched();
  610. }
  611. return total;
  612. }
  613. /*
  614. * Test if a given page is suitable for writing as part of an unwritten
  615. * or delayed allocate extent.
  616. */
  617. STATIC int
  618. xfs_is_delayed_page(
  619. struct page *page,
  620. unsigned int type)
  621. {
  622. if (PageWriteback(page))
  623. return 0;
  624. if (page->mapping && page_has_buffers(page)) {
  625. struct buffer_head *bh, *head;
  626. int acceptable = 0;
  627. bh = head = page_buffers(page);
  628. do {
  629. if (buffer_unwritten(bh))
  630. acceptable = (type == IO_UNWRITTEN);
  631. else if (buffer_delay(bh))
  632. acceptable = (type == IO_DELAY);
  633. else if (buffer_dirty(bh) && buffer_mapped(bh))
  634. acceptable = (type == IO_NEW);
  635. else
  636. break;
  637. } while ((bh = bh->b_this_page) != head);
  638. if (acceptable)
  639. return 1;
  640. }
  641. return 0;
  642. }
  643. /*
  644. * Allocate & map buffers for page given the extent map. Write it out.
  645. * Except for the original page of a writepage, this is called on
  646. * delalloc/unwritten pages only; for the original page it is possible
  647. * that the page has no mapping at all.
  648. */
  649. STATIC int
  650. xfs_convert_page(
  651. struct inode *inode,
  652. struct page *page,
  653. loff_t tindex,
  654. struct xfs_bmbt_irec *imap,
  655. xfs_ioend_t **ioendp,
  656. struct writeback_control *wbc,
  657. int all_bh)
  658. {
  659. struct buffer_head *bh, *head;
  660. xfs_off_t end_offset;
  661. unsigned long p_offset;
  662. unsigned int type;
  663. int len, page_dirty;
  664. int count = 0, done = 0, uptodate = 1;
  665. xfs_off_t offset = page_offset(page);
  666. if (page->index != tindex)
  667. goto fail;
  668. if (!trylock_page(page))
  669. goto fail;
  670. if (PageWriteback(page))
  671. goto fail_unlock_page;
  672. if (page->mapping != inode->i_mapping)
  673. goto fail_unlock_page;
  674. if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
  675. goto fail_unlock_page;
  676. /*
  677. * page_dirty is initially a count of buffers on the page before
  678. * EOF and is decremented as we move each into a cleanable state.
  679. *
  680. * Derivation:
  681. *
  682. * End offset is the highest offset that this page should represent.
  683. * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
  684. * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
  685. * hence give us the correct page_dirty count. On any other page,
  686. * it will be zero and in that case we need page_dirty to be the
  687. * count of buffers on the page.
  688. */
  689. end_offset = min_t(unsigned long long,
  690. (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
  691. i_size_read(inode));
  692. len = 1 << inode->i_blkbits;
  693. p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
  694. PAGE_CACHE_SIZE);
  695. p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
  696. page_dirty = p_offset / len;
  697. bh = head = page_buffers(page);
  698. do {
  699. if (offset >= end_offset)
  700. break;
  701. if (!buffer_uptodate(bh))
  702. uptodate = 0;
  703. if (!(PageUptodate(page) || buffer_uptodate(bh))) {
  704. done = 1;
  705. continue;
  706. }
  707. if (buffer_unwritten(bh) || buffer_delay(bh)) {
  708. if (buffer_unwritten(bh))
  709. type = IO_UNWRITTEN;
  710. else
  711. type = IO_DELAY;
  712. if (!xfs_imap_valid(inode, imap, offset)) {
  713. done = 1;
  714. continue;
  715. }
  716. ASSERT(imap->br_startblock != HOLESTARTBLOCK);
  717. ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
  718. xfs_map_at_offset(inode, bh, imap, offset);
  719. xfs_add_to_ioend(inode, bh, offset, type,
  720. ioendp, done);
  721. page_dirty--;
  722. count++;
  723. } else {
  724. type = IO_NEW;
  725. if (buffer_mapped(bh) && all_bh) {
  726. lock_buffer(bh);
  727. xfs_add_to_ioend(inode, bh, offset,
  728. type, ioendp, done);
  729. count++;
  730. page_dirty--;
  731. } else {
  732. done = 1;
  733. }
  734. }
  735. } while (offset += len, (bh = bh->b_this_page) != head);
  736. if (uptodate && bh == head)
  737. SetPageUptodate(page);
  738. if (count) {
  739. if (--wbc->nr_to_write <= 0 &&
  740. wbc->sync_mode == WB_SYNC_NONE)
  741. done = 1;
  742. }
  743. xfs_start_page_writeback(page, !page_dirty, count);
  744. return done;
  745. fail_unlock_page:
  746. unlock_page(page);
  747. fail:
  748. return 1;
  749. }
  750. /*
  751. * Convert & write out a cluster of pages in the same extent as defined
  752. * by mp and following the start page.
  753. */
  754. STATIC void
  755. xfs_cluster_write(
  756. struct inode *inode,
  757. pgoff_t tindex,
  758. struct xfs_bmbt_irec *imap,
  759. xfs_ioend_t **ioendp,
  760. struct writeback_control *wbc,
  761. int all_bh,
  762. pgoff_t tlast)
  763. {
  764. struct pagevec pvec;
  765. int done = 0, i;
  766. pagevec_init(&pvec, 0);
  767. while (!done && tindex <= tlast) {
  768. unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
  769. if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
  770. break;
  771. for (i = 0; i < pagevec_count(&pvec); i++) {
  772. done = xfs_convert_page(inode, pvec.pages[i], tindex++,
  773. imap, ioendp, wbc, all_bh);
  774. if (done)
  775. break;
  776. }
  777. pagevec_release(&pvec);
  778. cond_resched();
  779. }
  780. }
  781. STATIC void
  782. xfs_vm_invalidatepage(
  783. struct page *page,
  784. unsigned long offset)
  785. {
  786. trace_xfs_invalidatepage(page->mapping->host, page, offset);
  787. block_invalidatepage(page, offset);
  788. }
  789. /*
  790. * If the page has delalloc buffers on it, we need to punch them out before we
  791. * invalidate the page. If we don't, we leave a stale delalloc mapping on the
  792. * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
  793. * is done on that same region - the delalloc extent is returned when none is
  794. * supposed to be there.
  795. *
  796. * We prevent this by truncating away the delalloc regions on the page before
  797. * invalidating it. Because they are delalloc, we can do this without needing a
  798. * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
  799. * truncation without a transaction as there is no space left for block
  800. * reservation (typically why we see an ENOSPC in writeback).
  801. *
  802. * This is not a performance critical path, so for now just do the punching a
  803. * buffer head at a time.
  804. */
  805. STATIC void
  806. xfs_aops_discard_page(
  807. struct page *page)
  808. {
  809. struct inode *inode = page->mapping->host;
  810. struct xfs_inode *ip = XFS_I(inode);
  811. struct buffer_head *bh, *head;
  812. loff_t offset = page_offset(page);
  813. if (!xfs_is_delayed_page(page, IO_DELAY))
  814. goto out_invalidate;
  815. if (XFS_FORCED_SHUTDOWN(ip->i_mount))
  816. goto out_invalidate;
  817. xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
  818. "page discard on page %p, inode 0x%llx, offset %llu.",
  819. page, ip->i_ino, offset);
  820. xfs_ilock(ip, XFS_ILOCK_EXCL);
  821. bh = head = page_buffers(page);
  822. do {
  823. int error;
  824. xfs_fileoff_t start_fsb;
  825. if (!buffer_delay(bh))
  826. goto next_buffer;
  827. start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
  828. error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
  829. if (error) {
  830. /* something screwed, just bail */
  831. if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
  832. xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
  833. "page discard unable to remove delalloc mapping.");
  834. }
  835. break;
  836. }
  837. next_buffer:
  838. offset += 1 << inode->i_blkbits;
  839. } while ((bh = bh->b_this_page) != head);
  840. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  841. out_invalidate:
  842. xfs_vm_invalidatepage(page, 0);
  843. return;
  844. }
  845. /*
  846. * Write out a dirty page.
  847. *
  848. * For delalloc space on the page we need to allocate space and flush it.
  849. * For unwritten space on the page we need to start the conversion to
  850. * regular allocated space.
  851. * For any other dirty buffer heads on the page we should flush them.
  852. *
  853. * If we detect that a transaction would be required to flush the page, we
  854. * have to check the process flags first, if we are already in a transaction
  855. * or disk I/O during allocations is off, we need to fail the writepage and
  856. * redirty the page.
  857. */
  858. STATIC int
  859. xfs_vm_writepage(
  860. struct page *page,
  861. struct writeback_control *wbc)
  862. {
  863. struct inode *inode = page->mapping->host;
  864. int delalloc, unwritten;
  865. struct buffer_head *bh, *head;
  866. struct xfs_bmbt_irec imap;
  867. xfs_ioend_t *ioend = NULL, *iohead = NULL;
  868. loff_t offset;
  869. unsigned int type;
  870. __uint64_t end_offset;
  871. pgoff_t end_index, last_index;
  872. ssize_t size, len;
  873. int flags, err, imap_valid = 0, uptodate = 1;
  874. int count = 0;
  875. int all_bh = 0;
  876. trace_xfs_writepage(inode, page, 0);
  877. ASSERT(page_has_buffers(page));
  878. /*
  879. * Refuse to write the page out if we are called from reclaim context.
  880. *
  881. * This avoids stack overflows when called from deeply used stacks in
  882. * random callers for direct reclaim or memcg reclaim. We explicitly
  883. * allow reclaim from kswapd as the stack usage there is relatively low.
  884. *
  885. * This should really be done by the core VM, but until that happens
  886. * filesystems like XFS, btrfs and ext4 have to take care of this
  887. * by themselves.
  888. */
  889. if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
  890. goto redirty;
  891. /*
  892. * We need a transaction if there are delalloc or unwritten buffers
  893. * on the page.
  894. *
  895. * If we need a transaction and the process flags say we are already
  896. * in a transaction, or no IO is allowed then mark the page dirty
  897. * again and leave the page as is.
  898. */
  899. xfs_count_page_state(page, &delalloc, &unwritten);
  900. if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
  901. goto redirty;
  902. /* Is this page beyond the end of the file? */
  903. offset = i_size_read(inode);
  904. end_index = offset >> PAGE_CACHE_SHIFT;
  905. last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
  906. if (page->index >= end_index) {
  907. if ((page->index >= end_index + 1) ||
  908. !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
  909. unlock_page(page);
  910. return 0;
  911. }
  912. }
  913. end_offset = min_t(unsigned long long,
  914. (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
  915. offset);
  916. len = 1 << inode->i_blkbits;
  917. bh = head = page_buffers(page);
  918. offset = page_offset(page);
  919. flags = BMAPI_READ;
  920. type = IO_NEW;
  921. do {
  922. int new_ioend = 0;
  923. if (offset >= end_offset)
  924. break;
  925. if (!buffer_uptodate(bh))
  926. uptodate = 0;
  927. /*
  928. * set_page_dirty dirties all buffers in a page, independent
  929. * of their state. The dirty state however is entirely
  930. * meaningless for holes (!mapped && uptodate), so skip
  931. * buffers covering holes here.
  932. */
  933. if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
  934. imap_valid = 0;
  935. continue;
  936. }
  937. if (imap_valid)
  938. imap_valid = xfs_imap_valid(inode, &imap, offset);
  939. if (buffer_unwritten(bh) || buffer_delay(bh)) {
  940. if (buffer_unwritten(bh)) {
  941. if (type != IO_UNWRITTEN) {
  942. type = IO_UNWRITTEN;
  943. imap_valid = 0;
  944. }
  945. flags = BMAPI_WRITE | BMAPI_IGNSTATE;
  946. } else if (buffer_delay(bh)) {
  947. if (type != IO_DELAY) {
  948. type = IO_DELAY;
  949. imap_valid = 0;
  950. }
  951. flags = BMAPI_ALLOCATE;
  952. if (wbc->sync_mode == WB_SYNC_NONE)
  953. flags |= BMAPI_TRYLOCK;
  954. }
  955. if (!imap_valid) {
  956. /*
  957. * If we didn't have a valid mapping then we
  958. * need to ensure that we put the new mapping
  959. * in a new ioend structure. This needs to be
  960. * done to ensure that the ioends correctly
  961. * reflect the block mappings at io completion
  962. * for unwritten extent conversion.
  963. */
  964. new_ioend = 1;
  965. err = xfs_map_blocks(inode, offset, len,
  966. &imap, flags);
  967. if (err)
  968. goto error;
  969. imap_valid = xfs_imap_valid(inode, &imap,
  970. offset);
  971. }
  972. if (imap_valid) {
  973. xfs_map_at_offset(inode, bh, &imap, offset);
  974. xfs_add_to_ioend(inode, bh, offset, type,
  975. &ioend, new_ioend);
  976. count++;
  977. }
  978. } else if (buffer_uptodate(bh)) {
  979. /*
  980. * we got here because the buffer is already mapped.
  981. * That means it must already have extents allocated
  982. * underneath it. Map the extent by reading it.
  983. */
  984. if (flags != BMAPI_READ) {
  985. flags = BMAPI_READ;
  986. imap_valid = 0;
  987. }
  988. if (!imap_valid) {
  989. new_ioend = 1;
  990. size = xfs_probe_cluster(inode, page, bh, head);
  991. err = xfs_map_blocks(inode, offset, size,
  992. &imap, flags);
  993. if (err)
  994. goto error;
  995. imap_valid = xfs_imap_valid(inode, &imap,
  996. offset);
  997. }
  998. /*
  999. * We set the type to IO_NEW in case we are doing a
  1000. * small write at EOF that is extending the file but
  1001. * without needing an allocation. We need to update the
  1002. * file size on I/O completion in this case so it is
  1003. * the same case as having just allocated a new extent
  1004. * that we are writing into for the first time.
  1005. */
  1006. type = IO_NEW;
  1007. if (imap_valid) {
  1008. all_bh = 1;
  1009. lock_buffer(bh);
  1010. xfs_add_to_ioend(inode, bh, offset, type,
  1011. &ioend, new_ioend);
  1012. count++;
  1013. }
  1014. } else if (PageUptodate(page)) {
  1015. ASSERT(buffer_mapped(bh));
  1016. imap_valid = 0;
  1017. }
  1018. if (!iohead)
  1019. iohead = ioend;
  1020. } while (offset += len, ((bh = bh->b_this_page) != head));
  1021. if (uptodate && bh == head)
  1022. SetPageUptodate(page);
  1023. xfs_start_page_writeback(page, 1, count);
  1024. if (ioend && imap_valid) {
  1025. xfs_off_t end_index;
  1026. end_index = imap.br_startoff + imap.br_blockcount;
  1027. /* to bytes */
  1028. end_index <<= inode->i_blkbits;
  1029. /* to pages */
  1030. end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
  1031. /* check against file size */
  1032. if (end_index > last_index)
  1033. end_index = last_index;
  1034. xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
  1035. wbc, all_bh, end_index);
  1036. }
  1037. if (iohead)
  1038. xfs_submit_ioend(wbc, iohead);
  1039. return 0;
  1040. error:
  1041. if (iohead)
  1042. xfs_cancel_ioend(iohead);
  1043. if (err == -EAGAIN)
  1044. goto redirty;
  1045. xfs_aops_discard_page(page);
  1046. ClearPageUptodate(page);
  1047. unlock_page(page);
  1048. return err;
  1049. redirty:
  1050. redirty_page_for_writepage(wbc, page);
  1051. unlock_page(page);
  1052. return 0;
  1053. }
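/*
 * ->writepages: clear the XFS_ITRUNCATED flag on the inode, then let
 * generic_writepages() push the dirty pages through ->writepage above.
 */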
  1054. STATIC int
  1055. xfs_vm_writepages(
  1056. struct address_space *mapping,
  1057. struct writeback_control *wbc)
  1058. {
  1059. xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
  1060. return generic_writepages(mapping, wbc);
  1061. }
  1062. /*
  1063. * Called to move a page into cleanable state - and from there
  1064. * to be released. The page should already be clean. We always
  1065. * have buffer heads in this call.
  1066. *
  1067. * Returns 1 if the page is ok to release, 0 otherwise.
  1068. */
  1069. STATIC int
  1070. xfs_vm_releasepage(
  1071. struct page *page,
  1072. gfp_t gfp_mask)
  1073. {
  1074. int delalloc, unwritten;
  1075. trace_xfs_releasepage(page->mapping->host, page, 0);
  1076. xfs_count_page_state(page, &delalloc, &unwritten);
  1077. if (WARN_ON(delalloc))
  1078. return 0;
  1079. if (WARN_ON(unwritten))
  1080. return 0;
  1081. return try_to_free_buffers(page);
  1082. }
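/*
 * Common get_blocks implementation shared by the buffered and direct
 * I/O paths; the direct argument selects O_DIRECT behaviour such as
 * the BMAPI_DIRECT mapping flag and stashing the inode in b_private
 * for unwritten extent conversion at I/O completion.
 */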
  1083. STATIC int
  1084. __xfs_get_blocks(
  1085. struct inode *inode,
  1086. sector_t iblock,
  1087. struct buffer_head *bh_result,
  1088. int create,
  1089. int direct)
  1090. {
  1091. int flags = create ? BMAPI_WRITE : BMAPI_READ;
  1092. struct xfs_bmbt_irec imap;
  1093. xfs_off_t offset;
  1094. ssize_t size;
  1095. int nimap = 1;
  1096. int new = 0;
  1097. int error;
  1098. offset = (xfs_off_t)iblock << inode->i_blkbits;
  1099. ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
  1100. size = bh_result->b_size;
  1101. if (!create && direct && offset >= i_size_read(inode))
  1102. return 0;
  1103. if (direct && create)
  1104. flags |= BMAPI_DIRECT;
  1105. error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
  1106. &new);
  1107. if (error)
  1108. return -error;
  1109. if (nimap == 0)
  1110. return 0;
  1111. if (imap.br_startblock != HOLESTARTBLOCK &&
  1112. imap.br_startblock != DELAYSTARTBLOCK) {
  1113. /*
  1114. * For unwritten extents do not report a disk address on
  1115. * the read case (treat as if we're reading into a hole).
  1116. */
  1117. if (create || !ISUNWRITTEN(&imap))
  1118. xfs_map_buffer(inode, bh_result, &imap, offset);
  1119. if (create && ISUNWRITTEN(&imap)) {
  1120. if (direct)
  1121. bh_result->b_private = inode;
  1122. set_buffer_unwritten(bh_result);
  1123. }
  1124. }
  1125. /*
  1126. * If this is a realtime file, data may be on a different device
  1127. * to that pointed to from the buffer_head b_bdev currently.
  1128. */
  1129. bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
  1130. /*
  1131. * If we previously allocated a block out beyond eof and we are now
  1132. * coming back to use it then we will need to flag it as new even if it
  1133. * has a disk address.
  1134. *
  1135. * With sub-block writes into unwritten extents we also need to mark
  1136. * the buffer as new so that the unwritten parts of the buffer get
  1137. * correctly zeroed.
  1138. */
  1139. if (create &&
  1140. ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
  1141. (offset >= i_size_read(inode)) ||
  1142. (new || ISUNWRITTEN(&imap))))
  1143. set_buffer_new(bh_result);
  1144. if (imap.br_startblock == DELAYSTARTBLOCK) {
  1145. BUG_ON(direct);
  1146. if (create) {
  1147. set_buffer_uptodate(bh_result);
  1148. set_buffer_mapped(bh_result);
  1149. set_buffer_delay(bh_result);
  1150. }
  1151. }
  1152. /*
  1153. * If this is O_DIRECT or the mpage code calling, tell them how large
  1154. * the mapping is, so that we can avoid repeated get_blocks calls.
  1155. */
  1156. if (direct || size > (1 << inode->i_blkbits)) {
  1157. xfs_off_t mapping_size;
  1158. mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
  1159. mapping_size <<= inode->i_blkbits;
  1160. ASSERT(mapping_size > 0);
  1161. if (mapping_size > size)
  1162. mapping_size = size;
  1163. if (mapping_size > LONG_MAX)
  1164. mapping_size = LONG_MAX;
  1165. bh_result->b_size = mapping_size;
  1166. }
  1167. return 0;
  1168. }
  1169. int
  1170. xfs_get_blocks(
  1171. struct inode *inode,
  1172. sector_t iblock,
  1173. struct buffer_head *bh_result,
  1174. int create)
  1175. {
  1176. return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
  1177. }
  1178. STATIC int
  1179. xfs_get_blocks_direct(
  1180. struct inode *inode,
  1181. sector_t iblock,
  1182. struct buffer_head *bh_result,
  1183. int create)
  1184. {
  1185. return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
  1186. }
  1187. /*
  1188. * Complete a direct I/O write request.
  1189. *
  1190. * If the private argument is non-NULL __xfs_get_blocks signals us that we
  1191. * need to issue a transaction to convert the range from unwritten to written
  1192. * extents. In case this is regular synchronous I/O we just call xfs_end_io
  1193. * to do this and we are done. But in case this was a successful AIO
  1194. * request this handler is called from interrupt context, from which we
  1195. * can't start transactions. In that case offload the I/O completion to
  1196. * the workqueues we also use for buffered I/O completion.
  1197. */
  1198. STATIC void
  1199. xfs_end_io_direct_write(
  1200. struct kiocb *iocb,
  1201. loff_t offset,
  1202. ssize_t size,
  1203. void *private,
  1204. int ret,
  1205. bool is_async)
  1206. {
  1207. struct xfs_ioend *ioend = iocb->private;
  1208. /*
  1209. * blockdev_direct_IO can return an error even after the I/O
  1210. * completion handler was called. Thus we need to protect
  1211. * against double-freeing.
  1212. */
  1213. iocb->private = NULL;
  1214. ioend->io_offset = offset;
  1215. ioend->io_size = size;
  1216. if (private && size > 0)
  1217. ioend->io_type = IO_UNWRITTEN;
  1218. if (is_async) {
  1219. /*
  1220. * If we are converting an unwritten extent we need to delay
  1221. * the AIO completion until after the unwritten extent
  1222. * conversion has completed, otherwise do it ASAP.
  1223. */
  1224. if (ioend->io_type == IO_UNWRITTEN) {
  1225. ioend->io_iocb = iocb;
  1226. ioend->io_result = ret;
  1227. } else {
  1228. aio_complete(iocb, ret, 0);
  1229. }
  1230. xfs_finish_ioend(ioend);
  1231. } else {
  1232. xfs_finish_ioend_sync(ioend);
  1233. }
  1234. }
  1235. STATIC ssize_t
  1236. xfs_vm_direct_IO(
  1237. int rw,
  1238. struct kiocb *iocb,
  1239. const struct iovec *iov,
  1240. loff_t offset,
  1241. unsigned long nr_segs)
  1242. {
  1243. struct inode *inode = iocb->ki_filp->f_mapping->host;
  1244. struct block_device *bdev = xfs_find_bdev_for_inode(inode);
  1245. ssize_t ret;
  1246. if (rw & WRITE) {
  1247. iocb->private = xfs_alloc_ioend(inode, IO_NEW);
  1248. ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
  1249. offset, nr_segs,
  1250. xfs_get_blocks_direct,
  1251. xfs_end_io_direct_write, NULL, 0);
  1252. if (ret != -EIOCBQUEUED && iocb->private)
  1253. xfs_destroy_ioend(iocb->private);
  1254. } else {
  1255. ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
  1256. offset, nr_segs,
  1257. xfs_get_blocks_direct,
  1258. NULL, NULL, 0);
  1259. }
  1260. return ret;
  1261. }
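/*
 * Clean up after a short or failed buffered write: if the attempted
 * write extended past the old i_size, trim the page cache back and
 * punch out any delalloc blocks reserved beyond EOF.
 */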
  1262. STATIC void
  1263. xfs_vm_write_failed(
  1264. struct address_space *mapping,
  1265. loff_t to)
  1266. {
  1267. struct inode *inode = mapping->host;
  1268. if (to > inode->i_size) {
  1269. /*
  1270. * punch out the delalloc blocks we have already allocated. We
  1271. * don't call xfs_setattr() to do this as we may be in the
  1272. * middle of a multi-iovec write and so the vfs inode->i_size
  1273. * will not match the xfs ip->i_size and so it will zero too
  1274. * much. Hence we just truncate the page cache to zero what is
  1275. * necessary and punch the delalloc blocks directly.
  1276. */
  1277. struct xfs_inode *ip = XFS_I(inode);
  1278. xfs_fileoff_t start_fsb;
  1279. xfs_fileoff_t end_fsb;
  1280. int error;
  1281. truncate_pagecache(inode, to, inode->i_size);
  1282. /*
  1283. * Check if there are any blocks that are outside of i_size
  1284. * that need to be trimmed back.
  1285. */
  1286. start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
  1287. end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
  1288. if (end_fsb <= start_fsb)
  1289. return;
  1290. xfs_ilock(ip, XFS_ILOCK_EXCL);
  1291. error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
  1292. end_fsb - start_fsb);
  1293. if (error) {
  1294. /* something screwed, just bail */
  1295. if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
  1296. xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
  1297. "xfs_vm_write_failed: unable to clean up ino %lld",
  1298. ip->i_ino);
  1299. }
  1300. }
  1301. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1302. }
  1303. }
  1304. STATIC int
  1305. xfs_vm_write_begin(
  1306. struct file *file,
  1307. struct address_space *mapping,
  1308. loff_t pos,
  1309. unsigned len,
  1310. unsigned flags,
  1311. struct page **pagep,
  1312. void **fsdata)
  1313. {
  1314. int ret;
  1315. ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
  1316. pagep, xfs_get_blocks);
  1317. if (unlikely(ret))
  1318. xfs_vm_write_failed(mapping, pos + len);
  1319. return ret;
  1320. }
  1321. STATIC int
  1322. xfs_vm_write_end(
  1323. struct file *file,
  1324. struct address_space *mapping,
  1325. loff_t pos,
  1326. unsigned len,
  1327. unsigned copied,
  1328. struct page *page,
  1329. void *fsdata)
  1330. {
  1331. int ret;
  1332. ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
  1333. if (unlikely(ret < len))
  1334. xfs_vm_write_failed(mapping, pos + len);
  1335. return ret;
  1336. }
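/*
 * Flush dirty pages before handing off to generic_block_bmap() so that
 * delalloc extents are converted and real block numbers can be
 * reported (e.g. for FIBMAP).
 */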
  1337. STATIC sector_t
  1338. xfs_vm_bmap(
  1339. struct address_space *mapping,
  1340. sector_t block)
  1341. {
  1342. struct inode *inode = (struct inode *)mapping->host;
  1343. struct xfs_inode *ip = XFS_I(inode);
  1344. trace_xfs_vm_bmap(XFS_I(inode));
  1345. xfs_ilock(ip, XFS_IOLOCK_SHARED);
  1346. xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
  1347. xfs_iunlock(ip, XFS_IOLOCK_SHARED);
  1348. return generic_block_bmap(mapping, block, xfs_get_blocks);
  1349. }
  1350. STATIC int
  1351. xfs_vm_readpage(
  1352. struct file *unused,
  1353. struct page *page)
  1354. {
  1355. return mpage_readpage(page, xfs_get_blocks);
  1356. }
  1357. STATIC int
  1358. xfs_vm_readpages(
  1359. struct file *unused,
  1360. struct address_space *mapping,
  1361. struct list_head *pages,
  1362. unsigned nr_pages)
  1363. {
  1364. return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
  1365. }
  1366. const struct address_space_operations xfs_address_space_operations = {
  1367. .readpage = xfs_vm_readpage,
  1368. .readpages = xfs_vm_readpages,
  1369. .writepage = xfs_vm_writepage,
  1370. .writepages = xfs_vm_writepages,
  1371. .sync_page = block_sync_page,
  1372. .releasepage = xfs_vm_releasepage,
  1373. .invalidatepage = xfs_vm_invalidatepage,
  1374. .write_begin = xfs_vm_write_begin,
  1375. .write_end = xfs_vm_write_end,
  1376. .bmap = xfs_vm_bmap,
  1377. .direct_IO = xfs_vm_direct_IO,
  1378. .migratepage = buffer_migrate_page,
  1379. .is_partially_uptodate = block_is_partially_uptodate,
  1380. .error_remove_page = generic_error_remove_page,
  1381. };