xfs_aops.c

  1. /*
  2. * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_bit.h"
  20. #include "xfs_log.h"
  21. #include "xfs_inum.h"
  22. #include "xfs_sb.h"
  23. #include "xfs_ag.h"
  24. #include "xfs_dir2.h"
  25. #include "xfs_trans.h"
  26. #include "xfs_dmapi.h"
  27. #include "xfs_mount.h"
  28. #include "xfs_bmap_btree.h"
  29. #include "xfs_alloc_btree.h"
  30. #include "xfs_ialloc_btree.h"
  31. #include "xfs_dir2_sf.h"
  32. #include "xfs_attr_sf.h"
  33. #include "xfs_dinode.h"
  34. #include "xfs_inode.h"
  35. #include "xfs_alloc.h"
  36. #include "xfs_btree.h"
  37. #include "xfs_error.h"
  38. #include "xfs_rw.h"
  39. #include "xfs_iomap.h"
  40. #include <linux/mpage.h>
  41. #include <linux/pagevec.h>
  42. #include <linux/writeback.h>
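/*
 * Walk the buffer_heads attached to a page and report whether any of
 * them are delayed-allocate, unwritten, or uptodate but unmapped.
 */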
  43. STATIC void
  44. xfs_count_page_state(
  45. struct page *page,
  46. int *delalloc,
  47. int *unmapped,
  48. int *unwritten)
  49. {
  50. struct buffer_head *bh, *head;
  51. *delalloc = *unmapped = *unwritten = 0;
  52. bh = head = page_buffers(page);
  53. do {
  54. if (buffer_uptodate(bh) && !buffer_mapped(bh))
  55. (*unmapped) = 1;
  56. else if (buffer_unwritten(bh))
  57. (*unwritten) = 1;
  58. else if (buffer_delay(bh))
  59. (*delalloc) = 1;
  60. } while ((bh = bh->b_this_page) != head);
  61. }
  62. #if defined(XFS_RW_TRACE)
  63. void
  64. xfs_page_trace(
  65. int tag,
  66. struct inode *inode,
  67. struct page *page,
  68. unsigned long pgoff)
  69. {
  70. xfs_inode_t *ip;
  71. bhv_vnode_t *vp = vn_from_inode(inode);
  72. loff_t isize = i_size_read(inode);
  73. loff_t offset = page_offset(page);
  74. int delalloc = -1, unmapped = -1, unwritten = -1;
  75. if (page_has_buffers(page))
  76. xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
  77. ip = xfs_vtoi(vp);
  78. if (!ip->i_rwtrace)
  79. return;
  80. ktrace_enter(ip->i_rwtrace,
  81. (void *)((unsigned long)tag),
  82. (void *)ip,
  83. (void *)inode,
  84. (void *)page,
  85. (void *)pgoff,
  86. (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
  87. (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
  88. (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
  89. (void *)((unsigned long)(isize & 0xffffffff)),
  90. (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
  91. (void *)((unsigned long)(offset & 0xffffffff)),
  92. (void *)((unsigned long)delalloc),
  93. (void *)((unsigned long)unmapped),
  94. (void *)((unsigned long)unwritten),
  95. (void *)((unsigned long)current_pid()),
  96. (void *)NULL);
  97. }
  98. #else
  99. #define xfs_page_trace(tag, inode, page, pgoff)
  100. #endif
  101. /*
  102. * Schedule IO completion handling on an xfsdatad if this was
  103. * the final hold on this ioend. If we are asked to wait,
  104. * flush the workqueue.
  105. */
  106. STATIC void
  107. xfs_finish_ioend(
  108. xfs_ioend_t *ioend,
  109. int wait)
  110. {
  111. if (atomic_dec_and_test(&ioend->io_remaining)) {
  112. queue_work(xfsdatad_workqueue, &ioend->io_work);
  113. if (wait)
  114. flush_workqueue(xfsdatad_workqueue);
  115. }
  116. }
  117. /*
  118. * We're now finished for good with this ioend structure.
  119. * Update the page state via the associated buffer_heads,
  120. * release holds on the inode and bio, and finally free
  121. * up memory. Do not use the ioend after this.
  122. */
  123. STATIC void
  124. xfs_destroy_ioend(
  125. xfs_ioend_t *ioend)
  126. {
  127. struct buffer_head *bh, *next;
  128. for (bh = ioend->io_buffer_head; bh; bh = next) {
  129. next = bh->b_private;
  130. bh->b_end_io(bh, !ioend->io_error);
  131. }
  132. if (unlikely(ioend->io_error))
  133. vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
  134. vn_iowake(ioend->io_vnode);
  135. mempool_free(ioend, xfs_ioend_pool);
  136. }
  137. /*
  138. * Update on-disk file size now that data has been written to disk.
  139. * The current in-memory file size is i_size. If a write is beyond
  140. * EOF, io_new_size will be the intended file size until i_size is
  141. * updated. If this write does not extend all the way to the valid
  142. * file size then restrict this update to the end of the write.
  143. */
  144. STATIC void
  145. xfs_setfilesize(
  146. xfs_ioend_t *ioend)
  147. {
  148. xfs_inode_t *ip;
  149. xfs_fsize_t isize;
  150. xfs_fsize_t bsize;
  151. ip = xfs_vtoi(ioend->io_vnode);
  152. if (!ip)
  153. return;
  154. ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
  155. ASSERT(ioend->io_type != IOMAP_READ);
  156. if (unlikely(ioend->io_error))
  157. return;
  158. bsize = ioend->io_offset + ioend->io_size;
  159. xfs_ilock(ip, XFS_ILOCK_EXCL);
  160. isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
  161. isize = MIN(isize, bsize);
  162. if (ip->i_d.di_size < isize) {
  163. ip->i_d.di_size = isize;
  164. ip->i_update_core = 1;
  165. ip->i_update_size = 1;
  166. mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
  167. }
  168. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  169. }
  170. /*
  171. * Buffered IO write completion for delayed allocate extents.
  172. */
  173. STATIC void
  174. xfs_end_bio_delalloc(
  175. struct work_struct *work)
  176. {
  177. xfs_ioend_t *ioend =
  178. container_of(work, xfs_ioend_t, io_work);
  179. xfs_setfilesize(ioend);
  180. xfs_destroy_ioend(ioend);
  181. }
  182. /*
  183. * Buffered IO write completion for regular, written extents.
  184. */
  185. STATIC void
  186. xfs_end_bio_written(
  187. struct work_struct *work)
  188. {
  189. xfs_ioend_t *ioend =
  190. container_of(work, xfs_ioend_t, io_work);
  191. xfs_setfilesize(ioend);
  192. xfs_destroy_ioend(ioend);
  193. }
  194. /*
  195. * IO write completion for unwritten extents.
  196. *
  197. * Issue transactions to convert a buffer range from unwritten
  198. * to written extents.
  199. */
  200. STATIC void
  201. xfs_end_bio_unwritten(
  202. struct work_struct *work)
  203. {
  204. xfs_ioend_t *ioend =
  205. container_of(work, xfs_ioend_t, io_work);
  206. bhv_vnode_t *vp = ioend->io_vnode;
  207. xfs_off_t offset = ioend->io_offset;
  208. size_t size = ioend->io_size;
  209. if (likely(!ioend->io_error)) {
  210. bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
  211. xfs_setfilesize(ioend);
  212. }
  213. xfs_destroy_ioend(ioend);
  214. }
  215. /*
  216. * IO read completion for regular, written extents.
  217. */
  218. STATIC void
  219. xfs_end_bio_read(
  220. struct work_struct *work)
  221. {
  222. xfs_ioend_t *ioend =
  223. container_of(work, xfs_ioend_t, io_work);
  224. xfs_destroy_ioend(ioend);
  225. }
  226. /*
  227. * Allocate and initialise an IO completion structure.
  228. * We need to track unwritten extent write completion here initially.
  229. * We'll need to extend this for updating the ondisk inode size later
  230. * (vs. incore size).
  231. */
  232. STATIC xfs_ioend_t *
  233. xfs_alloc_ioend(
  234. struct inode *inode,
  235. unsigned int type)
  236. {
  237. xfs_ioend_t *ioend;
  238. ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
  239. /*
  240. * Set the count to 1 initially. This bias prevents the I/O
  241. * completion callback from starting completion processing
  242. * before we have submitted all of the I/O.
  243. */
  244. atomic_set(&ioend->io_remaining, 1);
  245. ioend->io_error = 0;
  246. ioend->io_list = NULL;
  247. ioend->io_type = type;
  248. ioend->io_vnode = vn_from_inode(inode);
  249. ioend->io_buffer_head = NULL;
  250. ioend->io_buffer_tail = NULL;
  251. atomic_inc(&ioend->io_vnode->v_iocount);
  252. ioend->io_offset = 0;
  253. ioend->io_size = 0;
  254. if (type == IOMAP_UNWRITTEN)
  255. INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
  256. else if (type == IOMAP_DELAY)
  257. INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
  258. else if (type == IOMAP_READ)
  259. INIT_WORK(&ioend->io_work, xfs_end_bio_read);
  260. else
  261. INIT_WORK(&ioend->io_work, xfs_end_bio_written);
  262. return ioend;
  263. }
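/*
 * Map a file range to an extent via the bmap vnode operation, filling
 * in *mapp. Write and allocate requests also mark the vnode modified.
 * The positive error from the bmap call is negated for the VFS caller.
 */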
  264. STATIC int
  265. xfs_map_blocks(
  266. struct inode *inode,
  267. loff_t offset,
  268. ssize_t count,
  269. xfs_iomap_t *mapp,
  270. int flags)
  271. {
  272. bhv_vnode_t *vp = vn_from_inode(inode);
  273. int error, nmaps = 1;
  274. error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
  275. if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
  276. VMODIFY(vp);
  277. return -error;
  278. }
  279. STATIC_INLINE int
  280. xfs_iomap_valid(
  281. xfs_iomap_t *iomapp,
  282. loff_t offset)
  283. {
  284. return offset >= iomapp->iomap_offset &&
  285. offset < iomapp->iomap_offset + iomapp->iomap_bsize;
  286. }
  287. /*
  288. * BIO completion handler for buffered IO.
  289. */
  290. STATIC int
  291. xfs_end_bio(
  292. struct bio *bio,
  293. unsigned int bytes_done,
  294. int error)
  295. {
  296. xfs_ioend_t *ioend = bio->bi_private;
  297. if (bio->bi_size)
  298. return 1;
  299. ASSERT(atomic_read(&bio->bi_cnt) >= 1);
  300. ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
  301. /* Toss bio and pass work off to an xfsdatad thread */
  302. bio->bi_private = NULL;
  303. bio->bi_end_io = NULL;
  304. bio_put(bio);
  305. xfs_finish_ioend(ioend, 0);
  306. return 0;
  307. }
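/*
 * Take an extra hold on the ioend for this bio, point the bio at our
 * completion handler and submit it for write. The bio reference taken
 * at allocation time is dropped here once the bio has been submitted.
 */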
  308. STATIC void
  309. xfs_submit_ioend_bio(
  310. xfs_ioend_t *ioend,
  311. struct bio *bio)
  312. {
  313. atomic_inc(&ioend->io_remaining);
  314. bio->bi_private = ioend;
  315. bio->bi_end_io = xfs_end_bio;
  316. submit_bio(WRITE, bio);
  317. ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
  318. bio_put(bio);
  319. }
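/*
 * Allocate a bio sized to the device's vector limit, halving the
 * requested number of vectors on each failed attempt, and point it at
 * the buffer's block device and starting sector.
 */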
  320. STATIC struct bio *
  321. xfs_alloc_ioend_bio(
  322. struct buffer_head *bh)
  323. {
  324. struct bio *bio;
  325. int nvecs = bio_get_nr_vecs(bh->b_bdev);
  326. do {
  327. bio = bio_alloc(GFP_NOIO, nvecs);
  328. nvecs >>= 1;
  329. } while (!bio);
  330. ASSERT(bio->bi_private == NULL);
  331. bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
  332. bio->bi_bdev = bh->b_bdev;
  333. bio_get(bio);
  334. return bio;
  335. }
  336. STATIC void
  337. xfs_start_buffer_writeback(
  338. struct buffer_head *bh)
  339. {
  340. ASSERT(buffer_mapped(bh));
  341. ASSERT(buffer_locked(bh));
  342. ASSERT(!buffer_delay(bh));
  343. ASSERT(!buffer_unwritten(bh));
  344. mark_buffer_async_write(bh);
  345. set_buffer_uptodate(bh);
  346. clear_buffer_dirty(bh);
  347. }
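/*
 * Move the page into writeback state and unlock it. If no buffers were
 * queued for I/O, end writeback immediately and record the page as
 * skipped in the writeback control.
 */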
  348. STATIC void
  349. xfs_start_page_writeback(
  350. struct page *page,
  351. struct writeback_control *wbc,
  352. int clear_dirty,
  353. int buffers)
  354. {
  355. ASSERT(PageLocked(page));
  356. ASSERT(!PageWriteback(page));
  357. if (clear_dirty)
  358. clear_page_dirty_for_io(page);
  359. set_page_writeback(page);
  360. unlock_page(page);
  361. if (!buffers) {
  362. end_page_writeback(page);
  363. wbc->pages_skipped++; /* We didn't write this page */
  364. }
  365. }
  366. static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  367. {
  368. return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
  369. }
  370. /*
  371. * Submit all of the bios for all of the ioends we have saved up, covering the
  372. * initial writepage page and also any probed pages.
  373. *
  374. * Because we may have multiple ioends spanning a page, we need to start
  375. * writeback on all the buffers before we submit them for I/O. If we
  376. * mark the buffers as we go, we can end up with a page that only has
  377. * some buffers marked async write, and I/O completion on it can occur
  378. * before we mark the remaining buffers async write.
  379. *
  380. * The end result of this is that we trip a bug in end_page_writeback() because
  381. * we call it twice for the one page as the code in end_buffer_async_write()
  382. * assumes that all buffers on the page are started at the same time.
  383. *
  384. * The fix is two passes across the ioend list - one to start writeback on the
  385. * buffer_heads, and then submit them for I/O on the second pass.
  386. */
  387. STATIC void
  388. xfs_submit_ioend(
  389. xfs_ioend_t *ioend)
  390. {
  391. xfs_ioend_t *head = ioend;
  392. xfs_ioend_t *next;
  393. struct buffer_head *bh;
  394. struct bio *bio;
  395. sector_t lastblock = 0;
  396. /* Pass 1 - start writeback */
  397. do {
  398. next = ioend->io_list;
  399. for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
  400. xfs_start_buffer_writeback(bh);
  401. }
  402. } while ((ioend = next) != NULL);
  403. /* Pass 2 - submit I/O */
  404. ioend = head;
  405. do {
  406. next = ioend->io_list;
  407. bio = NULL;
  408. for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
  409. if (!bio) {
  410. retry:
  411. bio = xfs_alloc_ioend_bio(bh);
  412. } else if (bh->b_blocknr != lastblock + 1) {
  413. xfs_submit_ioend_bio(ioend, bio);
  414. goto retry;
  415. }
  416. if (bio_add_buffer(bio, bh) != bh->b_size) {
  417. xfs_submit_ioend_bio(ioend, bio);
  418. goto retry;
  419. }
  420. lastblock = bh->b_blocknr;
  421. }
  422. if (bio)
  423. xfs_submit_ioend_bio(ioend, bio);
  424. xfs_finish_ioend(ioend, 0);
  425. } while ((ioend = next) != NULL);
  426. }
  427. /*
  428. * Cancel submission of all buffer_heads so far in this ioend.
  429. * Toss the ioend too. Only ever called for the initial page
  430. * in a writepage request, so only ever one page.
  431. */
  432. STATIC void
  433. xfs_cancel_ioend(
  434. xfs_ioend_t *ioend)
  435. {
  436. xfs_ioend_t *next;
  437. struct buffer_head *bh, *next_bh;
  438. do {
  439. next = ioend->io_list;
  440. bh = ioend->io_buffer_head;
  441. do {
  442. next_bh = bh->b_private;
  443. clear_buffer_async_write(bh);
  444. unlock_buffer(bh);
  445. } while ((bh = next_bh) != NULL);
  446. vn_iowake(ioend->io_vnode);
  447. mempool_free(ioend, xfs_ioend_pool);
  448. } while ((ioend = next) != NULL);
  449. }
  450. /*
  451. * Test to see if we've been building up a completion structure for
  452. * earlier buffers -- if so, we try to append to this ioend if we
  453. * can, otherwise we finish off any current ioend and start another.
  454. * The caller's ioend pointer (*result) is updated to the current ioend.
  455. */
  456. STATIC void
  457. xfs_add_to_ioend(
  458. struct inode *inode,
  459. struct buffer_head *bh,
  460. xfs_off_t offset,
  461. unsigned int type,
  462. xfs_ioend_t **result,
  463. int need_ioend)
  464. {
  465. xfs_ioend_t *ioend = *result;
  466. if (!ioend || need_ioend || type != ioend->io_type) {
  467. xfs_ioend_t *previous = *result;
  468. ioend = xfs_alloc_ioend(inode, type);
  469. ioend->io_offset = offset;
  470. ioend->io_buffer_head = bh;
  471. ioend->io_buffer_tail = bh;
  472. if (previous)
  473. previous->io_list = ioend;
  474. *result = ioend;
  475. } else {
  476. ioend->io_buffer_tail->b_private = bh;
  477. ioend->io_buffer_tail = bh;
  478. }
  479. bh->b_private = NULL;
  480. ioend->io_size += bh->b_size;
  481. }
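/*
 * Convert the mapping's disk address (in 512-byte basic blocks) into a
 * block number in filesystem block units, add the offset of this buffer
 * within the mapping, and mark the buffer mapped.
 */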
  482. STATIC void
  483. xfs_map_buffer(
  484. struct buffer_head *bh,
  485. xfs_iomap_t *mp,
  486. xfs_off_t offset,
  487. uint block_bits)
  488. {
  489. sector_t bn;
  490. ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
  491. bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
  492. ((offset - mp->iomap_offset) >> block_bits);
  493. ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
  494. bh->b_blocknr = bn;
  495. set_buffer_mapped(bh);
  496. }
  497. STATIC void
  498. xfs_map_at_offset(
  499. struct buffer_head *bh,
  500. loff_t offset,
  501. int block_bits,
  502. xfs_iomap_t *iomapp)
  503. {
  504. ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
  505. ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
  506. lock_buffer(bh);
  507. xfs_map_buffer(bh, iomapp, offset, block_bits);
  508. bh->b_bdev = iomapp->iomap_target->bt_bdev;
  509. set_buffer_mapped(bh);
  510. clear_buffer_delay(bh);
  511. clear_buffer_unwritten(bh);
  512. }
  513. /*
  514. * Look for a page at index that is suitable for clustering.
  515. */
  516. STATIC unsigned int
  517. xfs_probe_page(
  518. struct page *page,
  519. unsigned int pg_offset,
  520. int mapped)
  521. {
  522. int ret = 0;
  523. if (PageWriteback(page))
  524. return 0;
  525. if (page->mapping && PageDirty(page)) {
  526. if (page_has_buffers(page)) {
  527. struct buffer_head *bh, *head;
  528. bh = head = page_buffers(page);
  529. do {
  530. if (!buffer_uptodate(bh))
  531. break;
  532. if (mapped != buffer_mapped(bh))
  533. break;
  534. ret += bh->b_size;
  535. if (ret >= pg_offset)
  536. break;
  537. } while ((bh = bh->b_this_page) != head);
  538. } else
  539. ret = mapped ? 0 : PAGE_CACHE_SIZE;
  540. }
  541. return ret;
  542. }
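/*
 * Count how many bytes, starting at bh within startpage and continuing
 * into the pages that follow, are uptodate and in the required mapped
 * or unmapped state, to size the cluster of pages written out together.
 */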
  543. STATIC size_t
  544. xfs_probe_cluster(
  545. struct inode *inode,
  546. struct page *startpage,
  547. struct buffer_head *bh,
  548. struct buffer_head *head,
  549. int mapped)
  550. {
  551. struct pagevec pvec;
  552. pgoff_t tindex, tlast, tloff;
  553. size_t total = 0;
  554. int done = 0, i;
  555. /* First sum forwards in this page */
  556. do {
  557. if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
  558. return total;
  559. total += bh->b_size;
  560. } while ((bh = bh->b_this_page) != head);
  561. /* if we reached the end of the page, sum forwards in following pages */
  562. tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
  563. tindex = startpage->index + 1;
  564. /* Prune this back to avoid pathological behavior */
  565. tloff = min(tlast, startpage->index + 64);
  566. pagevec_init(&pvec, 0);
  567. while (!done && tindex <= tloff) {
  568. unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
  569. if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
  570. break;
  571. for (i = 0; i < pagevec_count(&pvec); i++) {
  572. struct page *page = pvec.pages[i];
  573. size_t pg_offset, pg_len = 0;
  574. if (tindex == tlast) {
  575. pg_offset =
  576. i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
  577. if (!pg_offset) {
  578. done = 1;
  579. break;
  580. }
  581. } else
  582. pg_offset = PAGE_CACHE_SIZE;
  583. if (page->index == tindex && !TestSetPageLocked(page)) {
  584. pg_len = xfs_probe_page(page, pg_offset, mapped);
  585. unlock_page(page);
  586. }
  587. if (!pg_len) {
  588. done = 1;
  589. break;
  590. }
  591. total += pg_len;
  592. tindex++;
  593. }
  594. pagevec_release(&pvec);
  595. cond_resched();
  596. }
  597. return total;
  598. }
  599. /*
  600. * Test if a given page is suitable for writing as part of an unwritten
  601. * or delayed allocate extent.
  602. */
  603. STATIC int
  604. xfs_is_delayed_page(
  605. struct page *page,
  606. unsigned int type)
  607. {
  608. if (PageWriteback(page))
  609. return 0;
  610. if (page->mapping && page_has_buffers(page)) {
  611. struct buffer_head *bh, *head;
  612. int acceptable = 0;
  613. bh = head = page_buffers(page);
  614. do {
  615. if (buffer_unwritten(bh))
  616. acceptable = (type == IOMAP_UNWRITTEN);
  617. else if (buffer_delay(bh))
  618. acceptable = (type == IOMAP_DELAY);
  619. else if (buffer_dirty(bh) && buffer_mapped(bh))
  620. acceptable = (type == IOMAP_NEW);
  621. else
  622. break;
  623. } while ((bh = bh->b_this_page) != head);
  624. if (acceptable)
  625. return 1;
  626. }
  627. return 0;
  628. }
  629. /*
  630. * Allocate & map buffers for page given the extent map. Write it out.
  631. * Except for the original page of a writepage, this is called on
  632. * delalloc/unwritten pages only; for the original page it is possible
  633. * that the page has no mapping at all.
  634. */
  635. STATIC int
  636. xfs_convert_page(
  637. struct inode *inode,
  638. struct page *page,
  639. loff_t tindex,
  640. xfs_iomap_t *mp,
  641. xfs_ioend_t **ioendp,
  642. struct writeback_control *wbc,
  643. int startio,
  644. int all_bh)
  645. {
  646. struct buffer_head *bh, *head;
  647. xfs_off_t end_offset;
  648. unsigned long p_offset;
  649. unsigned int type;
  650. int bbits = inode->i_blkbits;
  651. int len, page_dirty;
  652. int count = 0, done = 0, uptodate = 1;
  653. xfs_off_t offset = page_offset(page);
  654. if (page->index != tindex)
  655. goto fail;
  656. if (TestSetPageLocked(page))
  657. goto fail;
  658. if (PageWriteback(page))
  659. goto fail_unlock_page;
  660. if (page->mapping != inode->i_mapping)
  661. goto fail_unlock_page;
  662. if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
  663. goto fail_unlock_page;
  664. /*
  665. * page_dirty is initially a count of buffers on the page before
  666. * EOF and is decremented as we move each into a cleanable state.
  667. *
  668. * Derivation:
  669. *
  670. * End offset is the highest offset that this page should represent.
  671. * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
  672. * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
  673. * hence give us the correct page_dirty count. On any other page,
  674. * it will be zero and in that case we need page_dirty to be the
  675. * count of buffers on the page.
  676. */
  677. end_offset = min_t(unsigned long long,
  678. (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
  679. i_size_read(inode));
  680. len = 1 << inode->i_blkbits;
  681. p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
  682. PAGE_CACHE_SIZE);
  683. p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
  684. page_dirty = p_offset / len;
  685. bh = head = page_buffers(page);
  686. do {
  687. if (offset >= end_offset)
  688. break;
  689. if (!buffer_uptodate(bh))
  690. uptodate = 0;
  691. if (!(PageUptodate(page) || buffer_uptodate(bh))) {
  692. done = 1;
  693. continue;
  694. }
  695. if (buffer_unwritten(bh) || buffer_delay(bh)) {
  696. if (buffer_unwritten(bh))
  697. type = IOMAP_UNWRITTEN;
  698. else
  699. type = IOMAP_DELAY;
  700. if (!xfs_iomap_valid(mp, offset)) {
  701. done = 1;
  702. continue;
  703. }
  704. ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
  705. ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
  706. xfs_map_at_offset(bh, offset, bbits, mp);
  707. if (startio) {
  708. xfs_add_to_ioend(inode, bh, offset,
  709. type, ioendp, done);
  710. } else {
  711. set_buffer_dirty(bh);
  712. unlock_buffer(bh);
  713. mark_buffer_dirty(bh);
  714. }
  715. page_dirty--;
  716. count++;
  717. } else {
  718. type = IOMAP_NEW;
  719. if (buffer_mapped(bh) && all_bh && startio) {
  720. lock_buffer(bh);
  721. xfs_add_to_ioend(inode, bh, offset,
  722. type, ioendp, done);
  723. count++;
  724. page_dirty--;
  725. } else {
  726. done = 1;
  727. }
  728. }
  729. } while (offset += len, (bh = bh->b_this_page) != head);
  730. if (uptodate && bh == head)
  731. SetPageUptodate(page);
  732. if (startio) {
  733. if (count) {
  734. struct backing_dev_info *bdi;
  735. bdi = inode->i_mapping->backing_dev_info;
  736. wbc->nr_to_write--;
  737. if (bdi_write_congested(bdi)) {
  738. wbc->encountered_congestion = 1;
  739. done = 1;
  740. } else if (wbc->nr_to_write <= 0) {
  741. done = 1;
  742. }
  743. }
  744. xfs_start_page_writeback(page, wbc, !page_dirty, count);
  745. }
  746. return done;
  747. fail_unlock_page:
  748. unlock_page(page);
  749. fail:
  750. return 1;
  751. }
  752. /*
  753. * Convert & write out a cluster of pages in the same extent as defined
  754. * by mp and following the start page.
  755. */
  756. STATIC void
  757. xfs_cluster_write(
  758. struct inode *inode,
  759. pgoff_t tindex,
  760. xfs_iomap_t *iomapp,
  761. xfs_ioend_t **ioendp,
  762. struct writeback_control *wbc,
  763. int startio,
  764. int all_bh,
  765. pgoff_t tlast)
  766. {
  767. struct pagevec pvec;
  768. int done = 0, i;
  769. pagevec_init(&pvec, 0);
  770. while (!done && tindex <= tlast) {
  771. unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
  772. if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
  773. break;
  774. for (i = 0; i < pagevec_count(&pvec); i++) {
  775. done = xfs_convert_page(inode, pvec.pages[i], tindex++,
  776. iomapp, ioendp, wbc, startio, all_bh);
  777. if (done)
  778. break;
  779. }
  780. pagevec_release(&pvec);
  781. cond_resched();
  782. }
  783. }
  784. /*
  785. * Calling this without startio set means we are being asked to make a dirty
  786. * page ready for freeing its buffers. When called with startio set we
  787. * are coming from writepage.
  788. *
  789. * When called with startio set it is important that we write the WHOLE
  790. * page if possible.
  791. * The bh->b_state's cannot tell us whether any of the blocks, or which
  792. * blocks for that matter, are dirty due to mmap writes, and therefore bh
  793. * uptodate is only valid if the page itself isn't completely uptodate.
  794. * Some layers may clear the page dirty flag prior to calling writepage,
  795. * under the assumption the entire page will be written out; by not writing
  796. * out the whole page, the page can be reused before all valid dirty data
  797. * is written out. Note: in the case of a page that has been dirtied by
  798. * mmap write but only partially set up by block_prepare_write, the
  799. * bh states will not agree and only the ones set up by BPW/BCW will have
  800. * valid state; thus the whole page must be written out.
  801. */
  802. STATIC int
  803. xfs_page_state_convert(
  804. struct inode *inode,
  805. struct page *page,
  806. struct writeback_control *wbc,
  807. int startio,
  808. int unmapped) /* also implies page uptodate */
  809. {
  810. struct buffer_head *bh, *head;
  811. xfs_iomap_t iomap;
  812. xfs_ioend_t *ioend = NULL, *iohead = NULL;
  813. loff_t offset;
  814. unsigned long p_offset = 0;
  815. unsigned int type;
  816. __uint64_t end_offset;
  817. pgoff_t end_index, last_index, tlast;
  818. ssize_t size, len;
  819. int flags, err, iomap_valid = 0, uptodate = 1;
  820. int page_dirty, count = 0;
  821. int trylock = 0;
  822. int all_bh = unmapped;
  823. if (startio) {
  824. if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
  825. trylock |= BMAPI_TRYLOCK;
  826. }
  827. /* Is this page beyond the end of the file? */
  828. offset = i_size_read(inode);
  829. end_index = offset >> PAGE_CACHE_SHIFT;
  830. last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
  831. if (page->index >= end_index) {
  832. if ((page->index >= end_index + 1) ||
  833. !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
  834. if (startio)
  835. unlock_page(page);
  836. return 0;
  837. }
  838. }
  839. /*
  840. * page_dirty is initially a count of buffers on the page before
  841. * EOF and is decremented as we move each into a cleanable state.
  842. *
  843. * Derivation:
  844. *
  845. * End offset is the highest offset that this page should represent.
  846. * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
  847. * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
  848. * hence give us the correct page_dirty count. On any other page,
  849. * it will be zero and in that case we need page_dirty to be the
  850. * count of buffers on the page.
  851. */
  852. end_offset = min_t(unsigned long long,
  853. (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
  854. len = 1 << inode->i_blkbits;
  855. p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
  856. PAGE_CACHE_SIZE);
  857. p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
  858. page_dirty = p_offset / len;
  859. bh = head = page_buffers(page);
  860. offset = page_offset(page);
  861. flags = BMAPI_READ;
  862. type = IOMAP_NEW;
  863. /* TODO: cleanup count and page_dirty */
  864. do {
  865. if (offset >= end_offset)
  866. break;
  867. if (!buffer_uptodate(bh))
  868. uptodate = 0;
  869. if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
  870. /*
  871. * the iomap is actually still valid, but the ioend
  872. * isn't. shouldn't happen too often.
  873. */
  874. iomap_valid = 0;
  875. continue;
  876. }
  877. if (iomap_valid)
  878. iomap_valid = xfs_iomap_valid(&iomap, offset);
  879. /*
  880. * First case, map an unwritten extent and prepare for
  881. * extent state conversion transaction on completion.
  882. *
  883. * Second case, allocate space for a delalloc buffer.
  884. * We can return EAGAIN here in the release page case.
  885. *
  886. * Third case, an unmapped buffer was found, and we are
  887. * in a path where we need to write the whole page out.
  888. */
  889. if (buffer_unwritten(bh) || buffer_delay(bh) ||
  890. ((buffer_uptodate(bh) || PageUptodate(page)) &&
  891. !buffer_mapped(bh) && (unmapped || startio))) {
  892. int new_ioend = 0;
  893. /*
  894. * Make sure we don't use a read-only iomap
  895. */
  896. if (flags == BMAPI_READ)
  897. iomap_valid = 0;
  898. if (buffer_unwritten(bh)) {
  899. type = IOMAP_UNWRITTEN;
  900. flags = BMAPI_WRITE | BMAPI_IGNSTATE;
  901. } else if (buffer_delay(bh)) {
  902. type = IOMAP_DELAY;
  903. flags = BMAPI_ALLOCATE | trylock;
  904. } else {
  905. type = IOMAP_NEW;
  906. flags = BMAPI_WRITE | BMAPI_MMAP;
  907. }
  908. if (!iomap_valid) {
  909. /*
  910. * if we didn't have a valid mapping then we
  911. * need to ensure that we put the new mapping
  912. * in a new ioend structure. This needs to be
  913. * done to ensure that the ioends correctly
  914. * reflect the block mappings at io completion
  915. * for unwritten extent conversion.
  916. */
  917. new_ioend = 1;
  918. if (type == IOMAP_NEW) {
  919. size = xfs_probe_cluster(inode,
  920. page, bh, head, 0);
  921. } else {
  922. size = len;
  923. }
  924. err = xfs_map_blocks(inode, offset, size,
  925. &iomap, flags);
  926. if (err)
  927. goto error;
  928. iomap_valid = xfs_iomap_valid(&iomap, offset);
  929. }
  930. if (iomap_valid) {
  931. xfs_map_at_offset(bh, offset,
  932. inode->i_blkbits, &iomap);
  933. if (startio) {
  934. xfs_add_to_ioend(inode, bh, offset,
  935. type, &ioend,
  936. new_ioend);
  937. } else {
  938. set_buffer_dirty(bh);
  939. unlock_buffer(bh);
  940. mark_buffer_dirty(bh);
  941. }
  942. page_dirty--;
  943. count++;
  944. }
  945. } else if (buffer_uptodate(bh) && startio) {
  946. /*
  947. * we got here because the buffer is already mapped.
  948. * That means it must already have extents allocated
  949. * underneath it. Map the extent by reading it.
  950. */
  951. if (!iomap_valid || flags != BMAPI_READ) {
  952. flags = BMAPI_READ;
  953. size = xfs_probe_cluster(inode, page, bh,
  954. head, 1);
  955. err = xfs_map_blocks(inode, offset, size,
  956. &iomap, flags);
  957. if (err)
  958. goto error;
  959. iomap_valid = xfs_iomap_valid(&iomap, offset);
  960. }
  961. /*
  962. * We set the type to IOMAP_NEW in case we are doing a
  963. * small write at EOF that is extending the file but
  964. * without needing an allocation. We need to update the
  965. * file size on I/O completion in this case so it is
  966. * the same case as having just allocated a new extent
  967. * that we are writing into for the first time.
  968. */
  969. type = IOMAP_NEW;
  970. if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
  971. ASSERT(buffer_mapped(bh));
  972. if (iomap_valid)
  973. all_bh = 1;
  974. xfs_add_to_ioend(inode, bh, offset, type,
  975. &ioend, !iomap_valid);
  976. page_dirty--;
  977. count++;
  978. } else {
  979. iomap_valid = 0;
  980. }
  981. } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
  982. (unmapped || startio)) {
  983. iomap_valid = 0;
  984. }
  985. if (!iohead)
  986. iohead = ioend;
  987. } while (offset += len, ((bh = bh->b_this_page) != head));
  988. if (uptodate && bh == head)
  989. SetPageUptodate(page);
  990. if (startio)
  991. xfs_start_page_writeback(page, wbc, 1, count);
  992. if (ioend && iomap_valid) {
  993. offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
  994. PAGE_CACHE_SHIFT;
  995. tlast = min_t(pgoff_t, offset, last_index);
  996. xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
  997. wbc, startio, all_bh, tlast);
  998. }
  999. if (iohead)
  1000. xfs_submit_ioend(iohead);
  1001. return page_dirty;
  1002. error:
  1003. if (iohead)
  1004. xfs_cancel_ioend(iohead);
  1005. /*
  1006. * If it's delalloc and we have nowhere to put it,
  1007. * throw it away, unless the lower layers told
  1008. * us to try again.
  1009. */
  1010. if (err != -EAGAIN) {
  1011. if (!unmapped)
  1012. block_invalidatepage(page, 0);
  1013. ClearPageUptodate(page);
  1014. }
  1015. return err;
  1016. }
  1017. /*
  1018. * writepage: Called from one of two places:
  1019. *
  1020. * 1. we are flushing a delalloc buffer head.
  1021. *
  1022. * 2. we are writing out a dirty page. Typically the page dirty
  1023. * state is cleared before we get here. In this case it is
  1024. * conceivable we have no buffer heads.
  1025. *
  1026. * For delalloc space on the page we need to allocate space and
  1027. * flush it. For unmapped buffer heads on the page we should
  1028. * allocate space if the page is uptodate. For any other dirty
  1029. * buffer heads on the page we should flush them.
  1030. *
  1031. * If we detect that a transaction would be required to flush
  1032. * the page, we have to check the process flags first: if we
  1033. * are already in a transaction or disk I/O during allocation
  1034. * is disallowed, we need to fail the writepage and redirty the page.
  1035. */
  1036. STATIC int
  1037. xfs_vm_writepage(
  1038. struct page *page,
  1039. struct writeback_control *wbc)
  1040. {
  1041. int error;
  1042. int need_trans;
  1043. int delalloc, unmapped, unwritten;
  1044. struct inode *inode = page->mapping->host;
  1045. xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
  1046. /*
  1047. * We need a transaction if:
  1048. * 1. There are delalloc buffers on the page
  1049. * 2. The page is uptodate and we have unmapped buffers
  1050. * 3. The page is uptodate and we have no buffers
  1051. * 4. There are unwritten buffers on the page
  1052. */
  1053. if (!page_has_buffers(page)) {
  1054. unmapped = 1;
  1055. need_trans = 1;
  1056. } else {
  1057. xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
  1058. if (!PageUptodate(page))
  1059. unmapped = 0;
  1060. need_trans = delalloc + unmapped + unwritten;
  1061. }
  1062. /*
  1063. * If we need a transaction and the process flags say
  1064. * we are already in a transaction, or no IO is allowed
  1065. * then mark the page dirty again and leave the page
  1066. * as is.
  1067. */
  1068. if (current_test_flags(PF_FSTRANS) && need_trans)
  1069. goto out_fail;
  1070. /*
  1071. * Delay hooking up buffer heads until we have
  1072. * made our go/no-go decision.
  1073. */
  1074. if (!page_has_buffers(page))
  1075. create_empty_buffers(page, 1 << inode->i_blkbits, 0);
  1076. /*
  1077. * Convert delayed allocate, unwritten or unmapped space
  1078. * to real space and flush out to disk.
  1079. */
  1080. error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
  1081. if (error == -EAGAIN)
  1082. goto out_fail;
  1083. if (unlikely(error < 0))
  1084. goto out_unlock;
  1085. return 0;
  1086. out_fail:
  1087. redirty_page_for_writepage(wbc, page);
  1088. unlock_page(page);
  1089. return 0;
  1090. out_unlock:
  1091. unlock_page(page);
  1092. return error;
  1093. }
  1094. STATIC int
  1095. xfs_vm_writepages(
  1096. struct address_space *mapping,
  1097. struct writeback_control *wbc)
  1098. {
  1099. struct bhv_vnode *vp = vn_from_inode(mapping->host);
  1100. if (VN_TRUNC(vp))
  1101. VUNTRUNCATE(vp);
  1102. return generic_writepages(mapping, wbc);
  1103. }
  1104. /*
  1105. * Called to move a page into cleanable state - and from there
  1106. * to be released. Possibly the page is already clean. We always
  1107. * have buffer heads in this call.
  1108. *
  1109. * Returns 0 if the page is ok to release, 1 otherwise.
  1110. *
  1111. * Possible scenarios are:
  1112. *
  1113. * 1. We are being called to release a page which has been written
  1114. * to via regular I/O. Buffer heads will be dirty and possibly
  1115. * delalloc. If there are no delalloc buffer heads in this case
  1116. * then we can just return zero.
  1117. *
  1118. * 2. We are called to release a page which has been written via
  1119. * mmap; all we need to do is ensure there is no delalloc
  1120. * state in the buffer heads. If there is none, we can let the
  1121. * caller free them and we should come back later via writepage.
  1122. */
  1123. STATIC int
  1124. xfs_vm_releasepage(
  1125. struct page *page,
  1126. gfp_t gfp_mask)
  1127. {
  1128. struct inode *inode = page->mapping->host;
  1129. int dirty, delalloc, unmapped, unwritten;
  1130. struct writeback_control wbc = {
  1131. .sync_mode = WB_SYNC_ALL,
  1132. .nr_to_write = 1,
  1133. };
  1134. xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
  1135. if (!page_has_buffers(page))
  1136. return 0;
  1137. xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
  1138. if (!delalloc && !unwritten)
  1139. goto free_buffers;
  1140. if (!(gfp_mask & __GFP_FS))
  1141. return 0;
  1142. /* If we are already inside a transaction or the thread cannot
  1143. * do I/O, we cannot release this page.
  1144. */
  1145. if (current_test_flags(PF_FSTRANS))
  1146. return 0;
  1147. /*
  1148. * Convert delalloc space to real space, do not flush the
  1149. * data out to disk, that will be done by the caller.
  1150. * Never need to allocate space here - we will always
  1151. * come back to writepage in that case.
  1152. */
  1153. dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
  1154. if (dirty == 0 && !unwritten)
  1155. goto free_buffers;
  1156. return 0;
  1157. free_buffers:
  1158. return try_to_free_buffers(page);
  1159. }
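/*
 * Common get_block(s) implementation for buffered and direct I/O: map
 * iblock via the bmap vnode operation and translate the resulting iomap
 * into buffer_head state (mapped, new, unwritten, delay) and size.
 */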
  1160. STATIC int
  1161. __xfs_get_blocks(
  1162. struct inode *inode,
  1163. sector_t iblock,
  1164. struct buffer_head *bh_result,
  1165. int create,
  1166. int direct,
  1167. bmapi_flags_t flags)
  1168. {
  1169. bhv_vnode_t *vp = vn_from_inode(inode);
  1170. xfs_iomap_t iomap;
  1171. xfs_off_t offset;
  1172. ssize_t size;
  1173. int niomap = 1;
  1174. int error;
  1175. offset = (xfs_off_t)iblock << inode->i_blkbits;
  1176. ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
  1177. size = bh_result->b_size;
  1178. error = bhv_vop_bmap(vp, offset, size,
  1179. create ? flags : BMAPI_READ, &iomap, &niomap);
  1180. if (error)
  1181. return -error;
  1182. if (niomap == 0)
  1183. return 0;
  1184. if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
  1185. /*
  1186. * For unwritten extents do not report a disk address on
  1187. * the read case (treat as if we're reading into a hole).
  1188. */
  1189. if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
  1190. xfs_map_buffer(bh_result, &iomap, offset,
  1191. inode->i_blkbits);
  1192. }
  1193. if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
  1194. if (direct)
  1195. bh_result->b_private = inode;
  1196. set_buffer_unwritten(bh_result);
  1197. }
  1198. }
  1199. /*
  1200. * If this is a realtime file, data may be on a different device
  1201. * to that pointed to from the buffer_head b_bdev currently.
  1202. */
  1203. bh_result->b_bdev = iomap.iomap_target->bt_bdev;
  1204. /*
  1205. * If we previously allocated a block out beyond eof and we are now
  1206. * coming back to use it then we will need to flag it as new even if it
  1207. * has a disk address.
  1208. *
  1209. * With sub-block writes into unwritten extents we also need to mark
  1210. * the buffer as new so that the unwritten parts of the buffer get
  1211. * correctly zeroed.
  1212. */
  1213. if (create &&
  1214. ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
  1215. (offset >= i_size_read(inode)) ||
  1216. (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
  1217. set_buffer_new(bh_result);
  1218. if (iomap.iomap_flags & IOMAP_DELAY) {
  1219. BUG_ON(direct);
  1220. if (create) {
  1221. set_buffer_uptodate(bh_result);
  1222. set_buffer_mapped(bh_result);
  1223. set_buffer_delay(bh_result);
  1224. }
  1225. }
  1226. if (direct || size > (1 << inode->i_blkbits)) {
  1227. ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
  1228. offset = min_t(xfs_off_t,
  1229. iomap.iomap_bsize - iomap.iomap_delta, size);
  1230. bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
  1231. }
  1232. return 0;
  1233. }
  1234. int
  1235. xfs_get_blocks(
  1236. struct inode *inode,
  1237. sector_t iblock,
  1238. struct buffer_head *bh_result,
  1239. int create)
  1240. {
  1241. return __xfs_get_blocks(inode, iblock,
  1242. bh_result, create, 0, BMAPI_WRITE);
  1243. }
  1244. STATIC int
  1245. xfs_get_blocks_direct(
  1246. struct inode *inode,
  1247. sector_t iblock,
  1248. struct buffer_head *bh_result,
  1249. int create)
  1250. {
  1251. return __xfs_get_blocks(inode, iblock,
  1252. bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
  1253. }
  1254. STATIC void
  1255. xfs_end_io_direct(
  1256. struct kiocb *iocb,
  1257. loff_t offset,
  1258. ssize_t size,
  1259. void *private)
  1260. {
  1261. xfs_ioend_t *ioend = iocb->private;
  1262. /*
  1263. * Non-NULL private data means we need to issue a transaction to
  1264. * convert a range from unwritten to written extents. This needs
  1265. * to happen from process context but aio+dio I/O completion
  1266. * happens from irq context so we need to defer it to a workqueue.
  1267. * This is not necessary for synchronous direct I/O, but we do
  1268. * it anyway to keep the code uniform and simpler.
  1269. *
  1270. * Well, if only it were that simple. Because synchronous direct I/O
  1271. * requires extent conversion to occur *before* we return to userspace,
  1272. * we have to wait for extent conversion to complete. Look at the
  1273. * iocb that has been passed to us to determine if this is AIO or
  1274. * not. If it is synchronous, tell xfs_finish_ioend() to kick the
  1275. * workqueue and wait for it to complete.
  1276. *
  1277. * The core direct I/O code might be changed to always call the
  1278. * completion handler in the future, in which case all this can
  1279. * go away.
  1280. */
  1281. ioend->io_offset = offset;
  1282. ioend->io_size = size;
  1283. if (ioend->io_type == IOMAP_READ) {
  1284. xfs_finish_ioend(ioend, 0);
  1285. } else if (private && size > 0) {
  1286. xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
  1287. } else {
  1288. /*
  1289. * A direct I/O write ioend starts its life in unwritten
  1290. * state in case it maps an unwritten extent. This write
  1291. * didn't map an unwritten extent so switch its completion
  1292. * handler.
  1293. */
  1294. INIT_WORK(&ioend->io_work, xfs_end_bio_written);
  1295. xfs_finish_ioend(ioend, 0);
  1296. }
  1297. /*
  1298. * blockdev_direct_IO can return an error even after the I/O
  1299. * completion handler was called. Thus we need to protect
  1300. * against double-freeing.
  1301. */
  1302. iocb->private = NULL;
  1303. }
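/*
 * direct_IO address space operation: look up the target device,
 * allocate an ioend to track completion, and hand the request to the
 * generic blockdev direct I/O code with our get_blocks and end_io
 * callbacks.
 */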
  1304. STATIC ssize_t
  1305. xfs_vm_direct_IO(
  1306. int rw,
  1307. struct kiocb *iocb,
  1308. const struct iovec *iov,
  1309. loff_t offset,
  1310. unsigned long nr_segs)
  1311. {
  1312. struct file *file = iocb->ki_filp;
  1313. struct inode *inode = file->f_mapping->host;
  1314. bhv_vnode_t *vp = vn_from_inode(inode);
  1315. xfs_iomap_t iomap;
  1316. int maps = 1;
  1317. int error;
  1318. ssize_t ret;
  1319. error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
  1320. if (error)
  1321. return -error;
  1322. if (rw == WRITE) {
  1323. iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
  1324. ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
  1325. iomap.iomap_target->bt_bdev,
  1326. iov, offset, nr_segs,
  1327. xfs_get_blocks_direct,
  1328. xfs_end_io_direct);
  1329. } else {
  1330. iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
  1331. ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
  1332. iomap.iomap_target->bt_bdev,
  1333. iov, offset, nr_segs,
  1334. xfs_get_blocks_direct,
  1335. xfs_end_io_direct);
  1336. }
  1337. if (unlikely(ret != -EIOCBQUEUED && iocb->private))
  1338. xfs_destroy_ioend(iocb->private);
  1339. return ret;
  1340. }
  1341. STATIC int
  1342. xfs_vm_prepare_write(
  1343. struct file *file,
  1344. struct page *page,
  1345. unsigned int from,
  1346. unsigned int to)
  1347. {
  1348. return block_prepare_write(page, from, to, xfs_get_blocks);
  1349. }
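/*
 * FIBMAP-style block mapping. Flush the file's dirty pages first so
 * that the block number reported reflects the on-disk layout, then use
 * the generic bmap helper.
 */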
  1350. STATIC sector_t
  1351. xfs_vm_bmap(
  1352. struct address_space *mapping,
  1353. sector_t block)
  1354. {
  1355. struct inode *inode = (struct inode *)mapping->host;
  1356. bhv_vnode_t *vp = vn_from_inode(inode);
  1357. vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
  1358. bhv_vop_rwlock(vp, VRWLOCK_READ);
  1359. bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
  1360. bhv_vop_rwunlock(vp, VRWLOCK_READ);
  1361. return generic_block_bmap(mapping, block, xfs_get_blocks);
  1362. }
  1363. STATIC int
  1364. xfs_vm_readpage(
  1365. struct file *unused,
  1366. struct page *page)
  1367. {
  1368. return mpage_readpage(page, xfs_get_blocks);
  1369. }
  1370. STATIC int
  1371. xfs_vm_readpages(
  1372. struct file *unused,
  1373. struct address_space *mapping,
  1374. struct list_head *pages,
  1375. unsigned nr_pages)
  1376. {
  1377. return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
  1378. }
  1379. STATIC void
  1380. xfs_vm_invalidatepage(
  1381. struct page *page,
  1382. unsigned long offset)
  1383. {
  1384. xfs_page_trace(XFS_INVALIDPAGE_ENTER,
  1385. page->mapping->host, page, offset);
  1386. block_invalidatepage(page, offset);
  1387. }
  1388. const struct address_space_operations xfs_address_space_operations = {
  1389. .readpage = xfs_vm_readpage,
  1390. .readpages = xfs_vm_readpages,
  1391. .writepage = xfs_vm_writepage,
  1392. .writepages = xfs_vm_writepages,
  1393. .sync_page = block_sync_page,
  1394. .releasepage = xfs_vm_releasepage,
  1395. .invalidatepage = xfs_vm_invalidatepage,
  1396. .prepare_write = xfs_vm_prepare_write,
  1397. .commit_write = generic_commit_write,
  1398. .bmap = xfs_vm_bmap,
  1399. .direct_IO = xfs_vm_direct_IO,
  1400. .migratepage = buffer_migrate_page,
  1401. };