xfs_aops.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC 37
#define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

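/*
 * Wait for all in-flight I/O against this inode to drain, i.e. for
 * i_iocount to reach zero, using the hashed wait queue above.
 */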
void
xfs_ioend_wait(
	xfs_inode_t *ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t *ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

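/*
 * Walk the buffer_heads attached to a page and note whether any of them
 * are delayed-allocate, unmapped-but-uptodate, or unwritten.
 */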
STATIC void
xfs_count_page_state(
	struct page *page,
	int *delalloc,
	int *unmapped,
	int *unwritten)
{
	struct buffer_head *bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int tag,
	struct inode *inode,
	struct page *page,
	unsigned long pgoff)
{
	xfs_inode_t *ip;
	loff_t isize = i_size_read(inode);
	loff_t offset = page_offset(page);
	int delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = XFS_I(inode);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

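/*
 * Return the block device backing this inode: the realtime device for
 * realtime inodes, the data device otherwise.
 */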
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode *ip)
{
	struct xfs_mount *mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory. Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t *ioend)
{
	struct buffer_head *bh, *next;
	struct xfs_inode *ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears. In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size. If a write is beyond
 * eof i_new_size will be the intended file size until i_size is
 * updated. If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t *ioend)
{
	xfs_inode_t *ip = XFS_I(ioend->io_inode);
	xfs_fsize_t isize;
	xfs_fsize_t bsize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct *work)
{
	xfs_ioend_t *ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct *work)
{
	xfs_ioend_t *ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct *work)
{
	xfs_ioend_t *ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	xfs_off_t offset = ioend->io_offset;
	size_t size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			int error;
			error = xfs_iomap_write_unwritten(ip, offset, size);
			if (error)
				ioend->io_error = error;
		}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct *work)
{
	xfs_ioend_t *ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend. If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t *ioend,
	int wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq = xfsdatad_workqueue;

		if (ioend->io_work.func == xfs_end_bio_unwritten)
			wq = xfsconvertd_workqueue;

		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode *inode,
	unsigned int type)
{
	xfs_ioend_t *ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent the I/O
	 * completion callback from running before we have started
	 * all of the I/O for this ioend.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

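/*
 * Map <offset, count> of the file to a disk extent via xfs_iomap();
 * a negative errno is returned on failure. xfs_iomap_valid() then
 * checks whether a given file offset still falls inside the mapping.
 */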
STATIC int
xfs_map_blocks(
	struct inode *inode,
	loff_t offset,
	ssize_t count,
	xfs_iomap_t *mapp,
	int flags)
{
	int nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t *iomapp,
	loff_t offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio *bio,
	int error)
{
	xfs_ioend_t *ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t *ioend,
	struct bio *bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

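/*
 * Allocate a bio sized for the buffer's device, retrying with half as
 * many vectors if the allocation fails. The extra bio_get() reference
 * keeps the bio alive until xfs_submit_ioend_bio() has submitted it
 * and dropped that reference with bio_put().
 */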
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head *bh)
{
	struct bio *bio;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

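/*
 * Per-buffer and per-page writeback setup: mark each buffer async-write
 * and clean, then flag the page as under writeback and unlock it. If no
 * buffers on the page are being written, the page is finished immediately.
 */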
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head *bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page *page,
	int clear_dirty,
	int buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that only has some of its
 * buffers marked async write, and I/O completion can occur before we have
 * marked the remaining buffers on the page async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback()
 * because we call it twice for the one page, as the code in
 * end_buffer_async_write() assumes that all buffers on the page are started
 * at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t *ioend)
{
	xfs_ioend_t *head = ioend;
	xfs_ioend_t *next;
	struct buffer_head *bh;
	struct bio *bio;
	sector_t lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too. Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t *ioend)
{
	xfs_ioend_t *next;
	struct buffer_head *bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The new or extended ioend is returned via *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode *inode,
	struct buffer_head *bh,
	xfs_off_t offset,
	unsigned int type,
	xfs_ioend_t **result,
	int need_ioend)
{
	xfs_ioend_t *ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t *previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

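/*
 * Fill in a buffer_head's on-disk location from an iomap: iomap_bn is in
 * 512-byte basic blocks, so shift by (block_bits - BBSHIFT) to convert it
 * to filesystem blocks before adding the block offset within the mapping.
 */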
STATIC void
xfs_map_buffer(
	struct buffer_head *bh,
	xfs_iomap_t *mp,
	xfs_off_t offset,
	uint block_bits)
{
	sector_t bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct buffer_head *bh,
	loff_t offset,
	int block_bits,
	xfs_iomap_t *iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page *page,
	unsigned int pg_offset,
	int mapped)
{
	int ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

STATIC size_t
xfs_probe_cluster(
	struct inode *inode,
	struct page *startpage,
	struct buffer_head *bh,
	struct buffer_head *head,
	int mapped)
{
	struct pagevec pvec;
	pgoff_t tindex, tlast, tloff;
	size_t total = 0;
	int done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page *page,
	unsigned int type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode *inode,
	struct page *page,
	loff_t tindex,
	xfs_iomap_t *mp,
	xfs_ioend_t **ioendp,
	struct writeback_control *wbc,
	int startio,
	int all_bh)
{
	struct buffer_head *bh, *head;
	xfs_off_t end_offset;
	unsigned long p_offset;
	unsigned int type;
	int bbits = inode->i_blkbits;
	int len, page_dirty;
	int count = 0, done = 0, uptodate = 1;
	xfs_off_t offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
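	/*
	 * For example, with 4k pages and 1k blocks, a page that lies
	 * entirely below EOF gives p_offset = PAGE_CACHE_SIZE and
	 * page_dirty = 4, while a final page with EOF 1500 bytes into
	 * it gives p_offset rounded up to 2048 and page_dirty = 2.
	 */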
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode *inode,
	pgoff_t tindex,
	xfs_iomap_t *iomapp,
	xfs_ioend_t **ioendp,
	struct writeback_control *wbc,
	int startio,
	int all_bh,
	pgoff_t tlast)
{
	struct pagevec pvec;
	int done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers. When called with startio set we are
 * coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know which blocks, if any, are dirty due to
 * mmap writes, and therefore bh uptodate is only valid if the page itself
 * isn't completely uptodate. Some layers may clear the page dirty flag
 * prior to calling writepage, under the assumption the entire page will
 * be written out; by not writing out the whole page the page can be
 * reused before all valid dirty data is written out. Note: in the case of
 * a page that has been dirtied by mmap write but only partially set up by
 * block_prepare_write, the bh states will not agree and only the ones set
 * up by BPW/BCW will have valid state; thus the whole page must be
 * written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode *inode,
	struct page *page,
	struct writeback_control *wbc,
	int startio,
	int unmapped) /* also implies page uptodate */
{
	struct buffer_head *bh, *head;
	xfs_iomap_t iomap;
	xfs_ioend_t *ioend = NULL, *iohead = NULL;
	loff_t offset;
	unsigned long p_offset = 0;
	unsigned int type;
	__uint64_t end_offset;
	pgoff_t end_index, last_index, tlast;
	ssize_t size, len;
	int flags, err, iomap_valid = 0, uptodate = 1;
	int page_dirty, count = 0;
	int trylock = 0;
	int all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't. shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
	struct page *page,
	struct writeback_control *wbc)
{
	int error;
	int need_trans;
	int delalloc, unmapped, unwritten;
	struct inode *inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

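/*
 * Clear the truncated flag on the inode and defer to the generic
 * writeback code, which calls ->writepage for each dirty page.
 */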
STATIC int
xfs_vm_writepages(
	struct address_space *mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns nonzero if the buffers were freed and the page can be
 * released, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If there are no delalloc buffer heads in this case
 *    then we can just free the buffers.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page *page,
	gfp_t gfp_mask)
{
	struct inode *inode = page->mapping->host;
	int dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

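/*
 * Core get_blocks implementation shared by the buffered and direct I/O
 * paths: map iblock to an extent with xfs_iomap() and translate the
 * result into buffer_head state for the generic code.
 */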
STATIC int
__xfs_get_blocks(
	struct inode *inode,
	sector_t iblock,
	struct buffer_head *bh_result,
	int create,
	int direct,
	bmapi_flags_t flags)
{
	xfs_iomap_t iomap;
	xfs_off_t offset;
	ssize_t size;
	int niomap = 1;
	int error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that currently pointed to by the buffer_head's b_bdev.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer gets
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

int
xfs_get_blocks(
	struct inode *inode,
	sector_t iblock,
	struct buffer_head *bh_result,
	int create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
	struct inode *inode,
	sector_t iblock,
	struct buffer_head *bh_result,
	int create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb *iocb,
	loff_t offset,
	ssize_t size,
	void *private)
{
	xfs_ioend_t *ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents. This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent. This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called. Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
	int rw,
	struct kiocb *iocb,
	const struct iovec *iov,
	loff_t offset,
	unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

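/*
 * ->write_begin simply defers to block_write_begin() with xfs_get_blocks()
 * supplying the block mapping.
 */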
STATIC int
xfs_vm_write_begin(
	struct file *file,
	struct address_space *mapping,
	loff_t pos,
	unsigned len,
	unsigned flags,
	struct page **pagep,
	void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
								xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space *mapping,
	sector_t block)
{
	struct inode *inode = (struct inode *)mapping->host;
	struct xfs_inode *ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

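/*
 * The read path hands straight off to the generic mpage helpers, using
 * xfs_get_blocks() to map each block.
 */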
STATIC int
xfs_vm_readpage(
	struct file *unused,
	struct page *page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file *unused,
	struct address_space *mapping,
	struct list_head *pages,
	unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page *page,
	unsigned long offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage = xfs_vm_readpage,
	.readpages = xfs_vm_readpages,
	.writepage = xfs_vm_writepage,
	.writepages = xfs_vm_writepages,
	.sync_page = block_sync_page,
	.releasepage = xfs_vm_releasepage,
	.invalidatepage = xfs_vm_invalidatepage,
	.write_begin = xfs_vm_write_begin,
	.write_end = generic_write_end,
	.bmap = xfs_vm_bmap,
	.direct_IO = xfs_vm_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
};