xfs_aops.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

STATIC void xfs_count_page_state(struct page *, int *, int *, int *);

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, ioend->io_uptodate);
	}

	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, so that the completion routine
	 * cannot be called before we have finished issuing all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);

	return ioend;
}

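/*
 * Map the blocks underlying the given file range via VOP_BMAP,
 * returning the (single) mapping in *mapp.  Write-type mappings
 * dirty the vnode; errors are returned as negative errnos.
 */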
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

/*
 * Find the block mapping in @iomapp that covers the given @offset
 * within @page, or return NULL if the offset lies outside it.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	xfs_off_t		full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
		return iomapp;
	return NULL;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	if (bio->bi_size)
		return 1;

	ASSERT(ioend);
	ASSERT(atomic_read(&bio->bi_cnt) >= 1);

	/* Toss bio and pass work off to an xfsdatad thread */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		ioend->io_uptodate = 0;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);
	xfs_finish_ioend(ioend);
	return 0;
}

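/*
 * Submit a single bio on behalf of an ioend.  Takes an extra reference
 * on io_remaining which xfs_end_bio() drops on completion, and drops
 * the bio reference taken by xfs_alloc_ioend_bio() once the bio has
 * been submitted.
 */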
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

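/*
 * Allocate a bio large enough for the buffers we intend to add,
 * retrying with half as many vecs on allocation failure so that we
 * always get a bio eventually.  The reference taken by bio_get() here
 * is released in xfs_submit_ioend_bio().
 */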
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

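/*
 * Mark a mapped, locked buffer for async writeback.  By the time we
 * get here any delalloc/unwritten state must already have been
 * converted away by xfs_map_at_offset().
 */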
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

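/*
 * Move a locked page into writeback state and unlock it.  If no
 * buffers were scheduled for I/O (@buffers == 0), end writeback
 * immediately and tell the VM that we skipped the page.
 */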
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up,
 * covering the initial writepage page and also any probed pages.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(ioend->io_vnode);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	unsigned int		p_offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;
		xfs_off_t	offset;

		offset = (xfs_off_t)bh->b_page->index << PAGE_CACHE_SHIFT;
		offset += p_offset;
		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

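/*
 * Attach the on-disk mapping to a buffer: compute the disk block
 * number for @offset within @page from the iomap, then mark the
 * buffer mapped and clear its delalloc/unwritten state.
 */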
STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		*ioend)
{
	xfs_daddr_t		bn;
	xfs_off_t		delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct page		*page,
	unsigned int		pg_offset)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

	return ret;
}

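/*
 * Probe for the size of a contiguous run of dirty, uptodate and
 * unmapped buffers starting at @bh: first the remainder of the current
 * page, then (capped at 64 pages) forwards through following pages up
 * to EOF.  Used to size a single allocation mapping call for the mmap
 * write case.
 */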
STATIC size_t
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset)
					break;
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				len = xfs_probe_unmapped_page(page, pg_offset);
				unlock_page(page);
			}

			if (!len) {
				done = 1;
				break;
			}

			total += len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		p_offset, end_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 * On the last page end_offset is the EOF offset within the page;
	 * on any earlier page it is zero and the whole page counts.
	 */
	len = 1 << inode->i_blkbits;
	end_offset = end_offset ? roundup(end_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = end_offset / len;

	p_offset = 0;
	bh = head = page_buffers(page);
	do {
		if (p_offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh))
			type = IOMAP_UNWRITTEN;
		else if (buffer_delay(bh))
			type = IOMAP_DELAY;
		else {
			type = 0;
			if (!(buffer_mapped(bh) && all_bh && startio)) {
				done = 1;
			} else if (startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, p_offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			}
			continue;
		}

		tmp = xfs_offset_to_map(page, mp, p_offset);
		if (!tmp) {
			done = 1;
			continue;
		}
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		xfs_map_at_offset(page, bh, p_offset, bbits, tmp, *ioendp);
		if (startio) {
			xfs_add_to_ioend(inode, bh, p_offset,
					type, ioendp, done);
			count++;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
		page_dirty--;
	} while (p_offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count)
			wbc->nr_to_write--;
		xfs_start_page_writeback(page, wbc, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know which blocks, if any, are dirty due to
 * mmap writes, so buffer uptodate state is only valid if the page itself
 * isn't completely uptodate.  Some layers may clear the page dirty flag
 * prior to calling writepage, under the assumption the entire page will
 * be written out; by not writing out the whole page the page can be
 * reused before all valid dirty data is written out.  Note: in the case
 * of a page that has been dirtied by mmap write but only partially set up
 * by block_prepare_write, the buffer states will not agree and only the
 * ones set up by block_prepare_write/block_commit_write will have valid
 * state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		*iomp, iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			flags, len, err, done = 1;
	int			uptodate = 1;
	int			page_dirty, count = 0, trylock_flag = 0;

	/* wait for other IO threads? */
	if (startio && wbc->sync_mode != WB_SYNC_NONE)
		trylock_flag |= BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	iomp = NULL;
	p_offset = 0;
	bh = head = page_buffers(page);
	offset = page_offset(page);

	/* TODO: fix up "done" variable and iomap pointer (boolean) */
	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			done = 1;
			continue;
		}

		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
			done = (iomp == NULL);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
			} else {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE;
				if (!startio)
					flags |= trylock_flag;
			}
			if (!iomp) {
				done = 1;
				err = xfs_map_blocks(inode, offset, len, &iomap,
						flags);
				if (err)
					goto error;
				iomp = xfs_offset_to_map(page, &iomap,
							p_offset);
				done = (iomp == NULL);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp, ioend);
				if (startio) {
					xfs_add_to_ioend(inode, bh, p_offset,
							type, &ioend, done);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			} else {
				done = 1;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			type = 0;
			if (!buffer_mapped(bh)) {
				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					int	size;

					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
					done = (iomp == NULL);
				}
				if (iomp) {
					xfs_map_at_offset(page, bh, p_offset,
							inode->i_blkbits, iomp,
							ioend);
					if (startio) {
						xfs_add_to_ioend(inode,
							bh, p_offset, type,
							&ioend, done);
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty--;
					count++;
				} else {
					done = 1;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					ASSERT(buffer_mapped(bh));
					xfs_add_to_ioend(inode,
						bh, p_offset, type,
						&ioend, done);
					page_dirty--;
					count++;
				} else {
					done = 1;
				}
			} else {
				done = 1;
			}
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, p_offset += len,
		 ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomp && !done) {
		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, iomp, &ioend,
					wbc, startio, unmapped, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

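/*
 * Common get_block callback for the generic buffered and direct I/O
 * paths: map @iblock (and, when @blocks is non-zero, up to that many
 * following blocks) through VOP_BMAP, and translate the resulting
 * iomap into buffer_head state.
 */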
STATIC int
__linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			retpbbm = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	if (blocks)
		size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
					(xfs_off_t)blocks << inode->i_blkbits);
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t	bn;
		xfs_off_t	delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;
			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (blocks) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta,
				(xfs_off_t)blocks << inode->i_blkbits);
		bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
	}

	return 0;
}

int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
linvfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	if (private && size > 0) {
		ioend->io_offset = offset;
		ioend->io_size = size;
		xfs_finish_ioend(ioend);
	} else {
		ASSERT(size >= 0);
		xfs_destroy_ioend(ioend);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

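/*
 * Direct I/O entry point.  Look up the target block device via a
 * BMAPI_DEVICE mapping and hand off to the generic direct I/O code,
 * passing an ioend in iocb->private so that unwritten extent
 * conversion can be performed on write completion.
 */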
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);

	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->bt_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_end_io_direct);

	if (unlikely(ret <= 0 && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

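/*
 * bmap() address space call.  Flush any dirty pages first so that the
 * block mapping reported matches what is actually on disk.
 */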
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}

STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}

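/*
 * Scan the buffers on a page and report whether any are delalloc,
 * unmapped or unwritten.  An unwritten buffer without the delay bit
 * set has its unwritten state cleared instead of being counted.
 */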
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
linvfs_invalidate_page(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  buffer heads will be dirty and possibly
 *    delalloc.  If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}

struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};