xfs_aops.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>

STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
                struct writeback_control *wbc, void *, int, int);

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
        int             tag,
        struct inode    *inode,
        struct page     *page,
        int             mask)
{
        xfs_inode_t     *ip;
        bhv_desc_t      *bdp;
        vnode_t         *vp = LINVFS_GET_VP(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int             delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
        ip = XFS_BHVTOI(bdp);
        if (!ip->i_rwtrace)
                return;

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),
                (void *)ip,
                (void *)inode,
                (void *)page,
                (void *)((unsigned long)mask),
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
                (void *)NULL,
                (void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif

/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
        xfs_ioend_t     *ioend)
{
        if (atomic_dec_and_test(&ioend->io_remaining))
                queue_work(xfsdatad_workqueue, &ioend->io_work);
}
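
/*
 * Tear down a completed ioend: wake up anyone waiting on the vnode's
 * iocount (the reference taken in xfs_alloc_ioend) and return the
 * structure to the xfs_ioend mempool.
 */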
STATIC void
xfs_destroy_ioend(
        xfs_ioend_t     *ioend)
{
        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
        void                    *data)
{
        xfs_ioend_t             *ioend = data;
        vnode_t                 *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        struct buffer_head      *bh, *next;
        int                     error;

        if (ioend->io_uptodate)
                VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);

        /* ioend->io_buffer_head is only non-NULL for buffered I/O */
        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;

                bh->b_end_io = NULL;
                clear_buffer_unwritten(bh);
                end_buffer_async_write(bh, ioend->io_uptodate);
        }

        xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
        struct inode            *inode)
{
        xfs_ioend_t             *ioend;

        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, which will prevent the I/O
         * completion callback from running before we have submitted
         * all the I/O.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_uptodate = 1; /* cleared if any I/O fails */
        ioend->io_vnode = LINVFS_GET_VP(inode);
        ioend->io_buffer_head = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;

        INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
        return ioend;
}
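
/*
 * Buffer write completion for unwritten extents, called from irq
 * context.  Only state is recorded here; the extent conversion itself
 * is deferred to xfs_end_bio_unwritten, which runs from the ioend
 * workqueue once the last hold on the ioend is dropped.
 */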
void
linvfs_unwritten_done(
        struct buffer_head      *bh,
        int                     uptodate)
{
        xfs_ioend_t             *ioend = bh->b_private;
        static spinlock_t       unwritten_done_lock = SPIN_LOCK_UNLOCKED;
        unsigned long           flags;

        ASSERT(buffer_unwritten(bh));
        bh->b_end_io = NULL;

        if (!uptodate)
                ioend->io_uptodate = 0;

        /*
         * Deep magic here.  We reuse b_private in the buffer_heads to build
         * a chain for completing the I/O from user context after we've issued
         * a transaction to convert the unwritten extent.
         */
        spin_lock_irqsave(&unwritten_done_lock, flags);
        bh->b_private = ioend->io_buffer_head;
        ioend->io_buffer_head = bh;
        spin_unlock_irqrestore(&unwritten_done_lock, flags);

        xfs_finish_ioend(ioend);
}
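
/*
 * Map an (offset, count) range of the file via VOP_BMAP.  Note that
 * VOP_BMAP hands back a positive XFS error code, so it is negated
 * here to follow the negative-errno convention of the callers.
 */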
STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        ssize_t                 count,
        xfs_iomap_t             *mapp,
        int                     flags)
{
        vnode_t                 *vp = LINVFS_GET_VP(inode);
        int                     error, nmaps = 1;

        VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
                VMODIFY(vp);
        return -error;
}

/*
 * Find the block mapping in @iomapp which corresponds to the given
 * @offset within @page, or return NULL if the offset lies outside
 * the mapping.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
        struct page             *page,
        xfs_iomap_t             *iomapp,
        unsigned long           offset)
{
        loff_t                  full_offset;    /* offset from start of file */

        ASSERT(offset < PAGE_CACHE_SIZE);

        full_offset = page->index;              /* NB: using 64bit number */
        full_offset <<= PAGE_CACHE_SHIFT;       /* offset from file start */
        full_offset += offset;                  /* offset from page start */

        if (full_offset < iomapp->iomap_offset)
                return NULL;
        if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
                return iomapp;
        return NULL;
}
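
/*
 * Wire buffer @bh at @offset within @page up to the on-disk location
 * described by @iomapp.  iomap_bn is expressed in 512-byte basic
 * blocks, hence the shift by (block_bits - BBSHIFT) to convert to
 * filesystem blocks before adding the block offset into the extent.
 */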
STATIC void
xfs_map_at_offset(
        struct page             *page,
        struct buffer_head      *bh,
        unsigned long           offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
{
        xfs_daddr_t             bn;
        loff_t                  delta;
        int                     sector_shift;

        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
        ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

        delta = page->index;
        delta <<= PAGE_CACHE_SHIFT;
        delta += offset;
        delta -= iomapp->iomap_offset;
        delta >>= block_bits;

        sector_shift = block_bits - BBSHIFT;
        bn = iomapp->iomap_bn >> sector_shift;
        bn += delta;
        BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
        ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

        lock_buffer(bh);
        bh->b_blocknr = bn;
        bh->b_bdev = iomapp->iomap_target->pbr_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
}

/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
        struct address_space    *mapping,
        pgoff_t                 index,
        xfs_iomap_t             *iomapp,
        xfs_ioend_t             *ioend,
        unsigned long           max_offset,
        unsigned long           *fsbs,
        unsigned int            bbits)
{
        struct page             *page;

        page = find_trylock_page(mapping, index);
        if (!page)
                return NULL;
        if (PageWriteback(page))
                goto out;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                unsigned long           p_offset = 0;

                *fsbs = 0;
                bh = head = page_buffers(page);
                do {
                        if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
                                break;
                        if (!xfs_offset_to_map(page, iomapp, p_offset))
                                break;
                        if (p_offset >= max_offset)
                                break;
                        xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
                        set_buffer_unwritten_io(bh);
                        bh->b_private = ioend;
                        p_offset += bh->b_size;
                        (*fsbs)++;
                } while ((bh = bh->b_this_page) != head);

                if (p_offset)
                        return page;
        }

out:
        unlock_page(page);
        return NULL;
}

/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
        struct address_space    *mapping,
        pgoff_t                 index,
        unsigned int            pg_offset)
{
        struct page             *page;
        int                     ret = 0;

        page = find_trylock_page(mapping, index);
        if (!page)
                return 0;
        if (PageWriteback(page))
                goto out;

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head      *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (buffer_mapped(bh) || !buffer_uptodate(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
                        ret = PAGE_CACHE_SIZE;
        }

out:
        unlock_page(page);
        return ret;
}
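
/*
 * Measure how much contiguous unmapped space follows @bh, continuing
 * into subsequent dirty pages, so that the caller can request one
 * allocation covering the whole cluster.  The scan is capped at 64
 * pages beyond @startpage to bound the probe cost.
 */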
STATIC unsigned int
xfs_probe_unmapped_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
        struct buffer_head      *head)
{
        pgoff_t                 tindex, tlast, tloff;
        unsigned int            pg_offset, len, total = 0;
        struct address_space    *mapping = inode->i_mapping;

        /* First sum forwards in this page */
        do {
                if (buffer_mapped(bh))
                        break;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        /* If we reached the end of the page, sum forwards in
         * following pages.
         */
        if (bh == head) {
                tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
                /* Prune this back to avoid pathological behavior */
                tloff = min(tlast, startpage->index + 64);
                for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
                        len = xfs_probe_unmapped_page(mapping, tindex,
                                                        PAGE_CACHE_SIZE);
                        if (!len)
                                return total;
                        total += len;
                }
                if (tindex == tlast &&
                    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        total += xfs_probe_unmapped_page(mapping,
                                                        tindex, pg_offset);
                }
        }
        return total;
}

/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
        struct inode            *inode,
        pgoff_t                 index)
{
        struct page             *page;

        page = find_trylock_page(inode->i_mapping, index);
        if (!page)
                return NULL;
        if (PageWriteback(page))
                goto out;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                int                     acceptable = 0;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh)) {
                                acceptable = 0;
                                break;
                        } else if (buffer_delay(bh)) {
                                acceptable = 1;
                        }
                } while ((bh = bh->b_this_page) != head);

                if (acceptable)
                        return page;
        }

out:
        unlock_page(page);
        return NULL;
}
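
/*
 * Map a run of unwritten buffers, starting at @curr within
 * @start_page, to the extent described by @iomapp and chain them
 * onto a new ioend.  io_remaining is bumped once per block mapped,
 * and the initial hold from xfs_alloc_ioend is only dropped by the
 * xfs_finish_ioend call at the bottom, so the conversion transaction
 * cannot run before every buffer has been set up.
 */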
STATIC int
xfs_map_unwritten(
        struct inode            *inode,
        struct page             *start_page,
        struct buffer_head      *head,
        struct buffer_head      *curr,
        unsigned long           p_offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh)
{
        struct buffer_head      *bh = curr;
        xfs_iomap_t             *tmp;
        xfs_ioend_t             *ioend;
        loff_t                  offset;
        unsigned long           nblocks = 0;

        offset = start_page->index;
        offset <<= PAGE_CACHE_SHIFT;
        offset += p_offset;

        ioend = xfs_alloc_ioend(inode);

        /* First map forwards in the page consecutive buffers
         * covering this unwritten extent
         */
        do {
                if (!buffer_unwritten(bh))
                        break;
                tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
                if (!tmp)
                        break;
                xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
                set_buffer_unwritten_io(bh);
                bh->b_private = ioend;
                p_offset += bh->b_size;
                nblocks++;
        } while ((bh = bh->b_this_page) != head);

        atomic_add(nblocks, &ioend->io_remaining);

        /* If we reached the end of the page, map forwards in any
         * following pages which are also covered by this extent.
         */
        if (bh == head) {
                struct address_space    *mapping = inode->i_mapping;
                pgoff_t                 tindex, tloff, tlast;
                unsigned long           bs;
                unsigned int            pg_offset, bbits = inode->i_blkbits;
                struct page             *page;

                tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
                tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
                tloff = min(tlast, tloff);
                for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
                        page = xfs_probe_unwritten_page(mapping,
                                                tindex, iomapp, ioend,
                                                PAGE_CACHE_SIZE, &bs, bbits);
                        if (!page)
                                break;
                        nblocks += bs;
                        atomic_add(bs, &ioend->io_remaining);
                        xfs_convert_page(inode, page, iomapp, wbc, ioend,
                                        startio, all_bh);
                        /* stop if converting the next page might add
                         * enough blocks that the corresponding byte
                         * count won't fit in our ulong page buf length */
                        if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
                                goto enough;
                }

                if (tindex == tlast &&
                    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
                        page = xfs_probe_unwritten_page(mapping,
                                                tindex, iomapp, ioend,
                                                pg_offset, &bs, bbits);
                        if (page) {
                                nblocks += bs;
                                atomic_add(bs, &ioend->io_remaining);
                                xfs_convert_page(inode, page, iomapp, wbc, ioend,
                                                startio, all_bh);
                                if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
                                        goto enough;
                        }
                }
        }

enough:
        ioend->io_size = (xfs_off_t)nblocks << block_bits;
        ioend->io_offset = offset;
        xfs_finish_ioend(ioend);
        return 0;
}
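
/*
 * Submit the locked buffers gathered in @bh_arr for write I/O.
 * The page is moved into writeback state and unlocked first; if this
 * was a probed (clustered) page that we cleaned completely, charge
 * it against wbc->nr_to_write as an "extra" page written.
 */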
STATIC void
xfs_submit_page(
        struct page             *page,
        struct writeback_control *wbc,
        struct buffer_head      *bh_arr[],
        int                     bh_count,
        int                     probed_page,
        int                     clear_dirty)
{
        struct buffer_head      *bh;
        int                     i;

        BUG_ON(PageWriteback(page));
        if (bh_count)
                set_page_writeback(page);
        if (clear_dirty)
                clear_page_dirty(page);
        unlock_page(page);

        if (bh_count) {
                for (i = 0; i < bh_count; i++) {
                        bh = bh_arr[i];
                        mark_buffer_async_write(bh);
                        if (buffer_unwritten(bh))
                                set_buffer_unwritten_io(bh);
                        set_buffer_uptodate(bh);
                        clear_buffer_dirty(bh);
                }

                for (i = 0; i < bh_count; i++)
                        submit_bh(WRITE, bh_arr[i]);

                if (probed_page && clear_dirty)
                        wbc->nr_to_write--;     /* Wrote an "extra" page */
        }
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        xfs_iomap_t             *iomapp,
        struct writeback_control *wbc,
        void                    *private,
        int                     startio,
        int                     all_bh)
{
        struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
        xfs_iomap_t             *mp = iomapp, *tmp;
        unsigned long           offset, end_offset;
        int                     index = 0;
        int                     bbits = inode->i_blkbits;
        int                     len, page_dirty;

        end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         */
        len = 1 << inode->i_blkbits;
        end_offset = max(end_offset, PAGE_CACHE_SIZE);
        end_offset = roundup(end_offset, len);
        page_dirty = end_offset / len;

        offset = 0;
        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!(PageUptodate(page) || buffer_uptodate(bh)))
                        continue;
                if (buffer_mapped(bh) && all_bh &&
                    !(buffer_unwritten(bh) || buffer_delay(bh))) {
                        if (startio) {
                                lock_buffer(bh);
                                bh_arr[index++] = bh;
                                page_dirty--;
                        }
                        continue;
                }
                tmp = xfs_offset_to_map(page, mp, offset);
                if (!tmp)
                        continue;
                ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
                ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

                /* If this is a new unwritten extent buffer (i.e. one
                 * that we haven't passed in private data for), we must
                 * now map this buffer too.
                 */
                if (buffer_unwritten(bh) && !bh->b_end_io) {
                        ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
                        xfs_map_unwritten(inode, page, head, bh, offset,
                                        bbits, tmp, wbc, startio, all_bh);
                } else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
                        xfs_map_at_offset(page, bh, offset, bbits, tmp);
                        if (buffer_unwritten(bh)) {
                                set_buffer_unwritten_io(bh);
                                bh->b_private = private;
                                ASSERT(private);
                        }
                }
                if (startio) {
                        bh_arr[index++] = bh;
                } else {
                        set_buffer_dirty(bh);
                        unlock_buffer(bh);
                        mark_buffer_dirty(bh);
                }
                page_dirty--;
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (startio && index) {
                xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
        } else {
                unlock_page(page);
        }
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        xfs_iomap_t             *iomapp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh,
        pgoff_t                 tlast)
{
        struct page             *page;

        for (; tindex <= tlast; tindex++) {
                page = xfs_probe_delalloc_page(inode, tindex);
                if (!page)
                        break;
                xfs_convert_page(inode, page, iomapp, wbc, NULL,
                                startio, all_bh);
        }
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_states will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
        struct inode            *inode,
        struct page             *page,
        struct writeback_control *wbc,
        int                     startio,
        int                     unmapped) /* also implies page uptodate */
{
        struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
        xfs_iomap_t             *iomp, iomap;
        loff_t                  offset;
        unsigned long           p_offset = 0;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
        int                     len, err, i, cnt = 0, uptodate = 1;
        int                     flags;
        int                     page_dirty;

        /* wait for other IO threads? */
        flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        if (startio)
                                unlock_page(page);
                        return 0;
                }
        }

        end_offset = min_t(unsigned long long,
                        (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         */
        len = 1 << inode->i_blkbits;
        p_offset = max(p_offset, PAGE_CACHE_SIZE);
        p_offset = roundup(p_offset, len);
        page_dirty = p_offset / len;

        iomp = NULL;
        p_offset = 0;
        bh = head = page_buffers(page);

        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
                        continue;

                if (iomp) {
                        iomp = xfs_offset_to_map(page, &iomap, p_offset);
                }

                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 */
                if (buffer_unwritten(bh)) {
                        if (!startio)
                                continue;
                        if (!iomp) {
                                err = xfs_map_blocks(inode, offset, len, &iomap,
                                                BMAPI_WRITE|BMAPI_IGNSTATE);
                                if (err) {
                                        goto error;
                                }
                                iomp = xfs_offset_to_map(page, &iomap,
                                                                p_offset);
                        }
                        if (iomp) {
                                if (!bh->b_end_io) {
                                        err = xfs_map_unwritten(inode, page,
                                                        head, bh, p_offset,
                                                        inode->i_blkbits, iomp,
                                                        wbc, startio, unmapped);
                                        if (err) {
                                                goto error;
                                        }
                                } else {
                                        set_bit(BH_Lock, &bh->b_state);
                                }
                                BUG_ON(!buffer_locked(bh));
                                bh_arr[cnt++] = bh;
                                page_dirty--;
                        }
                /*
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 */
                } else if (buffer_delay(bh)) {
                        if (!iomp) {
                                err = xfs_map_blocks(inode, offset, len, &iomap,
                                                BMAPI_ALLOCATE | flags);
                                if (err) {
                                        goto error;
                                }
                                iomp = xfs_offset_to_map(page, &iomap,
                                                                p_offset);
                        }
                        if (iomp) {
                                xfs_map_at_offset(page, bh, p_offset,
                                                inode->i_blkbits, iomp);
                                if (startio) {
                                        bh_arr[cnt++] = bh;
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
                                page_dirty--;
                        }
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {

                        if (!buffer_mapped(bh)) {
                                int     size;

                                /*
                                 * Getting here implies an unmapped buffer
                                 * was found, and we are in a path where we
                                 * need to write the whole page out.
                                 */
                                if (!iomp) {
                                        size = xfs_probe_unmapped_cluster(
                                                        inode, page, bh, head);
                                        err = xfs_map_blocks(inode, offset,
                                                        size, &iomap,
                                                        BMAPI_WRITE|BMAPI_MMAP);
                                        if (err) {
                                                goto error;
                                        }
                                        iomp = xfs_offset_to_map(page, &iomap,
                                                                     p_offset);
                                }
                                if (iomp) {
                                        xfs_map_at_offset(page,
                                                        bh, p_offset,
                                                        inode->i_blkbits, iomp);
                                        if (startio) {
                                                bh_arr[cnt++] = bh;
                                        } else {
                                                set_buffer_dirty(bh);
                                                unlock_buffer(bh);
                                                mark_buffer_dirty(bh);
                                        }
                                        page_dirty--;
                                }
                        } else if (startio) {
                                if (buffer_uptodate(bh) &&
                                    !test_and_set_bit(BH_Lock, &bh->b_state)) {
                                        bh_arr[cnt++] = bh;
                                        page_dirty--;
                                }
                        }
                }
        } while (offset += len, p_offset += len,
                 ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio) {
                xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
        }

        if (iomp) {
                offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, iomp, wbc,
                                        startio, unmapped, tlast);
        }

        return page_dirty;

error:
        for (i = 0; i < cnt; i++) {
                unlock_buffer(bh_arr[i]);
        }

        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                if (!unmapped) {
                        block_invalidatepage(page, 0);
                }
                ClearPageUptodate(page);
        }
        return err;
}
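
/*
 * Common implementation behind both linvfs_get_block and
 * linvfs_get_blocks_direct: map @iblock (and, when @blocks is set, as
 * much of the following range as possible) through VOP_BMAP and
 * translate the resulting xfs_iomap_t into buffer_head state.
 */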
STATIC int
__linvfs_get_block(
        struct inode            *inode,
        sector_t                iblock,
        unsigned long           blocks,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct,
        bmapi_flags_t           flags)
{
        vnode_t                 *vp = LINVFS_GET_VP(inode);
        xfs_iomap_t             iomap;
        xfs_off_t               offset;
        ssize_t                 size;
        int                     retpbbm = 1;
        int                     error;

        if (blocks) {
                offset = blocks << inode->i_blkbits;    /* 64 bit goodness */
                size = (ssize_t) min_t(xfs_off_t, offset, LONG_MAX);
        } else {
                size = 1 << inode->i_blkbits;
        }

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        VOP_BMAP(vp, offset, size,
                create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
        if (error)
                return -error;

        if (retpbbm == 0)
                return 0;

        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                xfs_daddr_t     bn;
                xfs_off_t       delta;

                /* For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        delta = offset - iomap.iomap_offset;
                        delta >>= inode->i_blkbits;

                        bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
                        bn += delta;
                        BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
                        bh_result->b_blocknr = bn;
                        set_buffer_mapped(bh_result);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        /* If this is a realtime file, data might be on a new device */
        bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

        /* If we previously allocated a block out beyond eof and
         * we are now coming back to use it then we will need to
         * flag it as new even if it has a disk address.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
             (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
                set_buffer_new(bh_result);

        if (iomap.iomap_flags & IOMAP_DELAY) {
                BUG_ON(direct);
                if (create) {
                        set_buffer_uptodate(bh_result);
                        set_buffer_mapped(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        if (blocks) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
                                iomap.iomap_bsize - iomap.iomap_delta,
                                blocks << inode->i_blkbits);
                bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
        }

        return 0;
}

int
linvfs_get_block(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __linvfs_get_block(inode, iblock, 0, bh_result,
                                        create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
        unsigned long           max_blocks,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
                                        create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
linvfs_end_io_direct(
        struct kiocb    *iocb,
        loff_t          offset,
        ssize_t         size,
        void            *private)
{
        xfs_ioend_t     *ioend = iocb->private;

        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
         * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
         */
        if (private && size > 0) {
                ioend->io_offset = offset;
                ioend->io_size = size;
                xfs_finish_ioend(ioend);
        } else {
                ASSERT(size >= 0);
                xfs_destroy_ioend(ioend);
        }

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;
}
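
/*
 * Direct I/O entry point.  The initial BMAPI_DEVICE mapping call only
 * selects the target block device (data or realtime); per-range block
 * mapping happens in linvfs_get_blocks_direct.  An ioend is attached
 * to iocb->private so linvfs_end_io_direct can queue unwritten extent
 * conversion on completion.
 */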
STATIC ssize_t
linvfs_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
        loff_t                  offset,
        unsigned long           nr_segs)
{
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
        vnode_t         *vp = LINVFS_GET_VP(inode);
        xfs_iomap_t     iomap;
        int             maps = 1;
        int             error;
        ssize_t         ret;

        VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
        if (error)
                return -error;

        iocb->private = xfs_alloc_ioend(inode);

        ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                iomap.iomap_target->pbr_bdev,
                iov, offset, nr_segs,
                linvfs_get_blocks_direct,
                linvfs_end_io_direct);

        if (unlikely(ret <= 0 && iocb->private))
                xfs_destroy_ioend(iocb->private);
        return ret;
}
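
/*
 * bmap support - delalloc pages are flushed (and thereby given real
 * disk mappings) before handing the lookup to generic_block_bmap.
 */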
STATIC sector_t
linvfs_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct inode            *inode = (struct inode *)mapping->host;
        vnode_t                 *vp = LINVFS_GET_VP(inode);
        int                     error;

        vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

        VOP_RWLOCK(vp, VRWLOCK_READ);
        VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
        VOP_RWUNLOCK(vp, VRWLOCK_READ);
        return generic_block_bmap(mapping, block, linvfs_get_block);
}

STATIC int
linvfs_readpage(
        struct file             *unused,
        struct page             *page)
{
        return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
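
/*
 * Classify the buffers attached to a page: report whether any are
 * delalloc, unmapped (uptodate without a disk mapping) or unwritten.
 * A buffer flagged unwritten but not delayed has its unwritten state
 * cleared here rather than being counted.
 */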
STATIC void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unmapped,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;
        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh) && !buffer_delay(bh))
                        clear_buffer_unwritten(bh);
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first; if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
        struct page             *page,
        struct writeback_control *wbc)
{
        int                     error;
        int                     need_trans;
        int                     delalloc, unmapped, unwritten;
        struct inode            *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         *  1. There are delalloc buffers on the page
         *  2. The page is uptodate and we have unmapped buffers
         *  3. The page is uptodate and we have no buffers
         *  4. There are unwritten buffers on the page
         */
        if (!page_has_buffers(page)) {
                unmapped = 1;
                need_trans = 1;
        } else {
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
                if (!PageUptodate(page))
                        unmapped = 0;
                need_trans = delalloc + unmapped + unwritten;
        }

        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed
         * then mark the page dirty again and leave the page
         * as is.
         */
        if (PFLAGS_TEST_FSTRANS() && need_trans)
                goto out_fail;

        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
                goto out_fail;
        if (unlikely(error < 0))
                goto out_unlock;

        return 0;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
out_unlock:
        unlock_page(page);
        return error;
}

STATIC int
linvfs_invalidate_page(
        struct page             *page,
        unsigned long           offset)
{
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
        return block_invalidatepage(page, offset);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  buffer heads will be dirty and possibly
 *    delalloc.  If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
        struct page             *page,
        gfp_t                   gfp_mask)
{
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (PFLAGS_TEST_FSTRANS())
                return 0;

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk, that will be done by the caller.
         * Never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
}

STATIC int
linvfs_prepare_write(
        struct file             *file,
        struct page             *page,
        unsigned int            from,
        unsigned int            to)
{
        return block_prepare_write(page, from, to, linvfs_get_block);
}
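
/*
 * The address space operations for XFS inodes: reads go through the
 * generic mpage helpers, while writeback runs through the delalloc
 * and unwritten extent conversion machinery above.
 */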
struct address_space_operations linvfs_aops = {
        .readpage               = linvfs_readpage,
        .readpages              = linvfs_readpages,
        .writepage              = linvfs_writepage,
        .sync_page              = block_sync_page,
        .releasepage            = linvfs_release_page,
        .invalidatepage         = linvfs_invalidate_page,
        .prepare_write          = linvfs_prepare_write,
        .commit_write           = generic_commit_write,
        .bmap                   = linvfs_bmap,
        .direct_IO              = linvfs_direct_IO,
};