xfs_aops.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
/*
 * Schedule I/O completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
}
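/*
 * Tear down an ioend once all I/O on it has completed: drop the
 * hold on the vnode's I/O count (waking any waiters) and return
 * the ioend to the mempool it was allocated from.
 */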
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	struct buffer_head	*bh, *next;
	int			error;

	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);

	/* ioend->io_buffer_head is only non-NULL for buffered I/O */
	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;

		bh->b_end_io = NULL;
		clear_buffer_unwritten(bh);
		end_buffer_async_write(bh, ioend->io_uptodate);
	}

	xfs_destroy_ioend(ioend);
}
/*
 * Allocate and initialise an I/O completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially.  This holds the ioend until all
	 * the I/O has been submitted, preventing the completion callback
	 * from being run before we have finished starting the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);

	return ioend;
}
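/*
 * I/O completion handler for buffers over unwritten extents.  This may
 * run in interrupt context, so it only chains the buffer onto the ioend
 * and drops a reference; the actual extent conversion happens later
 * from process context via the workqueue in xfs_end_bio_unwritten().
 */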
void
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_ioend_t		*ioend = bh->b_private;
	static spinlock_t	unwritten_done_lock = SPIN_LOCK_UNLOCKED;
	unsigned long		flags;

	ASSERT(buffer_unwritten(bh));
	bh->b_end_io = NULL;

	if (!uptodate)
		ioend->io_uptodate = 0;

	/*
	 * Deep magic here.  We reuse b_private in the buffer_heads to build
	 * a chain for completing the I/O from user context after we've issued
	 * a transaction to convert the unwritten extent.
	 */
	spin_lock_irqsave(&unwritten_done_lock, flags);
	bh->b_private = ioend->io_buffer_head;
	ioend->io_buffer_head = bh;
	spin_unlock_irqrestore(&unwritten_done_lock, flags);

	xfs_finish_ioend(ioend);
}
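/*
 * Map the given file range to filesystem blocks via VOP_BMAP,
 * marking the vnode modified if this was an allocating call.
 * Returns a negative errno on failure, as the VFS expects.
 */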
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}
/*
 * Find the block mapping in @iomapp that covers the given @offset
 * within a @page, or return NULL if the offset lies outside the
 * mapping.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
		return iomapp;
	return NULL;
}
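/*
 * Attach the on-disk block number from @iomapp to the buffer head
 * covering @offset in @page: compute the disk address, point the
 * buffer at the right block device, and mark it mapped rather than
 * delayed.  The buffer is returned locked.
 */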
STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	loff_t			delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
}
/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	pgoff_t			index,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		*ioend,
	unsigned long		max_offset,
	unsigned long		*fsbs,
	unsigned int		bbits)
{
	struct page		*page;

	page = find_trylock_page(mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		*fsbs = 0;
		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
				break;
			if (!xfs_offset_to_map(page, iomapp, p_offset))
				break;
			if (p_offset >= max_offset)
				break;
			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = ioend;
			p_offset += bh->b_size;
			(*fsbs)++;
		} while ((bh = bh->b_this_page) != head);

		if (p_offset)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}
/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	pgoff_t			index,
	unsigned int		pg_offset)
{
	struct page		*page;
	int			ret = 0;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

out:
	unlock_page(page);
	return ret;
}
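/*
 * Measure, in bytes, how far an unmapped cluster extends: first sum
 * the unmapped buffers forward in this page, then probe up to 64
 * following pages (bounded by EOF) for further dirty, unmapped pages
 * that could be covered by the same mapping call.
 */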
STATIC unsigned int
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			break;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	if (bh == head) {
		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		/* Prune this back to avoid pathological behavior */
		tloff = min(tlast, startpage->index + 64);
		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
			len = xfs_probe_unmapped_page(mapping, tindex,
							PAGE_CACHE_SIZE);
			if (!len)
				return total;
			total += len;
		}
		if (tindex == tlast &&
		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			total += xfs_probe_unmapped_page(mapping,
							tindex, pg_offset);
		}
	}
	return total;
}
/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
	struct inode		*inode,
	pgoff_t			index)
{
	struct page		*page;

	page = find_trylock_page(inode->i_mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {
				acceptable = 0;
				break;
			} else if (buffer_delay(bh)) {
				acceptable = 1;
			}
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}
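/*
 * Map a run of unwritten buffers starting at @curr in @start_page to
 * the extent described by @iomapp, then extend the mapping across any
 * following pages covered by the same extent.  All buffers are chained
 * onto a freshly allocated ioend so the extent can be converted to
 * written state once the I/O completes.
 */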
STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_ioend_t		*ioend;
	loff_t			offset;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	ioend = xfs_alloc_ioend(inode);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = ioend;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &ioend->io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, ioend,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &ioend->io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, ioend,
							startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, ioend,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &ioend->io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, ioend,
							startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	ioend->io_size = (xfs_off_t)nblocks << block_bits;
	ioend->io_offset = offset;
	xfs_finish_ioend(ioend);
	return 0;
}
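/*
 * Start writeback on a page: mark the collected buffers async-write
 * and submit them, unlocking the page first.  For pages found by
 * probing beyond the original writepage target, wbc->nr_to_write is
 * also decremented so the writeback accounting stays honest.
 */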
STATIC void
xfs_submit_page(
	struct page		*page,
	struct writeback_control *wbc,
	struct buffer_head	*bh_arr[],
	int			bh_count,
	int			probed_page,
	int			clear_dirty)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	if (bh_count)
		set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);

	if (bh_count) {
		for (i = 0; i < bh_count; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < bh_count; i++)
			submit_bh(WRITE, bh_arr[i]);

		if (probed_page && clear_dirty)
			wbc->nr_to_write--;	/* Wrote an "extra" page */
	}
}
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		offset, end_offset;
	int			index = 0;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;

	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	end_offset = max(end_offset, PAGE_CACHE_SIZE);
	end_offset = roundup(end_offset, len);
	page_dirty = end_offset / len;

	offset = 0;
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !(buffer_unwritten(bh) || buffer_delay(bh))) {
			if (startio) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
				page_dirty--;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
		page_dirty--;
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (startio && index) {
		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
	} else {
		unlock_page(page);
	}
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct page		*page;

	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks (or which block, for
 * that matter) are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only ones set up by BPW/BCW will have
 * valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode		*inode,
	struct page		*page,
	struct writeback_control *wbc,
	int			startio,
	int			unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags;
	int			page_dirty;

	/* wait for other I/O threads? */
	flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	end_offset = min_t(unsigned long long,
			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	p_offset = max(p_offset, PAGE_CACHE_SIZE);
	p_offset = roundup(p_offset, len);
	page_dirty = p_offset / len;

	iomp = NULL;
	p_offset = 0;
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!startio)
				continue;
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_WRITE|BMAPI_IGNSTATE);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err) {
						goto error;
					}
				} else {
					set_bit(BH_Lock, &bh->b_state);
				}
				BUG_ON(!buffer_locked(bh));
				bh_arr[cnt++] = bh;
				page_dirty--;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								     p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty--;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty--;
				}
			}
		}
	} while (offset += len, p_offset += len,
		 ((bh = bh->b_this_page) != head));
	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
	}

	if (iomp) {
		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
					startio, unmapped, tlast);
	}

	return page_dirty;

error:
	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);
	}

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped) {
			block_invalidatepage(page, 0);
		}
		ClearPageUptodate(page);
	}
	return err;
}
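/*
 * Common get_block worker for buffered and direct I/O: map @iblock
 * (and up to @blocks further blocks) through VOP_BMAP into @bh_result,
 * translating XFS iomap state into buffer_head flags (mapped,
 * unwritten, delayed, new).
 */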
STATIC int
__linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	int			retpbbm = 1;
	int			error;
	ssize_t			size;
	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

	if (blocks)
		size = blocks << inode->i_blkbits;
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t	bn;
		loff_t		delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;
			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
		set_buffer_new(bh_result);
	}

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (blocks) {
		bh_result->b_size = (ssize_t)min(
			(loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
			(loff_t)(blocks << inode->i_blkbits));
	}

	return 0;
}
int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC void
linvfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	if (private && size > 0) {
		ioend->io_offset = offset;
		ioend->io_size = size;
		xfs_finish_ioend(ioend);
	} else {
		ASSERT(size >= 0);
		xfs_destroy_ioend(ioend);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}
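/*
 * Direct I/O entry point: look up the target block device for the
 * range via VOP_BMAP, allocate an ioend in case unwritten extents
 * need converting at completion time, and hand off to the generic
 * blockdev direct I/O code.
 */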
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	iocb->private = xfs_alloc_ioend(inode);

	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->pbr_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_end_io_direct);

	if (unlikely(ret <= 0 && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}
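/*
 * bmap address-space operation.  Flush any dirty/delalloc data first
 * so the block mapping reported back to the caller (e.g. the FIBMAP
 * ioctl) reflects what is actually on disk.
 */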
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}
STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
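/*
 * Classify the buffers on a page for the writepage/releasepage paths,
 * noting whether any are delalloc, unmapped or unwritten.  An unwritten
 * buffer that is no longer delayed has its unwritten flag cleared along
 * the way.
 */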
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first; if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}
STATIC int
linvfs_invalidate_page(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  buffer heads will be dirty and possibly
 *    delalloc.  If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}

struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};