xfs_aops.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>

STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);

#if defined(XFS_RW_TRACE)
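/*
 * Record a page I/O event (writepage, releasepage, invalidatepage) in
 * the inode's rw trace buffer, together with the on-disk and in-core
 * file sizes, the page offset, and the delalloc/unmapped/unwritten
 * state of the page's buffers.  Compiled away entirely when
 * XFS_RW_TRACE is not defined.
 */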
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
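
/*
 * I/O completion handler for buffers belonging to an unwritten extent
 * conversion: clear the unwritten flag, record any error on the
 * controlling pagebuf, and drop one reference on its outstanding I/O
 * count, firing pagebuf_iodone() when this was the last buffer.
 */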
void
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_buf_t		*pb = (xfs_buf_t *)bh->b_private;

	ASSERT(buffer_unwritten(bh));
	bh->b_end_io = NULL;
	clear_buffer_unwritten(bh);
	if (!uptodate)
		pagebuf_ioerror(pb, EIO);
	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, 1, 1);
	}
	end_buffer_async_write(bh, uptodate);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (buffered IO).
 */
STATIC void
linvfs_unwritten_convert(
	xfs_buf_t	*bp)
{
	vnode_t		*vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
	int		error;

	BUG_ON(atomic_read(&bp->pb_hold) < 1);
	VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
			BMAPI_UNWRITTEN, NULL, NULL, error);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_UNDATAIO(bp);
	vn_iowake(vp);
	pagebuf_iodone(bp, 0, 0);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (direct IO).
 */
STATIC void
linvfs_unwritten_convert_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	struct inode	*inode = iocb->ki_filp->f_dentry->d_inode;
	ASSERT(!private || inode == (struct inode *)private);

	/* private indicates an unwritten extent lay beneath this IO */
	if (private && size > 0) {
		vnode_t	*vp = LINVFS_GET_VP(inode);
		int	error;

		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	}
}
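
/*
 * Map a file range onto the underlying device via VOP_BMAP, filling in
 * *mapp.  For write/allocate requests the vnode is also marked
 * modified.  Returns a negated errno to match the VFS convention.
 */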
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

/*
 * Finds the corresponding mapping in block @map array of the
 * given @offset within a @page.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
		return iomapp;
	return NULL;
}
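
/*
 * Attach the disk mapping described by @iomapp to the buffer head for
 * the block at @offset within @page: compute the on-disk block number
 * from the iomap's start block plus the block-aligned delta, then lock
 * the buffer and mark it mapped (and no longer delalloc).
 */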
STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	loff_t			delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
}

/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head. Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	pgoff_t			index,
	xfs_iomap_t		*iomapp,
	xfs_buf_t		*pb,
	unsigned long		max_offset,
	unsigned long		*fsbs,
	unsigned int		bbits)
{
	struct page		*page;

	page = find_trylock_page(mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		*fsbs = 0;
		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
				break;
			if (!xfs_offset_to_map(page, iomapp, p_offset))
				break;
			if (p_offset >= max_offset)
				break;
			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = pb;
			p_offset += bh->b_size;
			(*fsbs)++;
		} while ((bh = bh->b_this_page) != head);

		if (p_offset)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}

/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	pgoff_t			index,
	unsigned int		pg_offset)
{
	struct page		*page;
	int			ret = 0;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

out:
	unlock_page(page);
	return ret;
}
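
/*
 * Measure how many contiguous unmapped but uptodate bytes follow @bh,
 * first within the current page and then, if the whole page qualifies,
 * across subsequent dirty pages (capped at 64 pages to bound the scan).
 * The result sizes the allocation request for an mmap write cluster.
 */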
STATIC unsigned int
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			break;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	if (bh == head) {
		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		/* Prune this back to avoid pathological behavior */
		tloff = min(tlast, startpage->index + 64);
		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
			len = xfs_probe_unmapped_page(mapping, tindex,
							PAGE_CACHE_SIZE);
			if (!len)
				return total;
			total += len;
		}
		if (tindex == tlast &&
		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			total += xfs_probe_unmapped_page(mapping,
							tindex, pg_offset);
		}
	}
	return total;
}

/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers. Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
	struct inode		*inode,
	pgoff_t			index)
{
	struct page		*page;

	page = find_trylock_page(inode->i_mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {
				acceptable = 0;
				break;
			} else if (buffer_delay(bh)) {
				acceptable = 1;
			}
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}
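
/*
 * Map a run of unwritten buffers, starting at @curr in @start_page and
 * continuing across following pages covered by the same extent, onto a
 * single pagebuf whose completion handler (linvfs_unwritten_convert)
 * converts the whole range to written extents once all buffer I/O has
 * finished.  pb_io_remaining is held at one while buffers are being
 * attached so completion cannot fire before setup is done.
 */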
STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_buf_t		*pb;
	loff_t			offset, size;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	/* get an "empty" pagebuf to manage IO completion.
	 * Proper values will be set before returning */
	pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
	if (!pb)
		return -EAGAIN;

	atomic_inc(&LINVFS_GET_VP(inode)->v_iocount);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling pagebuf_iodone too early.
	 */
	atomic_set(&pb->pb_io_remaining, 1);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = pb;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &pb->pb_io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, pb,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &pb->pb_io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, pb,
					startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, pb,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &pb->pb_io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, pb,
						startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	size = nblocks;			/* NB: using 64bit number here */
	size <<= block_bits;		/* convert fsb's to byte range */

	XFS_BUF_DATAIO(pb);
	XFS_BUF_ASYNC(pb);
	XFS_BUF_SET_SIZE(pb, size);
	XFS_BUF_SET_COUNT(pb, size);
	XFS_BUF_SET_OFFSET(pb, offset);
	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
	XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);

	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, 1, 1);
	}

	return 0;
}
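
/*
 * Kick off writeback for the locked buffers collected in bh_arr[]:
 * mark the page as under writeback, clear its dirty bit if requested,
 * then submit each buffer as an async write.  For pages picked up by
 * cluster probing we also credit wbc->nr_to_write for the extra page.
 */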
STATIC void
xfs_submit_page(
	struct page		*page,
	struct writeback_control *wbc,
	struct buffer_head	*bh_arr[],
	int			bh_count,
	int			probed_page,
	int			clear_dirty)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	if (bh_count)
		set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);

	if (bh_count) {
		for (i = 0; i < bh_count; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < bh_count; i++)
			submit_bh(WRITE, bh_arr[i]);

		if (probed_page && clear_dirty)
			wbc->nr_to_write--;	/* Wrote an "extra" page */
	}
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		offset, end_offset;
	int			index = 0;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;

	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	end_offset = max(end_offset, PAGE_CACHE_SIZE);
	end_offset = roundup(end_offset, len);
	page_dirty = end_offset / len;

	offset = 0;
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !(buffer_unwritten(bh) || buffer_delay(bh))) {
			if (startio) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
				page_dirty--;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
		page_dirty--;
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (startio && index) {
		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
	} else {
		unlock_page(page);
	}
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct page		*page;

	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; hence the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags;
	int			page_dirty;

	/* wait for other IO threads? */
	flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	end_offset = min_t(unsigned long long,
			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	p_offset = max(p_offset, PAGE_CACHE_SIZE);
	p_offset = roundup(p_offset, len);
	page_dirty = p_offset / len;

	iomp = NULL;
	p_offset = 0;
	bh = head = page_buffers(page);

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!startio)
				continue;
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_READ|BMAPI_IGNSTATE);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err) {
						goto error;
					}
				} else {
					set_bit(BH_Lock, &bh->b_state);
				}
				BUG_ON(!buffer_locked(bh));
				bh_arr[cnt++] = bh;
				page_dirty--;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {

			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								     p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty--;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty--;
				}
			}
		}
	} while (offset += len, p_offset += len,
		 ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
	}

	if (iomp) {
		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
					startio, unmapped, tlast);
	}

	return page_dirty;

error:
	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);
	}

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped) {
			block_invalidatepage(page, 0);
		}
		ClearPageUptodate(page);
	}
	return err;
}
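
/*
 * Common get_block implementation behind both the buffered and direct
 * I/O entry points: translate a logical block to a disk mapping via
 * VOP_BMAP and transcribe the resulting iomap state (mapped, new,
 * unwritten, delalloc, block device) onto the buffer head.
 */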
STATIC int
__linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	int			retpbbm = 1;
	int			error;
	ssize_t			size;
	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

	if (blocks)
		size = blocks << inode->i_blkbits;
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t		bn;
		loff_t			delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;
			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
		set_buffer_new(bh_result);
	}

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (blocks) {
		bh_result->b_size = (ssize_t)min(
			(loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
			(loff_t)(blocks << inode->i_blkbits));
	}

	return 0;
}
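
/*
 * Thin wrappers: buffered callers map one block at a time, while the
 * direct I/O path may map up to max_blocks per call and additionally
 * passes the BMAPI_DIRECT flag.
 */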
int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
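
/*
 * Direct I/O entry point: look up the target block device for the file
 * (which may differ for realtime files), then hand the transfer to the
 * generic blockdev direct I/O code, supplying our block mapper and the
 * completion callback that converts unwritten extents after a write.
 */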
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	return blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->pbr_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_unwritten_convert_direct);
}
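
/*
 * FIBMAP-style block lookup.  Dirty pages are flushed first so that
 * the on-disk mapping handed back by generic_block_bmap is current.
 */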
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}

STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
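
/*
 * Classify the buffers on a page: report whether any are delalloc,
 * unmapped (uptodate but without a disk mapping), or unwritten.  An
 * unwritten flag on a buffer that is not also delayed is treated as
 * stale and cleared as we walk the list.
 */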
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
linvfs_invalidate_page(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	int			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
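
/*
 * prepare_write: map (and if necessary allocate) the blocks backing
 * the byte range [from, to) of the page, via the generic helper.
 */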
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}
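
/*
 * Address space operations wired up for XFS inodes on Linux; this is
 * the table through which the VFS reaches everything defined above.
 */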
struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};