/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"
#include "util.h"
#include "glops.h"
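
/**
 * gfs2_page_add_databufs - add a page's buffers to the current transaction
 * @ip: the inode
 * @page: the page whose buffers are being added
 * @from: first byte of the range within the page
 * @to: end of the byte range within the page
 *
 * Walks the buffer list of @page and adds every buffer overlapping the
 * byte range [@from, @to) to the transaction as a data buffer.
 */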
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
        }
}

/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
int gfs2_get_block(struct inode *inode, sector_t lblock,
                   struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, create, bh_result);
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Ignored; this variant never allocates new blocks
 *
 * Returns: errno
 */
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, 0, bh_result);
        if (error)
                return error;
        if (bh_result->b_blocknr == 0)
                return -EIO;
        return 0;
}
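
/*
 * gfs2_get_block_direct - get_block variant used for O_DIRECT I/O
 *
 * Like gfs2_get_block_noalloc() this never allocates (@create is ignored),
 * but holes are simply left unmapped for the direct I/O code to handle
 * rather than being treated as an error.
 */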
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, 0, bh_result);
}

/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
        int error;
        int done_trans = 0;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
                unlock_page(page);
                return -EIO;
        }
        if (current->journal_info)
                goto out_ignore;

        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                unlock_page(page);
                return 0; /* don't care */
        }

        if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
                error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (error)
                        goto out_ignore;
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
                done_trans = 1;
        }
        error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        gfs2_meta_cache_flush(ip);
        return error;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}
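
/*
 * A note on the ordered/jdata branch above: attaching the page's buffers
 * to the transaction via gfs2_page_add_databufs() before calling
 * block_write_full_page() is what ties the data to the journal. Broadly
 * speaking (an interpretation, not spelled out in this file): jdata
 * journals the data blocks themselves, while ordered mode makes sure the
 * data reaches disk before the transaction that references it commits.
 */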

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For journaled files and/or ordered writes this just falls back to the
 * kernel's default writepages path for now. We will probably want to change
 * that eventually (i.e. when we look at allocate on flush).
 *
 * For the data=writeback case, though, we can already ignore buffer heads
 * and write whole extents at once, which greatly reduces the number of
 * I/O requests we send and the bmap calls we make.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
                return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

        return generic_writepages(mapping, wbc);
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        BUG_ON(page->index);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap_atomic(kaddr, KM_USER0);

        brelse(dibh);

        SetPageUptodate(page);

        return 0;
}
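
/*
 * Stuffed inodes keep their data in the dinode block itself, immediately
 * after the on-disk header, which is why the copy above starts at
 * dibh->b_data + sizeof(struct gfs2_dinode). The same layout bounds how
 * much a stuffed file can hold:
 *
 *	max stuffed size = sb_bsize - sizeof(struct gfs2_dinode)
 *
 * which is the limit gfs2_prepare_write() checks before deciding to
 * unstuff the inode.
 */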

/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *	reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct gfs2_file *gf = NULL;
        struct gfs2_holder gh;
        int error;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
                do_unlock = 1;
                error = gfs2_glock_nq_atime(&gh);
                if (unlikely(error))
                        goto out_unlock;
        }

skip_lock:
        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else
                error = mpage_readpage(page, gfs2_get_block);

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = -EIO;

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        return error;
out_unlock:
        if (error == GLR_TRYFAILED)
                error = AOP_TRUNCATED_PAGE;
        unlock_page(page);
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, so effectively we get lock-ahead as
 *    well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret = 0;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        struct gfs2_file *gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
                                 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
                do_unlock = 1;
                ret = gfs2_glock_nq_atime(&gh);
                if (ret == GLR_TRYFAILED)
                        goto out_noerror;
                if (unlikely(ret))
                        goto out_unlock;
        }
skip_lock:
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
out_noerror:
        ret = 0;
out_unlock:
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */
static int gfs2_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int data_blocks, ind_blocks, rblocks;
        int alloc_required;
        int error = 0;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
        loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        struct gfs2_alloc *al;
        unsigned int write_len = to - from;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
        error = gfs2_glock_nq_atime(&ip->i_gh);
        if (unlikely(error)) {
                if (error == GLR_TRYFAILED) {
                        unlock_page(page);
                        error = AOP_TRUNCATED_PAGE;
                }
                goto out_uninit;
        }

        gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);

        error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required);
        if (error)
                goto out_unlock;

        ip->i_alloc.al_requested = 0;
        if (alloc_required) {
                al = gfs2_alloc_get(ip);

                error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
                if (error)
                        goto out_alloc_put;

                error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
                if (error)
                        goto out_qunlock;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;

        error = gfs2_trans_begin(sdp, rblocks, 0);
        if (error)
                goto out;

        if (gfs2_is_stuffed(ip)) {
                if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page))
                        error = stuffed_readpage(ip, page);
                goto out;
        }

prepare_write:
        error = block_prepare_write(page, from, to, gfs2_get_block);

out:
        if (error) {
                gfs2_trans_end(sdp);
                if (alloc_required) {
                        gfs2_inplace_release(ip);
out_qunlock:
                        gfs2_quota_unlock(ip);
out_alloc_put:
                        gfs2_alloc_put(ip);
                }
out_unlock:
                gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
                gfs2_holder_uninit(&ip->i_gh);
        }

        return error;
}
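
/*
 * A worked reading of the reservation arithmetic above, keeping the
 * constants symbolic since their values live in the headers:
 *
 *	rblocks = RES_DINODE + ind_blocks
 *	        + data_blocks (jdata only, min. 1, as data is journaled too)
 *	        + RES_STATFS + RES_QUOTA (only when blocks are being added)
 *
 * i.e. room in the transaction for the dinode, any indirect blocks the
 * mapping may dirty, and the statfs/quota bookkeeping an allocation needs.
 */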

/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */
static int gfs2_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error = -EOPNOTSUPP;
        struct buffer_head *dibh;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_dinode *di;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
                goto fail_nounlock;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_endtrans;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        di = (struct gfs2_dinode *)dibh->b_data;

        if (gfs2_is_stuffed(ip)) {
                u64 file_size;
                void *kaddr;

                file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;

                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
                       kaddr + from, to - from);
                kunmap_atomic(kaddr, KM_USER0);

                SetPageUptodate(page);

                if (inode->i_size < file_size) {
                        i_size_write(inode, file_size);
                        mark_inode_dirty(inode);
                }
        } else {
                if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
                    gfs2_is_jdata(ip))
                        gfs2_page_add_databufs(ip, page, from, to);
                error = generic_commit_write(file, page, from, to);
                if (error)
                        goto fail;
        }

        if (ip->i_di.di_size < inode->i_size) {
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
        }

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        gfs2_glock_dq_m(1, &ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return 0;

fail:
        brelse(dibh);
fail_endtrans:
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        gfs2_glock_dq_m(1, &ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
        ClearPageUptodate(page);
        return error;
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}
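
/*
 * discard_buffer - detach a buffer from the log and reset its state
 *
 * Strips the gfs2_bufdata back-pointer under the log lock, then clears the
 * dirty/mapped/req/new/delay bits so the buffer can be thrown away safely.
 */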
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        bd = bh->b_private;
        if (bd) {
                bd->bd_bh = NULL;
                bh->b_private = NULL;
        }
        gfs2_log_unlock(sdp);

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        unlock_buffer(bh);
}
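
/*
 * gfs2_invalidatepage - invalidate (part of) a page
 *
 * Discards every buffer that starts at or after @offset; when the whole
 * page is being invalidated (@offset == 0) it also tries to release the
 * page's buffers outright.
 */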
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off)
                        discard_buffer(sdp, bh);

                curr_off = next_off;
                bh = next;
        } while (bh != head);

        if (!offset)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a journaled file makes any sense. For now we'll silently fall
         * back to buffered I/O, and we do the same for stuffed files
         * since they are (a) small and (b) unaligned.
         */
        if (gfs2_is_jdata(ip))
                return 0;
        if (gfs2_is_stuffed(ip))
                return 0;
        if (offset > i_size_read(&ip->i_inode))
                return 0;
        return 1;
}

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need to change is the atime, and this lock
         * mode ensures that other nodes have flushed their buffered read
         * caches (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
        rv = gfs2_glock_nq_atime(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
                                           iov, offset, nr_segs,
                                           gfs2_get_block_direct, NULL);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);

        return rv;
}

/**
 * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
 * @bh: the buffer we're stuck on
 *
 */
static void stuck_releasepage(struct buffer_head *bh)
{
        struct inode *inode = bh->b_page->mapping->host;
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_bufdata *bd = bh->b_private;
        struct gfs2_glock *gl;
        static unsigned limit = 0;

        if (limit > 3)
                return;
        limit++;

        fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
        fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
                (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
        fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
        fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

        if (!bd)
                return;

        gl = bd->bd_gl;

        fs_warn(sdp, "gl = (%u, %llu)\n",
                gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

        fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
                (list_empty(&bd->bd_list_tr)) ? "no" : "yes",
                (list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

        if (gl->gl_ops == &gfs2_inode_glops) {
                struct gfs2_inode *ip = gl->gl_object;
                unsigned int x;

                if (!ip)
                        return;

                fs_warn(sdp, "ip = %llu %llu\n",
                        (unsigned long long)ip->i_num.no_formal_ino,
                        (unsigned long long)ip->i_num.no_addr);

                for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
                        fs_warn(sdp, "ip->i_cache[%u] = %s\n",
                                x, (ip->i_cache[x]) ? "!NULL" : "NULL");
        }
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if the buffers could not be released, non-zero otherwise
 */
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct inode *aspace = page->mapping->host;
        struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;
        unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

        if (!page_has_buffers(page))
                goto out;

        head = bh = page_buffers(page);
        do {
                while (atomic_read(&bh->b_count)) {
                        if (!atomic_read(&aspace->i_writecount))
                                return 0;

                        if (!(gfp_mask & __GFP_WAIT))
                                return 0;

                        if (time_after_eq(jiffies, t)) {
                                stuck_releasepage(bh);
                                /* should we withdraw here? */
                                return 0;
                        }

                        yield();
                }

                gfs2_assert_warn(sdp, !buffer_pinned(bh));
                gfs2_assert_warn(sdp, !buffer_dirty(bh));

                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
                        gfs2_assert_warn(sdp, !bd->bd_ail);
                        bd->bd_bh = NULL;
                        if (!list_empty(&bd->bd_le.le_list))
                                bd = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

out:
        return try_to_free_buffers(page);
}
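
/*
 * The address space operations for regular GFS2 files. The VFS dispatches
 * page reads, writeback, direct I/O and page invalidation for these inodes
 * through this table.
 */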
const struct address_space_operations gfs2_file_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .prepare_write = gfs2_prepare_write,
        .commit_write = gfs2_commit_write,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
};
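
/*
 * A minimal sketch of how a table like this gets wired up; the init
 * function below is hypothetical, but the a_ops assignment is the
 * standard VFS pattern:
 *
 *	static void example_init_file_inode(struct inode *inode)
 *	{
 *		inode->i_mapping->a_ops = &gfs2_file_aops;
 *	}
 *
 * After that, the generic file read/write paths and the writeback code
 * call back into the functions above via inode->i_mapping->a_ops.
 */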