ops_address.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"
#include "super.h"
#include "util.h"
#include "glops.h"
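
/**
 * gfs2_page_add_databufs - Add the buffers in a page range to a transaction
 * @ip: The inode
 * @page: The (locked) page whose buffers are to be added
 * @from: Start of the byte range within the page
 * @to: End of the byte range within the page
 *
 * Walks the buffer heads attached to @page and adds each one which
 * overlaps the given byte range to the current transaction. For jdata
 * inodes the buffers are marked uptodate first.
 */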
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
        }
}

/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
                   struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, create, bh_result);
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, 0, bh_result);
        if (error)
                return error;
        if (bh_result->b_blocknr == 0)
                return -EIO;
        return 0;
}
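
/**
 * gfs2_get_block_direct - Fills in a buffer head, for direct I/O
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Ignored; this path never allocates blocks
 *
 * Returns: errno
 */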
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, 0, bh_result);
}

/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
        int error;
        int done_trans = 0;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
                unlock_page(page);
                return -EIO;
        }
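
        /*
         * If this task already has a transaction open we must not
         * recurse into the filesystem; redirty the page and let it be
         * written later.
         */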
        if (current->journal_info)
                goto out_ignore;

        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                unlock_page(page);
                return 0; /* don't care */
        }

        if ((sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) &&
            PageChecked(page)) {
                ClearPageChecked(page);
                error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (error)
                        goto out_ignore;
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
                done_trans = 1;
        }
        error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        gfs2_meta_cache_flush(ip);
        return error;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For journaled files and/or ordered writes this just falls back to the
 * kernel's default writepages path for now. We will probably want to change
 * that eventually (i.e. when we look at allocate on flush).
 *
 * For the data=writeback case though we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */

static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
                return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

        return generic_writepages(mapping, wbc);
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->nopage(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}

/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct gfs2_file *gf = NULL;
        struct gfs2_holder gh;
        int error;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                /* gfs2_sharewrite_fault has grabbed the ip->i_gl already */
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
                do_unlock = 1;
                error = gfs2_glock_nq_atime(&gh);
                if (unlikely(error))
                        goto out_unlock;
        }

skip_lock:
        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else
                error = mpage_readpage(page, gfs2_get_block);

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = -EIO;

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        return error;
out_unlock:
        unlock_page(page);
        if (error == GLR_TRYFAILED) {
                error = AOP_TRUNCATED_PAGE;
                yield();
        }
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from (as with readpage, this may be NULL)
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, so effectively we have lock-ahead as
 *    well as read-ahead.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret = 0;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        struct gfs2_file *gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
                                 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
                do_unlock = 1;
                ret = gfs2_glock_nq_atime(&gh);
                if (ret == GLR_TRYFAILED)
                        goto out_noerror;
                if (unlikely(ret))
                        goto out_unlock;
        }
skip_lock:
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;

out_noerror:
        ret = 0;
out_unlock:
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int data_blocks, ind_blocks, rblocks;
        int alloc_required;
        int error = 0;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
        loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        struct gfs2_alloc *al;
        unsigned int write_len = to - from;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
        error = gfs2_glock_nq_atime(&ip->i_gh);
        if (unlikely(error)) {
                if (error == GLR_TRYFAILED) {
                        unlock_page(page);
                        error = AOP_TRUNCATED_PAGE;
                        yield();
                }
                goto out_uninit;
        }

        gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);

        error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required);
        if (error)
                goto out_unlock;

        ip->i_alloc.al_requested = 0;
        if (alloc_required) {
                al = gfs2_alloc_get(ip);

                error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
                if (error)
                        goto out_alloc_put;

                error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
                if (error)
                        goto out_qunlock;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;

        error = gfs2_trans_begin(sdp, rblocks, 0);
        if (error)
                goto out_trans_fail;

        if (gfs2_is_stuffed(ip)) {
                if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page))
                        error = stuffed_readpage(ip, page);
                goto out;
        }

prepare_write:
        error = block_prepare_write(page, from, to, gfs2_get_block);
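
        /*
         * Error unwinding: the labels below are nested inside the
         * if-blocks so that jumping to any one of them releases exactly
         * what had been acquired by the time the error occurred.
         */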
out:
        if (error) {
                gfs2_trans_end(sdp);
out_trans_fail:
                if (alloc_required) {
                        gfs2_inplace_release(ip);
out_qunlock:
                        gfs2_quota_unlock(ip);
out_alloc_put:
                        gfs2_alloc_put(ip);
                }
out_unlock:
                gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
                gfs2_holder_uninit(&ip->i_gh);
        }

        return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */

static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);

        spin_lock(&sdp->sd_statfs_spin);
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);

        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error = -EOPNOTSUPP;
        struct buffer_head *dibh;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_dinode *di;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
                goto fail_nounlock;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_endtrans;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        di = (struct gfs2_dinode *)dibh->b_data;

        if (gfs2_is_stuffed(ip)) {
                u64 file_size;
                void *kaddr;

                file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;

                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
                       kaddr + from, to - from);
                kunmap_atomic(kaddr, KM_USER0);

                SetPageUptodate(page);

                if (inode->i_size < file_size) {
                        i_size_write(inode, file_size);
                        mark_inode_dirty(inode);
                }
        } else {
                if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
                    gfs2_is_jdata(ip))
                        gfs2_page_add_databufs(ip, page, from, to);
                error = generic_commit_write(file, page, from, to);
                if (error)
                        goto fail;
        }

        if (ip->i_di.di_size < inode->i_size) {
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
        }

        if (inode == sdp->sd_rindex)
                adjust_fs_space(inode);

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
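
        /*
         * Temporarily drop the page lock around the glock release;
         * ->commit_write() must return with the page still locked.
         */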
        unlock_page(page);
        gfs2_glock_dq_m(1, &ip->i_gh);
        lock_page(page);
        gfs2_holder_uninit(&ip->i_gh);
        return 0;

fail:
        brelse(dibh);
fail_endtrans:
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        unlock_page(page);
        gfs2_glock_dq_m(1, &ip->i_gh);
        lock_page(page);
        gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
        ClearPageUptodate(page);
        return error;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);

        if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
                SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}
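
/**
 * discard_buffer - Unhook a buffer from the journal and clear its state
 * @sdp: The superblock
 * @bh: The buffer to discard
 *
 * Detaches any gfs2_bufdata from the buffer, freeing it if the log no
 * longer references it, and then clears the buffer's state bits so that
 * the page it belongs to can be released.
 */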
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        bd = bh->b_private;
        if (bd) {
                bd->bd_bh = NULL;
                bh->b_private = NULL;
                if (!bd->bd_ail && list_empty(&bd->bd_le.le_list))
                        kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
        gfs2_log_unlock(sdp);

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        unlock_buffer(bh);
}
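
/**
 * gfs2_invalidatepage - Invalidate (part of) a page
 * @page: The page to invalidate
 * @offset: Offset in bytes of the first byte being invalidated
 *
 * Discards every buffer which starts at or beyond @offset. When the
 * whole page is invalidated (@offset == 0) the Checked flag is cleared
 * and we also try to release the page's remaining buffers.
 */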
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;

        BUG_ON(!PageLocked(page));
        if (offset == 0)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off)
                        discard_buffer(sdp, bh);

                curr_off = next_off;
                bh = next;
        } while (bh != head);

        if (!offset)
                try_to_release_page(page, 0);

        return;
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */

static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a journaled file makes any sense. For now we'll silently fall
         * back to buffered I/O, likewise we do the same for stuffed
         * files since they are (a) small and (b) unaligned.
         */
        if (gfs2_is_jdata(ip))
                return 0;

        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset > i_size_read(&ip->i_inode))
                return 0;

        return 1;
}
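
/**
 * gfs2_direct_IO - Direct (O_DIRECT) I/O on a file
 * @rw: READ or WRITE
 * @iocb: The I/O control block
 * @iov: The user buffers to transfer
 * @offset: The file offset at which to start the transfer
 * @nr_segs: The number of entries in @iov
 *
 * Returns: the number of bytes transferred, zero to fall back to
 *          buffered I/O, or errno
 */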
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need to change is the atime, and this lock
         * mode ensures that other nodes have flushed their buffered read
         * caches (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
        rv = gfs2_glock_nq_atime(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
                                           iov, offset, nr_segs,
                                           gfs2_get_block_direct, NULL);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
 * @bh: the buffer we're stuck on
 *
 */

static void stuck_releasepage(struct buffer_head *bh)
{
        struct inode *inode = bh->b_page->mapping->host;
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_bufdata *bd = bh->b_private;
        struct gfs2_glock *gl;
        static unsigned limit = 0;

        if (limit > 3)
                return;
        limit++;

        fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
        fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
                (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
        fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
        fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

        if (!bd)
                return;

        gl = bd->bd_gl;

        fs_warn(sdp, "gl = (%u, %llu)\n",
                gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

        fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
                (list_empty(&bd->bd_list_tr)) ? "no" : "yes",
                (list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

        if (gl->gl_ops == &gfs2_inode_glops) {
                struct gfs2_inode *ip = gl->gl_object;
                unsigned int x;

                if (!ip)
                        return;

                fs_warn(sdp, "ip = %llu %llu\n",
                        (unsigned long long)ip->i_no_formal_ino,
                        (unsigned long long)ip->i_no_addr);

                for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
                        fs_warn(sdp, "ip->i_cache[%u] = %s\n",
                                x, (ip->i_cache[x]) ? "!NULL" : "NULL");
        }
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct inode *aspace = page->mapping->host;
        struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;
        unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

        if (!page_has_buffers(page))
                goto out;

        head = bh = page_buffers(page);
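
        /*
         * For each buffer: wait (up to gt_stall_secs) for any remaining
         * users to drop their references, then detach and free its
         * journal bookkeeping.
         */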
        do {
                while (atomic_read(&bh->b_count)) {
                        if (!atomic_read(&aspace->i_writecount))
                                return 0;

                        if (!(gfp_mask & __GFP_WAIT))
                                return 0;

                        if (time_after_eq(jiffies, t)) {
                                stuck_releasepage(bh);
                                /* should we withdraw here? */
                                return 0;
                        }

                        yield();
                }

                gfs2_assert_warn(sdp, !buffer_pinned(bh));
                gfs2_assert_warn(sdp, !buffer_dirty(bh));

                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
                        gfs2_assert_warn(sdp, !bd->bd_ail);
                        bd->bd_bh = NULL;
                        if (!list_empty(&bd->bd_le.le_list))
                                bd = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

out:
        return try_to_free_buffers(page);
}
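
/* Address space operations for regular GFS2 files */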
const struct address_space_operations gfs2_file_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .prepare_write = gfs2_prepare_write,
        .commit_write = gfs2_commit_write,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
};