ops_address.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "page.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"
#include "util.h"
/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
{
	int new = create;
	uint64_t dblock;
	int error;
	int boundary;

	error = gfs2_block_map(inode, lblock, &new, &dblock, &boundary);
	if (error)
		return error;

	if (!dblock)
		return 0;

	map_bh(bh_result, inode->i_sb, dblock);
	if (new)
		set_buffer_new(bh_result);
	if (boundary)
		set_buffer_boundary(bh_result);

	return 0;
}
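/*
 * Illustrative sketch (not part of the original file): the get_block
 * contract that generic helpers such as block_read_full_page() and the
 * mpage code rely on. The filesystem maps a file-relative block to a
 * disk block via map_bh(), sets BH_New when the block was freshly
 * allocated (so callers zero it rather than read it), and sets
 * BH_Boundary as a hint that reading the next block needs more metadata
 * I/O, so any batched I/O should be submitted now. The caller and block
 * size below are hypothetical.
 */
#if 0
static void example_map_one_block(struct inode *inode, sector_t lblock)
{
	struct buffer_head bh = { .b_state = 0, .b_size = 4096 };

	if (gfs2_get_block(inode, lblock, &bh, 0) == 0 && buffer_mapped(&bh))
		printk("logical block %llu -> disk block %llu\n",
		       (unsigned long long)lblock,
		       (unsigned long long)bh.b_blocknr);
}
#endif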
/**
 * get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int get_block_noalloc(struct inode *inode, sector_t lblock,
			     struct buffer_head *bh_result, int create)
{
	struct gfs2_inode *ip = inode->u.generic_ip;
	int new = 0;
	uint64_t dblock;
	int error;
	int boundary;

	error = gfs2_block_map(inode, lblock, &new, &dblock, &boundary);
	if (error)
		return error;

	if (dblock)
		map_bh(bh_result, inode->i_sb, dblock);
	else if (gfs2_assert_withdraw(ip->i_sbd, !create))
		error = -EIO;
	if (boundary)
		set_buffer_boundary(bh_result);

	return error;
}
/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = page->mapping->host->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}

	error = block_write_full_page(page, get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
static int zero_readpage(struct page *page)
{
	void *kaddr;

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);

	SetPageUptodate(page);

	return 0;
}
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/* Only the first page of a stuffed file might contain data */
	if (unlikely(page->index))
		return zero_readpage(page);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);

	brelse(dibh);

	SetPageUptodate(page);

	return 0;
}
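/*
 * Note (added for clarity, not in the original file): a "stuffed" GFS2
 * inode keeps its file data inline in the dinode block itself, directly
 * after the struct gfs2_dinode header, so a stuffed file can hold at
 * most sb_bsize - sizeof(struct gfs2_dinode) bytes. That is why only
 * page 0 can contain data, and why the copy above reads from
 * dibh->b_data + sizeof(struct gfs2_dinode).
 */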
/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This points at the internal
 *        file sentinel, rather than at a regular file, if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = page->mapping->host->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_holder gh;
	int error;

	if (likely(file != &gfs2_internal_file_sentinal)) {
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
		error = gfs2_glock_nq_m_atime(1, &gh);
		if (unlikely(error))
			goto out_unlock;
	}

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else
		error = mpage_readpage(page, gfs2_get_block);

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		error = -EIO;

	if (file != &gfs2_internal_file_sentinal) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	return error;
out_unlock:
	unlock_page(page);
	if (file != &gfs2_internal_file_sentinal)
		gfs2_holder_uninit(&gh);
	goto out;
}
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything which
 *    is slightly inconvenient (such as locking conflicts between the
 *    page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We have to handle stuffed files here too.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
 *    well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_holder gh;
	unsigned page_idx;
	int ret;

	if (likely(file != &gfs2_internal_file_sentinal)) {
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
				 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
		ret = gfs2_glock_nq_m_atime(1, &gh);
		if (ret == GLR_TRYFAILED)
			goto out_noerror;
		if (unlikely(ret))
			goto out_unlock;
	}

	if (gfs2_is_stuffed(ip)) {
		struct pagevec lru_pvec;
		pagevec_init(&lru_pvec, 0);
		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
			struct page *page = list_to_page(pages);
			list_del(&page->lru);
			if (!add_to_page_cache(page, mapping,
					       page->index, GFP_KERNEL)) {
				ret = stuffed_readpage(ip, page);
				unlock_page(page);
				if (!pagevec_add(&lru_pvec, page))
					__pagevec_lru_add(&lru_pvec);
			}
			page_cache_release(page);
		}
		pagevec_lru_add(&lru_pvec);
		ret = 0;
	} else {
		/* What we really want to do .... */
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
	}

	if (likely(file != &gfs2_internal_file_sentinal)) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;

out_noerror:
	ret = 0;
out_unlock:
	/* unlock all pages, we can't do any I/O right now */
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		unlock_page(page);
		page_cache_release(page);
	}
	if (likely(file != &gfs2_internal_file_sentinal))
		gfs2_holder_uninit(&gh);
	goto out;
}
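/*
 * Note (added for clarity, not in the original file): because the glock
 * above is requested with LM_FLAG_TRY_1CB, a contended lock makes
 * gfs2_glock_nq_m_atime() return GLR_TRYFAILED instead of blocking.
 * Readahead is purely opportunistic, so in that case every page is
 * simply unlocked and released untouched and 0 is returned; any page
 * that is actually needed comes back through gfs2_readpage(), which
 * takes the glock the blocking way. Taking the glock here also warms it
 * up for subsequent reads, which is the "lock-ahead" mentioned above.
 */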
/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct gfs2_inode *ip = page->mapping->host->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
	loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	struct gfs2_alloc *al;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
	error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
	if (error)
		goto out_uninit;

	gfs2_write_calc_reserv(ip, to - from, &data_blocks, &ind_blocks);

	error = gfs2_write_alloc_required(ip, pos, to - from, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks, 0);
	if (error)
		goto out_trans_fail;

	if (gfs2_is_stuffed(ip)) {
		if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page,
						    page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page))
			error = stuffed_readpage(ip, page);
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_get_block);

out:
	if (error) {
		gfs2_trans_end(sdp);
out_trans_fail:
		if (alloc_required) {
			gfs2_inplace_release(ip);
out_qunlock:
			gfs2_quota_unlock(ip);
out_alloc_put:
			gfs2_alloc_put(ip);
		}
out_unlock:
		gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
		gfs2_holder_uninit(&ip->i_gh);
	}

	return error;
}
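/*
 * Note (added for clarity, not in the original file): the labels above
 * are deliberately nested inside "if (error) { ... }". On success the
 * function returns with the glock held, the reservation in place and
 * the transaction still open; gfs2_commit_write() below is responsible
 * for unwinding all of them. On failure, execution enters the if-block
 * at the label matching how far setup had progressed and falls through
 * the remaining teardown steps in reverse order of acquisition.
 */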
/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	int error = -EOPNOTSUPP;
	struct buffer_head *dibh;
	struct gfs2_alloc *al = &ip->i_alloc;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
		goto fail_nounlock;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_endtrans;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		uint64_t file_size;
		void *kaddr;

		file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;

		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
		       (char *)kaddr + from, to - from);
		kunmap_atomic(kaddr, KM_USER0);

		SetPageUptodate(page);

		if (inode->i_size < file_size)
			i_size_write(inode, file_size);
	} else {
		if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
		    gfs2_is_jdata(ip))
			gfs2_page_add_databufs(ip, page, from, to);
		error = generic_commit_write(file, page, from, to);
		if (error)
			goto fail;
	}

	if (ip->i_di.di_size < inode->i_size)
		ip->i_di.di_size = inode->i_size;

	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);
	gfs2_trans_end(sdp);

	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}

	gfs2_glock_dq_m(1, &ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);

	return 0;

fail:
	brelse(dibh);
fail_endtrans:
	gfs2_trans_end(sdp);
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq_m(1, &ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
	ClearPageUptodate(page);
	return error;
}
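/*
 * Note (added for clarity, not in the original file): on the stuffed
 * path, gfs2_commit_write() copies the page contents straight into the
 * dinode block (already joined to the transaction via
 * gfs2_trans_add_bh()), so no data blocks are touched at all; only the
 * unstuffed path goes through generic_commit_write() and the normal
 * buffer machinery. In both cases the dinode is re-encoded with
 * gfs2_dinode_out() so the on-disk size matches the new i_size.
 */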
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = mapping->host->u.generic_ip;
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd) {
		bd->bd_bh = NULL;
		bh->b_private = NULL;
		gfs2_log_unlock(sdp);
		brelse(bh);
	} else
		gfs2_log_unlock(sdp);

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	unlock_buffer(bh);
}
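/*
 * Note (added for clarity, not in the original file): the log lock is
 * held while breaking the bh <-> gfs2_bufdata link so the log code never
 * sees a half-detached pair, and the brelse() drops the buffer reference
 * associated with the attached bufdata. Only once the link is gone is it
 * safe to clear the buffer state under the buffer lock.
 */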
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = page->mapping->host->i_sb->s_fs_info;
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (offset <= curr_off)
			discard_buffer(sdp, bh);

		curr_off = next_off;
		bh = next;
	} while (bh != head);

	if (!offset)
		try_to_release_page(page, 0);

	return;
}
static ssize_t gfs2_direct_IO_write(struct kiocb *iocb, const struct iovec *iov,
				    loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Shared lock, even though it's a write, since we do no allocation
	 * on this path. All we need to change is the atime.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_m_atime(1, &gh);
	if (rv)
		goto out;

	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a journaled file makes any sense. For now we'll silently fall
	 * back to buffered I/O, likewise we do the same for stuffed
	 * files since they are (a) small and (b) unaligned.
	 */
	if (gfs2_is_jdata(ip))
		goto out;

	if (gfs2_is_stuffed(ip))
		goto out;

	rv = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
				  iov, offset, nr_segs, gfs2_get_block,
				  NULL, DIO_OWN_LOCKING);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);

	return rv;
}
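/*
 * Note (added for clarity, not in the original file): DIO_OWN_LOCKING
 * tells __blockdev_direct_IO() that the filesystem manages its own
 * inode locking for the transfer, so the generic direct I/O code does
 * not take i_sem around the I/O itself; the glock taken above (or, on
 * the read path, by the caller) stands in for it.
 */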
/**
 * gfs2_direct_IO
 *
 * This is called with a shared lock already held for the read path.
 * Currently, no locks are held when the write path is called.
 */

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;

	if (rw == WRITE)
		return gfs2_direct_IO_write(iocb, iov, offset, nr_segs);

	if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)) ||
	    gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
		return -EINVAL;

	return __blockdev_direct_IO(READ, iocb, inode, inode->i_sb->s_bdev, iov,
				    offset, nr_segs, gfs2_get_block, NULL,
				    DIO_OWN_LOCKING);
}
struct address_space_operations gfs2_file_aops = {
	.writepage = gfs2_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.prepare_write = gfs2_prepare_write,
	.commit_write = gfs2_commit_write,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.direct_IO = gfs2_direct_IO,
};
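/*
 * Illustrative sketch (not in the original file): these operations take
 * effect once they are attached to an inode's page-cache mapping, which
 * GFS2 does during inode setup. The function name below is hypothetical;
 * the real assignment lives in the inode-creation code.
 */
#if 0
static void example_setup_mapping(struct inode *inode)
{
	/* Route all page-cache activity for this inode through GFS2. */
	inode->i_mapping->a_ops = &gfs2_file_aops;
}
#endif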