ops_address.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "page.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"

/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
int gfs2_get_block(struct inode *inode, sector_t lblock,
                   struct buffer_head *bh_result, int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = create;
        uint64_t dblock;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
        if (error)
                return error;

        if (!dblock)
                return 0;

        map_bh(bh_result, inode->i_sb, dblock);
        if (new)
                set_buffer_new(bh_result);

        return 0;
}

/**
 * get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
static int get_block_noalloc(struct inode *inode, sector_t lblock,
                             struct buffer_head *bh_result, int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = 0;
        uint64_t dblock;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
        if (error)
                return error;

        if (dblock)
                map_bh(bh_result, inode->i_sb, dblock);
        else if (gfs2_assert_withdraw(ip->i_sbd, !create))
                error = -EIO;

        return error;
}

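/*
 * get_blocks - Map a run of disk blocks for direct I/O
 *
 * Like gfs2_get_block(), but also reports how many contiguous blocks
 * follow the mapped one (capped at @max_blocks) through
 * bh_result->b_size, as the direct I/O code expects.
 */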
static int get_blocks(struct inode *inode, sector_t lblock,
                      unsigned long max_blocks, struct buffer_head *bh_result,
                      int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = create;
        uint64_t dblock;
        uint32_t extlen;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
        if (error)
                return error;

        if (!dblock)
                return 0;

        map_bh(bh_result, inode->i_sb, dblock);
        if (new)
                set_buffer_new(bh_result);

        if (extlen > max_blocks)
                extlen = max_blocks;
        bh_result->b_size = extlen << inode->i_blkbits;

        return 0;
}

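/*
 * get_blocks_noalloc - Map a run of existing disk blocks for direct I/O
 *
 * As get_blocks(), but never allocates; an unmapped block with @create
 * set fails a withdraw assertion and returns -EIO.
 */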
static int get_blocks_noalloc(struct inode *inode, sector_t lblock,
                              unsigned long max_blocks,
                              struct buffer_head *bh_result, int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = 0;
        uint64_t dblock;
        uint32_t extlen;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
        if (error)
                return error;

        if (dblock) {
                map_bh(bh_result, inode->i_sb, dblock);
                if (extlen > max_blocks)
                        extlen = max_blocks;
                bh_result->b_size = extlen << inode->i_blkbits;
        } else if (gfs2_assert_withdraw(ip->i_sbd, !create))
                error = -EIO;

        return error;
}

/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = get_v2ip(page->mapping->host);
        struct gfs2_sbd *sdp = ip->i_sbd;
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
        int error;
        int done_trans = 0;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
                unlock_page(page);
                return -EIO;
        }
        if (get_transaction)
                goto out_ignore;

        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE - 1);
        if (page->index >= end_index + 1 || !offset) {
                page->mapping->a_ops->invalidatepage(page, 0);
                unlock_page(page);
                return 0; /* don't care */
        }

        if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
                error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (error)
                        goto out_ignore;
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize - 1);
                done_trans = 1;
        }

        error = block_write_full_page(page, get_block_noalloc, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        gfs2_meta_cache_flush(ip);

        return error;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy((char *)kaddr,
               dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset((char *)kaddr + ip->i_di.di_size,
               0,
               PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap_atomic(page, KM_USER0);

        brelse(dibh);

        SetPageUptodate(page);

        return 0;
}

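/*
 * zero_readpage - Zero-fill a page and mark it up to date
 *
 * Used for pages of a stuffed file beyond the first one, which contain
 * nothing but zeroes.
 */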
static int zero_readpage(struct page *page)
{
        void *kaddr;

        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, PAGE_CACHE_SIZE);
        kunmap_atomic(page, KM_USER0);

        SetPageUptodate(page);
        unlock_page(page);

        return 0;
}

/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = get_v2ip(page->mapping->host);
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_holder gh;
        int error;

        if (file != &gfs2_internal_file_sentinal) {
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
                error = gfs2_glock_nq_m_atime(1, &gh);
                if (error)
                        goto out_unlock;
        }

        if (gfs2_is_stuffed(ip)) {
                if (!page->index) {
                        error = stuffed_readpage(ip, page);
                        unlock_page(page);
                } else
                        error = zero_readpage(page);
        } else
                error = mpage_readpage(page, gfs2_get_block);

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = -EIO;

        if (file != &gfs2_internal_file_sentinal) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        return error;

out_unlock:
        unlock_page(page);
        goto out;
}

/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */
static int gfs2_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct gfs2_inode *ip = get_v2ip(page->mapping->host);
        struct gfs2_sbd *sdp = ip->i_sbd;
        unsigned int data_blocks, ind_blocks, rblocks;
        int alloc_required;
        int error = 0;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
        loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        struct gfs2_alloc *al;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
        error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
        if (error)
                goto out_uninit;

        gfs2_write_calc_reserv(ip, to - from, &data_blocks, &ind_blocks);

        error = gfs2_write_alloc_required(ip, pos, end - pos, &alloc_required);
        if (error)
                goto out_unlock;

        if (alloc_required) {
                al = gfs2_alloc_get(ip);

                error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
                if (error)
                        goto out_alloc_put;

                error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
                if (error)
                        goto out_qunlock;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;

        error = gfs2_trans_begin(sdp, rblocks, 0);
        if (error)
                goto out;

        if (gfs2_is_stuffed(ip)) {
                if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page))
                        error = stuffed_readpage(ip, page);
                goto out;
        }

prepare_write:
        error = block_prepare_write(page, from, to, gfs2_get_block);

out:
        if (error) {
                gfs2_trans_end(sdp);
                if (alloc_required) {
                        gfs2_inplace_release(ip);
out_qunlock:
                        gfs2_quota_unlock(ip);
out_alloc_put:
                        gfs2_alloc_put(ip);
                }
out_unlock:
                gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
                gfs2_holder_uninit(&ip->i_gh);
        }

        return error;
}

/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */
static int gfs2_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = get_v2ip(inode);
        struct gfs2_sbd *sdp = ip->i_sbd;
        int error = -EOPNOTSUPP;
        struct buffer_head *dibh;
        struct gfs2_alloc *al = &ip->i_alloc;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
                goto fail_nounlock;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_endtrans;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);

        if (gfs2_is_stuffed(ip)) {
                uint64_t file_size;
                void *kaddr;

                file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;

                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
                       (char *)kaddr + from, to - from);
                kunmap_atomic(page, KM_USER0);

                SetPageUptodate(page);

                if (inode->i_size < file_size)
                        i_size_write(inode, file_size);
        } else {
                if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
                        gfs2_page_add_databufs(ip, page, from, to);
                error = generic_commit_write(file, page, from, to);
                if (error)
                        goto fail;
        }

        if (ip->i_di.di_size < inode->i_size)
                ip->i_di.di_size = inode->i_size;

        gfs2_dinode_out(&ip->i_di, dibh->b_data);
        brelse(dibh);
        gfs2_trans_end(sdp);

        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }

        gfs2_glock_dq_m(1, &ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);

        return 0;

fail:
        brelse(dibh);
fail_endtrans:
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        gfs2_glock_dq_m(1, &ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
        ClearPageUptodate(page);
        return error;
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = get_v2ip(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

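/*
 * discard_buffer - Detach a buffer from its gfs2_bufdata and reset its state
 *
 * Drops any gfs2_bufdata attached to @bh (and the reference it held)
 * under the log lock, then clears the buffer's state flags so the page
 * it belongs to can be invalidated.
 */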
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        bd = get_v2bd(bh);
        if (bd) {
                bd->bd_bh = NULL;
                set_v2bd(bh, NULL);
                gfs2_log_unlock(sdp);
                brelse(bh);
        } else
                gfs2_log_unlock(sdp);

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        unlock_buffer(bh);
}

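/*
 * gfs2_invalidatepage - Invalidate (part of) a page
 *
 * Discards every buffer that starts at or after @offset; when the whole
 * page is being invalidated (@offset == 0), also tries to release it
 * with try_to_release_page().
 */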
static int gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = get_v2sdp(page->mapping->host->i_sb);
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int ret = 1;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return 1;

        bh = head = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off)
                        discard_buffer(sdp, bh);

                curr_off = next_off;
                bh = next;
        } while (bh != head);

        if (!offset)
                ret = try_to_release_page(page, 0);

        return ret;
}

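/*
 * gfs2_direct_IO_write - O_DIRECT write path
 *
 * Takes a shared glock (with atime update) and passes the request to
 * __blockdev_direct_IO(). Journaled-data and stuffed files are skipped
 * here and fall back to buffered I/O, as noted in the comment below.
 */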
static ssize_t gfs2_direct_IO_write(struct kiocb *iocb, const struct iovec *iov,
                                    loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = get_v2ip(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Shared lock, even though it's a write, since we do no allocation
         * on this path. All we need to change is the atime.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
        rv = gfs2_glock_nq_m_atime(1, &gh);
        if (rv)
                goto out;

        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a journaled file makes any sense. For now we'll silently fall
         * back to buffered I/O, and we do the same for stuffed files
         * since they are (a) small and (b) unaligned.
         */
        if (gfs2_is_jdata(ip))
                goto out;

        if (gfs2_is_stuffed(ip))
                goto out;

        rv = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
                                  iov, offset, nr_segs, get_blocks_noalloc,
                                  NULL, DIO_OWN_LOCKING);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);

        return rv;
}

/**
 * gfs2_direct_IO
 *
 * This is called with a shared lock already held for the read path.
 * Currently, no locks are held when the write path is called.
 */
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = get_v2ip(inode);
        struct gfs2_sbd *sdp = ip->i_sbd;

        if (rw == WRITE)
                return gfs2_direct_IO_write(iocb, iov, offset, nr_segs);

        if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)) ||
            gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
                return -EINVAL;

        return __blockdev_direct_IO(READ, iocb, inode, inode->i_sb->s_bdev, iov,
                                    offset, nr_segs, get_blocks, NULL,
                                    DIO_OWN_LOCKING);
}

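/*
 * Address space operations for GFS2 files, wiring the functions above
 * into the VFS page cache and direct I/O paths.
 */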
struct address_space_operations gfs2_file_aops = {
        .writepage = gfs2_writepage,
        .readpage = gfs2_readpage,
        .sync_page = block_sync_page,
        .prepare_write = gfs2_prepare_write,
        .commit_write = gfs2_commit_write,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .direct_IO = gfs2_direct_IO,
};