/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
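
/*
 * copy write_bytes of data from the user buffer into the locked,
 * already prepared pages, kmapping each page in turn.  Returns -EFAULT
 * if any part of the copy faults, 0 otherwise.
 */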
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
				struct page **prepared_pages,
				const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
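
/*
 * unlock and release the pages pinned by prepare_pages(), stopping at
 * the first NULL entry in the array.
 */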
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
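
/*
 * copy the data from a buffer_head into the btree as an inline file
 * extent item keyed at (inode, offset).  The data has to fit inside a
 * single leaf, so anything >= PAGE_CACHE_SIZE trips the BUG_ON below.
 */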
static int insert_inline_extent(struct btrfs_root *root, struct inode *inode,
				u64 offset, ssize_t size,
				struct buffer_head *bh)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	char *ptr, *kaddr;
	struct btrfs_trans_handle *trans;
	struct btrfs_file_extent_item *ei;
	u32 datasize;
	int err = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = offset;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	BUG_ON(size >= PAGE_CACHE_SIZE);
	datasize = btrfs_file_extent_calc_inline_size(size);

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
			    path->slots[0], struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(ei, trans->transid);
	btrfs_set_file_extent_type(ei,
				   BTRFS_FILE_EXTENT_INLINE);
	ptr = btrfs_file_extent_inline_start(ei);

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	btrfs_memcpy(root, path->nodes[0]->b_data,
		     ptr, kaddr + bh_offset(bh),
		     size);
	kunmap_atomic(kaddr, KM_USER0);
	btrfs_mark_buffer_dirty(path->nodes[0]);
fail:
	btrfs_free_path(path);
	ret = btrfs_end_transaction(trans, root);
	if (ret && !err)
		err = ret;
	mutex_unlock(&root->fs_info->fs_mutex);
	return err;
}
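
/*
 * after the data has been copied into the pages, push it out: buffers
 * mapped to block 0 become inline extents via insert_inline_extent(),
 * everything else goes through btrfs_commit_write().
 */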
static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int i;
	int offset;
	int err = 0;
	int ret;
	int this_write;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct buffer_head *bh;

	for (i = 0; i < num_pages; i++) {
		offset = pos & (PAGE_CACHE_SIZE - 1);
		this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes);
		/* FIXME, one block at a time */
		bh = page_buffers(pages[i]);

		if (buffer_mapped(bh) && bh->b_blocknr == 0) {
			ret = insert_inline_extent(root, inode,
					pages[i]->index << PAGE_CACHE_SHIFT,
					offset + this_write, bh);
			if (ret) {
				err = ret;
				goto failed;
			}
		}

		ret = btrfs_commit_write(file, pages[i], offset,
					 offset + this_write);
		pos += this_write;
		if (ret) {
			err = ret;
			goto failed;
		}
		WARN_ON(this_write > write_bytes);
		write_bytes -= this_write;
	}
failed:
	return err;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 *hint_block)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_leaf *leaf;
	int slot;
	struct btrfs_file_extent_item *extent;
	u64 extent_end = 0;
	int keep;
	struct btrfs_file_extent_item old;
	struct btrfs_path *path;
	u64 search_start = start;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;
	int recow;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;
		leaf = btrfs_buffer_leaf(path->nodes[0]);
		slot = path->slots[0];
		ret = 0;
		btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
		if (key.offset >= end || key.objectid != inode->i_ino) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY) {
			goto out;
		}
		if (recow) {
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end = key.offset +
				     (btrfs_file_extent_num_blocks(extent) <<
				      inode->i_blkbits);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf->items +
								  slot);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(
					btrfs_buffer_header(path->nodes[0]));
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		/* FIXME, there's only one inline extent allowed right now */
		if (found_inline) {
			u64 mask = root->blocksize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		if (end < extent_end && end >= key.offset) {
			if (found_extent) {
				u64 disk_blocknr =
					btrfs_file_extent_disk_blocknr(extent);
				u64 disk_num_blocks =
				      btrfs_file_extent_disk_num_blocks(extent);
				memcpy(&old, extent, sizeof(old));
				if (disk_blocknr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
						 disk_blocknr, disk_num_blocks);
					BUG_ON(ret);
				}
			}
			WARN_ON(found_inline);
			bookend = 1;
		}

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->blocksize - 1));
			if (found_extent) {
				new_num = (start - key.offset) >>
					inode->i_blkbits;
				old_num = btrfs_file_extent_num_blocks(extent);
				*hint_block =
					btrfs_file_extent_disk_blocknr(extent);
				if (btrfs_file_extent_disk_blocknr(extent)) {
					inode->i_blocks -=
						(old_num - new_num) << 3;
				}
				btrfs_set_file_extent_num_blocks(extent,
								 new_num);
				btrfs_mark_buffer_dirty(path->nodes[0]);
			} else {
				WARN_ON(1);
			}
		}

		/* delete the entire extent */
		if (!keep) {
			u64 disk_blocknr = 0;
			u64 disk_num_blocks = 0;
			u64 extent_num_blocks = 0;
			if (found_extent) {
				disk_blocknr =
				      btrfs_file_extent_disk_blocknr(extent);
				disk_num_blocks =
				      btrfs_file_extent_disk_num_blocks(extent);
				extent_num_blocks =
				      btrfs_file_extent_num_blocks(extent);
				*hint_block =
					btrfs_file_extent_disk_blocknr(extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			btrfs_release_path(root, path);
			extent = NULL;
			if (found_extent && disk_blocknr != 0) {
				inode->i_blocks -= extent_num_blocks << 3;
				ret = btrfs_free_extent(trans, root,
							disk_blocknr,
							disk_num_blocks, 0);
			}

			BUG_ON(ret);
			if (!bookend && search_start >= end) {
				ret = 0;
				goto out;
			}
			if (!bookend)
				continue;
		}

		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			ins.flags = 0;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			if (ret) {
				btrfs_print_leaf(root,
					 btrfs_buffer_leaf(path->nodes[0]));
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu\n",
				       ret, ins.objectid, ins.flags, ins.offset,
				       start, end, key.offset, extent_end);
			}
			BUG_ON(ret);
			extent = btrfs_item_ptr(
				    btrfs_buffer_leaf(path->nodes[0]),
				    path->slots[0],
				    struct btrfs_file_extent_item);
			btrfs_set_file_extent_disk_blocknr(extent,
				    btrfs_file_extent_disk_blocknr(&old));
			btrfs_set_file_extent_disk_num_blocks(extent,
				    btrfs_file_extent_disk_num_blocks(&old));

			btrfs_set_file_extent_offset(extent,
				    btrfs_file_extent_offset(&old) +
				    ((end - key.offset) >> inode->i_blkbits));
			WARN_ON(btrfs_file_extent_num_blocks(&old) <
				(extent_end - end) >> inode->i_blkbits);
			btrfs_set_file_extent_num_blocks(extent,
				    (extent_end - end) >> inode->i_blkbits);
			btrfs_set_file_extent_type(extent,
						   BTRFS_FILE_EXTENT_REG);
			btrfs_set_file_extent_generation(extent,
				    btrfs_file_extent_generation(&old));
			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (btrfs_file_extent_disk_blocknr(&old) != 0) {
				inode->i_blocks +=
				      btrfs_file_extent_num_blocks(extent) << 3;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * this gets pages into the page cache and locks them down.  It also
 * drops any existing extents in the write range, fills in holes up to
 * the start of the write, allocates new blocks (or leaves the key
 * zeroed for inline data), and maps the buffer heads to those blocks.
 */
static int prepare_pages(struct btrfs_root *root,
			 struct file *file,
			 struct page **pages,
			 size_t num_pages,
			 loff_t pos,
			 unsigned long first_index,
			 unsigned long last_index,
			 size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = file->f_path.dentry->d_inode;
	int offset;
	int err = 0;
	int this_write;
	struct buffer_head *bh;
	struct buffer_head *head;
	loff_t isize = i_size_read(inode);
	struct btrfs_trans_handle *trans;
	u64 hint_block;
	u64 num_blocks;
	u64 alloc_extent_start;
	u64 start_pos;
	struct btrfs_key ins;

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	num_blocks = (write_bytes + pos - start_pos + root->blocksize - 1) >>
			inode->i_blkbits;

	memset(pages, 0, num_pages * sizeof(struct page *));

	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			goto failed_release;
		}
		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
		wait_on_page_writeback(pages[i]);
	}

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		mutex_unlock(&root->fs_info->fs_mutex);
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	/* FIXME blocksize != 4096 */
	inode->i_blocks += num_blocks << 3;
	hint_block = 0;

	/* FIXME...EIEIO, ENOSPC and more */

	/* step one, delete the existing extents in this range */
	/* FIXME blocksize != pagesize */
	if (start_pos < inode->i_size) {
		err = btrfs_drop_extents(trans, root, inode,
			 start_pos, (pos + write_bytes + root->blocksize - 1) &
			 ~((u64)root->blocksize - 1), &hint_block);
		if (err)
			goto failed_release;
	}

	/* insert any holes we need to create */
	if (inode->i_size < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->blocksize - 1;
		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
		hole_size >>= inode->i_blkbits;
		if (last_pos_in_file < start_pos) {
			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size);
		}
		if (err)
			goto failed_release;
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	if (isize >= PAGE_CACHE_SIZE || pos + write_bytes < inode->i_size ||
	    pos + write_bytes - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		err = btrfs_alloc_extent(trans, root, inode->i_ino,
					 num_blocks, hint_block, (u64)-1,
					 &ins, 1);
		if (err)
			goto failed_truncate;
		err = btrfs_insert_file_extent(trans, root, inode->i_ino,
				       start_pos, ins.objectid, ins.offset,
				       ins.offset);
		if (err)
			goto failed_truncate;
	} else {
		ins.offset = 0;
		ins.objectid = 0;
	}
	BUG_ON(err);
	alloc_extent_start = ins.objectid;
	err = btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);

	for (i = 0; i < num_pages; i++) {
		offset = pos & (PAGE_CACHE_SIZE - 1);
		this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes);
		if (!page_has_buffers(pages[i])) {
			create_empty_buffers(pages[i],
					     root->fs_info->sb->s_blocksize,
					     (1 << BH_Uptodate));
		}
		head = page_buffers(pages[i]);
		bh = head;
		do {
			err = btrfs_map_bh_to_logical(root, bh,
						      alloc_extent_start);
			BUG_ON(err);
			if (err)
				goto failed_truncate;
			bh = bh->b_this_page;
			if (alloc_extent_start)
				alloc_extent_start++;
		} while (bh != head);
		pos += this_write;
		WARN_ON(this_write > write_bytes);
		write_bytes -= this_write;
	}
	return 0;

failed_release:
	btrfs_drop_pages(pages, num_pages);
	return err;

failed_truncate:
	btrfs_drop_pages(pages, num_pages);
	if (pos > isize)
		vmtruncate(inode, isize);
	return err;

out_unlock:
	mutex_unlock(&root->fs_info->fs_mutex);
	goto failed_release;
}
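
/*
 * the main write path: run the usual write checks, pin the first and
 * last page so partial pages are up to date for cow, then loop
 * preparing batches of pages, copying the user data into them and
 * pushing them out through dirty_and_release_pages().
 */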
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	size_t num_written = 0;
	int err = 0;
	int ret = 0;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;
	if (file->f_flags & O_DIRECT)
		return -EINVAL;
	pos = *ppos;
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (count == 0)
		goto out;
	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(pages));
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
		btrfs_btree_balance_dirty(root);
		cond_resched();
	}
	mutex_unlock(&inode->i_mutex);
out:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;
	current->backing_dev_info = NULL;
	mark_inode_dirty(inode);
	return num_written ? num_written : err;
}
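
/*
 * fsync is done by committing the running transaction, which makes
 * everything for this inode durable but is heavier than strictly
 * needed (see the FIXME below about skipping unnecessary commits).
 */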
static int btrfs_sync_file(struct file *file,
			   struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	struct btrfs_trans_handle *trans;

	/*
	 * FIXME, use inode generation number to check if we can skip the
	 * commit
	 */
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_commit_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
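
/*
 * mmap support: read faults use the generic filemap handlers, while
 * write faults are routed through btrfs_page_mkwrite.
 */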
static struct vm_operations_struct btrfs_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}
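
/*
 * file operations for regular btrfs files: reads go through the
 * generic paths, writes go through btrfs_file_write above.
 */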
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.ioctl		= btrfs_ioctl,
	.fsync		= btrfs_sync_file,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
};