file.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861
  1. /*
  2. * Copyright (C) 2007 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18. #include <linux/fs.h>
  19. #include <linux/pagemap.h>
  20. #include <linux/highmem.h>
  21. #include <linux/time.h>
  22. #include <linux/init.h>
  23. #include <linux/string.h>
  24. #include <linux/smp_lock.h>
  25. #include <linux/backing-dev.h>
  26. #include <linux/mpage.h>
  27. #include <linux/swap.h>
  28. #include <linux/writeback.h>
  29. #include <linux/statfs.h>
  30. #include <linux/compat.h>
  31. #include <linux/version.h>
  32. #include "ctree.h"
  33. #include "disk-io.h"
  34. #include "transaction.h"
  35. #include "btrfs_inode.h"
  36. #include "ioctl.h"
  37. #include "print-tree.h"
  38. static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
  39. struct page **prepared_pages,
  40. const char __user * buf)
  41. {
  42. long page_fault = 0;
  43. int i;
  44. int offset = pos & (PAGE_CACHE_SIZE - 1);
  45. for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
  46. size_t count = min_t(size_t,
  47. PAGE_CACHE_SIZE - offset, write_bytes);
  48. struct page *page = prepared_pages[i];
  49. fault_in_pages_readable(buf, count);
  50. /* Copy data from userspace to the current page */
  51. kmap(page);
  52. page_fault = __copy_from_user(page_address(page) + offset,
  53. buf, count);
  54. /* Flush processor's dcache for this page */
  55. flush_dcache_page(page);
  56. kunmap(page);
  57. buf += count;
  58. write_bytes -= count;
  59. if (page_fault)
  60. break;
  61. }
  62. return page_fault ? -EFAULT : 0;
  63. }
  64. static void btrfs_drop_pages(struct page **pages, size_t num_pages)
  65. {
  66. size_t i;
  67. for (i = 0; i < num_pages; i++) {
  68. if (!pages[i])
  69. break;
  70. unlock_page(pages[i]);
  71. mark_page_accessed(pages[i]);
  72. page_cache_release(pages[i]);
  73. }
  74. }
/*
 * Store file data for [offset, offset + size) inline in the btree,
 * copying the bytes out of @pages (the first page is read starting at
 * @page_offset, later pages from offset 0).
 *
 * If an existing inline extent item for this inode reaches into the
 * range it is extended in place (zero-filling any gap); otherwise a
 * new inline extent item is inserted at @offset.
 *
 * Returns 0 on success or a negative errno.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 offset, size_t size,
				struct page **pages, size_t page_offset,
				int num_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct page *page;
	u32 datasize;
	int err = 0;
	int ret;
	int i;
	ssize_t cur_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = offset;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}
	if (ret == 1) {
		/* no exact match: see whether the previous item is an
		 * inline extent for this inode that we can grow */
		struct btrfs_key found_key;

		if (path->slots[0] == 0)
			goto insert;

		path->slots[0]--;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid != inode->i_ino)
			goto insert;

		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto insert;
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			goto insert;
		}
		/* reuse the previous item's key as the insertion base */
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		ret = 0;
	}
	if (ret == 0) {
		u32 found_size;
		u64 found_end;

		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			/* NOTE(review): err = ret sets err to 0 here, so
			 * this error path returns success — confirm */
			err = ret;
			btrfs_print_leaf(root, leaf);
			printk("found wasn't inline offset %Lu inode %lu\n",
			       offset, inode->i_ino);
			goto fail;
		}
		found_size = btrfs_file_extent_inline_len(leaf,
				  btrfs_item_nr(leaf, path->slots[0]));
		found_end = key.offset + found_size;

		if (found_end < offset + size) {
			/* the new bytes extend past the current item:
			 * re-search with room for growth, then extend */
			btrfs_release_path(root, path);
			ret = btrfs_search_slot(trans, root, &key, path,
						offset + size - found_end, 1);
			BUG_ON(ret != 0);
			ret = btrfs_extend_item(trans, root, path,
						offset + size - found_end);
			if (ret) {
				err = ret;
				goto fail;
			}
			leaf = path->nodes[0];
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
		}
		if (found_end < offset) {
			/* zero-fill the gap between the old end of data
			 * and the start of the new write */
			ptr = btrfs_file_extent_inline_start(ei) + found_size;
			memset_extent_buffer(leaf, 0, ptr, offset - found_end);
		}
	} else {
insert:
		/* nothing to extend: insert a fresh inline extent item */
		btrfs_release_path(root, path);
		datasize = offset + size - key.offset;
		datasize = btrfs_file_extent_calc_inline_size(datasize);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			printk("got bad ret %d\n", ret);
			goto fail;
		}
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, ei, trans->transid);
		btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	}
	/* copy the page contents into the item's data area */
	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;

	cur_size = size;
	i = 0;
	while (size > 0) {
		page = pages[i];
		kaddr = kmap_atomic(page, KM_USER0);
		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
		kunmap_atomic(kaddr, KM_USER0);
		/* only the first page is read from a non-zero offset */
		page_offset = 0;
		ptr += cur_size;
		size -= cur_size;
		if (i >= num_pages) {
			printk("i %d num_pages %d\n", i, num_pages);
		}
		i++;
	}
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_free_path(path);
	return err;
}
/*
 * After user data has been copied into the locked @pages, record the
 * write in the btree: writes past the inline-size limits become
 * delalloc ranges (real extents are allocated at writeback time),
 * small writes are stored inline via insert_inline_extent().  Any gap
 * between the old i_size and the start of this write gets an explicit
 * hole extent first.
 *
 * NOTE(review): @trans is ignored on entry — a fresh transaction is
 * started (and ended) inside this function; callers pass NULL.
 *
 * Returns 0 on success or a negative errno.
 */
static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	u64 inline_size;
	loff_t isize = i_size_read(inode);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;

	em->bdev = inode->i_sb->s_bdev;

	/* round the byte range out to sector boundaries */
	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	down_read(&BTRFS_I(inode)->root->snap_sem);
	end_of_last_block = start_pos + num_bytes - 1;

	/* lock the extent range, then the fs, then open a transaction */
	lock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	inode->i_blocks += num_bytes >> 9;
	hint_byte = 0;

	/* debugging aid: an aligned end here is unexpected
	 * (4095 presumably assumes a 4K sector — TODO confirm) */
	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
	}
	set_extent_uptodate(em_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* FIXME...EIEIO, ENOSPC and more */

	/* insert any holes we need to create */
	if (inode->i_size < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->sectorsize - 1;
		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;

		if (last_pos_in_file < start_pos) {
			err = btrfs_drop_extents(trans, root, inode,
						 last_pos_in_file,
						 last_pos_in_file + hole_size,
						 last_pos_in_file,
						 &hint_byte);
			if (err)
				goto failed;

			/* disk_bytenr 0 marks a hole */
			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size);
		}
		if (err)
			goto failed;
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	inline_size = end_pos;
	if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    inline_size > 32768 ||
	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		u64 last_end;

		/* too big for inline data: mark the pages dirty and the
		 * range delalloc so writeback allocates real extents */
		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			set_page_dirty(p);
		}
		last_end = (u64)(pages[num_pages -1]->index) <<
				PAGE_CACHE_SHIFT;
		last_end += PAGE_CACHE_SIZE - 1;
		set_extent_delalloc(em_tree, start_pos, end_of_last_block,
				 GFP_NOFS);
	} else {
		u64 aligned_end;
		/* step one, delete the existing extents in this range */
		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		err = btrfs_drop_extents(trans, root, inode, start_pos,
					 aligned_end, aligned_end, &hint_byte);
		if (err)
			goto failed;
		if (isize > inline_size)
			inline_size = min_t(u64, isize, aligned_end);
		inline_size -= start_pos;
		err = insert_inline_extent(trans, root, inode, start_pos,
					   inline_size, pages, 0, num_pages);
		BUG_ON(err);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
failed:
	/* NOTE(review): this overwrites any earlier err from the body
	 * above with the end_transaction result — confirm intended */
	err = btrfs_end_transaction(trans, root);
out_unlock:
	mutex_unlock(&root->fs_info->fs_mutex);
	unlock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
	free_extent_map(em);
	up_read(&BTRFS_I(inode)->root->snap_sem);
	return err;
}
  316. int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
  317. {
  318. struct extent_map *em;
  319. struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
  320. while(1) {
  321. em = lookup_extent_mapping(em_tree, start, end);
  322. if (!em)
  323. break;
  324. remove_extent_mapping(em_tree, em);
  325. /* once for us */
  326. free_extent_map(em);
  327. /* once for the tree*/
  328. free_extent_map(em);
  329. }
  330. return 0;
  331. }
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;	/* copy of the extent being split */
	int keep;		/* keep (truncate) the current item instead of deleting */
	int slot;
	int bookend;		/* the range ends inside this extent: split it */
	int found_type;
	int found_extent;	/* current item is a regular extent */
	int found_inline;	/* current item is an inline extent */
	int recow;		/* leaf may have changed under us; re-search */
	int ret;

	/* any cached mappings in this range are about to become stale */
	btrfs_drop_extent_cache(inode, start, end - 1);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while(1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			/* no exact match: step back to the item that may
			 * still overlap search_start */
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* walked past the range or off this inode: done */
		if (key.offset >= end || key.objectid != inode->i_ino) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY) {
			goto out;
		}
		if (recow) {
			/* restart the search from this key after a leaf
			 * change */
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				struct btrfs_item *item;
				item = btrfs_item_nr(leaf, slot);
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, item);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		/* advance past this extent; inline ends get rounded up
		 * to a sector boundary */
		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;
		if (end < extent_end && start > key.offset && found_inline) {
			*hint_byte = EXTENT_MAP_INLINE;
		}
		/* the drop range ends inside this extent: the tail must
		 * survive, so take an extra ref now for the bookend */
		if (end < extent_end && end >= key.offset) {
			if (found_extent) {
				u64 disk_bytenr =
				    btrfs_file_extent_disk_bytenr(leaf, extent);
				u64 disk_num_bytes =
				    btrfs_file_extent_disk_num_bytes(leaf,
								      extent);
				read_extent_buffer(leaf, &old,
						   (unsigned long)extent,
						   sizeof(old));
				if (disk_bytenr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
					         disk_bytenr, disk_num_bytes);
					BUG_ON(ret);
				}
			}
			bookend = 1;
			if (found_inline && start <= key.offset &&
			    inline_limit < extent_end)
				keep = 1;
		}
		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode->i_blocks -=
						(old_num - new_num) >> 9;
				}
				btrfs_set_file_extent_num_bytes(leaf, extent,
								new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				btrfs_truncate_item(trans, root, path,
						    new_size, 1);
			}
		}
		/* delete the entire extent */
		if (!keep) {
			u64 disk_bytenr = 0;
			u64 disk_num_bytes = 0;
			u64 extent_num_bytes = 0;

			if (found_extent) {
				disk_bytenr =
				      btrfs_file_extent_disk_bytenr(leaf,
								     extent);
				disk_num_bytes =
				      btrfs_file_extent_disk_num_bytes(leaf,
								       extent);
				extent_num_bytes =
				      btrfs_file_extent_num_bytes(leaf, extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			btrfs_release_path(root, path);
			extent = NULL;
			if (found_extent && disk_bytenr != 0) {
				inode->i_blocks -= extent_num_bytes >> 9;
				ret = btrfs_free_extent(trans, root,
							disk_bytenr,
							disk_num_bytes, 0);
			}

			BUG_ON(ret);
			if (!bookend && search_start >= end) {
				ret = 0;
				goto out;
			}
			if (!bookend)
				continue;
		}
		/* trim the front of a surviving inline extent down to
		 * inline_limit */
		if (bookend && found_inline && start <= key.offset &&
		    inline_limit < extent_end && key.offset <= inline_limit) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - inline_limit);
			btrfs_truncate_item(trans, root, path, new_size, 0);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			leaf = path->nodes[0];
			if (ret) {
				btrfs_print_leaf(root, leaf);
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n", ret , ins.objectid, ins.type, ins.offset, start, end, key.offset, extent_end, keep);
			}
			BUG_ON(ret);
			/* new item starts as a copy of the old extent,
			 * then gets its offset/length moved past @end */
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);
			btrfs_set_file_extent_type(leaf, extent,
						   BTRFS_FILE_EXTENT_REG);

			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (le64_to_cpu(old.disk_bytenr) != 0) {
				inode->i_blocks +=
					btrfs_file_extent_num_bytes(leaf,
								    extent) >> 9;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
  582. /*
  583. * this gets pages into the page cache and locks them down
  584. */
  585. static int prepare_pages(struct btrfs_root *root,
  586. struct file *file,
  587. struct page **pages,
  588. size_t num_pages,
  589. loff_t pos,
  590. unsigned long first_index,
  591. unsigned long last_index,
  592. size_t write_bytes)
  593. {
  594. int i;
  595. unsigned long index = pos >> PAGE_CACHE_SHIFT;
  596. struct inode *inode = file->f_path.dentry->d_inode;
  597. int err = 0;
  598. u64 start_pos;
  599. start_pos = pos & ~((u64)root->sectorsize - 1);
  600. memset(pages, 0, num_pages * sizeof(struct page *));
  601. for (i = 0; i < num_pages; i++) {
  602. pages[i] = grab_cache_page(inode->i_mapping, index + i);
  603. if (!pages[i]) {
  604. err = -ENOMEM;
  605. BUG_ON(1);
  606. }
  607. cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
  608. wait_on_page_writeback(pages[i]);
  609. set_page_extent_mapped(pages[i]);
  610. WARN_ON(!PageLocked(pages[i]));
  611. }
  612. return 0;
  613. }
  614. static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
  615. size_t count, loff_t *ppos)
  616. {
  617. loff_t pos;
  618. loff_t start_pos;
  619. ssize_t num_written = 0;
  620. ssize_t err = 0;
  621. int ret = 0;
  622. struct inode *inode = file->f_path.dentry->d_inode;
  623. struct btrfs_root *root = BTRFS_I(inode)->root;
  624. struct page **pages = NULL;
  625. int nrptrs;
  626. struct page *pinned[2];
  627. unsigned long first_index;
  628. unsigned long last_index;
  629. nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
  630. PAGE_CACHE_SIZE / (sizeof(struct page *)));
  631. pinned[0] = NULL;
  632. pinned[1] = NULL;
  633. if (file->f_flags & O_DIRECT)
  634. return -EINVAL;
  635. pos = *ppos;
  636. start_pos = pos;
  637. vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
  638. current->backing_dev_info = inode->i_mapping->backing_dev_info;
  639. err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
  640. if (err)
  641. goto out;
  642. if (count == 0)
  643. goto out;
  644. err = remove_suid(file->f_path.dentry);
  645. if (err)
  646. goto out;
  647. file_update_time(file);
  648. pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
  649. mutex_lock(&inode->i_mutex);
  650. first_index = pos >> PAGE_CACHE_SHIFT;
  651. last_index = (pos + count) >> PAGE_CACHE_SHIFT;
  652. /*
  653. * there are lots of better ways to do this, but this code
  654. * makes sure the first and last page in the file range are
  655. * up to date and ready for cow
  656. */
  657. if ((pos & (PAGE_CACHE_SIZE - 1))) {
  658. pinned[0] = grab_cache_page(inode->i_mapping, first_index);
  659. if (!PageUptodate(pinned[0])) {
  660. ret = btrfs_readpage(NULL, pinned[0]);
  661. BUG_ON(ret);
  662. wait_on_page_locked(pinned[0]);
  663. } else {
  664. unlock_page(pinned[0]);
  665. }
  666. }
  667. if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
  668. pinned[1] = grab_cache_page(inode->i_mapping, last_index);
  669. if (!PageUptodate(pinned[1])) {
  670. ret = btrfs_readpage(NULL, pinned[1]);
  671. BUG_ON(ret);
  672. wait_on_page_locked(pinned[1]);
  673. } else {
  674. unlock_page(pinned[1]);
  675. }
  676. }
  677. while(count > 0) {
  678. size_t offset = pos & (PAGE_CACHE_SIZE - 1);
  679. size_t write_bytes = min(count, nrptrs *
  680. (size_t)PAGE_CACHE_SIZE -
  681. offset);
  682. size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
  683. PAGE_CACHE_SHIFT;
  684. WARN_ON(num_pages > nrptrs);
  685. memset(pages, 0, sizeof(pages));
  686. ret = prepare_pages(root, file, pages, num_pages,
  687. pos, first_index, last_index,
  688. write_bytes);
  689. if (ret)
  690. goto out;
  691. ret = btrfs_copy_from_user(pos, num_pages,
  692. write_bytes, pages, buf);
  693. if (ret) {
  694. btrfs_drop_pages(pages, num_pages);
  695. goto out;
  696. }
  697. ret = dirty_and_release_pages(NULL, root, file, pages,
  698. num_pages, pos, write_bytes);
  699. btrfs_drop_pages(pages, num_pages);
  700. if (ret)
  701. goto out;
  702. buf += write_bytes;
  703. count -= write_bytes;
  704. pos += write_bytes;
  705. num_written += write_bytes;
  706. balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
  707. if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
  708. btrfs_btree_balance_dirty(root, 1);
  709. cond_resched();
  710. }
  711. mutex_unlock(&inode->i_mutex);
  712. out:
  713. kfree(pages);
  714. if (pinned[0])
  715. page_cache_release(pinned[0]);
  716. if (pinned[1])
  717. page_cache_release(pinned[1]);
  718. *ppos = pos;
  719. if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
  720. err = sync_page_range(inode, inode->i_mapping,
  721. start_pos, num_written);
  722. if (err < 0)
  723. num_written = err;
  724. }
  725. current->backing_dev_info = NULL;
  726. return num_written ? num_written : err;
  727. }
  728. static int btrfs_sync_file(struct file *file,
  729. struct dentry *dentry, int datasync)
  730. {
  731. struct inode *inode = dentry->d_inode;
  732. struct btrfs_root *root = BTRFS_I(inode)->root;
  733. int ret = 0;
  734. struct btrfs_trans_handle *trans;
  735. /*
  736. * check the transaction that last modified this inode
  737. * and see if its already been committed
  738. */
  739. mutex_lock(&root->fs_info->fs_mutex);
  740. if (!BTRFS_I(inode)->last_trans)
  741. goto out;
  742. mutex_lock(&root->fs_info->trans_mutex);
  743. if (BTRFS_I(inode)->last_trans <=
  744. root->fs_info->last_trans_committed) {
  745. BTRFS_I(inode)->last_trans = 0;
  746. mutex_unlock(&root->fs_info->trans_mutex);
  747. goto out;
  748. }
  749. mutex_unlock(&root->fs_info->trans_mutex);
  750. /*
  751. * ok we haven't committed the transaction yet, lets do a commit
  752. */
  753. trans = btrfs_start_transaction(root, 1);
  754. if (!trans) {
  755. ret = -ENOMEM;
  756. goto out;
  757. }
  758. ret = btrfs_commit_transaction(trans, root);
  759. out:
  760. mutex_unlock(&root->fs_info->fs_mutex);
  761. return ret > 0 ? EIO : ret;
  762. }
/*
 * mmap operations: faults are serviced by the generic filemap paths;
 * page_mkwrite hooks write faults so btrfs can prepare pages for cow.
 * The pre-2.6.23 branch uses the older nopage/populate interface.
 */
static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
#else
	.fault		= filemap_fault,
#endif
	.page_mkwrite	= btrfs_page_mkwrite,
};
  772. static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
  773. {
  774. vma->vm_ops = &btrfs_file_vm_ops;
  775. file_accessed(filp);
  776. return 0;
  777. }
/*
 * file_operations for regular btrfs files: reads go through the
 * generic paths, writes through btrfs_file_write, fsync through
 * btrfs_sync_file.
 */
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};