file.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"

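/*
 * copy write_bytes of data from the user buffer into the locked,
 * already prepared pages, kmapping each page in turn.  Returns
 * -EFAULT if any of the copies fault.
 */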
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
				struct page **prepared_pages,
				const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];

		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

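/*
 * unlock, mark accessed and release the pages pinned for a write.
 * A NULL entry ends the array early.
 */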
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

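/*
 * write the data for a small range directly into the btree as an inline
 * file extent item, extending an existing inline item when one already
 * ends inside the range.
 */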
static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 offset, size_t size,
				struct page **pages, size_t page_offset,
				int num_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct page *page;
	u32 datasize;
	int err = 0;
	int ret;
	int i;
	ssize_t cur_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = offset;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}
	if (ret == 1) {
		struct btrfs_key found_key;

		if (path->slots[0] == 0)
			goto insert;

		path->slots[0]--;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid != inode->i_ino)
			goto insert;

		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto insert;
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			goto insert;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		ret = 0;
	}
	if (ret == 0) {
		u32 found_size;
		u64 found_end;

		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			err = ret;
			btrfs_print_leaf(root, leaf);
			printk("found wasn't inline offset %Lu inode %lu\n",
			       offset, inode->i_ino);
			goto fail;
		}
		found_size = btrfs_file_extent_inline_len(leaf,
					  btrfs_item_nr(leaf, path->slots[0]));
		found_end = key.offset + found_size;

		if (found_end < offset + size) {
			btrfs_release_path(root, path);
			ret = btrfs_search_slot(trans, root, &key, path,
						offset + size - found_end, 1);
			BUG_ON(ret != 0);

			ret = btrfs_extend_item(trans, root, path,
						offset + size - found_end);
			if (ret) {
				err = ret;
				goto fail;
			}
			leaf = path->nodes[0];
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
		}

		if (found_end < offset) {
			ptr = btrfs_file_extent_inline_start(ei) + found_size;
			memset_extent_buffer(leaf, 0, ptr, offset - found_end);
		}
	} else {
insert:
		btrfs_release_path(root, path);
		datasize = offset + size - key.offset;
		datasize = btrfs_file_extent_calc_inline_size(datasize);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			printk("got bad ret %d\n", ret);
			goto fail;
		}
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, ei, trans->transid);
		btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	}
	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;

	cur_size = size;
	i = 0;
	while (size > 0) {
		page = pages[i];
		kaddr = kmap_atomic(page, KM_USER0);
		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
		kunmap_atomic(kaddr, KM_USER0);
		page_offset = 0;
		ptr += cur_size;
		size -= cur_size;
		if (i >= num_pages) {
			printk("i %d num_pages %d\n", i, num_pages);
		}
		i++;
	}
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_free_path(path);
	return err;
}

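/*
 * called after the data has been copied into the pages: inserts any holes
 * needed for writes past i_size, then either marks the range delalloc or
 * stores it as an inline extent, and updates i_size if the file grew.
 */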
static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	u64 inline_size;
	loff_t isize = i_size_read(inode);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;

	em->bdev = inode->i_sb->s_bdev;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	inode->i_blocks += num_bytes >> 9;
	hint_byte = 0;

	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos,
		       write_bytes, end_of_last_block);
	}
	set_extent_uptodate(em_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* FIXME...EIEIO, ENOSPC and more */

	/* insert any holes we need to create */
	if (inode->i_size < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->sectorsize - 1;

		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;

		if (last_pos_in_file < start_pos) {
			err = btrfs_drop_extents(trans, root, inode,
						 last_pos_in_file,
						 last_pos_in_file + hole_size,
						 last_pos_in_file,
						 &hint_byte);
			if (err)
				goto failed;

			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size);
		}
		if (err)
			goto failed;
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	inline_size = end_pos;
	if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    inline_size > 32768 ||
	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		u64 last_end;
		u64 existing_delalloc = 0;

		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			set_page_dirty(p);
		}
		last_end = (u64)(pages[num_pages - 1]->index) <<
			PAGE_CACHE_SHIFT;
		last_end += PAGE_CACHE_SIZE - 1;
		if (start_pos < isize) {
			u64 delalloc_start = start_pos;
			existing_delalloc = count_range_bits(em_tree,
					     &delalloc_start,
					     end_of_last_block, (u64)-1,
					     EXTENT_DELALLOC);
		}
		set_extent_delalloc(em_tree, start_pos, end_of_last_block,
				    GFP_NOFS);
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
					start_pos) - existing_delalloc;
		spin_unlock(&root->fs_info->delalloc_lock);
	} else {
		u64 aligned_end;
		/* step one, delete the existing extents in this range */
		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		err = btrfs_drop_extents(trans, root, inode, start_pos,
					 aligned_end, aligned_end, &hint_byte);
		if (err)
			goto failed;
		if (isize > inline_size)
			inline_size = min_t(u64, isize, aligned_end);
		inline_size -= start_pos;
		err = insert_inline_extent(trans, root, inode, start_pos,
					   inline_size, pages, 0, num_pages);
		BUG_ON(err);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
failed:
	err = btrfs_end_transaction(trans, root);
out_unlock:
	mutex_unlock(&root->fs_info->fs_mutex);
	unlock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
	free_extent_map(em);
	return err;
}

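/*
 * remove any cached extent mappings that overlap [start, end] from this
 * inode's extent map tree.
 */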
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;

	while (1) {
		em = lookup_extent_mapping(em_tree, start, end);
		if (!em)
			break;
		remove_extent_mapping(em_tree, em);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte offset
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.offset >= end || key.objectid != inode->i_ino) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY) {
			goto out;
		}
		if (recow) {
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				struct btrfs_item *item;
				item = btrfs_item_nr(leaf, slot);
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, item);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;
		if (end <= extent_end && start >= key.offset && found_inline) {
			*hint_byte = EXTENT_MAP_INLINE;
			continue;
		}
		if (end < extent_end && end >= key.offset) {
			if (found_extent) {
				u64 disk_bytenr =
				    btrfs_file_extent_disk_bytenr(leaf, extent);
				u64 disk_num_bytes =
				    btrfs_file_extent_disk_num_bytes(leaf,
								     extent);
				read_extent_buffer(leaf, &old,
						   (unsigned long)extent,
						   sizeof(old));
				if (disk_bytenr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
						 disk_bytenr, disk_num_bytes,
						 root->root_key.objectid,
						 trans->transid,
						 key.objectid, end);
					BUG_ON(ret);
				}
			}
			bookend = 1;
			if (found_inline && start <= key.offset &&
			    inline_limit < extent_end)
				keep = 1;
		}
		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode->i_blocks -=
						(old_num - new_num) >> 9;
				}
				btrfs_set_file_extent_num_bytes(leaf, extent,
								new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				btrfs_truncate_item(trans, root, path,
						    new_size, 1);
			}
		}
		/* delete the entire extent */
		if (!keep) {
			u64 disk_bytenr = 0;
			u64 disk_num_bytes = 0;
			u64 extent_num_bytes = 0;
			u64 root_gen;
			u64 root_owner;

			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			if (found_extent) {
				disk_bytenr =
				      btrfs_file_extent_disk_bytenr(leaf,
								    extent);
				disk_num_bytes =
				      btrfs_file_extent_disk_num_bytes(leaf,
								       extent);
				extent_num_bytes =
				      btrfs_file_extent_num_bytes(leaf, extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			btrfs_release_path(root, path);
			extent = NULL;
			if (found_extent && disk_bytenr != 0) {
				inode->i_blocks -= extent_num_bytes >> 9;
				ret = btrfs_free_extent(trans, root,
							disk_bytenr,
							disk_num_bytes,
							root_owner,
							root_gen, inode->i_ino,
							key.offset, 0);
			}

			BUG_ON(ret);
			if (!bookend && search_start >= end) {
				ret = 0;
				goto out;
			}
			if (!bookend)
				continue;
		}
		if (bookend && found_inline && start <= key.offset &&
		    inline_limit < extent_end && key.offset <= inline_limit) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - inline_limit);
			btrfs_truncate_item(trans, root, path, new_size, 0);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			leaf = path->nodes[0];
			if (ret) {
				btrfs_print_leaf(root, leaf);
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n",
				       ret, ins.objectid, ins.type, ins.offset,
				       start, end, key.offset, extent_end, keep);
			}
			BUG_ON(ret);
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);
			btrfs_set_file_extent_type(leaf, extent,
						   BTRFS_FILE_EXTENT_REG);

			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (le64_to_cpu(old.disk_bytenr) != 0) {
				inode->i_blocks +=
				      btrfs_file_extent_num_bytes(leaf,
								  extent) >> 9;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * this gets pages into the page cache and locks them down
 */
static int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);

	memset(pages, 0, num_pages * sizeof(struct page *));

	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
		ClearPageDirty(pages[i]);
#else
		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
#endif
		wait_on_page_writeback(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

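/*
 * write(2) entry point: work through the user buffer a few pages at a
 * time, reserving space, preparing and locking the pages, copying the
 * data in and handing the result to dirty_and_release_pages.
 */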
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;
	err = remove_suid(fdentry(file));
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	down_read(&BTRFS_I(inode)->root->snap_sem);
	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, num_pages * sizeof(struct page *));

		mutex_lock(&root->fs_info->fs_mutex);
		ret = btrfs_check_free_space(root, write_bytes, 0);
		mutex_unlock(&root->fs_info->fs_mutex);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
		if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	up_read(&BTRFS_I(inode)->root->snap_sem);
out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		err = sync_page_range(inode, inode->i_mapping,
				      start_pos, num_written);
		if (err < 0)
			num_written = err;
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

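/*
 * fsync entry point: if the transaction that last changed this inode has
 * already been committed there is nothing to do, otherwise commit it now.
 */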
static int btrfs_sync_file(struct file *file,
			   struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	mutex_lock(&root->fs_info->fs_mutex);
	if (!BTRFS_I(inode)->last_trans)
		goto out;
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok, we haven't committed the transaction yet, let's do a commit
	 */
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_commit_transaction(trans, root);
out:
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret > 0 ? -EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
#else
	.fault		= filemap_fault,
#endif
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	.sendfile	= generic_file_sendfile,
#endif
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};