/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];

		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos,
		       write_bytes, end_of_last_block);
	}
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->orig_start = em->orig_start;
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
			}
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

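/*
 * sanity-check helper: walks the file extent items for 'inode' and
 * warns if their offsets are out of order.  The body is compiled out
 * with "#if 0", so right now this always reports success.
 */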
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	return 0;
#if 0
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;
	u64 extent_end = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk("inode %lu found offset %Lu expected %Lu\n",
			       inode->i_ino, found_key.offset, last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
		       last_offset, inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
#endif
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 locked_end = end;
	u64 search_start = start;
	u64 leaf_start;
	u64 ram_bytes = 0;
	u64 orig_parent = 0;
	u64 disk_bytenr = 0;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	u64 root_gen;
	u64 root_owner;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		leaf_start = 0;
		root_gen = 0;
		root_owner = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes),
					   orig_parent, root->root_key.objectid,
					   trans->transid, inode->i_ino);
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				if (!compression && !encryption) {
					btrfs_set_file_extent_ram_bytes(leaf,
							extent, new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_mark_buffer_dirty(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
						disk_bytenr, orig_parent,
						leaf->start,
						root->root_key.objectid,
						trans->transid, ins.objectid);
				BUG_ON(ret);
			}
			btrfs_release_path(root, path);
			if (disk_bytenr != 0) {
				inode_add_bytes(inode, extent_end - end);
			}
		}

		if (found_extent && !keep) {
			u64 disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_check_file(root, inode);
	return ret;
}

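/*
 * helper for btrfs_mark_extent_written: returns 1 if the file extent
 * item at 'slot' is a plain (uncompressed, unencrypted) regular extent
 * backed by 'bytenr' whose boundaries agree with any nonzero *start or
 * *end passed in.  On success *start and *end are filled in.
 */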
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto done;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					      locked_end, extent_end - 1,
					      GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					    locked_end, extent_end - 1,
					    GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   leaf->start, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	BUG_ON(ret);
done:
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

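/*
 * the buffered write path: loop over the user buffer in chunks of at
 * most nrptrs pages, preparing and locking the pages, copying the data
 * in and marking the pages dirty/delalloc.  For O_SYNC, O_DIRECT and
 * sync mounts we also start (and later wait on) ordered writeback.
 */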
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * if this is a nodatasum mount, force summing off for the inode
	 * all the time.  That way a later mount with summing on won't
	 * get confused
	 */
	if (btrfs_test_opt(root, NODATASUM))
		btrfs_set_flag(inode, NODATASUM);

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, nrptrs * sizeof(struct page *));

		ret = btrfs_check_free_space(root, write_bytes, 0);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_NONE);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				btrfs_sync_log(trans, root);
				btrfs_end_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

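/*
 * called when the last reference to the file is dropped; if an
 * ioctl-started transaction is still attached to the file, end it here.
 */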
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->fs_info->tree_log_batch++;
	filemap_fdatawait(inode->i_mapping);
	root->fs_info->tree_log_batch++;

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
	if (ret < 0) {
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		btrfs_sync_log(trans, root);
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&file->f_dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

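/*
 * mmap support: reads go through the generic fault path, while writable
 * faults go through btrfs_page_mkwrite so btrfs can do its COW and
 * delalloc bookkeeping before a page is dirtied through the mapping.
 */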
static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

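/*
 * the btrfs file operations: reads use the generic paths, writes go
 * through btrfs_file_write above, and fsync uses the tree-log aware
 * btrfs_sync_file.
 */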
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};