/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty.  Clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
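
/*
 * debugging sanity check on the file extent items of an inode.  The
 * real checks are compiled out with "#if 0" below, so this is currently
 * a no-op that always returns 0.
 */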
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	return 0;
#if 0
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;
	u64 extent_end = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;
		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk(KERN_ERR "inode %lu found offset %llu "
			       "expected %llu\n", inode->i_ino,
			       (unsigned long long)found_key.offset,
			       (unsigned long long)last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
		       inode->i_ino, (unsigned long long)last_offset,
		       (unsigned long long)inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
#endif
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 locked_end = end;
	u64 search_start = start;
	u64 leaf_start;
	u64 ram_bytes = 0;
	u64 orig_parent = 0;
	u64 disk_bytenr = 0;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	u64 root_gen;
	u64 root_owner;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		leaf_start = 0;
		root_gen = 0;
		root_owner = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes),
					   orig_parent, root->root_key.objectid,
					   trans->transid, inode->i_ino);
				BUG_ON(ret);
			}
		}
		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_mark_buffer_dirty(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
					    disk_bytenr, orig_parent,
					    leaf->start,
					    root->root_key.objectid,
					    trans->transid, ins.objectid);

				BUG_ON(ret);
			}
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_check_file(root, inode);
	return ret;
}
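
/*
 * helper for btrfs_mark_extent_written: check whether the file extent item
 * at 'slot' is a plain (uncompressed, unencrypted) regular extent backed by
 * 'bytenr' whose range agrees with any non-zero *start/*end passed in.  On
 * success *start and *end are set to the item's offset and end and 1 is
 * returned, otherwise 0.
 */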
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	u64 orig_parent;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto done;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	orig_parent = leaf->start;
	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   orig_parent, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	if (orig_parent != leaf->start) {
		ret = btrfs_update_extent_ref(trans, root, bytenr,
					      orig_parent, leaf->start,
					      root->root_key.objectid,
					      trans->transid, inode->i_ino);
		BUG_ON(ret);
	}
done:
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down.  It also
 * properly waits for data=ordered extents to finish before allowing
 * the pages to be modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
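
/*
 * the main write path: for each batch of pages we check free space,
 * prepare and lock the pages, copy the user data in with
 * btrfs_copy_from_user and then hand the batch to dirty_and_release_pages.
 * O_SYNC, O_DIRECT and sync-mounted writes are pushed out (and possibly
 * logged) before we return.
 */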
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_free_space(root, write_bytes, 0);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_NONE);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				btrfs_sync_log(trans, root);
				btrfs_end_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
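
/*
 * called when the file is released; if userspace left a transaction
 * running via the btrfs transaction ioctls, end it here.
 */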
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all the ordered extent
 * updates in the metadata btree are finished and up to date for
 * copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->fs_info->tree_log_batch++;
	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->fs_info->tree_log_batch++;

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		btrfs_sync_log(trans, root);
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&file->f_dentry->d_inode->i_mutex);
out:
	return ret > 0 ? EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};
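
/*
 * mmap simply points the vma at btrfs_file_vm_ops above, so read faults go
 * through filemap_fault and writable faults go through btrfs_page_mkwrite.
 */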
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};