file.c

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty.  Clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

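/*
 * Debugging helper: walks the file extent items for @inode and warns if
 * their offsets are not monotonically increasing.  The body is currently
 * compiled out (#if 0), so the function always returns 0.
 */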
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	return 0;
#if 0
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;
	u64 extent_end = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk(KERN_ERR "inode %lu found offset %llu "
			       "expected %llu\n", inode->i_ino,
			       (unsigned long long)found_key.offset,
			       (unsigned long long)last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
		       inode->i_ino, (unsigned long long)last_offset,
		       (unsigned long long)inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
#endif
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 locked_end = end;
	u64 search_start = start;
	u64 leaf_start;
	u64 ram_bytes = 0;
	u64 orig_parent = 0;
	u64 disk_bytenr = 0;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	u64 root_gen;
	u64 root_owner;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		leaf_start = 0;
		root_gen = 0;
		root_owner = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes),
					   orig_parent, root->root_key.objectid,
					   trans->transid, inode->i_ino);
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_mark_buffer_dirty(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
						disk_bytenr, orig_parent,
						leaf->start,
						root->root_key.objectid,
						trans->transid, ins.objectid);
				BUG_ON(ret);
			}
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_check_file(root, inode);
	return ret;
}

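/*
 * Helper for btrfs_mark_extent_written(): returns 1 if the file extent item
 * at @slot belongs to @objectid and is an uncompressed, unencrypted regular
 * extent backed by @bytenr whose boundaries are compatible with *start/*end.
 * On success *start and *end are set to the extent's offset and end so the
 * caller can merge the newly written range into it.
 */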
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	u64 orig_parent;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto done;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	orig_parent = leaf->start;
	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   orig_parent, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	if (orig_parent != leaf->start) {
		ret = btrfs_update_extent_ref(trans, root, bytenr,
					      orig_parent, leaf->start,
					      root->root_key.objectid,
					      trans->transid, inode->i_ino);
		BUG_ON(ret);
	}
done:
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

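/*
 * buffered write path for btrfs files (the .write handler in
 * btrfs_file_operations below): reserve data space, prepare and lock the
 * pages covering the range, copy the user buffer in, then mark the pages
 * dirty/delalloc via dirty_and_release_pages.  For O_SYNC/O_DIRECT writes
 * the dirty range is written out and waited on before returning; O_SYNC
 * writes additionally log the inode or fall back to a transaction commit.
 */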
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_NONE);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				btrfs_sync_log(trans, root);
				btrfs_end_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

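/*
 * release callback for btrfs files: if an ioctl-started transaction is
 * still attached to this file (filp->private_data), end it before the
 * file goes away.
 */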
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all the ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->log_batch++;
	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * ok, we haven't committed the transaction yet, let's do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		btrfs_sync_log(trans, root);
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

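/*
 * wire up mmap for btrfs files: read faults are filled by filemap_fault,
 * and write faults go through btrfs_page_mkwrite (see btrfs_file_vm_ops
 * above).
 */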
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};