/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;
		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
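/*
 * For example (illustrative numbers only): dropping [4k, 8k) from a
 * cached extent map that covers [0, 16k) removes that map and inserts
 * two new ones, [0, 4k) and [8k, 16k), using the split/split2 maps
 * preallocated at the top of the loop below.
 */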
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

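/*
 * btrfs_check_file was a consistency checker that walked the file
 * extent items of an inode looking for gaps and out-of-order offsets.
 * The body is compiled out with #if 0, so it is currently a no-op that
 * always returns 0.
 */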
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	return 0;
#if 0
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;
	u64 extent_end = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk(KERN_ERR "inode %lu found offset %llu "
			       "expected %llu\n", inode->i_ino,
			       (unsigned long long)found_key.offset,
			       (unsigned long long)last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
		       inode->i_ino, (unsigned long long)last_offset,
		       (unsigned long long)inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
#endif
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
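/*
 * For example (illustrative numbers only): dropping [4k, 12k) from a
 * single on-disk extent covering [0, 16k) truncates the existing item
 * to [0, 4k) and inserts a "bookend" item for [12k, 16k) that points
 * into the same disk extent at a larger file_extent_offset.
 */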
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 locked_end,
		       u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	u64 leaf_start;
	u64 ram_bytes = 0;
	u64 orig_parent = 0;
	u64 disk_bytenr = 0;
	u64 orig_locked_end = locked_end;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	u64 root_gen;
	u64 root_owner;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		leaf_start = 0;
		root_gen = 0;
		root_owner = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes),
					   orig_parent, root->root_key.objectid,
					   trans->transid, inode->i_ino);
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			path->leave_spinning = 1;
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but its the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_unlock_up_safe(path, 1);
			btrfs_mark_buffer_dirty(path->nodes[0]);
			btrfs_set_lock_blocking(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
						disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						orig_parent,
						leaf->start,
						root->root_key.objectid,
						trans->transid, ins.objectid);

				BUG_ON(ret);
			}
			path->leave_spinning = 0;
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > orig_locked_end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
			      locked_end - 1, GFP_NOFS);
	}
	btrfs_check_file(root, inode);
	return ret;
}

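/*
 * helper for btrfs_mark_extent_written: returns 1 if the extent item in
 * @leaf at @slot is a plain (uncompressed, unencrypted) regular extent
 * backed by @bytenr whose boundaries agree with the caller's *start and
 * *end (zero means "don't care"); on success the actual boundaries are
 * copied back into *start and *end.
 */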
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	u64 orig_parent;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto release;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	orig_parent = leaf->start;
	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   orig_parent, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	if (orig_parent != leaf->start) {
		ret = btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
					      orig_parent, leaf->start,
					      root->root_key.objectid,
					      trans->transid, inode->i_ino);
		BUG_ON(ret);
	}
done:
	btrfs_mark_buffer_dirty(leaf);

release:
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

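/*
 * the main write loop: reserve data space, prepare and lock up to
 * nrptrs pages at a time, copy the user buffer in with
 * btrfs_copy_from_user and hand the pages to dirty_and_release_pages.
 * O_SYNC/O_DIRECT writers additionally write the range back and, when
 * required, sync the tree log or commit the transaction.
 */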
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_ALL);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all the ordered extent
 * updates in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->log_batch++;
	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_sync_log(trans, root);
		if (ret == 0)
			ret = btrfs_end_transaction(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

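/*
 * mmap reads are served by the generic filemap_fault; writes through a
 * mapping are trapped by btrfs_page_mkwrite so the filesystem can
 * account for them before the pages are made writable.
 */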
static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

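/*
 * reads use the generic paths; writes go through btrfs_file_write and
 * fsync through btrfs_sync_file, so the tree log can avoid full
 * transaction commits where possible.
 */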
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};