/*
 * Copyright (c) 2012 Taobao.
 * Written by Tao Ma <boyu.mt@taobao.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "truncate.h"

#define EXT4_XATTR_SYSTEM_DATA		"data"
#define EXT4_MIN_INLINE_DATA_SIZE	((sizeof(__le32) * EXT4_N_BLOCKS))
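
/*
 * Layout note (illustrative): inline data is split across two areas of the
 * on-disk inode.  The first EXT4_MIN_INLINE_DATA_SIZE bytes (sizeof(__le32) *
 * EXT4_N_BLOCKS, i.e. 60 bytes with the standard 15-slot i_block array) live
 * directly in i_block, and anything beyond that is stored as the value of the
 * "system.data" xattr in the in-inode xattr area.  The read/write helpers
 * below copy from and to both pieces accordingly.
 */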

int ext4_get_inline_size(struct inode *inode)
{
        if (EXT4_I(inode)->i_inline_off)
                return EXT4_I(inode)->i_inline_size;

        return 0;
}

static int get_max_inline_xattr_value_size(struct inode *inode,
                                           struct ext4_iloc *iloc)
{
        struct ext4_xattr_ibody_header *header;
        struct ext4_xattr_entry *entry;
        struct ext4_inode *raw_inode;
        int free, min_offs;

        min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
                        EXT4_GOOD_OLD_INODE_SIZE -
                        EXT4_I(inode)->i_extra_isize -
                        sizeof(struct ext4_xattr_ibody_header);

        /*
         * We need to subtract another sizeof(__u32) since an in-inode xattr
         * needs an empty 4 bytes to indicate the gap between the xattr entry
         * and the name/value pair.
         */
        if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
                return EXT4_XATTR_SIZE(min_offs -
                        EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA)) -
                        EXT4_XATTR_ROUND - sizeof(__u32));

        raw_inode = ext4_raw_inode(iloc);
        header = IHDR(inode, raw_inode);
        entry = IFIRST(header);

        /* Compute min_offs. */
        for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
                if (!entry->e_value_block && entry->e_value_size) {
                        size_t offs = le16_to_cpu(entry->e_value_offs);
                        if (offs < min_offs)
                                min_offs = offs;
                }
        }
        free = min_offs -
                ((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);

        if (EXT4_I(inode)->i_inline_off) {
                entry = (struct ext4_xattr_entry *)
                        ((void *)raw_inode + EXT4_I(inode)->i_inline_off);

                free += le32_to_cpu(entry->e_value_size);
                goto out;
        }

        free -= EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA));

        if (free > EXT4_XATTR_ROUND)
                free = EXT4_XATTR_SIZE(free - EXT4_XATTR_ROUND);
        else
                free = 0;

out:
        return free;
}
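
/*
 * Worked example (illustrative): on a 256-byte inode with i_extra_isize == 32
 * and no in-inode xattrs yet, min_offs starts at 256 - 128 - 32 - 4 == 92.
 * Assuming the usual xattr constants (a 16-byte struct ext4_xattr_entry,
 * EXT4_XATTR_ROUND == 3, so EXT4_XATTR_LEN(strlen("data")) == 20), the early
 * return above yields EXT4_XATTR_SIZE(92 - 20 - 3 - 4) == 68 bytes of
 * "system.data" value space, and ext4_get_max_inline_size() below would
 * report 68 + EXT4_MIN_INLINE_DATA_SIZE == 128 bytes of inline capacity.
 */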

/*
 * Get the maximum size we can now store in an inode.
 * If we can't find space for the "system.data" xattr entry, don't offer the
 * i_block space either, since without that entry there is no way to mark the
 * inode as containing inline data.
 */
int ext4_get_max_inline_size(struct inode *inode)
{
        int error, max_inline_size;
        struct ext4_iloc iloc;

        if (EXT4_I(inode)->i_extra_isize == 0)
                return 0;

        error = ext4_get_inode_loc(inode, &iloc);
        if (error) {
                ext4_error_inode(inode, __func__, __LINE__, 0,
                                 "can't get inode location %lu",
                                 inode->i_ino);
                return 0;
        }

        down_read(&EXT4_I(inode)->xattr_sem);
        max_inline_size = get_max_inline_xattr_value_size(inode, &iloc);
        up_read(&EXT4_I(inode)->xattr_sem);

        brelse(iloc.bh);

        if (!max_inline_size)
                return 0;

        return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
}

int ext4_has_inline_data(struct inode *inode)
{
        return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
               EXT4_I(inode)->i_inline_off;
}

/*
 * This function does not take xattr_sem, which is OK because it is
 * currently only used in a code path coming from ext4_iget, before
 * the new inode has been unlocked.
 */
int ext4_find_inline_data_nolock(struct inode *inode)
{
        struct ext4_xattr_ibody_find is = {
                .s = { .not_found = -ENODATA, },
        };
        struct ext4_xattr_info i = {
                .name_index = EXT4_XATTR_INDEX_SYSTEM,
                .name = EXT4_XATTR_SYSTEM_DATA,
        };
        int error;

        if (EXT4_I(inode)->i_extra_isize == 0)
                return 0;

        error = ext4_get_inode_loc(inode, &is.iloc);
        if (error)
                return error;

        error = ext4_xattr_ibody_find(inode, &i, &is);
        if (error)
                goto out;

        if (!is.s.not_found) {
                EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
                                        (void *)ext4_raw_inode(&is.iloc));
                EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
                                le32_to_cpu(is.s.here->e_value_size);
                ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        }
out:
        brelse(is.iloc.bh);
        return error;
}
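
/*
 * Note (summarizing the bookkeeping above): i_inline_off caches the byte
 * offset of the "system.data" xattr entry within the raw on-disk inode, and
 * i_inline_size is the inline capacity currently reserved, i.e. the 60 bytes
 * of i_block plus the current xattr value length.  These fields are what
 * ext4_has_inline_data() and the read/write helpers below rely on.
 */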

static int ext4_read_inline_data(struct inode *inode, void *buffer,
                                 unsigned int len,
                                 struct ext4_iloc *iloc)
{
        struct ext4_xattr_entry *entry;
        struct ext4_xattr_ibody_header *header;
        int cp_len = 0;
        struct ext4_inode *raw_inode;

        if (!len)
                return 0;

        BUG_ON(len > EXT4_I(inode)->i_inline_size);

        cp_len = len < EXT4_MIN_INLINE_DATA_SIZE ?
                        len : EXT4_MIN_INLINE_DATA_SIZE;

        raw_inode = ext4_raw_inode(iloc);
        memcpy(buffer, (void *)(raw_inode->i_block), cp_len);

        len -= cp_len;
        buffer += cp_len;

        if (!len)
                goto out;

        header = IHDR(inode, raw_inode);
        entry = (struct ext4_xattr_entry *)((void *)raw_inode +
                                            EXT4_I(inode)->i_inline_off);
        len = min_t(unsigned int, len,
                    (unsigned int)le32_to_cpu(entry->e_value_size));
        memcpy(buffer,
               (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs), len);
        cp_len += len;

out:
        return cp_len;
}
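
/*
 * Example (illustrative): a read of len == 100 copies the first 60 bytes
 * from raw_inode->i_block and then up to 40 more bytes from the start of the
 * "system.data" value (capped at e_value_size), returning the number of
 * bytes actually copied.
 */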

/*
 * Write the buffer to the inline inode.
 * If 'create' is set, we don't need to do the extra copy in the xattr
 * value since it is already handled by ext4_xattr_ibody_set.
 * That saves us one memcpy.
 */
void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
                            void *buffer, loff_t pos, unsigned int len)
{
        struct ext4_xattr_entry *entry;
        struct ext4_xattr_ibody_header *header;
        struct ext4_inode *raw_inode;
        int cp_len = 0;

        BUG_ON(!EXT4_I(inode)->i_inline_off);
        BUG_ON(pos + len > EXT4_I(inode)->i_inline_size);

        raw_inode = ext4_raw_inode(iloc);
        buffer += pos;

        if (pos < EXT4_MIN_INLINE_DATA_SIZE) {
                cp_len = pos + len > EXT4_MIN_INLINE_DATA_SIZE ?
                         EXT4_MIN_INLINE_DATA_SIZE - pos : len;
                memcpy((void *)raw_inode->i_block + pos, buffer, cp_len);

                len -= cp_len;
                buffer += cp_len;
                pos += cp_len;
        }

        if (!len)
                return;

        pos -= EXT4_MIN_INLINE_DATA_SIZE;
        header = IHDR(inode, raw_inode);
        entry = (struct ext4_xattr_entry *)((void *)raw_inode +
                                            EXT4_I(inode)->i_inline_off);

        memcpy((void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs) + pos,
               buffer, len);
}
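
/*
 * Example (illustrative): a write at pos == 50 with len == 20 puts bytes
 * 50..59 into i_block and the remaining 10 bytes at offset 0 of the
 * "system.data" value (pos is rebased by EXT4_MIN_INLINE_DATA_SIZE before
 * the second memcpy).
 */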

static int ext4_create_inline_data(handle_t *handle,
                                   struct inode *inode, unsigned len)
{
        int error;
        void *value = NULL;
        struct ext4_xattr_ibody_find is = {
                .s = { .not_found = -ENODATA, },
        };
        struct ext4_xattr_info i = {
                .name_index = EXT4_XATTR_INDEX_SYSTEM,
                .name = EXT4_XATTR_SYSTEM_DATA,
        };

        error = ext4_get_inode_loc(inode, &is.iloc);
        if (error)
                return error;

        error = ext4_journal_get_write_access(handle, is.iloc.bh);
        if (error)
                goto out;

        if (len > EXT4_MIN_INLINE_DATA_SIZE) {
                value = (void *)empty_zero_page;
                len -= EXT4_MIN_INLINE_DATA_SIZE;
        } else {
                value = "";
                len = 0;
        }

        /* Insert the xattr entry. */
        i.value = value;
        i.value_len = len;

        error = ext4_xattr_ibody_find(inode, &i, &is);
        if (error)
                goto out;

        BUG_ON(!is.s.not_found);

        error = ext4_xattr_ibody_set(handle, inode, &i, &is);
        if (error) {
                if (error == -ENOSPC)
                        ext4_clear_inode_state(inode,
                                               EXT4_STATE_MAY_INLINE_DATA);
                goto out;
        }

        memset((void *)ext4_raw_inode(&is.iloc)->i_block,
                0, EXT4_MIN_INLINE_DATA_SIZE);

        EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
                                      (void *)ext4_raw_inode(&is.iloc));
        EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
        ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
        ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
        get_bh(is.iloc.bh);
        error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);

out:
        brelse(is.iloc.bh);
        return error;
}
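
/*
 * Note (illustrative): this only reserves the inline space.  The xattr value
 * is seeded with zeroes (empty_zero_page, or "" when everything fits in
 * i_block) and i_block itself is cleared; the actual file contents are
 * copied in later by ext4_write_inline_data() once the write path has the
 * data in hand.
 */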

static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
                                   unsigned int len)
{
        int error;
        void *value = NULL;
        struct ext4_xattr_ibody_find is = {
                .s = { .not_found = -ENODATA, },
        };
        struct ext4_xattr_info i = {
                .name_index = EXT4_XATTR_INDEX_SYSTEM,
                .name = EXT4_XATTR_SYSTEM_DATA,
        };

        /* If the old space is ok, write the data directly. */
        if (len <= EXT4_I(inode)->i_inline_size)
                return 0;

        error = ext4_get_inode_loc(inode, &is.iloc);
        if (error)
                return error;

        error = ext4_xattr_ibody_find(inode, &i, &is);
        if (error)
                goto out;

        BUG_ON(is.s.not_found);

        len -= EXT4_MIN_INLINE_DATA_SIZE;
        value = kzalloc(len, GFP_NOFS);
        if (!value) {
                error = -ENOMEM;
                goto out;
        }

        error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
                                     value, len);
        if (error == -ENODATA)
                goto out;

        error = ext4_journal_get_write_access(handle, is.iloc.bh);
        if (error)
                goto out;

        /* Update the xattr entry. */
        i.value = value;
        i.value_len = len;

        error = ext4_xattr_ibody_set(handle, inode, &i, &is);
        if (error)
                goto out;

        EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
                                      (void *)ext4_raw_inode(&is.iloc));
        EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
                                le32_to_cpu(is.s.here->e_value_size);
        ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        get_bh(is.iloc.bh);
        error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);

out:
        kfree(value);
        brelse(is.iloc.bh);
        return error;
}

int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
                             unsigned int len)
{
        int ret, size;
        struct ext4_inode_info *ei = EXT4_I(inode);

        if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
                return -ENOSPC;

        size = ext4_get_max_inline_size(inode);
        if (size < len)
                return -ENOSPC;

        down_write(&EXT4_I(inode)->xattr_sem);

        if (ei->i_inline_off)
                ret = ext4_update_inline_data(handle, inode, len);
        else
                ret = ext4_create_inline_data(handle, inode, len);

        up_write(&EXT4_I(inode)->xattr_sem);

        return ret;
}
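
/*
 * Usage note: the write_begin paths below (ext4_try_to_write_inline_data()
 * and ext4_da_write_inline_data_begin()) call this with the end offset of
 * the pending write (pos + len), under a running handle; a -ENOSPC return is
 * their cue to fall back to converting the inline data to the extent format.
 */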

static int ext4_destroy_inline_data_nolock(handle_t *handle,
                                           struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_xattr_ibody_find is = {
                .s = { .not_found = 0, },
        };
        struct ext4_xattr_info i = {
                .name_index = EXT4_XATTR_INDEX_SYSTEM,
                .name = EXT4_XATTR_SYSTEM_DATA,
                .value = NULL,
                .value_len = 0,
        };
        int error;

        if (!ei->i_inline_off)
                return 0;

        error = ext4_get_inode_loc(inode, &is.iloc);
        if (error)
                return error;

        error = ext4_xattr_ibody_find(inode, &i, &is);
        if (error)
                goto out;

        error = ext4_journal_get_write_access(handle, is.iloc.bh);
        if (error)
                goto out;

        error = ext4_xattr_ibody_set(handle, inode, &i, &is);
        if (error)
                goto out;

        memset((void *)ext4_raw_inode(&is.iloc)->i_block,
                0, EXT4_MIN_INLINE_DATA_SIZE);

        if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
                                      EXT4_FEATURE_INCOMPAT_EXTENTS)) {
                if (S_ISDIR(inode->i_mode) ||
                    S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
                        ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
                        ext4_ext_tree_init(handle, inode);
                }
        }
        ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);

        get_bh(is.iloc.bh);
        error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);

        EXT4_I(inode)->i_inline_off = 0;
        EXT4_I(inode)->i_inline_size = 0;
        ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
out:
        brelse(is.iloc.bh);
        if (error == -ENODATA)
                error = 0;
        return error;
}

static int ext4_read_inline_page(struct inode *inode, struct page *page)
{
        void *kaddr;
        int ret = 0;
        size_t len;
        struct ext4_iloc iloc;

        BUG_ON(!PageLocked(page));
        BUG_ON(!ext4_has_inline_data(inode));
        BUG_ON(page->index);

        if (!EXT4_I(inode)->i_inline_off) {
                ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
                             inode->i_ino);
                goto out;
        }

        ret = ext4_get_inode_loc(inode, &iloc);
        if (ret)
                goto out;

        len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
        kaddr = kmap_atomic(page);
        ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
        zero_user_segment(page, len, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        brelse(iloc.bh);

out:
        return ret;
}

int ext4_readpage_inline(struct inode *inode, struct page *page)
{
        int ret = 0;

        down_read(&EXT4_I(inode)->xattr_sem);
        if (!ext4_has_inline_data(inode)) {
                up_read(&EXT4_I(inode)->xattr_sem);
                return -EAGAIN;
        }

        /*
         * Current inline data can only exist in the first page,
         * so for all the other pages just set them uptodate.
         */
        if (!page->index)
                ret = ext4_read_inline_page(inode, page);
        else if (!PageUptodate(page)) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
        }

        up_read(&EXT4_I(inode)->xattr_sem);

        unlock_page(page);
        return ret >= 0 ? 0 : ret;
}

static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
                                              struct inode *inode,
                                              unsigned flags)
{
        int ret, needed_blocks;
        handle_t *handle = NULL;
        int retries = 0, sem_held = 0;
        struct page *page = NULL;
        unsigned from, to;
        struct ext4_iloc iloc;

        if (!ext4_has_inline_data(inode)) {
                /*
                 * Clear the flag so that no new write
                 * will trap here again.
                 */
                ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
                return 0;
        }

        needed_blocks = ext4_writepage_trans_blocks(inode);

        ret = ext4_get_inode_loc(inode, &iloc);
        if (ret)
                return ret;

retry:
        handle = ext4_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                handle = NULL;
                goto out;
        }

        /* We cannot recurse into the filesystem as the transaction is already
         * started */
        flags |= AOP_FLAG_NOFS;

        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        down_write(&EXT4_I(inode)->xattr_sem);
        sem_held = 1;
        /* If someone has already done this for us, just exit. */
        if (!ext4_has_inline_data(inode)) {
                ret = 0;
                goto out;
        }

        from = 0;
        to = ext4_get_inline_size(inode);
        if (!PageUptodate(page)) {
                ret = ext4_read_inline_page(inode, page);
                if (ret < 0)
                        goto out;
        }

        ret = ext4_destroy_inline_data_nolock(handle, inode);
        if (ret)
                goto out;

        if (ext4_should_dioread_nolock(inode))
                ret = __block_write_begin(page, from, to, ext4_get_block_write);
        else
                ret = __block_write_begin(page, from, to, ext4_get_block);

        if (!ret && ext4_should_journal_data(inode)) {
                ret = ext4_walk_page_buffers(handle, page_buffers(page),
                                             from, to, NULL,
                                             do_journal_get_write_access);
        }

        if (ret) {
                unlock_page(page);
                page_cache_release(page);
                ext4_orphan_add(handle, inode);
                up_write(&EXT4_I(inode)->xattr_sem);
                sem_held = 0;
                ext4_journal_stop(handle);
                handle = NULL;
                ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might
                 * still be on the orphan list; we need to
                 * make sure the inode is removed from the
                 * orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;

        block_commit_write(page, from, to);
out:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
        }
        if (sem_held)
                up_write(&EXT4_I(inode)->xattr_sem);
        if (handle)
                ext4_journal_stop(handle);
        brelse(iloc.bh);
        return ret;
}
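
/*
 * Conversion summary (as implemented above): read the inline contents into
 * page 0, tear down the "system.data" xattr and the inline flags, then run
 * the regular __block_write_begin()/block_commit_write() machinery so real
 * blocks back the page from now on.  Later writes therefore never see the
 * inline path again for this inode.
 */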

/*
 * Try to write data in the inode.
 * If the inode has inline data, check whether the new write can still be
 * served from the inode.  If not, create the page and the handle, move the
 * data to the page, mark it uptodate and let the later code create extents
 * for it.
 */
int ext4_try_to_write_inline_data(struct address_space *mapping,
                                  struct inode *inode,
                                  loff_t pos, unsigned len,
                                  unsigned flags,
                                  struct page **pagep)
{
        int ret;
        handle_t *handle;
        struct page *page;
        struct ext4_iloc iloc;

        if (pos + len > ext4_get_max_inline_size(inode))
                goto convert;

        ret = ext4_get_inode_loc(inode, &iloc);
        if (ret)
                return ret;

        /*
         * The write may fit in the inode,
         * so try to reserve the space there first.
         */
        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                handle = NULL;
                goto out;
        }

        ret = ext4_prepare_inline_data(handle, inode, pos + len);
        if (ret && ret != -ENOSPC)
                goto out;

        /* We don't have space in the inline inode, so convert it to extents. */
        if (ret == -ENOSPC) {
                ext4_journal_stop(handle);
                brelse(iloc.bh);
                goto convert;
        }

        flags |= AOP_FLAG_NOFS;

        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        *pagep = page;
        down_read(&EXT4_I(inode)->xattr_sem);
        if (!ext4_has_inline_data(inode)) {
                ret = 0;
                unlock_page(page);
                page_cache_release(page);
                goto out_up_read;
        }

        if (!PageUptodate(page)) {
                ret = ext4_read_inline_page(inode, page);
                if (ret < 0)
                        goto out_up_read;
        }

        ret = 1;
        handle = NULL;
out_up_read:
        up_read(&EXT4_I(inode)->xattr_sem);
out:
        if (handle)
                ext4_journal_stop(handle);
        brelse(iloc.bh);
        return ret;
convert:
        return ext4_convert_inline_data_to_extent(mapping,
                                                  inode, flags);
}
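
/*
 * Return convention (from the code above): 1 means the write will be served
 * inline, with *pagep locked and uptodate and the journal handle left running
 * for the caller; 0 means the data is not (or is no longer) inline and the
 * caller (presumably ext4_write_begin()) should take the normal block path;
 * a negative value is an error.  -ENOSPC is handled internally by converting
 * to the extent format.
 */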

int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
                               unsigned copied, struct page *page)
{
        int ret;
        void *kaddr;
        struct ext4_iloc iloc;

        if (unlikely(copied < len)) {
                if (!PageUptodate(page)) {
                        copied = 0;
                        goto out;
                }
        }

        ret = ext4_get_inode_loc(inode, &iloc);
        if (ret) {
                ext4_std_error(inode->i_sb, ret);
                copied = 0;
                goto out;
        }

        down_write(&EXT4_I(inode)->xattr_sem);
        BUG_ON(!ext4_has_inline_data(inode));

        kaddr = kmap_atomic(page);
        ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
        kunmap_atomic(kaddr);
        SetPageUptodate(page);
        /* Clear the page's dirty bit so that writepages won't write it out for us. */
        ClearPageDirty(page);
        up_write(&EXT4_I(inode)->xattr_sem);
        brelse(iloc.bh);

out:
        return copied;
}

struct buffer_head *
ext4_journalled_write_inline_data(struct inode *inode,
                                  unsigned len,
                                  struct page *page)
{
        int ret;
        void *kaddr;
        struct ext4_iloc iloc;

        ret = ext4_get_inode_loc(inode, &iloc);
        if (ret) {
                ext4_std_error(inode->i_sb, ret);
                return NULL;
        }

        down_write(&EXT4_I(inode)->xattr_sem);
        kaddr = kmap_atomic(page);
        ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
        kunmap_atomic(kaddr);
        up_write(&EXT4_I(inode)->xattr_sem);

        return iloc.bh;
}
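
/*
 * Note: unlike the helpers above, this returns iloc.bh without dropping the
 * reference taken by ext4_get_inode_loc(), so the caller (presumably the
 * data=journal write_end path) is expected to journal the inode buffer and
 * brelse() it when done.
 */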

/*
 * Try to make the page cache and handle ready for the inline data case.
 * We can call this function in 2 cases:
 * 1. The inode is created and the first write exceeds inline size.  We can
 *    clear the inode state safely.
 * 2. The inode has inline data, then we need to read the data, mark it
 *    uptodate and dirty so that ext4_da_writepages can handle it.  We don't
 *    need to start the journal since the file's metadata isn't changed now.
 */
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
                                                 struct inode *inode,
                                                 unsigned flags,
                                                 void **fsdata)
{
        int ret = 0, inline_size;
        struct page *page;

        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page)
                return -ENOMEM;

        down_read(&EXT4_I(inode)->xattr_sem);
        if (!ext4_has_inline_data(inode)) {
                ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
                goto out;
        }

        inline_size = ext4_get_inline_size(inode);

        if (!PageUptodate(page)) {
                ret = ext4_read_inline_page(inode, page);
                if (ret < 0)
                        goto out;
        }

        ret = __block_write_begin(page, 0, inline_size,
                                  ext4_da_get_block_prep);
        if (ret) {
                ext4_truncate_failed_write(inode);
                goto out;
        }

        SetPageDirty(page);
        SetPageUptodate(page);
        ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        *fsdata = (void *)CONVERT_INLINE_DATA;

out:
        up_read(&EXT4_I(inode)->xattr_sem);
        if (page) {
                unlock_page(page);
                page_cache_release(page);
        }
        return ret;
}

/*
 * Prepare the write for the inline data.
 * If the data can be written into the inode, we just read
 * the page and make it uptodate, and start the journal.
 * Otherwise read the page, mark it dirty so that it can be
 * handled in writepages (the i_disksize update is left to the
 * normal ext4_da_write_end).
 */
int ext4_da_write_inline_data_begin(struct address_space *mapping,
                                    struct inode *inode,
                                    loff_t pos, unsigned len,
                                    unsigned flags,
                                    struct page **pagep,
                                    void **fsdata)
{
        int ret, inline_size;
        handle_t *handle;
        struct page *page;
        struct ext4_iloc iloc;

        ret = ext4_get_inode_loc(inode, &iloc);
        if (ret)
                return ret;

        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                handle = NULL;
                goto out;
        }

        inline_size = ext4_get_max_inline_size(inode);

        ret = -ENOSPC;
        if (inline_size >= pos + len) {
                ret = ext4_prepare_inline_data(handle, inode, pos + len);
                if (ret && ret != -ENOSPC)
                        goto out;
        }

        if (ret == -ENOSPC) {
                ret = ext4_da_convert_inline_data_to_extent(mapping,
                                                            inode,
                                                            flags,
                                                            fsdata);
                goto out;
        }

        /*
         * We cannot recurse into the filesystem as the transaction
         * is already started.
         */
        flags |= AOP_FLAG_NOFS;

        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        down_read(&EXT4_I(inode)->xattr_sem);
        if (!ext4_has_inline_data(inode)) {
                ret = 0;
                goto out_release_page;
        }

        if (!PageUptodate(page)) {
                ret = ext4_read_inline_page(inode, page);
                if (ret < 0)
                        goto out_release_page;
        }

        up_read(&EXT4_I(inode)->xattr_sem);
        *pagep = page;
        handle = NULL;
        brelse(iloc.bh);
        return 1;
out_release_page:
        up_read(&EXT4_I(inode)->xattr_sem);
        unlock_page(page);
        page_cache_release(page);
out:
        if (handle)
                ext4_journal_stop(handle);
        brelse(iloc.bh);
        return ret;
}

int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
                                  unsigned len, unsigned copied,
                                  struct page *page)
{
        int i_size_changed = 0;

        copied = ext4_write_inline_data_end(inode, pos, len, copied, page);

        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_mutex.
         *
         * But it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
        if (pos+copied > inode->i_size) {
                i_size_write(inode, pos+copied);
                i_size_changed = 1;
        }

        unlock_page(page);
        page_cache_release(page);

        /*
         * Don't mark the inode dirty under page lock. First, it unnecessarily
         * makes the holding time of page lock longer. Second, it forces lock
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
        if (i_size_changed)
                mark_inode_dirty(inode);

        return copied;
}

int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
{
        int ret;

        down_write(&EXT4_I(inode)->xattr_sem);
        ret = ext4_destroy_inline_data_nolock(handle, inode);
        up_write(&EXT4_I(inode)->xattr_sem);

        return ret;
}