inline.c 38 KB

  1. /*
  2. * Copyright (c) 2012 Taobao.
  3. * Written by Tao Ma <boyu.mt@taobao.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of version 2.1 of the GNU Lesser General Public License
  7. * as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include "ext4_jbd2.h"
  15. #include "ext4.h"
  16. #include "xattr.h"
  17. #include "truncate.h"
  18. #define EXT4_XATTR_SYSTEM_DATA "data"
  19. #define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS))
  20. #define EXT4_INLINE_DOTDOT_SIZE 4
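/*
 * Size note: EXT4_MIN_INLINE_DATA_SIZE is sizeof(__le32) * EXT4_N_BLOCKS,
 * i.e. 4 * 15 = 60 bytes -- the i_block[] array of the on-disk inode is
 * reused as the first chunk of inline storage.  Anything beyond those 60
 * bytes lives in the value of the "system.data" in-inode xattr.  For an
 * inline directory only the parent inode number is stored up front, which
 * is why EXT4_INLINE_DOTDOT_SIZE is 4.
 */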
  21. int ext4_get_inline_size(struct inode *inode)
  22. {
  23. if (EXT4_I(inode)->i_inline_off)
  24. return EXT4_I(inode)->i_inline_size;
  25. return 0;
  26. }
  27. static int get_max_inline_xattr_value_size(struct inode *inode,
  28. struct ext4_iloc *iloc)
  29. {
  30. struct ext4_xattr_ibody_header *header;
  31. struct ext4_xattr_entry *entry;
  32. struct ext4_inode *raw_inode;
  33. int free, min_offs;
  34. min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
  35. EXT4_GOOD_OLD_INODE_SIZE -
  36. EXT4_I(inode)->i_extra_isize -
  37. sizeof(struct ext4_xattr_ibody_header);
  38. /*
  39. * We need to subtract another sizeof(__u32) since an in-inode xattr
  40. * needs an empty 4 bytes to indicate the gap between the xattr entry
  41. * and the name/value pair.
  42. */
  43. if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
  44. return EXT4_XATTR_SIZE(min_offs -
  45. EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA)) -
  46. EXT4_XATTR_ROUND - sizeof(__u32));
  47. raw_inode = ext4_raw_inode(iloc);
  48. header = IHDR(inode, raw_inode);
  49. entry = IFIRST(header);
  50. /* Compute min_offs. */
  51. for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
  52. if (!entry->e_value_block && entry->e_value_size) {
  53. size_t offs = le16_to_cpu(entry->e_value_offs);
  54. if (offs < min_offs)
  55. min_offs = offs;
  56. }
  57. }
  58. free = min_offs -
  59. ((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
  60. if (EXT4_I(inode)->i_inline_off) {
  61. entry = (struct ext4_xattr_entry *)
  62. ((void *)raw_inode + EXT4_I(inode)->i_inline_off);
  63. free += le32_to_cpu(entry->e_value_size);
  64. goto out;
  65. }
  66. free -= EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA));
  67. if (free > EXT4_XATTR_ROUND)
  68. free = EXT4_XATTR_SIZE(free - EXT4_XATTR_ROUND);
  69. else
  70. free = 0;
  71. out:
  72. return free;
  73. }
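/*
 * Worked example for the function above (illustrative numbers only,
 * assuming a 256-byte on-disk inode, i_extra_isize == 32 and no other
 * in-inode xattrs): min_offs starts at 256 - 128 (EXT4_GOOD_OLD_INODE_SIZE)
 * - 32 - 4 (the ibody header) = 92.  Subtracting EXT4_XATTR_LEN(strlen("data")),
 * EXT4_XATTR_ROUND and the extra 4-byte gap leaves roughly 68 bytes for
 * the "system.data" value, on top of the 60 bytes in i_block.  The exact
 * figure depends on the inode size and on which other xattrs are present.
 */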
  74. /*
  75. * Get the maximum size we can now store in an inode.
  76. * If we can't find room for even the "system.data" xattr entry, don't use
  77. * the i_block space either, since without the entry we cannot mark the data as inline.
  78. */
  79. int ext4_get_max_inline_size(struct inode *inode)
  80. {
  81. int error, max_inline_size;
  82. struct ext4_iloc iloc;
  83. if (EXT4_I(inode)->i_extra_isize == 0)
  84. return 0;
  85. error = ext4_get_inode_loc(inode, &iloc);
  86. if (error) {
  87. ext4_error_inode(inode, __func__, __LINE__, 0,
  88. "can't get inode location %lu",
  89. inode->i_ino);
  90. return 0;
  91. }
  92. down_read(&EXT4_I(inode)->xattr_sem);
  93. max_inline_size = get_max_inline_xattr_value_size(inode, &iloc);
  94. up_read(&EXT4_I(inode)->xattr_sem);
  95. brelse(iloc.bh);
  96. if (!max_inline_size)
  97. return 0;
  98. return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
  99. }
  100. int ext4_has_inline_data(struct inode *inode)
  101. {
  102. return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
  103. EXT4_I(inode)->i_inline_off;
  104. }
  105. /*
  106. * this function does not take xattr_sem, which is OK because it is
  107. * currently only used in a code path coming from ext4_iget, before
  108. * the new inode has been unlocked
  109. */
  110. int ext4_find_inline_data_nolock(struct inode *inode)
  111. {
  112. struct ext4_xattr_ibody_find is = {
  113. .s = { .not_found = -ENODATA, },
  114. };
  115. struct ext4_xattr_info i = {
  116. .name_index = EXT4_XATTR_INDEX_SYSTEM,
  117. .name = EXT4_XATTR_SYSTEM_DATA,
  118. };
  119. int error;
  120. if (EXT4_I(inode)->i_extra_isize == 0)
  121. return 0;
  122. error = ext4_get_inode_loc(inode, &is.iloc);
  123. if (error)
  124. return error;
  125. error = ext4_xattr_ibody_find(inode, &i, &is);
  126. if (error)
  127. goto out;
  128. if (!is.s.not_found) {
  129. EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
  130. (void *)ext4_raw_inode(&is.iloc));
  131. EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
  132. le32_to_cpu(is.s.here->e_value_size);
  133. ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
  134. }
  135. out:
  136. brelse(is.iloc.bh);
  137. return error;
  138. }
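/*
 * A sketch of the intended call site (hypothetical; the real caller sits
 * in ext4_iget()'s path while the new inode is still locked, hence the
 * _nolock suffix and the missing xattr_sem):
 *
 *	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
 *		ret = ext4_find_inline_data_nolock(inode);
 *
 * On success i_inline_off caches the offset of the "system.data" entry
 * inside the raw inode and i_inline_size caches the 60 bytes of i_block
 * plus the current xattr value size.
 */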
  139. static int ext4_read_inline_data(struct inode *inode, void *buffer,
  140. unsigned int len,
  141. struct ext4_iloc *iloc)
  142. {
  143. struct ext4_xattr_entry *entry;
  144. struct ext4_xattr_ibody_header *header;
  145. int cp_len = 0;
  146. struct ext4_inode *raw_inode;
  147. if (!len)
  148. return 0;
  149. BUG_ON(len > EXT4_I(inode)->i_inline_size);
  150. cp_len = len < EXT4_MIN_INLINE_DATA_SIZE ?
  151. len : EXT4_MIN_INLINE_DATA_SIZE;
  152. raw_inode = ext4_raw_inode(iloc);
  153. memcpy(buffer, (void *)(raw_inode->i_block), cp_len);
  154. len -= cp_len;
  155. buffer += cp_len;
  156. if (!len)
  157. goto out;
  158. header = IHDR(inode, raw_inode);
  159. entry = (struct ext4_xattr_entry *)((void *)raw_inode +
  160. EXT4_I(inode)->i_inline_off);
  161. len = min_t(unsigned int, len,
  162. (unsigned int)le32_to_cpu(entry->e_value_size));
  163. memcpy(buffer,
  164. (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs), len);
  165. cp_len += len;
  166. out:
  167. return cp_len;
  168. }
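/*
 * Note: the read above is split in two -- up to 60 bytes come straight
 * from i_block, the remainder (if any) from the "system.data" xattr
 * value.  The return value is the number of bytes actually copied, which
 * may be less than 'len' if the stored value is shorter.
 */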
  169. /*
  170. * write the buffer to the inline inode.
  171. * If 'create' is set, we don't need to do the extra copy in the xattr
  172. * value since it is already handled by ext4_xattr_ibody_set. That saves
  173. * us one memcpy.
  174. */
  175. void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
  176. void *buffer, loff_t pos, unsigned int len)
  177. {
  178. struct ext4_xattr_entry *entry;
  179. struct ext4_xattr_ibody_header *header;
  180. struct ext4_inode *raw_inode;
  181. int cp_len = 0;
  182. BUG_ON(!EXT4_I(inode)->i_inline_off);
  183. BUG_ON(pos + len > EXT4_I(inode)->i_inline_size);
  184. raw_inode = ext4_raw_inode(iloc);
  185. buffer += pos;
  186. if (pos < EXT4_MIN_INLINE_DATA_SIZE) {
  187. cp_len = pos + len > EXT4_MIN_INLINE_DATA_SIZE ?
  188. EXT4_MIN_INLINE_DATA_SIZE - pos : len;
  189. memcpy((void *)raw_inode->i_block + pos, buffer, cp_len);
  190. len -= cp_len;
  191. buffer += cp_len;
  192. pos += cp_len;
  193. }
  194. if (!len)
  195. return;
  196. pos -= EXT4_MIN_INLINE_DATA_SIZE;
  197. header = IHDR(inode, raw_inode);
  198. entry = (struct ext4_xattr_entry *)((void *)raw_inode +
  199. EXT4_I(inode)->i_inline_off);
  200. memcpy((void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs) + pos,
  201. buffer, len);
  202. }
  203. static int ext4_create_inline_data(handle_t *handle,
  204. struct inode *inode, unsigned len)
  205. {
  206. int error;
  207. void *value = NULL;
  208. struct ext4_xattr_ibody_find is = {
  209. .s = { .not_found = -ENODATA, },
  210. };
  211. struct ext4_xattr_info i = {
  212. .name_index = EXT4_XATTR_INDEX_SYSTEM,
  213. .name = EXT4_XATTR_SYSTEM_DATA,
  214. };
  215. error = ext4_get_inode_loc(inode, &is.iloc);
  216. if (error)
  217. return error;
  218. error = ext4_journal_get_write_access(handle, is.iloc.bh);
  219. if (error)
  220. goto out;
  221. if (len > EXT4_MIN_INLINE_DATA_SIZE) {
  222. value = (void *)empty_zero_page;
  223. len -= EXT4_MIN_INLINE_DATA_SIZE;
  224. } else {
  225. value = "";
  226. len = 0;
  227. }
  228. /* Insert the xattr entry. */
  229. i.value = value;
  230. i.value_len = len;
  231. error = ext4_xattr_ibody_find(inode, &i, &is);
  232. if (error)
  233. goto out;
  234. BUG_ON(!is.s.not_found);
  235. error = ext4_xattr_ibody_set(handle, inode, &i, &is);
  236. if (error) {
  237. if (error == -ENOSPC)
  238. ext4_clear_inode_state(inode,
  239. EXT4_STATE_MAY_INLINE_DATA);
  240. goto out;
  241. }
  242. memset((void *)ext4_raw_inode(&is.iloc)->i_block,
  243. 0, EXT4_MIN_INLINE_DATA_SIZE);
  244. EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
  245. (void *)ext4_raw_inode(&is.iloc));
  246. EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
  247. ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
  248. ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
  249. get_bh(is.iloc.bh);
  250. error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
  251. out:
  252. brelse(is.iloc.bh);
  253. return error;
  254. }
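/*
 * Note: when len > EXT4_MIN_INLINE_DATA_SIZE the value handed to
 * ext4_xattr_ibody_set() is just empty_zero_page acting as a
 * correctly-sized placeholder; the real bytes are expected to be filled
 * in afterwards through ext4_write_inline_data().
 */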
  255. static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
  256. unsigned int len)
  257. {
  258. int error;
  259. void *value = NULL;
  260. struct ext4_xattr_ibody_find is = {
  261. .s = { .not_found = -ENODATA, },
  262. };
  263. struct ext4_xattr_info i = {
  264. .name_index = EXT4_XATTR_INDEX_SYSTEM,
  265. .name = EXT4_XATTR_SYSTEM_DATA,
  266. };
  267. /* If the old space is ok, write the data directly. */
  268. if (len <= EXT4_I(inode)->i_inline_size)
  269. return 0;
  270. error = ext4_get_inode_loc(inode, &is.iloc);
  271. if (error)
  272. return error;
  273. error = ext4_xattr_ibody_find(inode, &i, &is);
  274. if (error)
  275. goto out;
  276. BUG_ON(is.s.not_found);
  277. len -= EXT4_MIN_INLINE_DATA_SIZE;
  278. value = kzalloc(len, GFP_NOFS);
  279. if (!value)
  280. { error = -ENOMEM; goto out; }
  281. error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
  282. value, len);
  283. if (error == -ENODATA)
  284. goto out;
  285. error = ext4_journal_get_write_access(handle, is.iloc.bh);
  286. if (error)
  287. goto out;
  288. /* Update the xattr entry. */
  289. i.value = value;
  290. i.value_len = len;
  291. error = ext4_xattr_ibody_set(handle, inode, &i, &is);
  292. if (error)
  293. goto out;
  294. EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
  295. (void *)ext4_raw_inode(&is.iloc));
  296. EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
  297. le32_to_cpu(is.s.here->e_value_size);
  298. ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
  299. get_bh(is.iloc.bh);
  300. error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
  301. out:
  302. kfree(value);
  303. brelse(is.iloc.bh);
  304. return error;
  305. }
  306. int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
  307. unsigned int len)
  308. {
  309. int ret, size;
  310. struct ext4_inode_info *ei = EXT4_I(inode);
  311. if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
  312. return -ENOSPC;
  313. size = ext4_get_max_inline_size(inode);
  314. if (size < len)
  315. return -ENOSPC;
  316. down_write(&EXT4_I(inode)->xattr_sem);
  317. if (ei->i_inline_off)
  318. ret = ext4_update_inline_data(handle, inode, len);
  319. else
  320. ret = ext4_create_inline_data(handle, inode, len);
  321. up_write(&EXT4_I(inode)->xattr_sem);
  322. return ret;
  323. }
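/*
 * Rough calling sequence for a buffered write that fits inline (a sketch;
 * see ext4_try_to_write_inline_data() below for the real flow):
 *
 *	handle = ext4_journal_start(inode, 1);
 *	ret = ext4_prepare_inline_data(handle, inode, pos + len);
 *	...
 *	ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
 *
 * An -ENOSPC return from ext4_prepare_inline_data() is the cue to fall
 * back to the regular block/extent based path.
 */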
  324. static int ext4_destroy_inline_data_nolock(handle_t *handle,
  325. struct inode *inode)
  326. {
  327. struct ext4_inode_info *ei = EXT4_I(inode);
  328. struct ext4_xattr_ibody_find is = {
  329. .s = { .not_found = 0, },
  330. };
  331. struct ext4_xattr_info i = {
  332. .name_index = EXT4_XATTR_INDEX_SYSTEM,
  333. .name = EXT4_XATTR_SYSTEM_DATA,
  334. .value = NULL,
  335. .value_len = 0,
  336. };
  337. int error;
  338. if (!ei->i_inline_off)
  339. return 0;
  340. error = ext4_get_inode_loc(inode, &is.iloc);
  341. if (error)
  342. return error;
  343. error = ext4_xattr_ibody_find(inode, &i, &is);
  344. if (error)
  345. goto out;
  346. error = ext4_journal_get_write_access(handle, is.iloc.bh);
  347. if (error)
  348. goto out;
  349. error = ext4_xattr_ibody_set(handle, inode, &i, &is);
  350. if (error)
  351. goto out;
  352. memset((void *)ext4_raw_inode(&is.iloc)->i_block,
  353. 0, EXT4_MIN_INLINE_DATA_SIZE);
  354. if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
  355. EXT4_FEATURE_INCOMPAT_EXTENTS)) {
  356. if (S_ISDIR(inode->i_mode) ||
  357. S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
  358. ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
  359. ext4_ext_tree_init(handle, inode);
  360. }
  361. }
  362. ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
  363. get_bh(is.iloc.bh);
  364. error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
  365. EXT4_I(inode)->i_inline_off = 0;
  366. EXT4_I(inode)->i_inline_size = 0;
  367. ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
  368. out:
  369. brelse(is.iloc.bh);
  370. if (error == -ENODATA)
  371. error = 0;
  372. return error;
  373. }
  374. static int ext4_read_inline_page(struct inode *inode, struct page *page)
  375. {
  376. void *kaddr;
  377. int ret = 0;
  378. size_t len;
  379. struct ext4_iloc iloc;
  380. BUG_ON(!PageLocked(page));
  381. BUG_ON(!ext4_has_inline_data(inode));
  382. BUG_ON(page->index);
  383. if (!EXT4_I(inode)->i_inline_off) {
  384. ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
  385. inode->i_ino);
  386. goto out;
  387. }
  388. ret = ext4_get_inode_loc(inode, &iloc);
  389. if (ret)
  390. goto out;
  391. len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
  392. kaddr = kmap_atomic(page);
  393. ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
  394. flush_dcache_page(page);
  395. kunmap_atomic(kaddr);
  396. zero_user_segment(page, len, PAGE_CACHE_SIZE);
  397. SetPageUptodate(page);
  398. brelse(iloc.bh);
  399. out:
  400. return ret;
  401. }
  402. int ext4_readpage_inline(struct inode *inode, struct page *page)
  403. {
  404. int ret = 0;
  405. down_read(&EXT4_I(inode)->xattr_sem);
  406. if (!ext4_has_inline_data(inode)) {
  407. up_read(&EXT4_I(inode)->xattr_sem);
  408. return -EAGAIN;
  409. }
  410. /*
  411. * Current inline data can only exist in the 1st page,
  412. * so for all the other pages, just set them uptodate.
  413. */
  414. if (!page->index)
  415. ret = ext4_read_inline_page(inode, page);
  416. else if (!PageUptodate(page)) {
  417. zero_user_segment(page, 0, PAGE_CACHE_SIZE);
  418. SetPageUptodate(page);
  419. }
  420. up_read(&EXT4_I(inode)->xattr_sem);
  421. unlock_page(page);
  422. return ret >= 0 ? 0 : ret;
  423. }
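/*
 * Note: the -EAGAIN above is not reported to user space; it signals the
 * caller that the inode (no longer) has inline data and the generic
 * readpage path should be used instead.
 */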
  424. static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
  425. struct inode *inode,
  426. unsigned flags)
  427. {
  428. int ret, needed_blocks;
  429. handle_t *handle = NULL;
  430. int retries = 0, sem_held = 0;
  431. struct page *page = NULL;
  432. unsigned from, to;
  433. struct ext4_iloc iloc;
  434. if (!ext4_has_inline_data(inode)) {
  435. /*
  436. * clear the flag so that no new write
  437. * will trap here again.
  438. */
  439. ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
  440. return 0;
  441. }
  442. needed_blocks = ext4_writepage_trans_blocks(inode);
  443. ret = ext4_get_inode_loc(inode, &iloc);
  444. if (ret)
  445. return ret;
  446. retry:
  447. handle = ext4_journal_start(inode, needed_blocks);
  448. if (IS_ERR(handle)) {
  449. ret = PTR_ERR(handle);
  450. handle = NULL;
  451. goto out;
  452. }
  453. /* We cannot recurse into the filesystem as the transaction is already
  454. * started */
  455. flags |= AOP_FLAG_NOFS;
  456. page = grab_cache_page_write_begin(mapping, 0, flags);
  457. if (!page) {
  458. ret = -ENOMEM;
  459. goto out;
  460. }
  461. down_write(&EXT4_I(inode)->xattr_sem);
  462. sem_held = 1;
  463. /* If someone has already done this for us, just exit. */
  464. if (!ext4_has_inline_data(inode)) {
  465. ret = 0;
  466. goto out;
  467. }
  468. from = 0;
  469. to = ext4_get_inline_size(inode);
  470. if (!PageUptodate(page)) {
  471. ret = ext4_read_inline_page(inode, page);
  472. if (ret < 0)
  473. goto out;
  474. }
  475. ret = ext4_destroy_inline_data_nolock(handle, inode);
  476. if (ret)
  477. goto out;
  478. if (ext4_should_dioread_nolock(inode))
  479. ret = __block_write_begin(page, from, to, ext4_get_block_write);
  480. else
  481. ret = __block_write_begin(page, from, to, ext4_get_block);
  482. if (!ret && ext4_should_journal_data(inode)) {
  483. ret = ext4_walk_page_buffers(handle, page_buffers(page),
  484. from, to, NULL,
  485. do_journal_get_write_access);
  486. }
  487. if (ret) {
  488. unlock_page(page);
  489. page_cache_release(page);
  490. ext4_orphan_add(handle, inode);
  491. up_write(&EXT4_I(inode)->xattr_sem);
  492. sem_held = 0;
  493. ext4_journal_stop(handle);
  494. handle = NULL;
  495. ext4_truncate_failed_write(inode);
  496. /*
  497. * If truncate failed early the inode might
  498. * still be on the orphan list; we need to
  499. * make sure the inode is removed from the
  500. * orphan list in that case.
  501. */
  502. if (inode->i_nlink)
  503. ext4_orphan_del(NULL, inode);
  504. }
  505. if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
  506. goto retry;
  507. block_commit_write(page, from, to);
  508. out:
  509. if (page) {
  510. unlock_page(page);
  511. page_cache_release(page);
  512. }
  513. if (sem_held)
  514. up_write(&EXT4_I(inode)->xattr_sem);
  515. if (handle)
  516. ext4_journal_stop(handle);
  517. brelse(iloc.bh);
  518. return ret;
  519. }
  520. /*
  521. * Try to write data in the inode.
  522. * If the inode has inline data, check whether the new write also fits
  523. * in the inode. If not, create the page and the handle, move the data
  524. * to the page, mark it uptodate and let the later code create an extent for it.
  525. */
  526. int ext4_try_to_write_inline_data(struct address_space *mapping,
  527. struct inode *inode,
  528. loff_t pos, unsigned len,
  529. unsigned flags,
  530. struct page **pagep)
  531. {
  532. int ret;
  533. handle_t *handle;
  534. struct page *page;
  535. struct ext4_iloc iloc;
  536. if (pos + len > ext4_get_max_inline_size(inode))
  537. goto convert;
  538. ret = ext4_get_inode_loc(inode, &iloc);
  539. if (ret)
  540. return ret;
  541. /*
  542. * The write may fit in the inode,
  543. * so try to reserve the space in the inode first.
  544. */
  545. handle = ext4_journal_start(inode, 1);
  546. if (IS_ERR(handle)) {
  547. ret = PTR_ERR(handle);
  548. handle = NULL;
  549. goto out;
  550. }
  551. ret = ext4_prepare_inline_data(handle, inode, pos + len);
  552. if (ret && ret != -ENOSPC)
  553. goto out;
  554. /* We don't have space in the inline inode, so convert it to an extent. */
  555. if (ret == -ENOSPC) {
  556. ext4_journal_stop(handle);
  557. brelse(iloc.bh);
  558. goto convert;
  559. }
  560. flags |= AOP_FLAG_NOFS;
  561. page = grab_cache_page_write_begin(mapping, 0, flags);
  562. if (!page) {
  563. ret = -ENOMEM;
  564. goto out;
  565. }
  566. *pagep = page;
  567. down_read(&EXT4_I(inode)->xattr_sem);
  568. if (!ext4_has_inline_data(inode)) {
  569. ret = 0;
  570. unlock_page(page);
  571. page_cache_release(page);
  572. goto out_up_read;
  573. }
  574. if (!PageUptodate(page)) {
  575. ret = ext4_read_inline_page(inode, page);
  576. if (ret < 0)
  577. goto out_up_read;
  578. }
  579. ret = 1;
  580. handle = NULL;
  581. out_up_read:
  582. up_read(&EXT4_I(inode)->xattr_sem);
  583. out:
  584. if (handle)
  585. ext4_journal_stop(handle);
  586. brelse(iloc.bh);
  587. return ret;
  588. convert:
  589. return ext4_convert_inline_data_to_extent(mapping,
  590. inode, flags);
  591. }
  592. int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
  593. unsigned copied, struct page *page)
  594. {
  595. int ret;
  596. void *kaddr;
  597. struct ext4_iloc iloc;
  598. if (unlikely(copied < len)) {
  599. if (!PageUptodate(page)) {
  600. copied = 0;
  601. goto out;
  602. }
  603. }
  604. ret = ext4_get_inode_loc(inode, &iloc);
  605. if (ret) {
  606. ext4_std_error(inode->i_sb, ret);
  607. copied = 0;
  608. goto out;
  609. }
  610. down_write(&EXT4_I(inode)->xattr_sem);
  611. BUG_ON(!ext4_has_inline_data(inode));
  612. kaddr = kmap_atomic(page);
  613. ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
  614. kunmap_atomic(kaddr);
  615. SetPageUptodate(page);
  616. /* clear page dirty so that writepages won't try to write it out. */
  617. ClearPageDirty(page);
  618. up_write(&EXT4_I(inode)->xattr_sem);
  619. brelse(iloc.bh);
  620. out:
  621. return copied;
  622. }
  623. struct buffer_head *
  624. ext4_journalled_write_inline_data(struct inode *inode,
  625. unsigned len,
  626. struct page *page)
  627. {
  628. int ret;
  629. void *kaddr;
  630. struct ext4_iloc iloc;
  631. ret = ext4_get_inode_loc(inode, &iloc);
  632. if (ret) {
  633. ext4_std_error(inode->i_sb, ret);
  634. return NULL;
  635. }
  636. down_write(&EXT4_I(inode)->xattr_sem);
  637. kaddr = kmap_atomic(page);
  638. ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
  639. kunmap_atomic(kaddr);
  640. up_write(&EXT4_I(inode)->xattr_sem);
  641. return iloc.bh;
  642. }
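/*
 * The returned buffer_head is the inode buffer with the reference taken
 * by ext4_get_inode_loc() still held, so the caller is presumably
 * expected to journal it as metadata and brelse() it; NULL means the
 * inode location could not be read.
 */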
  643. /*
  644. * Try to make the page cache and handle ready for the inline data case.
  645. * We can call this function in 2 cases:
  646. * 1. The inode is created and the first write exceeds inline size. We can
  647. * clear the inode state safely.
  648. * 2. The inode has inline data; we then need to read the data, make it
  649. * uptodate and dirty so that ext4_da_writepages can handle it. We don't
  650. * need to start the journal since the file's metadata isn't changed now.
  651. */
  652. static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
  653. struct inode *inode,
  654. unsigned flags,
  655. void **fsdata)
  656. {
  657. int ret = 0, inline_size;
  658. struct page *page;
  659. page = grab_cache_page_write_begin(mapping, 0, flags);
  660. if (!page)
  661. return -ENOMEM;
  662. down_read(&EXT4_I(inode)->xattr_sem);
  663. if (!ext4_has_inline_data(inode)) {
  664. ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
  665. goto out;
  666. }
  667. inline_size = ext4_get_inline_size(inode);
  668. if (!PageUptodate(page)) {
  669. ret = ext4_read_inline_page(inode, page);
  670. if (ret < 0)
  671. goto out;
  672. }
  673. ret = __block_write_begin(page, 0, inline_size,
  674. ext4_da_get_block_prep);
  675. if (ret) {
  676. ext4_truncate_failed_write(inode);
  677. goto out;
  678. }
  679. SetPageDirty(page);
  680. SetPageUptodate(page);
  681. ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
  682. *fsdata = (void *)CONVERT_INLINE_DATA;
  683. out:
  684. up_read(&EXT4_I(inode)->xattr_sem);
  685. if (page) {
  686. unlock_page(page);
  687. page_cache_release(page);
  688. }
  689. return ret;
  690. }
  691. /*
  692. * Prepare the write for the inline data.
  693. * If the data can be written into the inode, we just read
  694. * the page and make it uptodate, and start the journal.
  695. * Otherwise read the page, mark it dirty so that it can be
  696. * handled in writepages (the i_disksize update is left to the
  697. * normal ext4_da_write_end).
  698. */
  699. int ext4_da_write_inline_data_begin(struct address_space *mapping,
  700. struct inode *inode,
  701. loff_t pos, unsigned len,
  702. unsigned flags,
  703. struct page **pagep,
  704. void **fsdata)
  705. {
  706. int ret, inline_size;
  707. handle_t *handle;
  708. struct page *page;
  709. struct ext4_iloc iloc;
  710. ret = ext4_get_inode_loc(inode, &iloc);
  711. if (ret)
  712. return ret;
  713. handle = ext4_journal_start(inode, 1);
  714. if (IS_ERR(handle)) {
  715. ret = PTR_ERR(handle);
  716. handle = NULL;
  717. goto out;
  718. }
  719. inline_size = ext4_get_max_inline_size(inode);
  720. ret = -ENOSPC;
  721. if (inline_size >= pos + len) {
  722. ret = ext4_prepare_inline_data(handle, inode, pos + len);
  723. if (ret && ret != -ENOSPC)
  724. goto out;
  725. }
  726. if (ret == -ENOSPC) {
  727. ret = ext4_da_convert_inline_data_to_extent(mapping,
  728. inode,
  729. flags,
  730. fsdata);
  731. goto out;
  732. }
  733. /*
  734. * We cannot recurse into the filesystem as the transaction
  735. * is already started.
  736. */
  737. flags |= AOP_FLAG_NOFS;
  738. page = grab_cache_page_write_begin(mapping, 0, flags);
  739. if (!page) {
  740. ret = -ENOMEM;
  741. goto out;
  742. }
  743. down_read(&EXT4_I(inode)->xattr_sem);
  744. if (!ext4_has_inline_data(inode)) {
  745. ret = 0;
  746. goto out_release_page;
  747. }
  748. if (!PageUptodate(page)) {
  749. ret = ext4_read_inline_page(inode, page);
  750. if (ret < 0)
  751. goto out_release_page;
  752. }
  753. up_read(&EXT4_I(inode)->xattr_sem);
  754. *pagep = page;
  755. handle = NULL;
  756. brelse(iloc.bh);
  757. return 1;
  758. out_release_page:
  759. up_read(&EXT4_I(inode)->xattr_sem);
  760. unlock_page(page);
  761. page_cache_release(page);
  762. out:
  763. if (handle)
  764. ext4_journal_stop(handle);
  765. brelse(iloc.bh);
  766. return ret;
  767. }
  768. int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
  769. unsigned len, unsigned copied,
  770. struct page *page)
  771. {
  772. int i_size_changed = 0;
  773. copied = ext4_write_inline_data_end(inode, pos, len, copied, page);
  774. /*
  775. * No need to use i_size_read() here, the i_size
  776. * cannot change under us because we hold i_mutex.
  777. *
  778. * But it's important to update i_size while still holding page lock:
  779. * page writeout could otherwise come in and zero beyond i_size.
  780. */
  781. if (pos+copied > inode->i_size) {
  782. i_size_write(inode, pos+copied);
  783. i_size_changed = 1;
  784. }
  785. unlock_page(page);
  786. page_cache_release(page);
  787. /*
  788. * Don't mark the inode dirty under page lock. First, it unnecessarily
  789. * makes the holding time of page lock longer. Second, it forces lock
  790. * ordering of page lock and transaction start for journaling
  791. * filesystems.
  792. */
  793. if (i_size_changed)
  794. mark_inode_dirty(inode);
  795. return copied;
  796. }
  797. #ifdef INLINE_DIR_DEBUG
  798. void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
  799. void *inline_start, int inline_size)
  800. {
  801. int offset;
  802. unsigned short de_len;
  803. struct ext4_dir_entry_2 *de = inline_start;
  804. void *dlimit = inline_start + inline_size;
  805. trace_printk("inode %lu\n", dir->i_ino);
  806. offset = 0;
  807. while ((void *)de < dlimit) {
  808. de_len = ext4_rec_len_from_disk(de->rec_len, inline_size);
  809. trace_printk("de: off %u rlen %u name %.*s nlen %u ino %u\n",
  810. offset, de_len, de->name_len, de->name,
  811. de->name_len, le32_to_cpu(de->inode));
  812. if (ext4_check_dir_entry(dir, NULL, de, bh,
  813. inline_start, inline_size, offset))
  814. BUG();
  815. offset += de_len;
  816. de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
  817. }
  818. }
  819. #else
  820. #define ext4_show_inline_dir(dir, bh, inline_start, inline_size)
  821. #endif
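/*
 * To get the dump above, define INLINE_DIR_DEBUG near the top of this
 * file, e.g.:
 *
 *	#define INLINE_DIR_DEBUG
 *
 * and read the output from the ftrace ring buffer, since the dumper is
 * built on trace_printk().
 */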
  822. /*
  823. * Add a new entry into an inline dir.
  824. * It will return -ENOSPC if no space is available, -EIO if a corrupted
  825. * entry is found, and -EEXIST if the directory entry already exists.
  826. */
  827. static int ext4_add_dirent_to_inline(handle_t *handle,
  828. struct dentry *dentry,
  829. struct inode *inode,
  830. struct ext4_iloc *iloc,
  831. void *inline_start, int inline_size)
  832. {
  833. struct inode *dir = dentry->d_parent->d_inode;
  834. const char *name = dentry->d_name.name;
  835. int namelen = dentry->d_name.len;
  836. unsigned short reclen;
  837. int err;
  838. struct ext4_dir_entry_2 *de;
  839. reclen = EXT4_DIR_REC_LEN(namelen);
  840. err = ext4_find_dest_de(dir, inode, iloc->bh,
  841. inline_start, inline_size,
  842. name, namelen, &de);
  843. if (err)
  844. return err;
  845. err = ext4_journal_get_write_access(handle, iloc->bh);
  846. if (err)
  847. return err;
  848. ext4_insert_dentry(inode, de, inline_size, name, namelen);
  849. ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
  850. /*
  851. * XXX shouldn't update any times until successful
  852. * completion of syscall, but too many callers depend
  853. * on this.
  854. *
  855. * XXX similarly, too many callers depend on
  856. * ext4_new_inode() setting the times, but error
  857. * recovery deletes the inode, so the worst that can
  858. * happen is that the times are slightly out of date
  859. * and/or different from the directory change time.
  860. */
  861. dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
  862. ext4_update_dx_flag(dir);
  863. dir->i_version++;
  864. ext4_mark_inode_dirty(handle, dir);
  865. return 1;
  866. }
  867. static void *ext4_get_inline_xattr_pos(struct inode *inode,
  868. struct ext4_iloc *iloc)
  869. {
  870. struct ext4_xattr_entry *entry;
  871. struct ext4_xattr_ibody_header *header;
  872. BUG_ON(!EXT4_I(inode)->i_inline_off);
  873. header = IHDR(inode, ext4_raw_inode(iloc));
  874. entry = (struct ext4_xattr_entry *)((void *)ext4_raw_inode(iloc) +
  875. EXT4_I(inode)->i_inline_off);
  876. return (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs);
  877. }
  878. /* Set the final de to cover the whole block. */
  879. static void ext4_update_final_de(void *de_buf, int old_size, int new_size)
  880. {
  881. struct ext4_dir_entry_2 *de, *prev_de;
  882. void *limit;
  883. int de_len;
  884. de = (struct ext4_dir_entry_2 *)de_buf;
  885. if (old_size) {
  886. limit = de_buf + old_size;
  887. do {
  888. prev_de = de;
  889. de_len = ext4_rec_len_from_disk(de->rec_len, old_size);
  890. de_buf += de_len;
  891. de = (struct ext4_dir_entry_2 *)de_buf;
  892. } while (de_buf < limit);
  893. prev_de->rec_len = ext4_rec_len_to_disk(de_len + new_size -
  894. old_size, new_size);
  895. } else {
  896. /* this is just created, so create an empty entry. */
  897. de->inode = 0;
  898. de->rec_len = ext4_rec_len_to_disk(new_size, new_size);
  899. }
  900. }
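/*
 * ext4_update_final_de() works by walking to the last dirent in the old
 * area and growing its rec_len so that it also covers the newly added
 * space; since a dirent's rec_len always runs to the end of the region it
 * owns, no other entry has to change.
 */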
  901. static int ext4_update_inline_dir(handle_t *handle, struct inode *dir,
  902. struct ext4_iloc *iloc)
  903. {
  904. int ret;
  905. int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
  906. int new_size = get_max_inline_xattr_value_size(dir, iloc);
  907. if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
  908. return -ENOSPC;
  909. ret = ext4_update_inline_data(handle, dir,
  910. new_size + EXT4_MIN_INLINE_DATA_SIZE);
  911. if (ret)
  912. return ret;
  913. ext4_update_final_de(ext4_get_inline_xattr_pos(dir, iloc), old_size,
  914. EXT4_I(dir)->i_inline_size -
  915. EXT4_MIN_INLINE_DATA_SIZE);
  916. dir->i_size = EXT4_I(dir)->i_disksize = EXT4_I(dir)->i_inline_size;
  917. return 0;
  918. }
  919. static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
  920. struct ext4_iloc *iloc,
  921. void *buf, int inline_size)
  922. {
  923. ext4_create_inline_data(handle, inode, inline_size);
  924. ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
  925. ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
  926. }
  927. static int ext4_finish_convert_inline_dir(handle_t *handle,
  928. struct inode *inode,
  929. struct buffer_head *dir_block,
  930. void *buf,
  931. int inline_size)
  932. {
  933. int err, csum_size = 0, header_size = 0;
  934. struct ext4_dir_entry_2 *de;
  935. struct ext4_dir_entry_tail *t;
  936. void *target = dir_block->b_data;
  937. /*
  938. * First create "." and ".." and then copy the dir information
  939. * back to the block.
  940. */
  941. de = (struct ext4_dir_entry_2 *)target;
  942. de = ext4_init_dot_dotdot(inode, de,
  943. inode->i_sb->s_blocksize, csum_size,
  944. le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode), 1);
  945. header_size = (void *)de - target;
  946. memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
  947. inline_size - EXT4_INLINE_DOTDOT_SIZE);
  948. if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
  949. EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
  950. csum_size = sizeof(struct ext4_dir_entry_tail);
  951. inode->i_size = inode->i_sb->s_blocksize;
  952. i_size_write(inode, inode->i_sb->s_blocksize);
  953. EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
  954. ext4_update_final_de(dir_block->b_data,
  955. inline_size - EXT4_INLINE_DOTDOT_SIZE + header_size,
  956. inode->i_sb->s_blocksize - csum_size);
  957. if (csum_size) {
  958. t = EXT4_DIRENT_TAIL(dir_block->b_data,
  959. inode->i_sb->s_blocksize);
  960. initialize_dirent_tail(t, inode->i_sb->s_blocksize);
  961. }
  962. set_buffer_uptodate(dir_block);
  963. err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
  964. if (err)
  965. goto out;
  966. set_buffer_verified(dir_block);
  967. out:
  968. return err;
  969. }
  970. static int ext4_convert_inline_data_nolock(handle_t *handle,
  971. struct inode *inode,
  972. struct ext4_iloc *iloc)
  973. {
  974. int error;
  975. void *buf = NULL;
  976. struct buffer_head *data_bh = NULL;
  977. struct ext4_map_blocks map;
  978. int inline_size;
  979. inline_size = ext4_get_inline_size(inode);
  980. buf = kmalloc(inline_size, GFP_NOFS);
  981. if (!buf) {
  982. error = -ENOMEM;
  983. goto out;
  984. }
  985. error = ext4_read_inline_data(inode, buf, inline_size, iloc);
  986. if (error < 0)
  987. goto out;
  988. error = ext4_destroy_inline_data_nolock(handle, inode);
  989. if (error)
  990. goto out;
  991. map.m_lblk = 0;
  992. map.m_len = 1;
  993. map.m_flags = 0;
  994. error = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
  995. if (error < 0)
  996. goto out_restore;
  997. if (!(map.m_flags & EXT4_MAP_MAPPED)) {
  998. error = -EIO;
  999. goto out_restore;
  1000. }
  1001. data_bh = sb_getblk(inode->i_sb, map.m_pblk);
  1002. if (!data_bh) {
  1003. error = -EIO;
  1004. goto out_restore;
  1005. }
  1006. lock_buffer(data_bh);
  1007. error = ext4_journal_get_create_access(handle, data_bh);
  1008. if (error) {
  1009. unlock_buffer(data_bh);
  1010. error = -EIO;
  1011. goto out_restore;
  1012. }
  1013. memset(data_bh->b_data, 0, inode->i_sb->s_blocksize);
  1014. if (!S_ISDIR(inode->i_mode)) {
  1015. memcpy(data_bh->b_data, buf, inline_size);
  1016. set_buffer_uptodate(data_bh);
  1017. error = ext4_handle_dirty_metadata(handle,
  1018. inode, data_bh);
  1019. } else {
  1020. error = ext4_finish_convert_inline_dir(handle, inode, data_bh,
  1021. buf, inline_size);
  1022. }
  1023. unlock_buffer(data_bh);
  1024. out_restore:
  1025. if (error)
  1026. ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
  1027. out:
  1028. brelse(data_bh);
  1029. kfree(buf);
  1030. return error;
  1031. }
  1032. /*
  1033. * Try to add the new entry to the inline data.
  1034. * If it succeeds, return 0. If not, try to extend the inline dir; once
  1035. * even that fails, convert it and copy the data to a newly created block.
  1036. */
  1037. int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
  1038. struct inode *inode)
  1039. {
  1040. int ret, inline_size;
  1041. void *inline_start;
  1042. struct ext4_iloc iloc;
  1043. struct inode *dir = dentry->d_parent->d_inode;
  1044. ret = ext4_get_inode_loc(dir, &iloc);
  1045. if (ret)
  1046. return ret;
  1047. down_write(&EXT4_I(dir)->xattr_sem);
  1048. if (!ext4_has_inline_data(dir))
  1049. goto out;
  1050. inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
  1051. EXT4_INLINE_DOTDOT_SIZE;
  1052. inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
  1053. ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
  1054. inline_start, inline_size);
  1055. if (ret != -ENOSPC)
  1056. goto out;
  1057. /* check whether it can be inserted into the inline xattr space. */
  1058. inline_size = EXT4_I(dir)->i_inline_size -
  1059. EXT4_MIN_INLINE_DATA_SIZE;
  1060. if (!inline_size) {
  1061. /* Try to use the xattr space.*/
  1062. ret = ext4_update_inline_dir(handle, dir, &iloc);
  1063. if (ret && ret != -ENOSPC)
  1064. goto out;
  1065. inline_size = EXT4_I(dir)->i_inline_size -
  1066. EXT4_MIN_INLINE_DATA_SIZE;
  1067. }
  1068. if (inline_size) {
  1069. inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
  1070. ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
  1071. inline_start, inline_size);
  1072. if (ret != -ENOSPC)
  1073. goto out;
  1074. }
  1075. /*
  1076. * The inline space is filled up, so create a new block for it.
  1077. * As the extent tree will be created, we have to save the inline
  1078. * dir first.
  1079. */
  1080. ret = ext4_convert_inline_data_nolock(handle, dir, &iloc);
  1081. out:
  1082. ext4_mark_inode_dirty(handle, dir);
  1083. up_write(&EXT4_I(dir)->xattr_sem);
  1084. brelse(iloc.bh);
  1085. return ret;
  1086. }
  1087. int ext4_read_inline_dir(struct file *filp,
  1088. void *dirent, filldir_t filldir,
  1089. int *has_inline_data)
  1090. {
  1091. int error = 0;
  1092. unsigned int offset, parent_ino;
  1093. int i, stored;
  1094. struct ext4_dir_entry_2 *de;
  1095. struct super_block *sb;
  1096. struct inode *inode = filp->f_path.dentry->d_inode;
  1097. int ret, inline_size = 0;
  1098. struct ext4_iloc iloc;
  1099. void *dir_buf = NULL;
  1100. ret = ext4_get_inode_loc(inode, &iloc);
  1101. if (ret)
  1102. return ret;
  1103. down_read(&EXT4_I(inode)->xattr_sem);
  1104. if (!ext4_has_inline_data(inode)) {
  1105. up_read(&EXT4_I(inode)->xattr_sem);
  1106. *has_inline_data = 0;
  1107. goto out;
  1108. }
  1109. inline_size = ext4_get_inline_size(inode);
  1110. dir_buf = kmalloc(inline_size, GFP_NOFS);
  1111. if (!dir_buf) {
  1112. ret = -ENOMEM;
  1113. up_read(&EXT4_I(inode)->xattr_sem);
  1114. goto out;
  1115. }
  1116. ret = ext4_read_inline_data(inode, dir_buf, inline_size, &iloc);
  1117. up_read(&EXT4_I(inode)->xattr_sem);
  1118. if (ret < 0)
  1119. goto out;
  1120. sb = inode->i_sb;
  1121. stored = 0;
  1122. parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode);
  1123. while (!error && !stored && filp->f_pos < inode->i_size) {
  1124. revalidate:
  1125. /*
  1126. * If the version has changed since the last call to
  1127. * readdir(2), then we might be pointing to an invalid
  1128. * dirent right now. Scan from the start of the inline
  1129. * dir to make sure.
  1130. */
  1131. if (filp->f_version != inode->i_version) {
  1132. for (i = 0;
  1133. i < inode->i_size && i < offset;) {
  1134. if (!i) {
  1135. /* skip "." and ".." if needed. */
  1136. i += EXT4_INLINE_DOTDOT_SIZE;
  1137. continue;
  1138. }
  1139. de = (struct ext4_dir_entry_2 *)
  1140. (dir_buf + i);
  1141. /* It's too expensive to do a full
  1142. * dirent test each time round this
  1143. * loop, but we do have to test at
  1144. * least that it is non-zero. A
  1145. * failure will be detected in the
  1146. * dirent test below. */
  1147. if (ext4_rec_len_from_disk(de->rec_len,
  1148. inline_size) < EXT4_DIR_REC_LEN(1))
  1149. break;
  1150. i += ext4_rec_len_from_disk(de->rec_len,
  1151. inline_size);
  1152. }
  1153. offset = i;
  1154. filp->f_pos = offset;
  1155. filp->f_version = inode->i_version;
  1156. }
  1157. while (!error && filp->f_pos < inode->i_size) {
  1158. if (filp->f_pos == 0) {
  1159. error = filldir(dirent, ".", 1, 0, inode->i_ino,
  1160. DT_DIR);
  1161. if (error)
  1162. break;
  1163. stored++;
  1164. error = filldir(dirent, "..", 2, 0, parent_ino,
  1165. DT_DIR);
  1166. if (error)
  1167. break;
  1168. stored++;
  1169. filp->f_pos = offset = EXT4_INLINE_DOTDOT_SIZE;
  1170. continue;
  1171. }
  1172. de = (struct ext4_dir_entry_2 *)(dir_buf + offset);
  1173. if (ext4_check_dir_entry(inode, filp, de,
  1174. iloc.bh, dir_buf,
  1175. inline_size, offset)) {
  1176. ret = stored;
  1177. goto out;
  1178. }
  1179. offset += ext4_rec_len_from_disk(de->rec_len,
  1180. inline_size);
  1181. if (le32_to_cpu(de->inode)) {
  1182. /* We might block in the next section
  1183. * if the data destination is
  1184. * currently swapped out. So, use a
  1185. * version stamp to detect whether or
  1186. * not the directory has been modified
  1187. * during the copy operation.
  1188. */
  1189. u64 version = filp->f_version;
  1190. error = filldir(dirent, de->name,
  1191. de->name_len,
  1192. filp->f_pos,
  1193. le32_to_cpu(de->inode),
  1194. get_dtype(sb, de->file_type));
  1195. if (error)
  1196. break;
  1197. if (version != filp->f_version)
  1198. goto revalidate;
  1199. stored++;
  1200. }
  1201. filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
  1202. inline_size);
  1203. }
  1204. offset = 0;
  1205. }
  1206. out:
  1207. kfree(dir_buf);
  1208. brelse(iloc.bh);
  1209. return ret;
  1210. }
  1211. /*
  1212. * Try to create the inline data for the new dir.
  1213. * If it succeeds, return 0, otherwise return the error.
  1214. * In case of ENOSPC, the caller should create the normal disk layout dir.
  1215. */
  1216. int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent,
  1217. struct inode *inode)
  1218. {
  1219. int ret, inline_size = EXT4_MIN_INLINE_DATA_SIZE;
  1220. struct ext4_iloc iloc;
  1221. struct ext4_dir_entry_2 *de;
  1222. ret = ext4_get_inode_loc(inode, &iloc);
  1223. if (ret)
  1224. return ret;
  1225. ret = ext4_prepare_inline_data(handle, inode, inline_size);
  1226. if (ret)
  1227. goto out;
  1228. /*
  1229. * For an inline dir, we save only the inode number of ".."
  1230. * and create a fake dentry to cover the remaining space.
  1231. */
  1232. de = (struct ext4_dir_entry_2 *)ext4_raw_inode(&iloc)->i_block;
  1233. de->inode = cpu_to_le32(parent->i_ino);
  1234. de = (struct ext4_dir_entry_2 *)((void *)de + EXT4_INLINE_DOTDOT_SIZE);
  1235. de->inode = 0;
  1236. de->rec_len = ext4_rec_len_to_disk(
  1237. inline_size - EXT4_INLINE_DOTDOT_SIZE,
  1238. inline_size);
  1239. set_nlink(inode, 2);
  1240. inode->i_size = EXT4_I(inode)->i_disksize = inline_size;
  1241. out:
  1242. brelse(iloc.bh);
  1243. return ret;
  1244. }
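/*
 * Resulting layout of an inline directory (sketch, for the plain
 * 60-byte i_block case):
 *
 *	i_block bytes 0..3   : parent inode number (implicit "..")
 *	i_block bytes 4..59  : regular ext4_dir_entry_2 records
 *	"system.data" value  : more ext4_dir_entry_2 records, if the
 *	                       directory later outgrows i_block
 *
 * "." and ".." are never stored as real dirents; ext4_read_inline_dir()
 * above fakes them when listing the directory.
 */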
  1245. struct buffer_head *ext4_find_inline_entry(struct inode *dir,
  1246. const struct qstr *d_name,
  1247. struct ext4_dir_entry_2 **res_dir,
  1248. int *has_inline_data)
  1249. {
  1250. int ret;
  1251. struct ext4_iloc iloc;
  1252. void *inline_start;
  1253. int inline_size;
  1254. if (ext4_get_inode_loc(dir, &iloc))
  1255. return NULL;
  1256. down_read(&EXT4_I(dir)->xattr_sem);
  1257. if (!ext4_has_inline_data(dir)) {
  1258. *has_inline_data = 0;
  1259. goto out;
  1260. }
  1261. inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
  1262. EXT4_INLINE_DOTDOT_SIZE;
  1263. inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
  1264. ret = search_dir(iloc.bh, inline_start, inline_size,
  1265. dir, d_name, 0, res_dir);
  1266. if (ret == 1)
  1267. goto out_find;
  1268. if (ret < 0)
  1269. goto out;
  1270. if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
  1271. goto out;
  1272. inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
  1273. inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
  1274. ret = search_dir(iloc.bh, inline_start, inline_size,
  1275. dir, d_name, 0, res_dir);
  1276. if (ret == 1)
  1277. goto out_find;
  1278. out:
  1279. brelse(iloc.bh);
  1280. iloc.bh = NULL;
  1281. out_find:
  1282. up_read(&EXT4_I(dir)->xattr_sem);
  1283. return iloc.bh;
  1284. }
  1285. int ext4_delete_inline_entry(handle_t *handle,
  1286. struct inode *dir,
  1287. struct ext4_dir_entry_2 *de_del,
  1288. struct buffer_head *bh,
  1289. int *has_inline_data)
  1290. {
  1291. int err, inline_size;
  1292. struct ext4_iloc iloc;
  1293. void *inline_start;
  1294. err = ext4_get_inode_loc(dir, &iloc);
  1295. if (err)
  1296. return err;
  1297. down_write(&EXT4_I(dir)->xattr_sem);
  1298. if (!ext4_has_inline_data(dir)) {
  1299. *has_inline_data = 0;
  1300. goto out;
  1301. }
  1302. if ((void *)de_del - ((void *)ext4_raw_inode(&iloc)->i_block) <
  1303. EXT4_MIN_INLINE_DATA_SIZE) {
  1304. inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
  1305. EXT4_INLINE_DOTDOT_SIZE;
  1306. inline_size = EXT4_MIN_INLINE_DATA_SIZE -
  1307. EXT4_INLINE_DOTDOT_SIZE;
  1308. } else {
  1309. inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
  1310. inline_size = ext4_get_inline_size(dir) -
  1311. EXT4_MIN_INLINE_DATA_SIZE;
  1312. }
  1313. err = ext4_journal_get_write_access(handle, bh);
  1314. if (err)
  1315. goto out;
  1316. err = ext4_generic_delete_entry(handle, dir, de_del, bh,
  1317. inline_start, inline_size, 0);
  1318. if (err)
  1319. goto out;
  1320. BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
  1321. err = ext4_mark_inode_dirty(handle, dir);
  1322. if (unlikely(err))
  1323. goto out;
  1324. ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
  1325. out:
  1326. up_write(&EXT4_I(dir)->xattr_sem);
  1327. brelse(iloc.bh);
  1328. if (err != -ENOENT)
  1329. ext4_std_error(dir->i_sb, err);
  1330. return err;
  1331. }
  1332. int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
  1333. {
  1334. int ret;
  1335. down_write(&EXT4_I(inode)->xattr_sem);
  1336. ret = ext4_destroy_inline_data_nolock(handle, inode);
  1337. up_write(&EXT4_I(inode)->xattr_sem);
  1338. return ret;
  1339. }