delayed-inode.c

/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
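
/*
 * Tunables for the delayed-item machinery: once the number of pending
 * delayed items passes BTRFS_DELAYED_BACKGROUND, a background flush is
 * kicked off; at BTRFS_DELAYED_WRITEBACK the producing task itself blocks
 * and waits; wakeups are batched every BTRFS_DELAYED_BATCH completions.
 */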
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}
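
/*
 * Look up the delayed node for an inode. The fast path reads the pointer
 * cached in the btrfs inode without taking any lock; otherwise the node is
 * looked up in the per-root radix tree under root->inode_lock. A reference
 * is taken on the returned node, plus a second one if the pointer is
 * freshly cached in the inode.
 */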
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
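
/*
 * Drop a reference on a delayed node. While still holding the node's mutex
 * the node is requeued (or dequeued once it is empty); when the last
 * reference goes away, the node is removed from the radix tree and freed,
 * with root->inode_lock serializing against concurrent lookups.
 */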
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}
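
/*
 * Link a delayed item into either the insertion or the deletion rb-tree of
 * its delayed node, ordered by btrfs key. Returns -EEXIST if an item with
 * the same key is already queued. For dir index insertions, index_cnt is
 * bumped past the new offset so that later index allocations stay unique.
 */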
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
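
/*
 * Account one finished delayed item and wake up waiters, either when the
 * backlog drops below BTRFS_DELAYED_BACKGROUND or on every
 * BTRFS_DELAYED_BATCH-th completion, so that sleepers in
 * btrfs_balance_delayed_items() are not woken for every single item.
 */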
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
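
/*
 * Reserve metadata space for a delayed item by migrating one unit of
 * transaction metadata reservation from the transaction's block_rsv into
 * the global delayed_block_rsv; it is released again once the item is
 * written into the tree.
 */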
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * Ok, we didn't have space pre-reserved. This shouldn't happen
		 * too often, but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data. First try to be nice and
		 * reserve something strictly for us. If not, be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok, this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv. This is to simplify people who don't normally have things
	 * migrated from their block rsv. If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size. But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up. So to take care of this, release the space for the meta
	 * reservation here. I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in a batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
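
/*
 * Delete a run of contiguous dir index items with one btrfs_del_items()
 * call: starting from the slot the caller found, keep going while the
 * queued deletion items match consecutive keys in the leaf.
 */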
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in a batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}
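
/*
 * Copy the in-memory inode_item cached in the delayed node back into the
 * on-disk inode item. Callers hold node->mutex; the
 * btrfs_update_delayed_inode() wrapper below also checks inode_dirty first.
 */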
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return 0;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty)
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}
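
/*
 * Background flushing: btrfs_wq_run_delayed_node() queues a
 * btrfs_async_delayed_work on the delayed_workers workqueue, and the worker
 * below pulls prepared delayed nodes and commits their items until
 * async_work->nr nodes are done (nr == 0 means keep going while the backlog
 * stays above BTRFS_DELAYED_BACKGROUND / 2).
 */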
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race goes like this:
	 *	Task1				Worker thread
	 *					count == 0, needn't requeue
	 *					  also needn't insert the
	 *					  delayed node into prepare
	 *					  list again.
	 *	add lots of delayed items
	 *	queue the delayed node
	 *	  already in the list,
	 *	  and not in the prepare
	 *	  list, it means the delayed
	 *	  node is being dealt with
	 *	  by the worker.
	 *	do delayed items balance
	 *	  the delayed node is being
	 *	  dealt with by the worker
	 *	  now, just wait.
	 *					the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	async_work->work.func = btrfs_async_run_delayed_root;
	async_work->work.flags = 0;
	async_work->nr = nr;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

static int refs_newer(struct btrfs_delayed_root *delayed_root,
		      int seq, int count)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + count)
		return 1;
	return 0;
}
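
/*
 * Throttle delayed-item producers. Below BTRFS_DELAYED_BACKGROUND items
 * nothing happens; above it an async flush is queued; once the backlog
 * reaches BTRFS_DELAYED_WRITEBACK the caller blocks until either
 * BTRFS_DELAYED_BATCH items have completed or the backlog drops back under
 * the background threshold.
 */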
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	int seq;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	seq = atomic_read(&delayed_root->items_seq);

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		DEFINE_WAIT(__wait);

		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
		if (ret)
			return;

		while (1) {
			prepare_to_wait(&delayed_root->wait, &__wait,
					TASK_INTERRUPTIBLE);

			if (refs_newer(delayed_root, seq,
				       BTRFS_DELAYED_BATCH) ||
			    atomic_read(&delayed_root->items) <
			    BTRFS_DELAYED_BACKGROUND) {
				break;
			}
			if (!signal_pending(current))
				schedule();
			else
				break;
		}
		finish_wait(&delayed_root->wait, &__wait);
	}

	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we reserved enough space when we started the transaction,
	 * so a metadata reservation failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "error adding delayed dir index item "
				"(name: %s) into the insertion tree of the "
				"delayed node (root id: %llu, inode id: %llu, "
				"errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
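
/*
 * If the dir index being deleted is still pending as a delayed insertion,
 * the two cancel out: drop the queued insertion item and return 0 so no
 * deletion item needs to be queued; return 1 when nothing was pending.
 */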
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we reserved enough space when we started the transaction,
	 * so a metadata reservation failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "error adding delayed dir index item "
				"(index: %llu) into the deletion tree of the "
				"delayed node (root id: %llu, inode id: %llu, "
				"errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
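
/*
 * readdir support: snapshot the pending insertion and deletion items onto
 * the caller's lists, taking a reference on each item. The lists are later
 * consumed by btrfs_should_delete_dir_index() and
 * btrfs_readdir_delayed_dir_index(), and freed via btrfs_put_delayed_items().
 */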
  1338. void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
  1339. struct list_head *del_list)
  1340. {
  1341. struct btrfs_delayed_node *delayed_node;
  1342. struct btrfs_delayed_item *item;
  1343. delayed_node = btrfs_get_delayed_node(inode);
  1344. if (!delayed_node)
  1345. return;
  1346. mutex_lock(&delayed_node->mutex);
  1347. item = __btrfs_first_delayed_insertion_item(delayed_node);
  1348. while (item) {
  1349. atomic_inc(&item->refs);
  1350. list_add_tail(&item->readdir_list, ins_list);
  1351. item = __btrfs_next_delayed_item(item);
  1352. }
  1353. item = __btrfs_first_delayed_deletion_item(delayed_node);
  1354. while (item) {
  1355. atomic_inc(&item->refs);
  1356. list_add_tail(&item->readdir_list, del_list);
  1357. item = __btrfs_next_delayed_item(item);
  1358. }
  1359. mutex_unlock(&delayed_node->mutex);
  1360. /*
  1361. * This delayed node is still cached in the btrfs inode, so refs
  1362. * must be > 1 now, and we needn't check it is going to be freed
  1363. * or not.
  1364. *
  1365. * Besides that, this function is used to read dir, we do not
  1366. * insert/delete delayed items in this period. So we also needn't
  1367. * requeue or dequeue this delayed node.
  1368. */
  1369. atomic_dec(&delayed_node->refs);
  1370. }
  1371. void btrfs_put_delayed_items(struct list_head *ins_list,
  1372. struct list_head *del_list)
  1373. {
  1374. struct btrfs_delayed_item *curr, *next;
  1375. list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
  1376. list_del(&curr->readdir_list);
  1377. if (atomic_dec_and_test(&curr->refs))
  1378. kfree(curr);
  1379. }
  1380. list_for_each_entry_safe(curr, next, del_list, readdir_list) {
  1381. list_del(&curr->readdir_list);
  1382. if (atomic_dec_and_test(&curr->refs))
  1383. kfree(curr);
  1384. }
  1385. }
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
	}
	return 0;
}
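
/*
 * A minimal sketch (illustrative only, not code from this file) of how
 * the readdir helpers above are meant to be combined; the in-tree caller
 * at the time of this code is btrfs_real_readdir() in inode.c:
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	btrfs_get_delayed_items(inode, &ins_list, &del_list);
 *
 *	(then, while walking the on-disk dir index items:)
 *	if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
 *		continue;	(the index was deleted in the delayed tree)
 *
 *	(after the on-disk items, emit the not-yet-committed ones:)
 *	btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 *
 *	btrfs_put_delayed_items(&ins_list, &del_list);
 */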
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * The data of a delayed item is never changed after the item is
	 * created, so we needn't lock it. And since we hold the i_mutex
	 * of the directory, nobody can delete any directory index now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}
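
/*
 * Copy the attributes of the VFS inode into a stack (unmapped) btrfs
 * inode item, which is kept in the delayed node and eventually copied
 * into the inode item in the fs tree.
 */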
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}
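
/*
 * Fill the VFS inode from the inode item cached in the delayed node, if
 * that copy is newer than the one on disk (inode_dirty is set). Returns
 * -ENOENT when there is no delayed node or no dirty inode item, in which
 * case the caller must read the inode item from the fs tree instead.
 */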
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
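
/*
 * Record an inode update in the delayed node instead of updating the
 * inode item in the fs tree directly. Metadata space is reserved only
 * the first time the node's inode item becomes dirty; later updates
 * just refresh the cached copy.
 */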
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
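
/*
 * Throw away all pending work of a delayed node: release the metadata
 * reservation and the reference of every queued insertion and deletion
 * item, and of the dirty inode item if there is one. Used on cleanup
 * paths where the pending items are no longer wanted.
 */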
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}
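
/*
 * Throw away all pending delayed items of one inode, e.g. when the inode
 * itself is being deleted and the queued updates have become pointless.
 */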
void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
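
/*
 * Throw away the delayed items of every inode of a root. The radix tree
 * is scanned in batches of up to 8 nodes; each batch is pinned with an
 * extra reference under inode_lock and then killed and released outside
 * the lock, resuming the scan after the last inode id seen.
 */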
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
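
/*
 * Throw away the delayed items of every delayed node that is still
 * queued on the delayed root, walking the node list front to back.
 */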
void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}