unlinked.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "inode.h"
#include "meta_io.h"
#include "trans.h"
#include "unlinked.h"
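
/*
 * munge_ondisk - write a tag into its slot in the on-disk unlinked tag file
 *
 * Each block of the tag file holds sd_ut_per_block tags after the meta
 * header, so slot / sd_ut_per_block selects the block and
 * slot % sd_ut_per_block the offset within it.  The write is added to the
 * current transaction under sd_unlinked_mutex.
 */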
static int munge_ondisk(struct gfs2_sbd *sdp, unsigned int slot,
			struct gfs2_unlinked_tag *ut)
{
	struct gfs2_inode *ip = get_v2ip(sdp->sd_ut_inode);
	unsigned int block, offset;
	uint64_t dblock;
	int new = 0;
	struct buffer_head *bh;
	int error;

	block = slot / sdp->sd_ut_per_block;
	offset = slot % sdp->sd_ut_per_block;

	error = gfs2_block_map(ip, block, &new, &dblock, NULL);
	if (error)
		return error;
	error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
	if (error)
		return error;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
		error = -EIO;
		goto out;
	}

	down(&sdp->sd_unlinked_mutex);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_unlinked_tag_out(ut, bh->b_data +
			      sizeof(struct gfs2_meta_header) +
			      offset * sizeof(struct gfs2_unlinked_tag));
	up(&sdp->sd_unlinked_mutex);

out:
	brelse(bh);
	return error;
}
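
/*
 * ul_hash - add an unlinked structure to the in-core list and take an
 * extra reference on it
 */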
static void ul_hash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	spin_lock(&sdp->sd_unlinked_spin);
	list_add(&ul->ul_list, &sdp->sd_unlinked_list);
	gfs2_assert(sdp, ul->ul_count);
	ul->ul_count++;
	atomic_inc(&sdp->sd_unlinked_count);
	spin_unlock(&sdp->sd_unlinked_spin);
}
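
/*
 * ul_unhash - remove an unlinked structure from the in-core list and drop
 * the list's reference
 */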
static void ul_unhash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	spin_lock(&sdp->sd_unlinked_spin);
	list_del_init(&ul->ul_list);
	gfs2_assert(sdp, ul->ul_count > 1);
	ul->ul_count--;
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_unlinked_count) > 0);
	atomic_dec(&sdp->sd_unlinked_count);
	spin_unlock(&sdp->sd_unlinked_spin);
}
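
/*
 * ul_fish - grab the first unlocked entry on the unlinked list, lock it,
 * take a reference, and rotate it to the tail of the list.  Returns NULL
 * if the filesystem is read-only or no unlocked entry is available.
 */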
static struct gfs2_unlinked *ul_fish(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_unlinked *ul;
	int found = 0;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return NULL;

	spin_lock(&sdp->sd_unlinked_spin);

	head = &sdp->sd_unlinked_list;

	list_for_each_entry(ul, head, ul_list) {
		if (test_bit(ULF_LOCKED, &ul->ul_flags))
			continue;

		list_move_tail(&ul->ul_list, head);
		ul->ul_count++;
		set_bit(ULF_LOCKED, &ul->ul_flags);
		found = 1;

		break;
	}

	if (!found)
		ul = NULL;

	spin_unlock(&sdp->sd_unlinked_spin);

	return ul;
}

/**
 * enforce_limit - limit the number of inodes waiting to be deallocated
 * @sdp: the filesystem
 */
static void enforce_limit(struct gfs2_sbd *sdp)
{
	unsigned int tries = 0, min = 0;
	int error;

	if (atomic_read(&sdp->sd_unlinked_count) >=
	    gfs2_tune_get(sdp, gt_ilimit)) {
		tries = gfs2_tune_get(sdp, gt_ilimit_tries);
		min = gfs2_tune_get(sdp, gt_ilimit_min);
	}

	while (tries--) {
		struct gfs2_unlinked *ul = ul_fish(sdp);
		if (!ul)
			break;

		error = gfs2_inode_dealloc(sdp, ul);
		gfs2_unlinked_put(sdp, ul);

		if (!error) {
			if (!--min)
				break;
		} else if (error != 1)
			break;
	}
}
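
/*
 * ul_alloc - allocate a new in-core unlinked structure with one reference,
 * already locked on behalf of the caller
 */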
static struct gfs2_unlinked *ul_alloc(struct gfs2_sbd *sdp)
{
	struct gfs2_unlinked *ul;

	ul = kzalloc(sizeof(struct gfs2_unlinked), GFP_KERNEL);
	if (ul) {
		INIT_LIST_HEAD(&ul->ul_list);
		ul->ul_count = 1;
		set_bit(ULF_LOCKED, &ul->ul_flags);
	}

	return ul;
}
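
/**
 * gfs2_unlinked_get - allocate a slot in the unlinked tag file
 * @sdp: the filesystem
 * @ul: used to return the new unlinked structure
 *
 * Scans the slot bitmap for a clear bit; chunk c, byte o, bit b maps to
 * slot c * 8 * PAGE_SIZE + o * 8 + b.
 *
 * Returns: errno
 */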
int gfs2_unlinked_get(struct gfs2_sbd *sdp, struct gfs2_unlinked **ul)
{
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	enforce_limit(sdp);

	*ul = ul_alloc(sdp);
	if (!*ul)
		return -ENOMEM;

	spin_lock(&sdp->sd_unlinked_spin);

	for (c = 0; c < sdp->sd_unlinked_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_unlinked_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	(*ul)->ul_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if ((*ul)->ul_slot >= sdp->sd_unlinked_slots)
		goto fail;

	sdp->sd_unlinked_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_unlinked_spin);

	return 0;

fail:
	spin_unlock(&sdp->sd_unlinked_spin);
	kfree(*ul);
	return -ENOSPC;
}
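
/**
 * gfs2_unlinked_put - drop a reference on an unlinked structure
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Clears the lock bit; when the last reference is dropped, the bitmap
 * slot is released and the structure is freed.
 */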
void gfs2_unlinked_put(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	gfs2_assert_warn(sdp, test_and_clear_bit(ULF_LOCKED, &ul->ul_flags));

	spin_lock(&sdp->sd_unlinked_spin);
	gfs2_assert(sdp, ul->ul_count);
	ul->ul_count--;
	if (!ul->ul_count) {
		gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, ul->ul_slot, 0);
		spin_unlock(&sdp->sd_unlinked_spin);
		kfree(ul);
	} else
		spin_unlock(&sdp->sd_unlinked_spin);
}
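
/**
 * gfs2_unlinked_ondisk_add - write a new tag to the unlinked tag file
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Writes the tag to its on-disk slot and, on success, adds the structure
 * to the in-core list.
 *
 * Returns: errno
 */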
int gfs2_unlinked_ondisk_add(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, list_empty(&ul->ul_list));

	error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
	if (!error)
		ul_hash(sdp, ul);

	return error;
}
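
/**
 * gfs2_unlinked_ondisk_munge - update an existing tag in the unlinked tag file
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Returns: errno
 */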
int gfs2_unlinked_ondisk_munge(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

	error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);

	return error;
}
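
/**
 * gfs2_unlinked_ondisk_rm - clear a tag in the unlinked tag file
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Zeroes the on-disk slot and removes the structure from the in-core list.
 *
 * Returns: errno
 */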
int gfs2_unlinked_ondisk_rm(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	struct gfs2_unlinked_tag ut;
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

	memset(&ut, 0, sizeof(struct gfs2_unlinked_tag));

	error = munge_ondisk(sdp, ul->ul_slot, &ut);
	if (error)
		return error;

	ul_unhash(sdp, ul);

	return 0;
}

/**
 * gfs2_unlinked_dealloc - Go through the list of inodes to be deallocated
 * @sdp: the filesystem
 *
 * Returns: errno
 */
int gfs2_unlinked_dealloc(struct gfs2_sbd *sdp)
{
	unsigned int hits, strikes;
	int error;

	for (;;) {
		hits = 0;
		strikes = 0;

		for (;;) {
			struct gfs2_unlinked *ul = ul_fish(sdp);
			if (!ul)
				return 0;

			error = gfs2_inode_dealloc(sdp, ul);
			gfs2_unlinked_put(sdp, ul);

			if (!error) {
				hits++;
				if (strikes)
					strikes--;
			} else if (error == 1) {
				strikes++;
				if (strikes >=
				    atomic_read(&sdp->sd_unlinked_count)) {
					error = 0;
					break;
				}
			} else
				return error;
		}

		if (!hits || kthread_should_stop())
			break;

		cond_resched();
	}

	return 0;
}
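
/**
 * gfs2_unlinked_init - read in the unlinked tag file at mount time
 * @sdp: the filesystem
 *
 * Sizes the slot bitmap from the tag file, then reads each block of the
 * file and builds an in-core entry for every non-zero tag found.
 *
 * Returns: errno
 */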
int gfs2_unlinked_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = get_v2ip(sdp->sd_ut_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	uint64_t dblock;
	uint32_t extlen = 0;
	int error;

	if (!ip->i_di.di_size ||
	    ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_unlinked_slots = blocks * sdp->sd_ut_per_block;
	sdp->sd_unlinked_chunks = DIV_RU(sdp->sd_unlinked_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_unlinked_bitmap = kcalloc(sdp->sd_unlinked_chunks,
					  sizeof(unsigned char *),
					  GFP_KERNEL);
	if (!sdp->sd_unlinked_bitmap)
		return error;

	for (x = 0; x < sdp->sd_unlinked_chunks; x++) {
		sdp->sd_unlinked_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_unlinked_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_block_map(ip, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		gfs2_meta_ra(ip->i_gl, dblock, extlen);
		error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
				       &bh);
		if (error)
			goto fail;
		error = -EIO;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0;
		     y < sdp->sd_ut_per_block && slot < sdp->sd_unlinked_slots;
		     y++, slot++) {
			struct gfs2_unlinked_tag ut;
			struct gfs2_unlinked *ul;

			gfs2_unlinked_tag_in(&ut, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_unlinked_tag));
			if (!ut.ut_inum.no_addr)
				continue;

			error = -ENOMEM;
			ul = ul_alloc(sdp);
			if (!ul) {
				brelse(bh);
				goto fail;
			}
			ul->ul_ut = ut;
			ul->ul_slot = slot;

			spin_lock(&sdp->sd_unlinked_spin);
			gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, slot, 1);
			spin_unlock(&sdp->sd_unlinked_spin);
			ul_hash(sdp, ul);

			gfs2_unlinked_put(sdp, ul);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u unlinked inodes\n", found);

	return 0;

fail:
	gfs2_unlinked_cleanup(sdp);
	return error;
}

/**
 * gfs2_unlinked_cleanup - get rid of any extra struct gfs2_unlinked structures
 * @sdp: the filesystem
 *
 */
void gfs2_unlinked_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_unlinked_list;
	struct gfs2_unlinked *ul;
	unsigned int x;

	spin_lock(&sdp->sd_unlinked_spin);
	while (!list_empty(head)) {
		ul = list_entry(head->next, struct gfs2_unlinked, ul_list);

		if (ul->ul_count > 1) {
			list_move_tail(&ul->ul_list, head);
			spin_unlock(&sdp->sd_unlinked_spin);
			schedule();
			spin_lock(&sdp->sd_unlinked_spin);
			continue;
		}

		list_del_init(&ul->ul_list);
		atomic_dec(&sdp->sd_unlinked_count);

		gfs2_assert_warn(sdp, ul->ul_count == 1);
		gfs2_assert_warn(sdp, !test_bit(ULF_LOCKED, &ul->ul_flags));
		kfree(ul);
	}
	spin_unlock(&sdp->sd_unlinked_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_unlinked_count));

	if (sdp->sd_unlinked_bitmap) {
		for (x = 0; x < sdp->sd_unlinked_chunks; x++)
			kfree(sdp->sd_unlinked_bitmap[x]);
		kfree(sdp->sd_unlinked_bitmap);
	}
}