/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
        return NILFS_MDT(sufile)->mi_entries_per_block;
}

/* Block offset within the sufile of the block holding @segnum's usage entry */
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
}

/* Index of @segnum's usage entry within its block (remainder of do_div) */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
                                     __u64 max)
{
        return min_t(unsigned long,
                     nilfs_sufile_segment_usages_per_block(sufile) -
                     nilfs_sufile_get_offset(sufile, curr),
                     max - curr + 1);
}
static inline struct nilfs_sufile_header *
nilfs_sufile_block_get_header(const struct inode *sufile,
                              struct buffer_head *bh,
                              void *kaddr)
{
        return kaddr + bh_offset(bh);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
                                     struct buffer_head *bh, void *kaddr)
{
        return kaddr + bh_offset(bh) +
                nilfs_sufile_get_offset(sufile, segnum) *
                NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                     int create, struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile,
                                   nilfs_sufile_get_blkoff(sufile, segnum),
                                   create, NULL, bhp);
}
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
{
        struct nilfs_sufile_header *header;
        void *kaddr;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(header_bh);
}
/**
 * nilfs_sufile_update - apply a modifier function to a segment usage entry
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @create: create flag used when getting the segment usage block
 * @dofunc: modifier applied to the header buffer and the segment usage buffer
 *
 * Description: nilfs_sufile_update() looks up the header block and the block
 * holding the usage entry of @segnum under the writer side of mi_sem, and
 * calls @dofunc with both buffers.
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
                        void (*dofunc)(struct inode *, __u64,
                                       struct buffer_head *,
                                       struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        int ret;

        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
                       __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
        if (!ret) {
                dofunc(sufile, segnum, header_bh, bh);
                brelse(bh);
        }
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
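/*
 * Illustrative sketch, not part of the original file: the nilfs_sufile_do_*
 * helpers further below are meant to be driven through nilfs_sufile_update().
 * A caller-facing wrapper is assumed to look roughly like the following; the
 * wrapper name and where it lives are assumptions, not taken from this file:
 *
 *      static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
 *      {
 *              return nilfs_sufile_update(sufile, segnum, 0,
 *                                         nilfs_sufile_do_free);
 *      }
 */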
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed to by @segnump. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
        unsigned long nsegments, ncleansegs, nsus;
        int ret, i, j;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
        ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr, KM_USER0);

        nsegments = nilfs_sufile_get_nsegments(sufile);
        segnum = last_alloc + 1;
        maxsegnum = nsegments - 1;
        for (i = 0; i < nsegments; i += nsus) {
                if (segnum >= nsegments) {
                        /* wrap around */
                        segnum = 0;
                        maxsegnum = last_alloc;
                }
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);

                nsus = nilfs_sufile_segment_usages_in_block(
                        sufile, segnum, maxsegnum);
                for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
                        kunmap_atomic(kaddr, KM_USER0);

                        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                        header = nilfs_sufile_block_get_header(
                                sufile, header_bh, kaddr);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr, KM_USER0);

                        nilfs_mdt_mark_buffer_dirty(header_bh);
                        nilfs_mdt_mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;
                        goto out_header;
                }

                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }

        /* no segments left */
        ret = -ENOSPC;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
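/*
 * Illustrative sketch, not part of the original file: a component that needs
 * a fresh segment is expected to call nilfs_sufile_alloc() roughly as below;
 * the surrounding variable names are assumptions used only for illustration:
 *
 *      __u64 nextnum;
 *      int err;
 *
 *      err = nilfs_sufile_alloc(sufile, &nextnum);
 *      if (unlikely(err))
 *              return err;     (-ENOSPC means no clean segment is left)
 *      (nextnum now names a segment whose usage entry has been marked dirty)
 */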
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
                                 struct buffer_head *header_bh,
                                 struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                printk(KERN_WARNING "%s: segment %llu must be clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, -1, 1);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
                           struct buffer_head *header_bh,
                           struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int clean, dirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
        dirty = nilfs_segment_usage_dirty(su);

        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                          struct buffer_head *header_bh,
                          struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int sudirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                printk(KERN_WARNING "%s: segment %llu is already clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
        WARN_ON(!nilfs_segment_usage_dirty(su));

        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr, KM_USER0);
        nilfs_mdt_mark_buffer_dirty(su_bh);

        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the places pointed to by @sup and @bhp, respectively. On error, one of
 * the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
                                   struct nilfs_segment_usage **sup,
                                   struct buffer_head **bhp)
{
        struct buffer_head *bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        int ret;

        /* segnum is 0 origin */
        if (segnum >= nilfs_sufile_get_nsegments(sufile))
                return -EINVAL;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap(bh->b_page);
                brelse(bh);
                ret = -EINVAL;
                goto out_sem;
        }

        if (sup != NULL)
                *sup = su;
        *bhp = bh;

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head which has been returned
 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
                                    struct buffer_head *bh)
{
        kunmap(bh->b_page);
        brelse(bh);
}
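/*
 * Illustrative sketch, not part of the original file: the two functions above
 * are meant to be used as a get/put pair.  A caller updating an entry in
 * place might look roughly like this; the particular field update shown is
 * only an example:
 *
 *      struct nilfs_segment_usage *su;
 *      struct buffer_head *bh;
 *      int ret;
 *
 *      ret = nilfs_sufile_get_segment_usage(sufile, segnum, &su, &bh);
 *      if (ret < 0)
 *              return ret;
 *      su->su_nblocks = cpu_to_le32(nblocks);  (example modification)
 *      nilfs_mdt_mark_buffer_dirty(bh);
 *      nilfs_mdt_mark_dirty(sufile);
 *      nilfs_sufile_put_segment_usage(sufile, segnum, bh);
 */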
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed to by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
        sustat->ss_ctime = nilfs->ns_ctime;
        sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(header_bh);

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
/**
 * nilfs_sufile_get_ncleansegs - get the number of clean segments
 * @sufile: inode of segment usage file
 * @nsegsp: pointer to the number of clean segments
 *
 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
 * segments.
 *
 * Return Value: On success, 0 is returned and the number of clean segments is
 * stored in the place pointed to by @nsegsp. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
{
        struct nilfs_sustat sustat;
        int ret;

        ret = nilfs_sufile_get_stat(sufile, &sustat);
        if (ret == 0)
                *nsegsp = sustat.ss_ncleansegs;
        return ret;
}
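/*
 * Illustrative sketch, not part of the original file: a free-space check
 * built on nilfs_sufile_get_ncleansegs() could read roughly as follows,
 * where nsegs_needed is a hypothetical caller-supplied threshold:
 *
 *      unsigned long ncleansegs;
 *      int ret;
 *
 *      ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
 *      if (!ret && ncleansegs < nsegs_needed)
 *              ret = -ENOSPC;
 */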
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                               struct buffer_head *header_bh,
                               struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int suclean;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
        kunmap_atomic(kaddr, KM_USER0);

        if (suclean)
                nilfs_sufile_mod_counter(header_bh, -1, 0);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @si: array of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies usage information of up to
 * @nsi segments, starting at @segnum, into the @si array.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @si is returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
                                struct nilfs_suinfo *si, size_t nsi)
{
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
        int ret, i, j;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        nsegs = min_t(unsigned long,
                      nilfs_sufile_get_nsegments(sufile) - segnum,
                      nsi);
        for (i = 0; i < nsegs; i += n, segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                          nilfs_sufile_get_offset(sufile, segnum),
                          nsegs - i);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        /* hole */
                        memset(&si[i], 0, sizeof(struct nilfs_suinfo) * n);
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n; j++, su = (void *)su + susz) {
                        si[i + j].sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
                                ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si[i + j].sui_flags |=
                                        (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }
        ret = nsegs;

 out:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
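/*
 * Illustrative sketch, not part of the original file: a reader (for example,
 * an ioctl handler reporting segment usage to user space) is expected to
 * consume the interface above in chunks, roughly as follows.  SUI_CHUNK and
 * the copy-out step are assumptions used only for illustration:
 *
 *      struct nilfs_suinfo si[SUI_CHUNK];
 *      ssize_t n;
 *
 *      n = nilfs_sufile_get_suinfo(sufile, segnum, si, SUI_CHUNK);
 *      if (n < 0)
 *              return n;
 *      (copy the n entries out, advance segnum by n, and repeat until
 *       n == 0 or the caller's buffer is full)
 */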