sufile.c

/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

struct nilfs_sufile_info {
        struct nilfs_mdt_info mi;
        unsigned long ncleansegs;
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
        return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
        return NILFS_MDT(sufile)->mi_entries_per_block;
}
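
/*
 * Note (added comment, not in the original file): nilfs_sufile_get_blkoff()
 * and nilfs_sufile_get_offset() below map a segment number onto the sufile
 * block that holds its usage entry and the slot within that block.  The
 * extra mi_first_entry_offset term presumably accounts for the on-disk
 * header occupying the leading entry slots of the first block.
 */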
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
        do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
        return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
                                     __u64 max)
{
        return min_t(unsigned long,
                     nilfs_sufile_segment_usages_per_block(sufile) -
                     nilfs_sufile_get_offset(sufile, curr),
                     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
                                     struct buffer_head *bh, void *kaddr)
{
        return kaddr + bh_offset(bh) +
                nilfs_sufile_get_offset(sufile, segnum) *
                NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                     int create, struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile,
                                   nilfs_sufile_get_blkoff(sufile, segnum),
                                   create, NULL, bhp);
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
{
        struct nilfs_sufile_header *header;
        void *kaddr;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(header_bh);
}
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
                         int create, size_t *ndone,
                         void (*dofunc)(struct inode *, __u64,
                                        struct buffer_head *,
                                        struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        unsigned long blkoff, prev_blkoff;
        __u64 *seg;
        size_t nerr = 0, n = 0;
        int ret = 0;

        if (unlikely(nsegs == 0))
                goto out;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        for (seg = segnumv; seg < segnumv + nsegs; seg++) {
                if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
                        printk(KERN_WARNING
                               "%s: invalid segment number: %llu\n", __func__,
                               (unsigned long long)*seg);
                        nerr++;
                }
        }
        if (nerr > 0) {
                ret = -EINVAL;
                goto out_sem;
        }

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        seg = segnumv;
        blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
        ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                dofunc(sufile, *seg, header_bh, bh);

                if (++seg >= segnumv + nsegs)
                        break;
                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                brelse(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_header;
        }
        brelse(bh);

 out_header:
        n = seg - segnumv;
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);

 out:
        if (ndone)
                *ndone = n;
        return ret;
}
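
/*
 * Illustrative sketch (not part of the original file): callers typically
 * combine nilfs_sufile_updatev()/nilfs_sufile_update() with one of the
 * nilfs_sufile_do_* primitives defined further below, along the lines of:
 *
 *     static inline int example_sufile_freev(struct inode *sufile,
 *                                            __u64 *segnumv, size_t nsegs,
 *                                            size_t *ndone)
 *     {
 *             return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone,
 *                                         nilfs_sufile_do_free);
 *     }
 *
 * example_sufile_freev is a made-up name for illustration; the real
 * wrappers are expected to be provided by the sufile header.
 */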
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
                        void (*dofunc)(struct inode *, __u64,
                                       struct buffer_head *,
                                       struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        int ret;

        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
                       __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
        if (!ret) {
                dofunc(sufile, segnum, header_bh, bh);
                brelse(bh);
        }
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
        unsigned long nsegments, ncleansegs, nsus;
        int ret, i, j;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr, KM_USER0);

        nsegments = nilfs_sufile_get_nsegments(sufile);
        segnum = last_alloc + 1;
        maxsegnum = nsegments - 1;
        for (i = 0; i < nsegments; i += nsus) {
                if (segnum >= nsegments) {
                        /* wrap around */
                        segnum = 0;
                        maxsegnum = last_alloc;
                }
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);

                nsus = nilfs_sufile_segment_usages_in_block(
                        sufile, segnum, maxsegnum);
                for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
                        kunmap_atomic(kaddr, KM_USER0);

                        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                        header = kaddr + bh_offset(header_bh);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr, KM_USER0);

                        NILFS_SUI(sufile)->ncleansegs--;
                        nilfs_mdt_mark_buffer_dirty(header_bh);
                        nilfs_mdt_mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;
                        goto out_header;
                }

                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }

        /* no segments left */
        ret = -ENOSPC;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
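
/*
 * Illustrative sketch (not part of the original file): a caller such as the
 * segment constructor would typically pick the next clean segment like this,
 * treating -ENOSPC as "no clean segment left":
 *
 *     __u64 nextnum;
 *     int err;
 *
 *     err = nilfs_sufile_alloc(sufile, &nextnum);
 *     if (unlikely(err))
 *             return err;
 */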
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
                                 struct buffer_head *header_bh,
                                 struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                printk(KERN_WARNING "%s: segment %llu must be clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;

        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
                           struct buffer_head *header_bh,
                           struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int clean, dirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
        dirty = nilfs_segment_usage_dirty(su);

        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;

        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                          struct buffer_head *header_bh,
                          struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int sudirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                printk(KERN_WARNING "%s: segment %llu is already clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
        WARN_ON(!nilfs_segment_usage_dirty(su));

        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr, KM_USER0);
        nilfs_mdt_mark_buffer_dirty(su_bh);

        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        NILFS_SUI(sufile)->ncleansegs++;

        nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the place pointed by @sup and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
                                   struct nilfs_segment_usage **sup,
                                   struct buffer_head **bhp)
{
        struct buffer_head *bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        int ret;

        /* segnum is 0 origin */
        if (segnum >= nilfs_sufile_get_nsegments(sufile))
                return -EINVAL;
        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap(bh->b_page);
                brelse(bh);
                ret = -EINVAL;
                goto out_sem;
        }

        if (sup != NULL)
                *sup = su;
        *bhp = bh;

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head that was returned by a
 * previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
                                    struct buffer_head *bh)
{
        kunmap(bh->b_page);
        brelse(bh);
}
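
/*
 * Illustrative sketch (not part of the original file): the get/put pair
 * above is meant to be used bracket-style, for example when a caller stamps
 * the last-modified time of a segment:
 *
 *     struct nilfs_segment_usage *su;
 *     struct buffer_head *bh;
 *     int err;
 *
 *     err = nilfs_sufile_get_segment_usage(sufile, segnum, &su, &bh);
 *     if (unlikely(err))
 *             return err;
 *     su->su_lastmod = cpu_to_le64(get_seconds());
 *     nilfs_mdt_mark_buffer_dirty(bh);
 *     nilfs_sufile_put_segment_usage(sufile, segnum, bh);
 *     nilfs_mdt_mark_dirty(sufile);
 *
 * The get_seconds() timestamp is only an example of what a caller might
 * store; the point is pairing every get with a put.
 */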
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
        sustat->ss_ctime = nilfs->ns_ctime;
        sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(header_bh);

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
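
/*
 * Illustrative sketch (not part of the original file): nilfs_sufile_get_stat()
 * backs space accounting; a rough free-space check by a caller could read:
 *
 *     struct nilfs_sustat sustat;
 *     int err;
 *
 *     err = nilfs_sufile_get_stat(sufile, &sustat);
 *     if (unlikely(err))
 *             return err;
 *     if (sustat.ss_ncleansegs < NILFS_MIN_NRSVSEGS)
 *             return -ENOSPC;
 *
 * NILFS_MIN_NRSVSEGS is used here only as a placeholder threshold.
 */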
/**
 * nilfs_sufile_get_ncleansegs - get the number of clean segments
 * @sufile: inode of segment usage file
 * @nsegsp: pointer to the number of clean segments
 *
 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
 * segments from the counter cached in the sufile inode.
 *
 * Return Value: 0 is returned, and the number of clean segments is stored in
 * the place pointed by @nsegsp.
 */
int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
{
        *nsegsp = NILFS_SUI(sufile)->ncleansegs;
        return 0;
}
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                               struct buffer_head *header_bh,
                               struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int suclean;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
        kunmap_atomic(kaddr, KM_USER0);

        if (suclean) {
                nilfs_sufile_mod_counter(header_bh, -1, 0);
                NILFS_SUI(sufile)->ncleansegs--;
        }
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_get_suinfo - get an array of segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies usage information for up to
 * @nsi segments, starting at @segnum, into the @buf array of @sisz-byte
 * entries.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                unsigned sisz, size_t nsi)
{
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        struct nilfs_suinfo *si = buf;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
        int ret, i, j;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        nsegs = min_t(unsigned long,
                      nilfs_sufile_get_nsegments(sufile) - segnum,
                      nsi);
        for (i = 0; i < nsegs; i += n, segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                                  nilfs_sufile_get_offset(sufile, segnum),
                          nsegs - i);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        /* hole */
                        memset(si, 0, sisz * n);
                        si = (void *)si + sisz * n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
                     j++, su = (void *)su + susz, si = (void *)si + sisz) {
                        si->sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si->sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si->sui_flags = le32_to_cpu(su->su_flags) &
                                ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si->sui_flags |=
                                        (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }
        ret = nsegs;

 out:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
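
/*
 * Illustrative sketch (not part of the original file): a reader such as the
 * GET_SUINFO ioctl handler would typically walk the usage file in chunks,
 * advancing by whatever nilfs_sufile_get_suinfo() actually returned:
 *
 *     struct nilfs_suinfo si[BATCH];
 *     __u64 segnum = 0;
 *     ssize_t n;
 *
 *     do {
 *             n = nilfs_sufile_get_suinfo(sufile, segnum, si, sizeof(si[0]),
 *                                         BATCH);
 *             if (n < 0)
 *                     return n;
 *             segnum += n;
 *             ... consume n entries ...
 *     } while (n == BATCH);
 *
 * BATCH is a made-up chunk size used only for illustration.
 */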
/**
 * nilfs_sufile_read - read sufile inode
 * @sufile: sufile inode
 * @raw_inode: on-disk sufile inode
 */
int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
{
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        void *kaddr;
        int ret;

        ret = nilfs_read_inode_common(sufile, raw_inode);
        if (ret < 0)
                return ret;

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (!ret) {
                kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                header = kaddr + bh_offset(header_bh);
                sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
                kunmap_atomic(kaddr, KM_USER0);
                brelse(header_bh);
        }
        return ret;
}
/**
 * nilfs_sufile_new - create sufile
 * @nilfs: nilfs object
 * @susize: size of a segment usage entry
 */
struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
{
        struct inode *sufile;

        sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO,
                               sizeof(struct nilfs_sufile_info));
        if (sufile)
                nilfs_mdt_set_entry_size(sufile, susize,
                                         sizeof(struct nilfs_sufile_header));
        return sufile;
}
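
/*
 * Illustrative sketch (not part of the original file): during mount the
 * sufile is expected to be instantiated and then primed from its on-disk
 * inode, roughly:
 *
 *     sufile = nilfs_sufile_new(nilfs,
 *                               le16_to_cpu(sbp->s_segment_usage_size));
 *     if (unlikely(!sufile))
 *             return -ENOMEM;
 *     err = nilfs_sufile_read(sufile, raw_inode);
 *
 * Here sbp and raw_inode stand in for the superblock and on-disk inode that
 * the loader already holds; the exact call site is outside this file.
 */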