  1. /*
  2. * sufile.c - NILFS segment usage file.
  3. *
  4. * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * Written by Koji Sato <koji@osrg.net>.
  21. * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/fs.h>
  25. #include <linux/string.h>
  26. #include <linux/buffer_head.h>
  27. #include <linux/errno.h>
  28. #include <linux/nilfs2_fs.h>
  29. #include "mdt.h"
  30. #include "sufile.h"
/*
 * struct nilfs_sufile_info - in-memory private data of the sufile
 * @mi: metadata-file common private data (must stay first: NILFS_SUI()
 *	casts the mdt info pointer to this structure)
 * @ncleansegs: cached count of clean segments, kept in sync with the
 *	on-disk header's sh_ncleansegs by the update functions below
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;
};
  35. static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
  36. {
  37. return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
  38. }
/* Number of segment usage entries stored in a single sufile block. */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}
  44. static unsigned long
  45. nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
  46. {
  47. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  48. do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  49. return (unsigned long)t;
  50. }
  51. static unsigned long
  52. nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
  53. {
  54. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  55. return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  56. }
  57. static unsigned long
  58. nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
  59. __u64 max)
  60. {
  61. return min_t(unsigned long,
  62. nilfs_sufile_segment_usages_per_block(sufile) -
  63. nilfs_sufile_get_offset(sufile, curr),
  64. max - curr + 1);
  65. }
  66. static struct nilfs_segment_usage *
  67. nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
  68. struct buffer_head *bh, void *kaddr)
  69. {
  70. return kaddr + bh_offset(bh) +
  71. nilfs_sufile_get_offset(sufile, segnum) *
  72. NILFS_MDT(sufile)->mi_entry_size;
  73. }
/* Read in the sufile header block (block 0); never creates it. */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
  79. static inline int
  80. nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
  81. int create, struct buffer_head **bhp)
  82. {
  83. return nilfs_mdt_get_block(sufile,
  84. nilfs_sufile_get_blkoff(sufile, segnum),
  85. create, NULL, bhp);
  86. }
  87. static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
  88. u64 ncleanadd, u64 ndirtyadd)
  89. {
  90. struct nilfs_sufile_header *header;
  91. void *kaddr;
  92. kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
  93. header = kaddr + bh_offset(header_bh);
  94. le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
  95. le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
  96. kunmap_atomic(kaddr, KM_USER0);
  97. nilfs_mdt_mark_buffer_dirty(header_bh);
  98. }
  99. /**
  100. * nilfs_sufile_get_ncleansegs - return the number of clean segments
  101. * @sufile: inode of segment usage file
  102. */
  103. unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
  104. {
  105. return NILFS_SUI(sufile)->ncleansegs;
  106. }
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/*
	 * Validate every segment number up front so that an invalid
	 * entry makes the whole call fail before anything is modified.
	 */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;	/* reuse the currently held block */

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/* number of entries processed before stopping (0 on early error) */
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
/*
 * nilfs_sufile_update - apply @dofunc to a single segment usage entry
 *
 * Validates @segnum, then calls @dofunc with the header block buffer
 * and the buffer of the block containing @segnum's usage entry, all
 * under the sufile's mi_sem write lock.  Returns 0 on success or a
 * negative error code (-EINVAL for a bad segment number, or whatever
 * the block lookups return).
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus;
	int ret, i, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

	/*
	 * Scan forward from the segment after the last allocation,
	 * wrapping to 0 at the end of the device, so that allocations
	 * cycle round-robin through all segments.
	 */
	nsegments = nilfs_sufile_get_nsegments(sufile);
	segnum = last_alloc + 1;
	maxsegnum = nsegments - 1;
	for (i = 0; i < nsegments; i += nsus) {
		if (segnum >= nsegments) {
			/* wrap around */
			segnum = 0;
			maxsegnum = last_alloc;
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		/* examine all entries of this block up to maxsegnum */
		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);

			/*
			 * Unmap the usage block before remapping the
			 * header block; only one atomic kmap slot
			 * (KM_USER0) is used at a time.
			 */
			kunmap_atomic(kaddr, KM_USER0);
			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

			/* keep the in-memory count in sync with the header */
			NILFS_SUI(sufile)->ncleansegs--;
			nilfs_mdt_mark_buffer_dirty(header_bh);
			nilfs_mdt_mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/*
 * nilfs_sufile_do_cancel_free - dofunc that revokes freeing of a segment
 *
 * Turns a clean segment back into a dirty one and adjusts the
 * clean/dirty counters accordingly.  If the segment is not clean, a
 * warning is printed and the entry is left untouched.  Called via
 * nilfs_sufile_update()/updatev() with mi_sem held.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	/* one fewer clean segment, one more dirty segment */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/*
 * nilfs_sufile_do_scrap - dofunc that turns a segment into garbage
 *
 * Resets the segment usage entry to "dirty, zero blocks" so the
 * segment becomes reclaimable, then fixes up the counters based on the
 * previous state.  Called via nilfs_sufile_update()/updatev() with
 * mi_sem held.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	/* already in the target state (dirty with no blocks)? nothing to do */
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* remember prior state so the counters can be adjusted correctly */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/*
 * nilfs_sufile_do_free - dofunc that frees a segment
 *
 * Marks the segment usage entry clean and updates the counters.  A
 * segment that is already clean triggers a warning and is skipped.
 * Called via nilfs_sufile_update()/updatev() with mi_sem held.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* a freeable segment is expected to be dirty and not in error */
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(su_bh);

	/* gained a clean segment; lost a dirty one if it was dirty */
	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}
  377. /**
  378. * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
  379. * @sufile: inode of segment usage file
  380. * @segnum: segment number
  381. */
  382. int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
  383. {
  384. struct buffer_head *bh;
  385. int ret;
  386. ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
  387. if (!ret) {
  388. nilfs_mdt_mark_buffer_dirty(bh);
  389. nilfs_mdt_mark_dirty(sufile);
  390. brelse(bh);
  391. }
  392. return ret;
  393. }
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 *
 * Updates the block count (and, when @modtime is nonzero, the last
 * modification time) of @segnum's usage entry.  Returns 0 on success
 * or a negative error code from the block lookup.
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is protected by its own spinlock, not mi_sem */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/*
 * nilfs_sufile_do_set_error - dofunc that marks a segment erroneous
 *
 * Sets the error flag on @segnum's usage entry; if the segment was
 * clean, the clean counters are decremented since an erroneous segment
 * is no longer allocatable.  Idempotent: an entry already flagged as
 * erroneous is left untouched.  Called via nilfs_sufile_update() with
 * mi_sem held.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_get_suinfo - get an array of segment usage info
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: Fills @buf with up to @nsi suinfo entries starting at
 * @segnum.  Sufile blocks that are holes yield zero-filled entries.
 * The active flag of each entry reflects the in-core active segment
 * state rather than the on-disk flag.
 *
 * Return Value: On success, the number of entries stored is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* clamp the request to the number of existing segments */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		/* process at most one sufile block per iteration */
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole: report the whole block's entries as zeroes */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/* replace the stored active flag with live state */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_read - read sufile inode
 * @sufile: sufile inode
 * @raw_inode: on-disk sufile inode
 *
 * Reads the common inode fields from @raw_inode and primes the
 * in-memory clean-segment cache from the sufile header block.
 * Returns 0 on success or a negative error code.
 */
int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int ret;

	ret = nilfs_read_inode_common(sufile, raw_inode);
	if (ret < 0)
		return ret;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (!ret) {
		/* cache the clean-segment count for fast lookup */
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = kaddr + bh_offset(header_bh);
		sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(header_bh);
	}
	return ret;
}
  587. /**
  588. * nilfs_sufile_new - create sufile
  589. * @nilfs: nilfs object
  590. * @susize: size of a segment usage entry
  591. */
  592. struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
  593. {
  594. struct inode *sufile;
  595. sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO,
  596. sizeof(struct nilfs_sufile_info));
  597. if (sufile)
  598. nilfs_mdt_set_entry_size(sufile, susize,
  599. sizeof(struct nilfs_sufile_header));
  600. return sufile;
  601. }