/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"
/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 *
 * Embeds struct nilfs_mdt_info as its first member so the same object
 * can be reached through NILFS_MDT() and cast back via NILFS_SUI().
 * @ncleansegs caches the on-disk header's clean-segment count; the
 * allocatable range [@allocmin, @allocmax] bounds nilfs_sufile_alloc().
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};
  44. static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
  45. {
  46. return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
  47. }
  48. static inline unsigned long
  49. nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
  50. {
  51. return NILFS_MDT(sufile)->mi_entries_per_block;
  52. }
  53. static unsigned long
  54. nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
  55. {
  56. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  57. do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  58. return (unsigned long)t;
  59. }
  60. static unsigned long
  61. nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
  62. {
  63. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  64. return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  65. }
  66. static unsigned long
  67. nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
  68. __u64 max)
  69. {
  70. return min_t(unsigned long,
  71. nilfs_sufile_segment_usages_per_block(sufile) -
  72. nilfs_sufile_get_offset(sufile, curr),
  73. max - curr + 1);
  74. }
  75. static struct nilfs_segment_usage *
  76. nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
  77. struct buffer_head *bh, void *kaddr)
  78. {
  79. return kaddr + bh_offset(bh) +
  80. nilfs_sufile_get_offset(sufile, segnum) *
  81. NILFS_MDT(sufile)->mi_entry_size;
  82. }
/* Read the sufile header block (block 0); never creates it. */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
  88. static inline int
  89. nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
  90. int create, struct buffer_head **bhp)
  91. {
  92. return nilfs_mdt_get_block(sufile,
  93. nilfs_sufile_get_blkoff(sufile, segnum),
  94. create, NULL, bhp);
  95. }
  96. static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
  97. __u64 segnum)
  98. {
  99. return nilfs_mdt_delete_block(sufile,
  100. nilfs_sufile_get_blkoff(sufile, segnum));
  101. }
  102. static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
  103. u64 ncleanadd, u64 ndirtyadd)
  104. {
  105. struct nilfs_sufile_header *header;
  106. void *kaddr;
  107. kaddr = kmap_atomic(header_bh->b_page);
  108. header = kaddr + bh_offset(header_bh);
  109. le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
  110. le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
  111. kunmap_atomic(kaddr);
  112. mark_buffer_dirty(header_bh);
  113. }
  114. /**
  115. * nilfs_sufile_get_ncleansegs - return the number of clean segments
  116. * @sufile: inode of segment usage file
  117. */
  118. unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
  119. {
  120. return NILFS_SUI(sufile)->ncleansegs;
  121. }
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/*
	 * Validate every segment number up front so a bad entry fails the
	 * whole call before any usage entry has been modified.
	 */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;	/* keep using the block already held */

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/* count of entries handled before stopping (all of them on success) */
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
/**
 * nilfs_sufile_update - modify a single segment usage entry
 * @sufile: inode of segment usage file
 * @segnum: segment number to modify
 * @create: whether to instantiate the usage block if it is a hole
 * @dofunc: primitive operation applied with the header and usage buffers
 *
 * Single-segment counterpart of nilfs_sufile_updatev(): takes mi_sem,
 * reads the header block and the usage block of @segnum, and invokes
 * @dofunc on them.
 *
 * Return Value: 0 on success; %-EINVAL for an out-of-range @segnum, or a
 * negative error code propagated from the block lookups (e.g. %-ENOENT
 * when @create is zero and the entry lies in a hole).
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
  235. /**
  236. * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
  237. * @sufile: inode of segment usage file
  238. * @start: minimum segment number of allocatable region (inclusive)
  239. * @end: maximum segment number of allocatable region (inclusive)
  240. *
  241. * Return Value: On success, 0 is returned. On error, one of the
  242. * following negative error codes is returned.
  243. *
  244. * %-ERANGE - invalid segment region
  245. */
  246. int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
  247. {
  248. struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
  249. __u64 nsegs;
  250. int ret = -ERANGE;
  251. down_write(&NILFS_MDT(sufile)->mi_sem);
  252. nsegs = nilfs_sufile_get_nsegments(sufile);
  253. if (start <= end && end < nsegs) {
  254. sui->allocmin = start;
  255. sui->allocmax = end;
  256. ret = 0;
  257. }
  258. up_write(&NILFS_MDT(sufile)->mi_sem);
  259. return ret;
  260. }
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * The scan starts just after the last-allocated segment and proceeds
 * within [allocmin, allocmax], wrapping around inside that region, and
 * only then spills into the rest of the device.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	/* resume right after the previous allocation; fall back to the
	   start of the allocatable region if that lands outside it */
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			/* pick the next sub-range to scan */
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			/* update the on-disk counters and allocation cursor */
			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_do_cancel_free - dofunc: turn a clean segment back to dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing the usage entry of @segnum
 *
 * Marks a previously clean segment dirty again and adjusts the
 * clean/dirty counters.  Warns and does nothing if the entry is not
 * clean.  Caller must hold mi_sem (via nilfs_sufile_update*()).
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_do_scrap - dofunc: reset a segment to empty dirty state
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing the usage entry of @segnum
 *
 * Resets the usage entry of @segnum to "dirty with zero blocks" so the
 * segment becomes reclaimable garbage, adjusting the clean/dirty
 * counters as required.  No-op if the entry is already in that state.
 * Caller must hold mi_sem (via nilfs_sufile_update*()).
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		/* already scrapped; nothing to modify */
		kunmap_atomic(kaddr);
		return;
	}
	/* remember the previous state to fix up the counters afterwards */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_do_free - dofunc: make a segment clean (free it)
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing the usage entry of @segnum
 *
 * Marks the usage entry of @segnum clean and updates the clean/dirty
 * counters.  Warns and does nothing if the entry is already clean.
 * Caller must hold mi_sem (via nilfs_sufile_update*()).
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	/* freeing an error-flagged or non-dirty segment is unexpected */
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}
  436. /**
  437. * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
  438. * @sufile: inode of segment usage file
  439. * @segnum: segment number
  440. */
  441. int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
  442. {
  443. struct buffer_head *bh;
  444. int ret;
  445. ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
  446. if (!ret) {
  447. mark_buffer_dirty(bh);
  448. nilfs_mdt_mark_dirty(sufile);
  449. brelse(bh);
  450. }
  451. return ret;
  452. }
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option); 0 leaves the stored time as is
 *
 * Return Value: 0 on success, or a negative error code from the block
 * lookup (the usage block must already exist; none is created here).
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is protected by its own spinlock, not mi_sem */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_do_set_error - dofunc: flag a segment as erroneous
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of the sufile header block
 * @su_bh: buffer head of the block containing the usage entry of @segnum
 *
 * Sets the error flag on the usage entry of @segnum; if the segment was
 * clean it is removed from the clean count.  No-op when the error flag
 * is already set.  Caller must hold mi_sem (via nilfs_sufile_update*()).
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Caller must hold mi_sem for writing (nilfs_sufile_resize() does).
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	/* walk the range block by block; n = entries handled per block */
	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		/*
		 * First pass: bail out with -EBUSY before modifying anything
		 * if any entry is in use (any flag besides the error bit) or
		 * the segment is the active one.
		 */
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		/* Second pass: clear error-flagged entries to clean */
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		/* fold the newly cleaned entries into the counters */
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}
/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;	/* nothing to do */

	/* when shrinking, enough clean segments must remain to cover the
	   removed region plus the new reserved-segment requirement */
	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		/* growing: the appended segments start out clean */
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	/* write the updated clean count back to the on-disk header */
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_get_suinfo - collect segment usage info into an array
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: copies up to @nsi segment usage entries starting at
 * @segnum into @buf; entries in hole blocks are reported zero-filled,
 * and the active-segment flag is recomputed from the current state.
 *
 * Return Value: On success, the number of suinfo items stored in @buf
 * is returned.  On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* clamp the request to the end of the segment array */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/* report the live active flag, not the stored one */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;
 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
  775. /**
  776. * nilfs_sufile_read - read or get sufile inode
  777. * @sb: super block instance
  778. * @susize: size of a segment usage entry
  779. * @raw_inode: on-disk sufile inode
  780. * @inodep: buffer to store the inode
  781. */
  782. int nilfs_sufile_read(struct super_block *sb, size_t susize,
  783. struct nilfs_inode *raw_inode, struct inode **inodep)
  784. {
  785. struct inode *sufile;
  786. struct nilfs_sufile_info *sui;
  787. struct buffer_head *header_bh;
  788. struct nilfs_sufile_header *header;
  789. void *kaddr;
  790. int err;
  791. sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
  792. if (unlikely(!sufile))
  793. return -ENOMEM;
  794. if (!(sufile->i_state & I_NEW))
  795. goto out;
  796. err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
  797. if (err)
  798. goto failed;
  799. nilfs_mdt_set_entry_size(sufile, susize,
  800. sizeof(struct nilfs_sufile_header));
  801. err = nilfs_read_inode_common(sufile, raw_inode);
  802. if (err)
  803. goto failed;
  804. err = nilfs_sufile_get_header_block(sufile, &header_bh);
  805. if (err)
  806. goto failed;
  807. sui = NILFS_SUI(sufile);
  808. kaddr = kmap_atomic(header_bh->b_page);
  809. header = kaddr + bh_offset(header_bh);
  810. sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
  811. kunmap_atomic(kaddr);
  812. brelse(header_bh);
  813. sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
  814. sui->allocmin = 0;
  815. unlock_new_inode(sufile);
  816. out:
  817. *inodep = sufile;
  818. return 0;
  819. failed:
  820. iget_failed(sufile);
  821. return err;
  822. }