cpfile.c

/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"

static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}
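
/*
 * Worked example for the two helpers above, with hypothetical numbers:
 * if mi_first_entry_offset is 2 and there are 8 checkpoints per block,
 * checkpoint number 13 gives tcno = 13 + 2 - 1 = 14, hence block
 * offset 14 / 8 = 1 and in-block offset 14 % 8 = 6.  do_div() divides
 * its first argument in place and returns the remainder.
 */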

static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}

static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}

static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}

static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}

static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
						       __u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed to by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		nilfs_mdt_mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr, KM_USER0);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
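
/*
 * Illustrative sketch of a hypothetical caller: the usual pattern is to
 * pair nilfs_cpfile_get_checkpoint() with nilfs_cpfile_put_checkpoint()
 * on the same checkpoint number and to touch the mapped checkpoint only
 * in between.  The helper name and the field read here are made up for
 * the example.
 */
static int __maybe_unused
nilfs_cpfile_example_read_nblk_inc(struct inode *cpfile, __u64 cno,
				   __u64 *nblk_inc)
{
	struct nilfs_checkpoint *cp;
	struct buffer_head *bh;
	int ret;

	/* Look up an existing checkpoint only; do not create one. */
	ret = nilfs_cpfile_get_checkpoint(cpfile, cno, 0, &cp, &bh);
	if (ret < 0)
		return ret;
	*nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	/* Release the kmap and the buffer reference taken above. */
	nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
	return 0;
}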

/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. Checkpoints which
 * have already been deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	/* cannot delete the latest checkpoint */
	if (start == nilfs_mdt_cno(cpfile) - 1)
		return -EPERM;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			nilfs_mdt_mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
			    (count = nilfs_cpfile_block_sub_valid_checkpoints(
				    cpfile, cp_bh, kaddr, nicps)) == 0) {
				/* make hole */
				kunmap_atomic(kaddr, KM_USER0);
				brelse(cp_bh);
				ret = nilfs_cpfile_delete_checkpoint_block(
					cpfile, cno);
				if (ret == 0)
					continue;
				printk(KERN_ERR "%s: cannot delete block\n",
				       __func__);
				goto out_sem;
			}
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr, KM_USER0);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}

static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  struct nilfs_cpinfo *ci, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp))
				nilfs_cpfile_checkpoint_to_cpinfo(
					cpfile, cp, &ci[n++]);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
	}

	ret = n;
	if (n > 0)
		*cnop = ci[n - 1].ci_cno + 1;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  struct nilfs_cpinfo *ci, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, &ci[n++]);
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page, KM_USER0);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: checkpoint number to start from; updated to the next number to
 *	continue from
 * @mode: mode of listing (NILFS_CHECKPOINT or NILFS_SNAPSHOT)
 * @ci: array of checkpoint information to be filled in
 * @nci: size of the @ci array
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				struct nilfs_cpinfo *ci, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, ci, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, ci, nci);
	default:
		return -EINVAL;
	}
}
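
/*
 * Illustrative sketch of a hypothetical caller: nilfs_cpfile_get_cpinfo()
 * is meant to be called repeatedly, with *cnop acting as a cursor that
 * each call advances past the last entry it returned.  The helper name
 * and the batch size below are arbitrary choices for the example.
 */
static int __maybe_unused
nilfs_cpfile_example_count_checkpoints(struct inode *cpfile, __u64 *ncps)
{
	struct nilfs_cpinfo ci[8];
	__u64 cno = 1;	/* checkpoint numbers start at 1 */
	ssize_t n;

	*ncps = 0;
	for (;;) {
		n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
					    ci, ARRAY_SIZE(ci));
		if (n < 0)
			return n;	/* e.g. -EIO or -ENOMEM */
		if (n == 0)
			break;		/* no more valid checkpoints */
		*ncps += n;
	}
	return 0;
}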

/**
 * nilfs_cpfile_delete_checkpoint - delete a single checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;
	int ret;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;

	/* cannot delete the latest checkpoint nor snapshots */
	ret = nilfs_cpinfo_snapshot(&ci);
	if (ret < 0)
		return ret;
	else if (ret > 0 || cno == nilfs_mdt_cno(cpfile) - 1)
		return -EPERM;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}

static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}

static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr, KM_USER0);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(curr_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(next_bh);
	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_is_snapshot - test whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint
 * specified by @cno is a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	struct the_nilfs *nilfs;
	int ret;

	nilfs = NILFS_MDT(cpfile)->mi_nilfs;

	switch (mode) {
	case NILFS_CHECKPOINT:
		/*
		 * Check for protecting existing snapshot mounts:
		 * bd_mount_sem is used to make this operation atomic and
		 * exclusive with a new mount job. Though it doesn't cover
		 * umount, it's enough for the purpose.
		 */
		down(&nilfs->ns_bdev->bd_mount_sem);
		if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
			/* Current implementation does not have to protect
			   plain read-only mounts since they are exclusive
			   with a read/write mount and are protected from the
			   cleaner. */
			ret = -EBUSY;
		} else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		up(&nilfs->ns_bdev->bd_mount_sem);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
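
/*
 * Illustrative sketch of a hypothetical caller: promoting a checkpoint to
 * a snapshot protects it from deletion by the cleaner, and passing
 * NILFS_CHECKPOINT later reverts it.  The wrapper name is made up for the
 * example.
 */
static int __maybe_unused
nilfs_cpfile_example_pin_checkpoint(struct inode *cpfile, __u64 cno)
{
	/* May fail with -ENOENT if no such checkpoint exists. */
	return nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_SNAPSHOT);
}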

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and the checkpoint information is
 * stored in the place pointed to by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
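
/*
 * Illustrative sketch of a hypothetical caller: reading out the next
 * checkpoint number and the checkpoint/snapshot counts maintained in the
 * cpfile header.  The helper name and the debug print are assumptions
 * made for the example.
 */
static int __maybe_unused
nilfs_cpfile_example_print_stat(struct inode *cpfile)
{
	struct nilfs_cpstat cpstat;
	int ret;

	ret = nilfs_cpfile_get_stat(cpfile, &cpstat);
	if (ret < 0)
		return ret;
	printk(KERN_DEBUG "%s: next cno=%llu, %llu checkpoints, %llu snapshots\n",
	       __func__,
	       (unsigned long long)cpstat.cs_cno,
	       (unsigned long long)cpstat.cs_ncps,
	       (unsigned long long)cpstat.cs_nsss);
	return 0;
}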