cpfile.c

/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"

static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}
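
/*
 * Illustrative note (not part of the original source): with hypothetical
 * values of 8 checkpoints per block and mi_first_entry_offset == 1,
 * checkpoint number 10 gives tcno = 10 + 1 - 1 = 10, so it lands in
 * file block 10 / 8 = 1 at entry offset 10 % 8 = 2.  The two helpers
 * above compute exactly this quotient/remainder pair via do_div().
 */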

static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}

static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}

static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}

static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}

static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
						       __u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		nilfs_mdt_mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr, KM_USER0);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
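
/*
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): a caller that reads fields of an existing checkpoint is expected
 * to bracket the access with nilfs_cpfile_get_checkpoint() and
 * nilfs_cpfile_put_checkpoint().  The helper name below is hypothetical.
 */
#if 0
static int nilfs_cpfile_example_read_cp(struct inode *cpfile, __u64 cno,
					__u64 *nblk_inc)
{
	struct nilfs_checkpoint *cp;
	struct buffer_head *bh;
	int ret;

	/* Look up an existing checkpoint; do not create one (create == 0). */
	ret = nilfs_cpfile_get_checkpoint(cpfile, cno, 0, &cp, &bh);
	if (ret < 0)
		return ret;	/* -ENOENT, -EIO, -ENOMEM or -EINVAL */

	*nblk_inc = le64_to_cpu(cp->cp_nblk_inc);

	/* Release the kmap and the buffer head taken by the get call. */
	nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
	return 0;
}
#endif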

/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. Checkpoints that
 * have already been deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			nilfs_mdt_mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
			    (count = nilfs_cpfile_block_sub_valid_checkpoints(
					cpfile, cp_bh, kaddr, nicps)) == 0) {
				/* make hole */
				kunmap_atomic(kaddr, KM_USER0);
				brelse(cp_bh);
				ret = nilfs_cpfile_delete_checkpoint_block(
					cpfile, cno);
				if (ret == 0)
					continue;
				printk(KERN_ERR "%s: cannot delete block\n",
				       __func__);
				goto out_header;
			}
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr, KM_USER0);
	}

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}

static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page, KM_USER0);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to start searching; updated to the next checkpoint number
 * @mode: NILFS_CHECKPOINT or NILFS_SNAPSHOT
 * @buf: buffer in which checkpoint information is stored
 * @cisz: byte size of one checkpoint info entry in @buf
 * @nci: maximum number of entries to store in @buf
 *
 * Return Value: On success, the number of stored entries is returned. On
 * error, a negative error code is returned.
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}
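
/*
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): nilfs_cpfile_get_cpinfo() is a cursor-style interface; *cnop is
 * advanced past the last returned entry, so a caller loops until fewer
 * entries than requested come back.  The buffer size and the function
 * name below are hypothetical.
 */
#if 0
static int nilfs_cpfile_example_count_cps(struct inode *cpfile, __u64 *ncps)
{
	struct nilfs_cpinfo ci[16];
	__u64 cno = 1;	/* checkpoint numbers start at 1 */
	ssize_t n;

	*ncps = 0;
	do {
		n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
					    ci, sizeof(ci[0]), ARRAY_SIZE(ci));
		if (n < 0)
			return (int)n;
		*ncps += n;
	} while (n == (ssize_t)ARRAY_SIZE(ci));
	return 0;
}
#endif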

/**
 * nilfs_cpfile_delete_checkpoint - delete a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 *
 * Return Value: On success, 0 is returned. On error, a negative error code
 * is returned; %-ENOENT if the checkpoint does not exist, or %-EBUSY if it
 * is a snapshot.
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
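
/*
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): deleting a single checkpoint fails with -EBUSY while it is a
 * snapshot, so a caller could clear the snapshot status first via
 * nilfs_cpfile_change_cpmode() and retry.  The wrapper name below is
 * hypothetical.
 */
#if 0
static int nilfs_cpfile_example_force_delete(struct inode *cpfile, __u64 cno)
{
	int ret;

	ret = nilfs_cpfile_delete_checkpoint(cpfile, cno);
	if (ret != -EBUSY)
		return ret;

	/* Demote the snapshot back to a plain checkpoint, then retry. */
	ret = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_CHECKPOINT);
	if (ret < 0)
		return ret;
	return nilfs_cpfile_delete_checkpoint(cpfile, cno);
}
#endif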

static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}

static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr, KM_USER0);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(curr_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(next_bh);
	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_is_snapshot - determine whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint
 * specified by @cno is a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	struct the_nilfs *nilfs;
	int ret;

	nilfs = NILFS_MDT(cpfile)->mi_nilfs;

	switch (mode) {
	case NILFS_CHECKPOINT:
		/*
		 * Check for protecting existing snapshot mounts:
		 * ns_mount_mutex is used to make this operation atomic and
		 * exclusive with a new mount job. Though it doesn't cover
		 * umount, it's enough for the purpose.
		 */
		mutex_lock(&nilfs->ns_mount_mutex);
		if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
			/* Current implementation does not have to protect
			   plain read-only mounts since they are exclusive
			   with a read/write mount and are protected from the
			   cleaner. */
			ret = -EBUSY;
		} else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		mutex_unlock(&nilfs->ns_mount_mutex);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
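
/*
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): turning a checkpoint into a snapshot and checking the result.
 * The NILFS_SNAPSHOT case is idempotent (nilfs_cpfile_set_snapshot()
 * returns 0 if the checkpoint is already a snapshot).  The function name
 * below is hypothetical.
 */
#if 0
static int nilfs_cpfile_example_pin_snapshot(struct inode *cpfile, __u64 cno)
{
	int ret;

	ret = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_SNAPSHOT);
	if (ret < 0)
		return ret;

	ret = nilfs_cpfile_is_snapshot(cpfile, cno);
	if (ret < 0)
		return ret;
	return ret ? 0 : -EINVAL;	/* should not happen */
}
#endif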

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
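
/*
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): reading the statistics kept in the cpfile header.  The printk
 * format and the function name are hypothetical.
 */
#if 0
static void nilfs_cpfile_example_print_stat(struct inode *cpfile)
{
	struct nilfs_cpstat cpstat;

	if (nilfs_cpfile_get_stat(cpfile, &cpstat) == 0)
		printk(KERN_DEBUG "cpfile: next cno=%llu, %llu checkpoints, "
		       "%llu snapshots\n",
		       (unsigned long long)cpstat.cs_cno,
		       (unsigned long long)cpstat.cs_ncps,
		       (unsigned long long)cpstat.cs_nsss);
}
#endif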