cpfile.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925
/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
  22. #include <linux/kernel.h>
  23. #include <linux/fs.h>
  24. #include <linux/string.h>
  25. #include <linux/buffer_head.h>
  26. #include <linux/errno.h>
  27. #include <linux/nilfs2_fs.h>
  28. #include "mdt.h"
  29. #include "cpfile.h"
  30. static inline unsigned long
  31. nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
  32. {
  33. return NILFS_MDT(cpfile)->mi_entries_per_block;
  34. }
  35. /* block number from the beginning of the file */
  36. static unsigned long
  37. nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
  38. {
  39. __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
  40. do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
  41. return (unsigned long)tcno;
  42. }
  43. /* offset in block */
  44. static unsigned long
  45. nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
  46. {
  47. __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
  48. return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
  49. }
  50. static unsigned long
  51. nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
  52. __u64 curr,
  53. __u64 max)
  54. {
  55. return min_t(__u64,
  56. nilfs_cpfile_checkpoints_per_block(cpfile) -
  57. nilfs_cpfile_get_offset(cpfile, curr),
  58. max - curr);
  59. }
  60. static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
  61. __u64 cno)
  62. {
  63. return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
  64. }
  65. static unsigned int
  66. nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
  67. struct buffer_head *bh,
  68. void *kaddr,
  69. unsigned int n)
  70. {
  71. struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
  72. unsigned int count;
  73. count = le32_to_cpu(cp->cp_checkpoints_count) + n;
  74. cp->cp_checkpoints_count = cpu_to_le32(count);
  75. return count;
  76. }
  77. static unsigned int
  78. nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
  79. struct buffer_head *bh,
  80. void *kaddr,
  81. unsigned int n)
  82. {
  83. struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
  84. unsigned int count;
  85. WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
  86. count = le32_to_cpu(cp->cp_checkpoints_count) - n;
  87. cp->cp_checkpoints_count = cpu_to_le32(count);
  88. return count;
  89. }
  90. static inline struct nilfs_cpfile_header *
  91. nilfs_cpfile_block_get_header(const struct inode *cpfile,
  92. struct buffer_head *bh,
  93. void *kaddr)
  94. {
  95. return kaddr + bh_offset(bh);
  96. }
  97. static struct nilfs_checkpoint *
  98. nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
  99. struct buffer_head *bh,
  100. void *kaddr)
  101. {
  102. return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
  103. NILFS_MDT(cpfile)->mi_entry_size;
  104. }
  105. static void nilfs_cpfile_block_init(struct inode *cpfile,
  106. struct buffer_head *bh,
  107. void *kaddr)
  108. {
  109. struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
  110. size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
  111. int n = nilfs_cpfile_checkpoints_per_block(cpfile);
  112. while (n-- > 0) {
  113. nilfs_checkpoint_set_invalid(cp);
  114. cp = (void *)cp + cpsz;
  115. }
  116. }
  117. static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
  118. struct buffer_head **bhp)
  119. {
  120. return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
  121. }
  122. static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
  123. __u64 cno,
  124. int create,
  125. struct buffer_head **bhp)
  126. {
  127. return nilfs_mdt_get_block(cpfile,
  128. nilfs_cpfile_get_blkoff(cpfile, cno),
  129. create, nilfs_cpfile_block_init, bhp);
  130. }
  131. static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
  132. __u64 cno)
  133. {
  134. return nilfs_mdt_delete_block(cpfile,
  135. nilfs_cpfile_get_blkoff(cpfile, cno));
  136. }
/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * The checkpoint page stays kmap()ed on success; the caller must release
 * it with nilfs_cpfile_put_checkpoint().
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * Only the current checkpoint number may be created; any older
	 * checkpoint must already exist, so @create with cno < current
	 * is rejected outright.
	 */
	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	/*
	 * Non-atomic kmap: the mapping outlives this function and is
	 * undone by nilfs_cpfile_put_checkpoint().
	 */
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		/*
		 * The first block is skipped here; its valid count is
		 * apparently accounted differently (see the header update
		 * below) — NOTE(review): confirm against on-disk format.
		 */
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		nilfs_mdt_mark_buffer_dirty(cp_bh);

		/* Bump the file-wide checkpoint counter in the header. */
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr, KM_USER0);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;	/* cp_bh reference is handed to the caller */

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
  215. /**
  216. * nilfs_cpfile_put_checkpoint - put a checkpoint
  217. * @cpfile: inode of checkpoint file
  218. * @cno: checkpoint number
  219. * @bh: buffer head
  220. *
  221. * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
  222. * specified by @cno. @bh must be the buffer head which has been returned by
  223. * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
  224. */
  225. void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
  226. struct buffer_head *bh)
  227. {
  228. kunmap(bh->b_page);
  229. brelse(bh);
  230. }
/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;	/* total newly-invalidated checkpoints */
	int ret, ncps, nicps, count, i;

	/* Checkpoint number 0 never exists; reject inverted ranges too. */
	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	/* Walk block by block; ncps is the slot count handled per pass. */
	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		/* Invalidate every still-valid slot in this block's range;
		 * snapshots must have been cleared by the caller. */
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			nilfs_mdt_mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			/* When the per-block valid count drops to zero the
			 * whole block can be punched out (never block 0). */
			if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
			    (count = nilfs_cpfile_block_sub_valid_checkpoints(
				    cpfile, cp_bh, kaddr, nicps)) == 0) {
				/* make hole */
				kunmap_atomic(kaddr, KM_USER0);
				brelse(cp_bh);
				ret = nilfs_cpfile_delete_checkpoint_block(
					cpfile, cno);
				if (ret == 0)
					continue;
				printk(KERN_ERR "%s: cannot delete block\n",
				       __func__);
				break;
			}
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(cp_bh);
	}

	/* Subtract all invalidated checkpoints from the header total. */
	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		nilfs_mdt_mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr, KM_USER0);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
  330. static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
  331. struct nilfs_checkpoint *cp,
  332. struct nilfs_cpinfo *ci)
  333. {
  334. ci->ci_flags = le32_to_cpu(cp->cp_flags);
  335. ci->ci_cno = le64_to_cpu(cp->cp_cno);
  336. ci->ci_create = le64_to_cpu(cp->cp_create);
  337. ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
  338. ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
  339. ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
  340. ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
  341. }
/*
 * Collect up to @nci valid checkpoints starting from *@cnop into @buf
 * (an array of entries @cisz bytes apart).  On return, *@cnop is advanced
 * past the last checkpoint reported, so repeated calls iterate the file.
 * Returns the number of entries stored, or a negative error code.
 */
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	/* Scan block by block up to the current checkpoint number. */
	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		/* Copy out only the slots still marked valid. */
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		/* Resume after the last checkpoint actually reported. */
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
/*
 * Collect up to @nci snapshots by walking the on-disk snapshot list
 * starting from *@cnop (0 means "start from the list head in the
 * header"; ~0 is the terminator from a previous call).  On return,
 * *@cnop holds the next snapshot to resume from, or ~0 when the list
 * is exhausted.  Returns the number of entries stored or a negative
 * error code.
 */
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		/* Bootstrap: read the list head from the cpfile header. */
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		/* Terminator left by a previous call: nothing more. */
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		/* List node no longer a valid snapshot: stop defensively. */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */
		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			/* Crossing into another block: remap. */
			kunmap_atomic(kaddr, KM_USER0);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page, KM_USER0);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
  458. /**
  459. * nilfs_cpfile_get_cpinfo -
  460. * @cpfile:
  461. * @cno:
  462. * @ci:
  463. * @nci:
  464. */
  465. ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
  466. void *buf, unsigned cisz, size_t nci)
  467. {
  468. switch (mode) {
  469. case NILFS_CHECKPOINT:
  470. return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
  471. case NILFS_SNAPSHOT:
  472. return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
  473. default:
  474. return -EINVAL;
  475. }
  476. }
  477. /**
  478. * nilfs_cpfile_delete_checkpoint -
  479. * @cpfile:
  480. * @cno:
  481. */
  482. int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
  483. {
  484. struct nilfs_cpinfo ci;
  485. __u64 tcno = cno;
  486. ssize_t nci;
  487. nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
  488. if (nci < 0)
  489. return nci;
  490. else if (nci == 0 || ci.ci_cno != cno)
  491. return -ENOENT;
  492. else if (nilfs_cpinfo_snapshot(&ci))
  493. return -EBUSY;
  494. return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
  495. }
  496. static struct nilfs_snapshot_list *
  497. nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
  498. __u64 cno,
  499. struct buffer_head *bh,
  500. void *kaddr)
  501. {
  502. struct nilfs_cpfile_header *header;
  503. struct nilfs_checkpoint *cp;
  504. struct nilfs_snapshot_list *list;
  505. if (cno != 0) {
  506. cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
  507. list = &cp->cp_snapshot_list;
  508. } else {
  509. header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
  510. list = &header->ch_snapshot_list;
  511. }
  512. return list;
  513. }
/*
 * Turn checkpoint @cno into a snapshot: find its place in the
 * cno-ordered, doubly-linked on-disk snapshot list (anchored in the
 * cpfile header) and splice it in between @prev and @curr.  Returns 0
 * on success (also when already a snapshot), -ENOENT when the
 * checkpoint is missing or invalid, or a negative error code.
 */
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		/* Already a snapshot: nothing to do. */
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	/*
	 * Walk backwards from the list tail until the first snapshot with
	 * a number not greater than @cno; @curr ends up as the successor
	 * and @prev as the predecessor of the insertion point.
	 */
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			/* Moving to a different block: remap. */
			kunmap_atomic(kaddr, KM_USER0);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr, KM_USER0);

	/* prev == 0 means the predecessor is the header anchor itself. */
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* Splice: curr->prev = cno */
	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	/* cno->next = curr, cno->prev = prev, and set the snapshot flag. */
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	/* prev->next = cno */
	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	/* Account the new snapshot in the header. */
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(curr_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
/*
 * Revert snapshot @cno back to a plain checkpoint: unlink it from the
 * doubly-linked snapshot list (re-pointing its neighbours @prev and
 * @next at each other), clear its snapshot flag, and decrement the
 * header's snapshot count.  Returns 0 on success (also when not a
 * snapshot), -ENOENT when the checkpoint is missing or invalid, or a
 * negative error code.
 */
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		/* Not a snapshot: nothing to do. */
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}

	/* Remember both neighbours before dropping the mapping. */
	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	/* A neighbour number of 0 stands for the header anchor. */
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* next->prev = prev */
	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr, KM_USER0);

	/* prev->next = next */
	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr, KM_USER0);

	/* Detach @cno and clear its snapshot flag. */
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	/* Account the removed snapshot in the header. */
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(next_bh);
	nilfs_mdt_mark_buffer_dirty(prev_bh);
	nilfs_mdt_mark_buffer_dirty(cp_bh);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
  706. /**
  707. * nilfs_cpfile_is_snapshot -
  708. * @cpfile: inode of checkpoint file
  709. * @cno: checkpoint number
  710. *
  711. * Description:
  712. *
  713. * Return Value: On success, 1 is returned if the checkpoint specified by
  714. * @cno is a snapshot, or 0 if not. On error, one of the following negative
  715. * error codes is returned.
  716. *
  717. * %-EIO - I/O error.
  718. *
  719. * %-ENOMEM - Insufficient amount of memory available.
  720. *
  721. * %-ENOENT - No such checkpoint.
  722. */
  723. int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
  724. {
  725. struct buffer_head *bh;
  726. struct nilfs_checkpoint *cp;
  727. void *kaddr;
  728. int ret;
  729. if (cno == 0)
  730. return -ENOENT; /* checkpoint number 0 is invalid */
  731. down_read(&NILFS_MDT(cpfile)->mi_sem);
  732. ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
  733. if (ret < 0)
  734. goto out;
  735. kaddr = kmap_atomic(bh->b_page, KM_USER0);
  736. cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
  737. ret = nilfs_checkpoint_snapshot(cp);
  738. kunmap_atomic(kaddr, KM_USER0);
  739. brelse(bh);
  740. out:
  741. up_read(&NILFS_MDT(cpfile)->mi_sem);
  742. return ret;
  743. }
/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: new mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	struct the_nilfs *nilfs;
	int ret;

	nilfs = NILFS_MDT(cpfile)->mi_nilfs;

	switch (mode) {
	case NILFS_CHECKPOINT:
		/*
		 * Check for protecting existing snapshot mounts:
		 * ns_mount_mutex is used to make this operation atomic and
		 * exclusive with a new mount job. Though it doesn't cover
		 * umount, it's enough for the purpose.
		 */
		mutex_lock(&nilfs->ns_mount_mutex);
		if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
			/* Current implementation does not have to protect
			   plain read-only mounts since they are exclusive
			   with a read/write mount and are protected from the
			   cleaner. */
			ret = -EBUSY;
		} else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		mutex_unlock(&nilfs->ns_mount_mutex);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
  792. /**
  793. * nilfs_cpfile_get_stat - get checkpoint statistics
  794. * @cpfile: inode of checkpoint file
  795. * @stat: pointer to a structure of checkpoint statistics
  796. *
  797. * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
  798. *
  799. * Return Value: On success, 0 is returned, and checkpoints information is
  800. * stored in the place pointed by @stat. On error, one of the following
  801. * negative error codes is returned.
  802. *
  803. * %-EIO - I/O error.
  804. *
  805. * %-ENOMEM - Insufficient amount of memory available.
  806. */
  807. int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
  808. {
  809. struct buffer_head *bh;
  810. struct nilfs_cpfile_header *header;
  811. void *kaddr;
  812. int ret;
  813. down_read(&NILFS_MDT(cpfile)->mi_sem);
  814. ret = nilfs_cpfile_get_header_block(cpfile, &bh);
  815. if (ret < 0)
  816. goto out_sem;
  817. kaddr = kmap_atomic(bh->b_page, KM_USER0);
  818. header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
  819. cpstat->cs_cno = nilfs_mdt_cno(cpfile);
  820. cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
  821. cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
  822. kunmap_atomic(kaddr, KM_USER0);
  823. brelse(bh);
  824. out_sem:
  825. up_read(&NILFS_MDT(cpfile)->mi_sem);
  826. return ret;
  827. }