dat.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407
  1. /*
  2. * dat.c - NILFS disk address translation.
  3. *
  4. * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * Written by Koji Sato <koji@osrg.net>.
  21. */
  22. #include <linux/types.h>
  23. #include <linux/buffer_head.h>
  24. #include <linux/string.h>
  25. #include <linux/errno.h>
  26. #include "nilfs.h"
  27. #include "mdt.h"
  28. #include "alloc.h"
  29. #include "dat.h"
  30. #define NILFS_CNO_MIN ((__u64)1)
  31. #define NILFS_CNO_MAX (~(__u64)0)
  32. static int nilfs_dat_prepare_entry(struct inode *dat,
  33. struct nilfs_palloc_req *req, int create)
  34. {
  35. return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
  36. create, &req->pr_entry_bh);
  37. }
/*
 * Finish an update to a DAT entry: mark the entry block dirty so it is
 * written out by the next segment construction, mark the DAT inode
 * dirty, and drop the buffer reference taken by
 * nilfs_dat_prepare_entry().
 */
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}
/*
 * Undo nilfs_dat_prepare_entry(): just release the entry block buffer
 * without dirtying anything.
 */
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}
  50. int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
  51. {
  52. int ret;
  53. ret = nilfs_palloc_prepare_alloc_entry(dat, req);
  54. if (ret < 0)
  55. return ret;
  56. ret = nilfs_dat_prepare_entry(dat, req, 1);
  57. if (ret < 0)
  58. nilfs_palloc_abort_alloc_entry(dat, req);
  59. return ret;
  60. }
/*
 * Initialize the freshly allocated DAT entry and commit the allocation.
 * The entry's lifetime is set to the full checkpoint range
 * [NILFS_CNO_MIN, NILFS_CNO_MAX) and no disk block is assigned yet
 * (de_blocknr == 0).
 */
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	/* atomic kmap: no sleeping between map and unmap */
	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}
/*
 * Roll back a prepared allocation, undoing the two prepare steps in
 * reverse order: release the entry block, then cancel the allocator
 * reservation.
 */
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}
/*
 * Commit freeing of a DAT entry: reset the entry to an unused state
 * (empty lifetime [NILFS_CNO_MIN, NILFS_CNO_MIN), no disk block), then
 * commit both the entry block and the allocator-side free.
 */
void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	/* atomic kmap: no sleeping between map and unmap */
	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}
/*
 * Roll back a prepared free: release the entry block, then cancel the
 * allocator-side free reservation.
 */
void nilfs_dat_abort_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_free_entry(dat, req);
}
  99. int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
  100. {
  101. int ret;
  102. ret = nilfs_dat_prepare_entry(dat, req, 0);
  103. WARN_ON(ret == -ENOENT);
  104. return ret;
  105. }
/*
 * Commit the start of a virtual block's lifetime: record the current
 * checkpoint number as de_start and bind the entry to @blocknr.
 */
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	/* atomic kmap: no sleeping between map and unmap */
	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}
/*
 * Roll back a prepared start: just release the entry block buffer.
 */
void nilfs_dat_abort_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
}
  123. int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
  124. {
  125. struct nilfs_dat_entry *entry;
  126. __u64 start;
  127. sector_t blocknr;
  128. void *kaddr;
  129. int ret;
  130. ret = nilfs_dat_prepare_entry(dat, req, 0);
  131. if (ret < 0) {
  132. WARN_ON(ret == -ENOENT);
  133. return ret;
  134. }
  135. kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
  136. entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
  137. req->pr_entry_bh, kaddr);
  138. start = le64_to_cpu(entry->de_start);
  139. blocknr = le64_to_cpu(entry->de_blocknr);
  140. kunmap_atomic(kaddr, KM_USER0);
  141. if (blocknr == 0) {
  142. ret = nilfs_palloc_prepare_free_entry(dat, req);
  143. if (ret < 0) {
  144. nilfs_dat_abort_entry(dat, req);
  145. return ret;
  146. }
  147. }
  148. return 0;
  149. }
/*
 * Commit the end of a virtual block's lifetime.  If @dead, the entry is
 * closed with an empty lifetime (de_end = de_start); otherwise de_end is
 * set to the current checkpoint number.  Entries that never got a disk
 * block (de_blocknr == 0) are freed outright via nilfs_dat_commit_free()
 * (whose allocator-side free was reserved in nilfs_dat_prepare_end()).
 */
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	/* atomic kmap: no sleeping between map and unmap */
	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		/* lifetime must not be inverted */
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}
/*
 * Roll back a prepared end.  If the entry started in the current
 * checkpoint and has no disk block, nilfs_dat_prepare_end() reserved an
 * allocator-side free — cancel that reservation before releasing the
 * entry block.
 */
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	/* atomic kmap: no sleeping between map and unmap */
	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
/**
 * nilfs_dat_mark_dirty - mark the DAT entry block for @vblocknr dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() reads in the entry block containing
 * the DAT entry for @vblocknr and marks it (and the DAT inode) dirty so
 * the block is written out by the next segment construction.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}
/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block number have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	/* thin wrapper: the persistent allocator does the actual freeing */
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/* atomic kmap: no sleeping between map and unmap */
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		/*
		 * A live entry must already have a disk block; an unbound
		 * entry here means corrupted DAT metadata.
		 */
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	/* moving to block 0 would make the entry look unallocated */
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(entry_bh);

	return 0;
}
/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/* atomic kmap: no sleeping between map and unmap */
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		/* de_blocknr == 0 means no disk block is assigned */
		ret = -ENOENT;
		goto out;
	}
	/* @blocknrp may be NULL when the caller only checks existence */
	if (blocknrp != NULL)
		*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}
/*
 * nilfs_dat_get_vinfo - fill in lifetime information for virtual blocks
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo records (stride @visz bytes per record)
 * @visz: size of one vinfo record in @buf
 * @nvi: number of records
 *
 * For each record, looks up the DAT entry for vi_vblocknr and copies
 * de_start/de_end/de_blocknr into vi_start/vi_end/vi_blocknr.  Records
 * whose virtual block numbers fall in the same entry block are handled
 * under a single block read.  Returns @nvi on success or a negative
 * error code.  NOTE(review): assumes the records are sorted so that
 * consecutive entries sharing a block are adjacent — confirm at callers.
 */
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* last virtual block number in this block */
		first = vinfo->vi_vblocknr;
		/* do_div() divides in place (remainder discarded here), so
		   first becomes the first entry number of this block */
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		/* copy out every consecutive record within [first, last] */
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}
	return nvi;
}