dev_bdev.c

/*
 * fs/logfs/dev_bdev.c - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
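
/*
 * Read or write a single page synchronously: build a one-vector bio on the
 * stack, submit it and sleep on a completion until it finishes.  Returns 0
 * or -EIO depending on BIO_UPTODATE.  Used by bdev_readpage() and
 * bdev_write_sb().
 */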
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
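
/*
 * Read one page from the device and set the Uptodate/Error page flags to
 * match the result.  The void *_sb first argument lets this double as the
 * filler_t callback for read_cache_page() in bdev_find_first_sb() and
 * bdev_find_last_sb().
 */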
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

static DECLARE_WAIT_QUEUE_HEAD(wq);
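
/*
 * Write completion handler: walk the bio_vec array backwards (prefetching
 * the next page's flags), end writeback on each page and drop the reference
 * taken by find_lock_page() in __bdev_writeseg().  The last completed write
 * wakes anyone waiting in bdev_sync().
 */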
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
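
/*
 * Write nr_pages pages from the mapping inode's page cache to the device,
 * starting at byte offset ofs / page index index.  Pages are put under
 * writeback here and completed in writeseg_end_io().  Because the block
 * layer cannot split bios, a full bio is submitted and a fresh one
 * allocated whenever i reaches max_pages.
 */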
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
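
/*
 * ->writeseg: round the byte range [ofs, ofs + len) out to whole pages
 * (ofs down, len up) and hand it to __bdev_writeseg().
 */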
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
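
/*
 * A block device has no real erase operation, so an erase is emulated by
 * writing super->s_erase_page (set up by the generic logfs code, presumably
 * to look like freshly erased flash) over every page in the range.  The
 * same bio batching as in __bdev_writeseg() applies.
 */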
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}
	return 0;
}
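
/*
 * ->sync: wait until every write submitted via __bdev_writeseg() or
 * do_erase() has completed, i.e. s_pending_writes has dropped to zero.
 */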
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}
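
/*
 * The first superblock lives at device offset 0, the last one in the final
 * complete 4KiB block of the device.  Both are read through the mapping
 * inode's page cache, with bdev_readpage() as the filler.
 */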
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
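
/*
 * ->can_write_buf: unlike flash, a block device can always continue
 * writing into a partially written area, so report no restriction.
 */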
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}

static const struct logfs_device_ops bd_devops = {
	.find_first_sb = bdev_find_first_sb,
	.find_last_sb  = bdev_find_last_sb,
	.write_sb      = bdev_write_sb,
	.readpage      = bdev_readpage,
	.writeseg      = bdev_writeseg,
	.erase         = bdev_erase,
	.can_write_buf = bdev_can_write_buf,
	.sync          = bdev_sync,
	.put_device    = bdev_put_device,
};
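
/*
 * Open the named block device exclusively and attach it to the logfs
 * superblock.  If it turns out to be an mtdblock device, drop it again and
 * mount through the MTD backend instead, presumably so the native MTD
 * operations are used rather than the block-device emulation above.
 */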
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
			type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return 0;
}