
/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/gfp.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

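/*
 * Completion callback for the synchronous bios built by sync_request();
 * all it has to do is wake the waiting submitter.
 */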
static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

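/*
 * Read or write a single page synchronously: build a one-segment bio on
 * the stack, submit it, unplug the queue and sleep until request_complete()
 * fires.  Returns 0 on success or -EIO if the bio did not end up uptodate.
 */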
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	generic_unplug_device(bdev_get_queue(bdev));
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

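/*
 * Read one page from the device and set the page flags to match the
 * result.  Also serves as the filler_t for read_cache_page() in
 * bdev_find_first_sb()/bdev_find_last_sb(), hence the void *_sb argument.
 */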
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

/* Woken by the end_io handlers when the last pending write completes. */
static DECLARE_WAIT_QUEUE_HEAD(wq);

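/*
 * Completion callback for asynchronous segment writes.  Walks the bio_vec
 * array backwards, ending writeback on each page and dropping the
 * reference taken by find_lock_page() in __bdev_writeseg(), then wakes
 * any bdev_sync() waiter once the last pending write has finished.
 */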
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

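/*
 * Write nr_pages consecutive pages of the mapping inode to the device,
 * starting at byte offset ofs.  Pages are batched into bios capped at
 * both the queue's max_hw_sectors and BIO_MAX_PAGES; a full bio is
 * submitted and a fresh one allocated, since the block layer cannot
 * split bios.  Every submission bumps s_pending_writes for bdev_sync().
 */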
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

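/*
 * Device-op entry point for segment writes: align the byte range to
 * page boundaries, pass it to __bdev_writeseg() and kick the queue.
 */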
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fits perfectly into a
		 * segment, the segment gets written out on sync and is
		 * subsequently closed.
		 */
		return;
	}

	/* Round the range out to whole pages. */
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
	generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}

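/*
 * Completion callback for erase bios.  Unlike writeseg_end_io() there is
 * no per-page cleanup, because every segment points at the shared
 * s_erase_page.
 */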
static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

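/*
 * Block devices have no erase operation, so erasing is emulated by
 * writing the shared s_erase_page over the whole range.  The bio
 * batching mirrors __bdev_writeseg(), except that every bio_vec maps
 * the same page.
 */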
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

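/*
 * Device-op entry point for erases.  The emulated erase is only issued
 * when the caller insists on it via ensure_write; see the comment below
 * on why the journal needs this.
 */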
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}
	return 0;
}

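/* Wait for all writes submitted through this file to complete. */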
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

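/*
 * The first superblock sits at offset 0; read it through the mapping
 * inode's page cache with bdev_readpage() as the filler.
 */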
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

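/*
 * The last superblock occupies the final 4KiB-aligned block of the
 * device: round the device size down to 4KiB and step back one block.
 */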
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

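/*
 * Superblock writes go through the synchronous sync_request() path, so
 * they have completed by the time this returns.
 */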
static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct super_block *sb)
{
	close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}

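/* A block device has no write buffer, so buffered writes are never possible. */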
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}

static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};

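/*
 * Mount entry point for block devices.  mtdblock devices are detected
 * by major number and rerouted to the native MTD backend; everything
 * else is mounted through bd_devops.
 */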
int logfs_get_sb_bdev(struct logfs_super *p,
		struct file_system_type *type, int flags,
		const char *devname, struct vfsmount *mnt)
{
	struct block_device *bdev;

	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
	if (IS_ERR(bdev)) {
		kfree(p);
		return PTR_ERR(bdev);
	}

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);

		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
		return logfs_get_sb_mtd(p, type, flags, mtdnr, mnt);
	}

	return logfs_get_sb_device(p, type, flags, NULL, bdev, &bd_devops, mnt);
}