  1. /*
  2. * fs/logfs/dev_mtd.c - Device access methods for MTD
  3. *
  4. * As should be obvious for Linux kernel code, license is GPLv2
  5. *
  6. * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
  7. */
  8. #include "logfs.h"
  9. #include <linux/completion.h>
  10. #include <linux/mount.h>
  11. #include <linux/sched.h>
  12. #include <linux/slab.h>
  13. #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
  14. static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
  15. void *buf)
  16. {
  17. struct mtd_info *mtd = logfs_super(sb)->s_mtd;
  18. size_t retlen;
  19. int ret;
  20. ret = mtd->read(mtd, ofs, len, &retlen, buf);
  21. BUG_ON(ret == -EINVAL);
  22. if (ret)
  23. return ret;
  24. /* Not sure if we should loop instead. */
  25. if (retlen != len)
  26. return -EIO;
  27. return 0;
  28. }
  29. static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
  30. void *buf)
  31. {
  32. struct logfs_super *super = logfs_super(sb);
  33. struct mtd_info *mtd = super->s_mtd;
  34. size_t retlen;
  35. loff_t page_start, page_end;
  36. int ret;
  37. if (super->s_flags & LOGFS_SB_FLAG_RO)
  38. return -EROFS;
  39. BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
  40. BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
  41. BUG_ON(len > PAGE_CACHE_SIZE);
  42. page_start = ofs & PAGE_CACHE_MASK;
  43. page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
  44. ret = mtd->write(mtd, ofs, len, &retlen, buf);
  45. if (ret || (retlen != len))
  46. return -EIO;
  47. return 0;
  48. }
  49. /*
  50. * For as long as I can remember (since about 2001) mtd->erase has been an
  51. * asynchronous interface lacking the first driver to actually use the
  52. * asynchronous properties. So just to prevent the first implementor of such
  53. * a thing from breaking logfs in 2350, we do the usual pointless dance to
  54. * declare a completion variable and wait for completion before returning
  55. * from logfs_mtd_erase(). What an exercise in futility!
  56. */
  57. static void logfs_erase_callback(struct erase_info *ei)
  58. {
  59. complete((struct completion *)ei->priv);
  60. }
  61. static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
  62. size_t len)
  63. {
  64. struct logfs_super *super = logfs_super(sb);
  65. struct address_space *mapping = super->s_mapping_inode->i_mapping;
  66. struct page *page;
  67. pgoff_t index = ofs >> PAGE_SHIFT;
  68. for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
  69. page = find_get_page(mapping, index);
  70. if (!page)
  71. continue;
  72. memset(page_address(page), 0xFF, PAGE_SIZE);
  73. page_cache_release(page);
  74. }
  75. return 0;
  76. }
  77. static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
  78. int ensure_write)
  79. {
  80. struct mtd_info *mtd = logfs_super(sb)->s_mtd;
  81. struct erase_info ei;
  82. DECLARE_COMPLETION_ONSTACK(complete);
  83. int ret;
  84. BUG_ON(len % mtd->erasesize);
  85. if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
  86. return -EROFS;
  87. memset(&ei, 0, sizeof(ei));
  88. ei.mtd = mtd;
  89. ei.addr = ofs;
  90. ei.len = len;
  91. ei.callback = logfs_erase_callback;
  92. ei.priv = (long)&complete;
  93. ret = mtd->erase(mtd, &ei);
  94. if (ret)
  95. return -EIO;
  96. wait_for_completion(&complete);
  97. if (ei.state != MTD_ERASE_DONE)
  98. return -EIO;
  99. return logfs_mtd_erase_mapping(sb, ofs, len);
  100. }
  101. static void logfs_mtd_sync(struct super_block *sb)
  102. {
  103. struct mtd_info *mtd = logfs_super(sb)->s_mtd;
  104. if (mtd->sync)
  105. mtd->sync(mtd);
  106. }
  107. static int logfs_mtd_readpage(void *_sb, struct page *page)
  108. {
  109. struct super_block *sb = _sb;
  110. int err;
  111. err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
  112. page_address(page));
  113. if (err == -EUCLEAN || err == -EBADMSG) {
  114. /* -EBADMSG happens regularly on power failures */
  115. err = 0;
  116. /* FIXME: force GC this segment */
  117. }
  118. if (err) {
  119. ClearPageUptodate(page);
  120. SetPageError(page);
  121. } else {
  122. SetPageUptodate(page);
  123. ClearPageError(page);
  124. }
  125. unlock_page(page);
  126. return err;
  127. }
  128. static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
  129. {
  130. struct logfs_super *super = logfs_super(sb);
  131. struct address_space *mapping = super->s_mapping_inode->i_mapping;
  132. filler_t *filler = logfs_mtd_readpage;
  133. struct mtd_info *mtd = super->s_mtd;
  134. if (!mtd->block_isbad)
  135. return NULL;
  136. *ofs = 0;
  137. while (mtd->block_isbad(mtd, *ofs)) {
  138. *ofs += mtd->erasesize;
  139. if (*ofs >= mtd->size)
  140. return NULL;
  141. }
  142. BUG_ON(*ofs & ~PAGE_MASK);
  143. return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
  144. }
  145. static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
  146. {
  147. struct logfs_super *super = logfs_super(sb);
  148. struct address_space *mapping = super->s_mapping_inode->i_mapping;
  149. filler_t *filler = logfs_mtd_readpage;
  150. struct mtd_info *mtd = super->s_mtd;
  151. if (!mtd->block_isbad)
  152. return NULL;
  153. *ofs = mtd->size - mtd->erasesize;
  154. while (mtd->block_isbad(mtd, *ofs)) {
  155. *ofs -= mtd->erasesize;
  156. if (*ofs <= 0)
  157. return NULL;
  158. }
  159. *ofs = *ofs + mtd->erasesize - 0x1000;
  160. BUG_ON(*ofs & ~PAGE_MASK);
  161. return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
  162. }
  163. static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
  164. size_t nr_pages)
  165. {
  166. struct logfs_super *super = logfs_super(sb);
  167. struct address_space *mapping = super->s_mapping_inode->i_mapping;
  168. struct page *page;
  169. int i, err;
  170. for (i = 0; i < nr_pages; i++) {
  171. page = find_lock_page(mapping, index + i);
  172. BUG_ON(!page);
  173. err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
  174. page_address(page));
  175. unlock_page(page);
  176. page_cache_release(page);
  177. if (err)
  178. return err;
  179. }
  180. return 0;
  181. }
  182. static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
  183. {
  184. struct logfs_super *super = logfs_super(sb);
  185. int head;
  186. if (super->s_flags & LOGFS_SB_FLAG_RO)
  187. return;
  188. if (len == 0) {
  189. /* This can happen when the object fit perfectly into a
  190. * segment, the segment gets written per sync and subsequently
  191. * closed.
  192. */
  193. return;
  194. }
  195. head = ofs & (PAGE_SIZE - 1);
  196. if (head) {
  197. ofs -= head;
  198. len += head;
  199. }
  200. len = PAGE_ALIGN(len);
  201. __logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
  202. }
/* Drop the reference on the mtd device taken by logfs_get_sb_mtd(). */
static void logfs_mtd_put_device(struct logfs_super *s)
{
	put_mtd_device(s->s_mtd);
}
  207. static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
  208. {
  209. struct logfs_super *super = logfs_super(sb);
  210. void *buf;
  211. int err;
  212. buf = kmalloc(super->s_writesize, GFP_KERNEL);
  213. if (!buf)
  214. return -ENOMEM;
  215. err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
  216. if (err)
  217. goto out;
  218. if (memchr_inv(buf, 0xff, super->s_writesize))
  219. err = -EIO;
  220. kfree(buf);
  221. out:
  222. return err;
  223. }
/* Device-access operations used when the backing store is an MTD device. */
static const struct logfs_device_ops mtd_devops = {
	.find_first_sb = logfs_mtd_find_first_sb,
	.find_last_sb = logfs_mtd_find_last_sb,
	.readpage = logfs_mtd_readpage,
	.writeseg = logfs_mtd_writeseg,
	.erase = logfs_mtd_erase,
	.can_write_buf = logfs_mtd_can_write_buf,
	.sync = logfs_mtd_sync,
	.put_device = logfs_mtd_put_device,
};
  234. int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
  235. {
  236. struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);
  237. if (IS_ERR(mtd))
  238. return PTR_ERR(mtd);
  239. s->s_bdev = NULL;
  240. s->s_mtd = mtd;
  241. s->s_devops = &mtd_devops;
  242. return 0;
  243. }