block2mtd.c

/*
 * $Id: block2mtd.c,v 1.30 2005/11/29 14:48:32 gleixner Exp $
 *
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004,2005 Jörn Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>

#define VERSION "$Revision: 1.30 $"

#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)

/* Info for the block device */
struct block2mtd_dev {
        struct list_head list;
        struct block_device *blkdev;
        struct mtd_info mtd;
        struct mutex write_mutex;
};

/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

#define PAGE_READAHEAD 64

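/*
 * Pre-fill the page cache: allocate up to PAGE_READAHEAD cold pages
 * starting at @index (skipping pages that are already cached, and
 * giving up at once if the first one is) and hand the batch to
 * read_cache_pages() so the block device's readpage() fills them in.
 */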
static void cache_readahead(struct address_space *mapping, int index)
{
        filler_t *filler = (filler_t*)mapping->a_ops->readpage;
        int i, pagei;
        unsigned ret = 0;
        unsigned long end_index;
        struct page *page;
        LIST_HEAD(page_pool);
        struct inode *inode = mapping->host;
        loff_t isize = i_size_read(inode);

        if (!isize) {
                INFO("iSize=0 in cache_readahead\n");
                return;
        }
        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        read_lock_irq(&mapping->tree_lock);
        for (i = 0; i < PAGE_READAHEAD; i++) {
                pagei = index + i;
                if (pagei > end_index) {
                        INFO("Overrun end of disk in cache readahead\n");
                        break;
                }
                page = radix_tree_lookup(&mapping->page_tree, pagei);
                if (page && (!i))
                        break;
                if (page)
                        continue;
                read_unlock_irq(&mapping->tree_lock);
                page = page_cache_alloc_cold(mapping);
                read_lock_irq(&mapping->tree_lock);
                if (!page)
                        break;
                page->index = pagei;
                list_add(&page->lru, &page_pool);
                ret++;
        }
        read_unlock_irq(&mapping->tree_lock);
        if (ret)
                read_cache_pages(mapping, &page_pool, filler, NULL);
}

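/*
 * Start readahead for the pages following @index, then return the page
 * at @index itself, read and up to date, via read_cache_page().
 */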
static struct page* page_readahead(struct address_space *mapping, int index)
{
        filler_t *filler = (filler_t*)mapping->a_ops->readpage;
        cache_readahead(mapping, index);
        return read_cache_page(mapping, index, filler, NULL);
}

/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
        struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
        struct page *page;
        int index = to >> PAGE_SHIFT;   // page index
        int pages = len >> PAGE_SHIFT;
        u_long *p;
        u_long *max;

        while (pages) {
                page = page_readahead(mapping, index);
                if (!page)
                        return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);

                /* scan exactly one page; only dirty it if it is not already 0xff */
                max = page_address(page) + PAGE_SIZE;
                for (p = page_address(page); p < max; p++)
                        if (*p != -1UL) {
                                lock_page(page);
                                memset(page_address(page), 0xff, PAGE_SIZE);
                                set_page_dirty(page);
                                unlock_page(page);
                                break;
                        }

                page_cache_release(page);
                pages--;
                index++;
        }
        return 0;
}

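/*
 * MTD erase hook: for this MTD_RAM emulation an erase is simply a 0xff
 * fill of the range, serialized against writes by write_mutex.
 */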
static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct block2mtd_dev *dev = mtd->priv;
        size_t from = instr->addr;
        size_t len = instr->len;
        int err;

        instr->state = MTD_ERASING;
        mutex_lock(&dev->write_mutex);
        err = _block2mtd_erase(dev, from, len);
        mutex_unlock(&dev->write_mutex);
        if (err) {
                ERROR("erase failed err = %d", err);
                instr->state = MTD_ERASE_FAILED;
        } else
                instr->state = MTD_ERASE_DONE;

        mtd_erase_callback(instr);
        return err;
}

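/* read from the device via the page cache, one page at a time */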
static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, u_char *buf)
{
        struct block2mtd_dev *dev = mtd->priv;
        struct page *page;
        int index = from >> PAGE_SHIFT;
        int offset = from & (PAGE_SIZE-1);
        int cpylen;

        if (from > mtd->size)
                return -EINVAL;
        if (from + len > mtd->size)
                len = mtd->size - from;

        if (retlen)
                *retlen = 0;

        while (len) {
                if ((offset + len) > PAGE_SIZE)
                        cpylen = PAGE_SIZE - offset;    // multiple pages
                else
                        cpylen = len;   // this page
                len = len - cpylen;

                // Get page
                page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
                if (!page)
                        return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);

                memcpy(buf, page_address(page) + offset, cpylen);
                page_cache_release(page);

                if (retlen)
                        *retlen += cpylen;
                buf += cpylen;
                offset = 0;
                index++;
        }
        return 0;
}

/* write data to the underlying device */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
                loff_t to, size_t len, size_t *retlen)
{
        struct page *page;
        struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
        int index = to >> PAGE_SHIFT;   // page index
        int offset = to & ~PAGE_MASK;   // page offset
        int cpylen;

        if (retlen)
                *retlen = 0;
        while (len) {
                if ((offset+len) > PAGE_SIZE)
                        cpylen = PAGE_SIZE - offset;    // multiple pages
                else
                        cpylen = len;   // this page
                len = len - cpylen;

                // Get page
                page = page_readahead(mapping, index);
                if (!page)
                        return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);

                if (memcmp(page_address(page)+offset, buf, cpylen)) {
                        lock_page(page);
                        memcpy(page_address(page) + offset, buf, cpylen);
                        set_page_dirty(page);
                        unlock_page(page);
                }
                page_cache_release(page);

                if (retlen)
                        *retlen += cpylen;
                buf += cpylen;
                offset = 0;
                index++;
        }
        return 0;
}

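/*
 * MTD write hook: clamp the request to the device size and copy under
 * write_mutex; the data reaches the block device later, when the
 * dirtied page cache pages are written back (or on block2mtd_sync).
 */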
static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
                size_t *retlen, const u_char *buf)
{
        struct block2mtd_dev *dev = mtd->priv;
        int err;

        if (!len)
                return 0;
        if (to >= mtd->size)
                return -ENOSPC;
        if (to + len > mtd->size)
                len = mtd->size - to;

        mutex_lock(&dev->write_mutex);
        err = _block2mtd_write(dev, buf, to, len, retlen);
        mutex_unlock(&dev->write_mutex);
        if (err > 0)
                err = 0;
        return err;
}

/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
        struct block2mtd_dev *dev = mtd->priv;
        sync_blockdev(dev->blkdev);
        return;
}

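/* drop cached pages and release the underlying block device */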
static void block2mtd_free_device(struct block2mtd_dev *dev)
{
        if (!dev)
                return;

        kfree(dev->mtd.name);

        if (dev->blkdev) {
                invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
                close_bdev_excl(dev->blkdev);
        }

        kfree(dev);
}

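/*
 * Open @devname exclusively, refuse mtdblock devices, and register the
 * result as an MTD_RAM device with the given erase size.  On failure,
 * everything allocated so far is torn down via block2mtd_free_device().
 */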
/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
        struct block_device *bdev;
        struct block2mtd_dev *dev;

        if (!devname)
                return NULL;

        dev = kmalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
        if (!dev)
                return NULL;
        memset(dev, 0, sizeof(*dev));

        /* Get a handle on the device */
        bdev = open_bdev_excl(devname, O_RDWR, NULL);
        if (IS_ERR(bdev)) {
                ERROR("error: cannot open device %s", devname);
                goto devinit_err;
        }
        dev->blkdev = bdev;

        if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
                ERROR("attempting to use an MTD device as a block device");
                goto devinit_err;
        }

        mutex_init(&dev->write_mutex);

        /* Setup the MTD structure */
        /* make the name contain the block device name */
        dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
                        GFP_KERNEL);
        if (!dev->mtd.name)
                goto devinit_err;

        sprintf(dev->mtd.name, "block2mtd: %s", devname);

        dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
        dev->mtd.erasesize = erase_size;
        dev->mtd.type = MTD_RAM;
        dev->mtd.flags = MTD_CAP_RAM;
        dev->mtd.erase = block2mtd_erase;
        dev->mtd.write = block2mtd_write;
        dev->mtd.writev = default_mtd_writev;
        dev->mtd.sync = block2mtd_sync;
        dev->mtd.read = block2mtd_read;
        dev->mtd.readv = default_mtd_readv;
        dev->mtd.priv = dev;
        dev->mtd.owner = THIS_MODULE;

        if (add_mtd_device(&dev->mtd)) {
                /* Device didn't get added, so free the entry */
                goto devinit_err;
        }
        list_add(&dev->list, &blkmtd_device_list);
        INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
                        dev->mtd.name + strlen("block2mtd: "),
                        dev->mtd.erasesize >> 10, dev->mtd.erasesize);
        return dev;

devinit_err:
        block2mtd_free_device(dev);
        return NULL;
}

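/*
 * simple_strtoul() with an optional binary suffix; the switch falls
 * through deliberately, so 'G' multiplies by 1024 three times, 'M'
 * twice and 'k' once, and a trailing "i" (ki/Mi/Gi) is consumed.
 */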
static unsigned long ustrtoul(const char *cp, char **endp, unsigned int base)
{
        unsigned long result = simple_strtoul(cp, endp, base);
        switch (**endp) {
        case 'G' :
                result *= 1024;
        case 'M':
                result *= 1024;
        case 'k':
                result *= 1024;
        /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
                if ((*endp)[1] == 'i')
                        (*endp) += 2;
        }
        return result;
}

static int parse_num(size_t *num, const char *token)
{
        char *endp;
        size_t n;

        n = (size_t) ustrtoul(token, &endp, 0);
        if (*endp)
                return -EINVAL;

        *num = n;
        return 0;
}

static int parse_name(char **pname, const char *token, size_t limit)
{
        size_t len;
        char *name;

        len = strlen(token) + 1;
        if (len > limit)
                return -ENOSPC;

        name = kmalloc(len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        strcpy(name, token);

        *pname = name;
        return 0;
}

static inline void kill_final_newline(char *str)
{
        char *newline = strrchr(str, '\n');
        if (newline && !newline[1])
                *newline = 0;
}

/* ERROR() already adds the "block2mtd: " prefix and a newline */
#define parse_err(fmt, args...) do {    \
        ERROR(fmt, ## args);            \
        return 0;                       \
} while (0)

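/*
 * Parse the module parameter, "<blockdev>[,<erase size>]".  The erase
 * size defaults to PAGE_SIZE and may carry a ki/Mi/Gi suffix; parse
 * errors are logged but still return 0, so a bad parameter does not
 * abort the module load.
 */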
static int block2mtd_setup(const char *val, struct kernel_param *kp)
{
        char buf[80+12], *str=buf;      /* 80 for device, 12 for erase size */
        char *token[2];
        char *name;
        size_t erase_size = PAGE_SIZE;
        int i, ret;

        if (strnlen(val, sizeof(buf)) >= sizeof(buf))
                parse_err("parameter too long");

        strcpy(str, val);
        kill_final_newline(str);

        for (i=0; i<2; i++)
                token[i] = strsep(&str, ",");

        if (str)
                parse_err("too many arguments");

        if (!token[0])
                parse_err("no argument");

        ret = parse_name(&name, token[0], 80);
        if (ret == -ENOMEM)
                parse_err("out of memory");
        if (ret == -ENOSPC)
                parse_err("name too long");
        if (ret)
                return 0;

        if (token[1]) {
                ret = parse_num(&erase_size, token[1]);
                if (ret)
                        parse_err("illegal erase size");
        }

        add_device(name, erase_size);

        return 0;
}

module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");

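/*
 * Example (the device path is only illustrative):
 *
 *   modprobe block2mtd block2mtd=/dev/sdb1,64ki
 *
 * registers /dev/sdb1 as an MTD_RAM device with a 64KiB erase size;
 * omitting the second field leaves the erase size at PAGE_SIZE.
 */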
static int __init block2mtd_init(void)
{
        INFO("version " VERSION);
        return 0;
}

static void __exit block2mtd_exit(void)
{
        struct list_head *pos, *next;

        /* Remove the MTD devices */
        list_for_each_safe(pos, next, &blkmtd_device_list) {
                struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
                block2mtd_sync(&dev->mtd);
                del_mtd_device(&dev->mtd);
                INFO("mtd%d: [%s] removed", dev->mtd.index,
                                dev->mtd.name + strlen("block2mtd: "));
                list_del(&dev->list);
                block2mtd_free_device(dev);
        }
}

module_init(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
MODULE_DESCRIPTION("Emulate an MTD using a block device");