mtdpart.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565
  1. /*
  2. * Simple MTD partitioning layer
  3. *
  4. * (C) 2000 Nicolas Pitre <nico@cam.org>
  5. *
  6. * This code is GPL
  7. *
  8. * $Id: mtdpart.c,v 1.55 2005/11/07 11:14:20 gleixner Exp $
  9. *
  10. * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
  11. * added support for read_oob, write_oob
  12. */
  13. #include <linux/module.h>
  14. #include <linux/types.h>
  15. #include <linux/kernel.h>
  16. #include <linux/slab.h>
  17. #include <linux/list.h>
  18. #include <linux/config.h>
  19. #include <linux/kmod.h>
  20. #include <linux/mtd/mtd.h>
  21. #include <linux/mtd/partitions.h>
  22. #include <linux/mtd/compatmac.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;		/* this partition's own MTD object, handed out to users */
	struct mtd_info *master;	/* the whole device this partition lives on */
	u_int32_t offset;		/* byte offset of the partition within the master */
	int index;			/* position of this partition in the partition table */
	struct list_head list;		/* link in the global mtd_partitions list */
	int registered;			/* non-zero once add_mtd_device() has been called for it */
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 * (The cast is valid because 'mtd' is the first member of struct mtd_part.)
 */
#define PART(x)  ((struct mtd_part *)(x))

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
  43. static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
  44. size_t *retlen, u_char *buf)
  45. {
  46. struct mtd_part *part = PART(mtd);
  47. int res;
  48. if (from >= mtd->size)
  49. len = 0;
  50. else if (from + len > mtd->size)
  51. len = mtd->size - from;
  52. res = part->master->read (part->master, from + part->offset,
  53. len, retlen, buf);
  54. if (unlikely(res)) {
  55. if (res == -EUCLEAN)
  56. mtd->ecc_stats.corrected++;
  57. if (res == -EBADMSG)
  58. mtd->ecc_stats.failed++;
  59. }
  60. return res;
  61. }
  62. static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
  63. size_t *retlen, u_char **buf)
  64. {
  65. struct mtd_part *part = PART(mtd);
  66. if (from >= mtd->size)
  67. len = 0;
  68. else if (from + len > mtd->size)
  69. len = mtd->size - from;
  70. return part->master->point (part->master, from + part->offset,
  71. len, retlen, buf);
  72. }
  73. static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
  74. {
  75. struct mtd_part *part = PART(mtd);
  76. part->master->unpoint (part->master, addr, from + part->offset, len);
  77. }
  78. static int part_read_oob(struct mtd_info *mtd, loff_t from,
  79. struct mtd_oob_ops *ops)
  80. {
  81. struct mtd_part *part = PART(mtd);
  82. int res;
  83. if (from >= mtd->size)
  84. return -EINVAL;
  85. if (from + ops->len > mtd->size)
  86. return -EINVAL;
  87. res = part->master->read_oob(part->master, from + part->offset, ops);
  88. if (unlikely(res)) {
  89. if (res == -EUCLEAN)
  90. mtd->ecc_stats.corrected++;
  91. if (res == -EBADMSG)
  92. mtd->ecc_stats.failed++;
  93. }
  94. return res;
  95. }
  96. static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
  97. size_t *retlen, u_char *buf)
  98. {
  99. struct mtd_part *part = PART(mtd);
  100. return part->master->read_user_prot_reg (part->master, from,
  101. len, retlen, buf);
  102. }
  103. static int part_get_user_prot_info (struct mtd_info *mtd,
  104. struct otp_info *buf, size_t len)
  105. {
  106. struct mtd_part *part = PART(mtd);
  107. return part->master->get_user_prot_info (part->master, buf, len);
  108. }
  109. static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
  110. size_t *retlen, u_char *buf)
  111. {
  112. struct mtd_part *part = PART(mtd);
  113. return part->master->read_fact_prot_reg (part->master, from,
  114. len, retlen, buf);
  115. }
  116. static int part_get_fact_prot_info (struct mtd_info *mtd,
  117. struct otp_info *buf, size_t len)
  118. {
  119. struct mtd_part *part = PART(mtd);
  120. return part->master->get_fact_prot_info (part->master, buf, len);
  121. }
  122. static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
  123. size_t *retlen, const u_char *buf)
  124. {
  125. struct mtd_part *part = PART(mtd);
  126. if (!(mtd->flags & MTD_WRITEABLE))
  127. return -EROFS;
  128. if (to >= mtd->size)
  129. len = 0;
  130. else if (to + len > mtd->size)
  131. len = mtd->size - to;
  132. return part->master->write (part->master, to + part->offset,
  133. len, retlen, buf);
  134. }
  135. static int part_write_oob(struct mtd_info *mtd, loff_t to,
  136. struct mtd_oob_ops *ops)
  137. {
  138. struct mtd_part *part = PART(mtd);
  139. if (!(mtd->flags & MTD_WRITEABLE))
  140. return -EROFS;
  141. if (to >= mtd->size)
  142. return -EINVAL;
  143. if (to + ops->len > mtd->size)
  144. return -EINVAL;
  145. return part->master->write_oob(part->master, to + part->offset, ops);
  146. }
  147. static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
  148. size_t *retlen, u_char *buf)
  149. {
  150. struct mtd_part *part = PART(mtd);
  151. return part->master->write_user_prot_reg (part->master, from,
  152. len, retlen, buf);
  153. }
  154. static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
  155. {
  156. struct mtd_part *part = PART(mtd);
  157. return part->master->lock_user_prot_reg (part->master, from, len);
  158. }
  159. static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
  160. unsigned long count, loff_t to, size_t *retlen)
  161. {
  162. struct mtd_part *part = PART(mtd);
  163. if (!(mtd->flags & MTD_WRITEABLE))
  164. return -EROFS;
  165. return part->master->writev (part->master, vecs, count,
  166. to + part->offset, retlen);
  167. }
  168. static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
  169. {
  170. struct mtd_part *part = PART(mtd);
  171. int ret;
  172. if (!(mtd->flags & MTD_WRITEABLE))
  173. return -EROFS;
  174. if (instr->addr >= mtd->size)
  175. return -EINVAL;
  176. instr->addr += part->offset;
  177. ret = part->master->erase(part->master, instr);
  178. return ret;
  179. }
/*
 * Completion hook run when an erase finishes.  If the erase was issued
 * through a partition wrapper (detected by the erase method being
 * part_erase), translate addr/fail_addr back from master coordinates into
 * partition-relative ones before invoking the caller's callback.
 */
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		/* 0xffffffff means "fail address unknown" -- leave it alone */
		if (instr->fail_addr != 0xffffffff)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
  192. static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
  193. {
  194. struct mtd_part *part = PART(mtd);
  195. if ((len + ofs) > mtd->size)
  196. return -EINVAL;
  197. return part->master->lock(part->master, ofs + part->offset, len);
  198. }
  199. static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
  200. {
  201. struct mtd_part *part = PART(mtd);
  202. if ((len + ofs) > mtd->size)
  203. return -EINVAL;
  204. return part->master->unlock(part->master, ofs + part->offset, len);
  205. }
  206. static void part_sync(struct mtd_info *mtd)
  207. {
  208. struct mtd_part *part = PART(mtd);
  209. part->master->sync(part->master);
  210. }
  211. static int part_suspend(struct mtd_info *mtd)
  212. {
  213. struct mtd_part *part = PART(mtd);
  214. return part->master->suspend(part->master);
  215. }
  216. static void part_resume(struct mtd_info *mtd)
  217. {
  218. struct mtd_part *part = PART(mtd);
  219. part->master->resume(part->master);
  220. }
  221. static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
  222. {
  223. struct mtd_part *part = PART(mtd);
  224. if (ofs >= mtd->size)
  225. return -EINVAL;
  226. ofs += part->offset;
  227. return part->master->block_isbad(part->master, ofs);
  228. }
  229. static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
  230. {
  231. struct mtd_part *part = PART(mtd);
  232. int res;
  233. if (!(mtd->flags & MTD_WRITEABLE))
  234. return -EROFS;
  235. if (ofs >= mtd->size)
  236. return -EINVAL;
  237. ofs += part->offset;
  238. res = part->master->block_markbad(part->master, ofs);
  239. if (!res)
  240. mtd->ecc_stats.badblocks++;
  241. return res;
  242. }
  243. /*
  244. * This function unregisters and destroy all slave MTD objects which are
  245. * attached to the given master MTD object.
  246. */
  247. int del_mtd_partitions(struct mtd_info *master)
  248. {
  249. struct list_head *node;
  250. struct mtd_part *slave;
  251. for (node = mtd_partitions.next;
  252. node != &mtd_partitions;
  253. node = node->next) {
  254. slave = list_entry(node, struct mtd_part, list);
  255. if (slave->master == master) {
  256. struct list_head *prev = node->prev;
  257. __list_del(prev, node->next);
  258. if(slave->registered)
  259. del_mtd_device(&slave->mtd);
  260. kfree(slave);
  261. node = prev;
  262. }
  263. }
  264. return 0;
  265. }
/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * Returns 0 on success or -ENOMEM if a slave allocation fails (in which
 * case all partitions created so far for this master are torn down again).
 * (Q: should we register the master MTD object as well?)
 */
int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	u_int32_t cur_offset = 0;	/* running end offset, used by OFS_APPEND/NXTBLK */
	int i;

	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {

		/* allocate the partition structure */
		slave = kmalloc (sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			printk ("memory allocation error while creating partitions for \"%s\"\n",
				master->name);
			/* undo the partitions already created for this master */
			del_mtd_partitions(master);
			return -ENOMEM;
		}
		memset(slave, 0, sizeof(*slave));
		list_add(&slave->list, &mtd_partitions);

		/* set up the MTD object for this partition */
		slave->mtd.type = master->type;
		/* per-partition flags may only remove capabilities, never add them */
		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
		slave->mtd.size = parts[i].size;
		slave->mtd.writesize = master->writesize;
		slave->mtd.oobsize = master->oobsize;
		slave->mtd.ecctype = master->ecctype;
		slave->mtd.eccsize = master->eccsize;

		slave->mtd.name = parts[i].name;
		slave->mtd.bank_size = master->bank_size;
		slave->mtd.owner = master->owner;

		slave->mtd.read = part_read;
		slave->mtd.write = part_write;

		/* install the optional wrappers only when the master provides them */
		if(master->point && master->unpoint){
			slave->mtd.point = part_point;
			slave->mtd.unpoint = part_unpoint;
		}

		if (master->read_oob)
			slave->mtd.read_oob = part_read_oob;
		if (master->write_oob)
			slave->mtd.write_oob = part_write_oob;
		if(master->read_user_prot_reg)
			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
		if(master->read_fact_prot_reg)
			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
		if(master->write_user_prot_reg)
			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
		if(master->lock_user_prot_reg)
			slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
		if(master->get_user_prot_info)
			slave->mtd.get_user_prot_info = part_get_user_prot_info;
		if(master->get_fact_prot_info)
			slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
		if (master->sync)
			slave->mtd.sync = part_sync;
		/* suspend/resume are hooked on the first partition only */
		if (!i && master->suspend && master->resume) {
			slave->mtd.suspend = part_suspend;
			slave->mtd.resume = part_resume;
		}
		if (master->writev)
			slave->mtd.writev = part_writev;
		if (master->lock)
			slave->mtd.lock = part_lock;
		if (master->unlock)
			slave->mtd.unlock = part_unlock;
		if (master->block_isbad)
			slave->mtd.block_isbad = part_block_isbad;
		if (master->block_markbad)
			slave->mtd.block_markbad = part_block_markbad;
		slave->mtd.erase = part_erase;
		slave->master = master;
		slave->offset = parts[i].offset;
		slave->index = i;

		/* OFS_APPEND: start right where the previous partition ended */
		if (slave->offset == MTDPART_OFS_APPEND)
			slave->offset = cur_offset;
		/* OFS_NXTBLK: like APPEND, but rounded up to an erase-block boundary */
		if (slave->offset == MTDPART_OFS_NXTBLK) {
			slave->offset = cur_offset;
			if ((cur_offset % master->erasesize) != 0) {
				/* Round up to next erasesize */
				slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
				printk(KERN_NOTICE "Moving partition %d: "
				       "0x%08x -> 0x%08x\n", i,
				       cur_offset, slave->offset);
			}
		}
		/* SIZ_FULL: extend the partition to the end of the device */
		if (slave->mtd.size == MTDPART_SIZ_FULL)
			slave->mtd.size = master->size - slave->offset;
		cur_offset = slave->offset + slave->mtd.size;

		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
			slave->offset + slave->mtd.size, slave->mtd.name);

		/* let's do some sanity checks */
		if (slave->offset >= master->size) {
			/* let's register it anyway to preserve ordering */
			slave->offset = 0;
			slave->mtd.size = 0;
			printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
				parts[i].name);
		}
		if (slave->offset + slave->mtd.size > master->size) {
			slave->mtd.size = master->size - slave->offset;
			printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
				parts[i].name, master->name, slave->mtd.size);
		}
		if (master->numeraseregions>1) {
			/* Deal with variable erase size stuff */
			int i;	/* NOTE(review): shadows the outer partition index */
			struct mtd_erase_region_info *regions = master->eraseregions;

			/* Find the first erase regions which is part of this partition. */
			for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
				;

			/* the partition's erasesize is the largest one it spans */
			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
				if (slave->mtd.erasesize < regions[i].erasesize) {
					slave->mtd.erasesize = regions[i].erasesize;
				}
			}
		} else {
			/* Single erase size */
			slave->mtd.erasesize = master->erasesize;
		}

		/* partitions not aligned to erase blocks cannot safely be written */
		if ((slave->mtd.flags & MTD_WRITEABLE) &&
		    (slave->offset % slave->mtd.erasesize)) {
			/* Doesn't start on a boundary of major erase size */
			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
			slave->mtd.flags &= ~MTD_WRITEABLE;
			printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
				parts[i].name);
		}
		if ((slave->mtd.flags & MTD_WRITEABLE) &&
		    (slave->mtd.size % slave->mtd.erasesize)) {
			slave->mtd.flags &= ~MTD_WRITEABLE;
			printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
				parts[i].name);
		}

		slave->mtd.ecclayout = master->ecclayout;
		/* pre-count the bad blocks that fall inside this partition */
		if (master->block_isbad) {
			uint32_t offs = 0;

			while(offs < slave->mtd.size) {
				if (master->block_isbad(master,
							offs + slave->offset))
					slave->mtd.ecc_stats.badblocks++;
				offs += slave->mtd.erasesize;
			}
		}

		if(parts[i].mtdp)
		{	/* store the object pointer (caller may or may not register it */
			*parts[i].mtdp = &slave->mtd;
			slave->registered = 0;
		}
		else
		{
			/* register our partition */
			add_mtd_device(&slave->mtd);
			slave->registered = 1;
		}
	}

	return 0;
}
  428. EXPORT_SYMBOL(add_mtd_partitions);
  429. EXPORT_SYMBOL(del_mtd_partitions);
  430. static DEFINE_SPINLOCK(part_parser_lock);
  431. static LIST_HEAD(part_parsers);
  432. static struct mtd_part_parser *get_partition_parser(const char *name)
  433. {
  434. struct list_head *this;
  435. void *ret = NULL;
  436. spin_lock(&part_parser_lock);
  437. list_for_each(this, &part_parsers) {
  438. struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list);
  439. if (!strcmp(p->name, name) && try_module_get(p->owner)) {
  440. ret = p;
  441. break;
  442. }
  443. }
  444. spin_unlock(&part_parser_lock);
  445. return ret;
  446. }
  447. int register_mtd_parser(struct mtd_part_parser *p)
  448. {
  449. spin_lock(&part_parser_lock);
  450. list_add(&p->list, &part_parsers);
  451. spin_unlock(&part_parser_lock);
  452. return 0;
  453. }
  454. int deregister_mtd_parser(struct mtd_part_parser *p)
  455. {
  456. spin_lock(&part_parser_lock);
  457. list_del(&p->list);
  458. spin_unlock(&part_parser_lock);
  459. return 0;
  460. }
  461. int parse_mtd_partitions(struct mtd_info *master, const char **types,
  462. struct mtd_partition **pparts, unsigned long origin)
  463. {
  464. struct mtd_part_parser *parser;
  465. int ret = 0;
  466. for ( ; ret <= 0 && *types; types++) {
  467. parser = get_partition_parser(*types);
  468. #ifdef CONFIG_KMOD
  469. if (!parser && !request_module("%s", *types))
  470. parser = get_partition_parser(*types);
  471. #endif
  472. if (!parser) {
  473. printk(KERN_NOTICE "%s partition parsing not available\n",
  474. *types);
  475. continue;
  476. }
  477. ret = (*parser->parse_fn)(master, pparts, origin);
  478. if (ret > 0) {
  479. printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
  480. ret, parser->name, master->name);
  481. }
  482. put_partition_parser(parser);
  483. }
  484. return ret;
  485. }
  486. EXPORT_SYMBOL_GPL(parse_mtd_partitions);
  487. EXPORT_SYMBOL_GPL(register_mtd_parser);
  488. EXPORT_SYMBOL_GPL(deregister_mtd_parser);
  489. MODULE_LICENSE("GPL");
  490. MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
  491. MODULE_DESCRIPTION("Generic support for partitioning of MTD devices");