
/*
 * Simple MTD partitioning layer
 *
 * (C) 2000 Nicolas Pitre <nico@cam.org>
 *
 * This code is GPL
 *
 * $Id: mtdpart.c,v 1.55 2005/11/07 11:14:20 gleixner Exp $
 *
 * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
 *            added support for read_oob, write_oob
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/config.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
        struct mtd_info mtd;            /* slave MTD object exposed to users */
        struct mtd_info *master;        /* underlying device this partition lives on */
        u_int32_t offset;               /* offset of the partition within the master */
        int index;                      /* position in the partition table */
        struct list_head list;          /* entry in mtd_partitions */
        int registered;                 /* non-zero if registered via add_mtd_device() */
};
/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
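/*
 * The cast is only valid because the embedded struct mtd_info is the first
 * member of struct mtd_part, so a pointer to the slave mtd_info and a pointer
 * to its containing mtd_part share the same address.
 */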
/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
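/*
 * Most handlers below clamp the request to the partition boundary and shift
 * the offset by part->offset before calling into the master; the OTP
 * (*_prot_reg) handlers pass their offsets through unchanged.
 */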
static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (from >= mtd->size)
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
        if (part->master->read_ecc == NULL)
                return part->master->read (part->master, from + part->offset,
                                           len, retlen, buf);
        else
                return part->master->read_ecc (part->master, from + part->offset,
                                               len, retlen, buf, NULL, &mtd->oobinfo);
}

static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char **buf)
{
        struct mtd_part *part = PART(mtd);
        if (from >= mtd->size)
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
        return part->master->point (part->master, from + part->offset,
                                    len, retlen, buf);
}

static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct mtd_part *part = PART(mtd);

        part->master->unpoint (part->master, addr, from + part->offset, len);
}

static int part_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel)
{
        struct mtd_part *part = PART(mtd);
        if (oobsel == NULL)
                oobsel = &mtd->oobinfo;
        if (from >= mtd->size)
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
        return part->master->read_ecc (part->master, from + part->offset,
                                       len, retlen, buf, eccbuf, oobsel);
}

static int part_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (from >= mtd->size)
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
        return part->master->read_oob (part->master, from + part->offset,
                                       len, retlen, buf);
}

static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->read_user_prot_reg (part->master, from,
                                                 len, retlen, buf);
}

static int part_get_user_prot_info (struct mtd_info *mtd,
                                    struct otp_info *buf, size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->get_user_prot_info (part->master, buf, len);
}

static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->read_fact_prot_reg (part->master, from,
                                                 len, retlen, buf);
}

static int part_get_fact_prot_info (struct mtd_info *mtd,
                                    struct otp_info *buf, size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->get_fact_prot_info (part->master, buf, len);
}

static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
                        size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (to >= mtd->size)
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
        if (part->master->write_ecc == NULL)
                return part->master->write (part->master, to + part->offset,
                                            len, retlen, buf);
        else
                return part->master->write_ecc (part->master, to + part->offset,
                                                len, retlen, buf, NULL, &mtd->oobinfo);
}

static int part_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
                        size_t *retlen, const u_char *buf,
                        u_char *eccbuf, struct nand_oobinfo *oobsel)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (oobsel == NULL)
                oobsel = &mtd->oobinfo;
        if (to >= mtd->size)
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
        return part->master->write_ecc (part->master, to + part->offset,
                                        len, retlen, buf, eccbuf, oobsel);
}

static int part_write_oob (struct mtd_info *mtd, loff_t to, size_t len,
                        size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (to >= mtd->size)
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
        return part->master->write_oob (part->master, to + part->offset,
                                        len, retlen, buf);
}

static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->write_user_prot_reg (part->master, from,
                                                  len, retlen, buf);
}

static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->lock_user_prot_reg (part->master, from, len);
}

static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
                        unsigned long count, loff_t to, size_t *retlen)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        return part->master->writev (part->master, vecs, count,
                                     to + part->offset, retlen);
}

static int part_readv (struct mtd_info *mtd, struct kvec *vecs,
                        unsigned long count, loff_t from, size_t *retlen)
{
        struct mtd_part *part = PART(mtd);
        if (part->master->readv_ecc == NULL)
                return part->master->readv (part->master, vecs, count,
                                            from + part->offset, retlen);
        else
                return part->master->readv_ecc (part->master, vecs, count,
                                                from + part->offset, retlen,
                                                NULL, &mtd->oobinfo);
}

static int part_readv_ecc (struct mtd_info *mtd, struct kvec *vecs,
                        unsigned long count, loff_t from, size_t *retlen,
                        u_char *eccbuf, struct nand_oobinfo *oobsel)
{
        struct mtd_part *part = PART(mtd);
        if (oobsel == NULL)
                oobsel = &mtd->oobinfo;
        return part->master->readv_ecc (part->master, vecs, count,
                                        from + part->offset, retlen,
                                        eccbuf, oobsel);
}

static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_part *part = PART(mtd);
        int ret;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (instr->addr >= mtd->size)
                return -EINVAL;
        instr->addr += part->offset;
        ret = part->master->erase(part->master, instr);
        return ret;
}
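/*
 * part_erase() above shifted instr->addr into the master's address space.
 * When the master's erase completes, translate the addresses back into the
 * partition's address space before invoking the caller's callback.
 */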
void mtd_erase_callback(struct erase_info *instr)
{
        if (instr->mtd->erase == part_erase) {
                struct mtd_part *part = PART(instr->mtd);

                if (instr->fail_addr != 0xffffffff)
                        instr->fail_addr -= part->offset;
                instr->addr -= part->offset;
        }
        if (instr->callback)
                instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
        return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
        return part->master->unlock(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        part->master->sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        return part->master->suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        part->master->resume(part->master);
}

static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
        return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
        return part->master->block_markbad(part->master, ofs);
}
/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */
int del_mtd_partitions(struct mtd_info *master)
{
        struct list_head *node;
        struct mtd_part *slave;

        for (node = mtd_partitions.next;
             node != &mtd_partitions;
             node = node->next) {
                slave = list_entry(node, struct mtd_part, list);
                if (slave->master == master) {
                        struct list_head *prev = node->prev;
                        __list_del(prev, node->next);
                        if (slave->registered)
                                del_mtd_device(&slave->mtd);
                        kfree(slave);
                        node = prev;
                }
        }
        return 0;
}
/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 * (Q: should we register the master MTD object as well?)
 */
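/*
 * A hypothetical board-level partition table, for illustration only:
 *
 *      static struct mtd_partition example_parts[] = {
 *              { .name = "bootloader", .offset = 0,                  .size = 0x00040000 },
 *              { .name = "kernel",     .offset = MTDPART_OFS_APPEND, .size = 0x00200000 },
 *              { .name = "rootfs",     .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL },
 *      };
 *
 *      add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts));
 *
 * MTDPART_OFS_APPEND places a partition directly after the previous one,
 * MTDPART_OFS_NXTBLK additionally rounds up to the next erase block, and
 * MTDPART_SIZ_FULL extends the partition to the end of the master device.
 */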
int add_mtd_partitions(struct mtd_info *master,
                       const struct mtd_partition *parts,
                       int nbparts)
{
        struct mtd_part *slave;
        u_int32_t cur_offset = 0;
        int i;

        printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

        for (i = 0; i < nbparts; i++) {

                /* allocate the partition structure */
                slave = kmalloc (sizeof(*slave), GFP_KERNEL);
                if (!slave) {
                        printk ("memory allocation error while creating partitions for \"%s\"\n",
                                master->name);
                        del_mtd_partitions(master);
                        return -ENOMEM;
                }
                memset(slave, 0, sizeof(*slave));
                list_add(&slave->list, &mtd_partitions);

                /* set up the MTD object for this partition */
                slave->mtd.type = master->type;
                slave->mtd.flags = master->flags & ~parts[i].mask_flags;
                slave->mtd.size = parts[i].size;
                slave->mtd.writesize = master->writesize;
                slave->mtd.oobsize = master->oobsize;
                slave->mtd.oobavail = master->oobavail;
                slave->mtd.ecctype = master->ecctype;
                slave->mtd.eccsize = master->eccsize;

                slave->mtd.name = parts[i].name;
                slave->mtd.bank_size = master->bank_size;
                slave->mtd.owner = master->owner;

                slave->mtd.read = part_read;
                slave->mtd.write = part_write;

                if (master->point && master->unpoint) {
                        slave->mtd.point = part_point;
                        slave->mtd.unpoint = part_unpoint;
                }

                if (master->read_ecc)
                        slave->mtd.read_ecc = part_read_ecc;
                if (master->write_ecc)
                        slave->mtd.write_ecc = part_write_ecc;
                if (master->read_oob)
                        slave->mtd.read_oob = part_read_oob;
                if (master->write_oob)
                        slave->mtd.write_oob = part_write_oob;
                if (master->read_user_prot_reg)
                        slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
                if (master->read_fact_prot_reg)
                        slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
                if (master->write_user_prot_reg)
                        slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
                if (master->lock_user_prot_reg)
                        slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
                if (master->get_user_prot_info)
                        slave->mtd.get_user_prot_info = part_get_user_prot_info;
                if (master->get_fact_prot_info)
                        slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
                if (master->sync)
                        slave->mtd.sync = part_sync;
                if (!i && master->suspend && master->resume) {
                        slave->mtd.suspend = part_suspend;
                        slave->mtd.resume = part_resume;
                }
                if (master->writev)
                        slave->mtd.writev = part_writev;
                if (master->readv)
                        slave->mtd.readv = part_readv;
                if (master->readv_ecc)
                        slave->mtd.readv_ecc = part_readv_ecc;
                if (master->lock)
                        slave->mtd.lock = part_lock;
                if (master->unlock)
                        slave->mtd.unlock = part_unlock;
                if (master->block_isbad)
                        slave->mtd.block_isbad = part_block_isbad;
                if (master->block_markbad)
                        slave->mtd.block_markbad = part_block_markbad;
                slave->mtd.erase = part_erase;
                slave->master = master;
                slave->offset = parts[i].offset;
                slave->index = i;

                if (slave->offset == MTDPART_OFS_APPEND)
                        slave->offset = cur_offset;
                if (slave->offset == MTDPART_OFS_NXTBLK) {
                        slave->offset = cur_offset;
                        if ((cur_offset % master->erasesize) != 0) {
                                /* Round up to next erasesize */
                                slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
                                printk(KERN_NOTICE "Moving partition %d: "
                                       "0x%08x -> 0x%08x\n", i,
                                       cur_offset, slave->offset);
                        }
                }
                if (slave->mtd.size == MTDPART_SIZ_FULL)
                        slave->mtd.size = master->size - slave->offset;
                cur_offset = slave->offset + slave->mtd.size;

                printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
                        slave->offset + slave->mtd.size, slave->mtd.name);

                /* let's do some sanity checks */
                if (slave->offset >= master->size) {
                        /* let's register it anyway to preserve ordering */
                        slave->offset = 0;
                        slave->mtd.size = 0;
                        printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
                                parts[i].name);
                }
                if (slave->offset + slave->mtd.size > master->size) {
                        slave->mtd.size = master->size - slave->offset;
                        printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
                                parts[i].name, master->name, slave->mtd.size);
                }
                if (master->numeraseregions > 1) {
                        /* Deal with variable erase size stuff */
                        int i;
                        struct mtd_erase_region_info *regions = master->eraseregions;

                        /* Find the first erase region which is part of this partition. */
                        for (i = 0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
                                ;

                        /* Use the largest erasesize of any region the partition spans. */
                        for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
                                if (slave->mtd.erasesize < regions[i].erasesize) {
                                        slave->mtd.erasesize = regions[i].erasesize;
                                }
                        }
                } else {
                        /* Single erase size */
                        slave->mtd.erasesize = master->erasesize;
                }

                if ((slave->mtd.flags & MTD_WRITEABLE) &&
                    (slave->offset % slave->mtd.erasesize)) {
                        /* Doesn't start on a boundary of major erase size */
                        /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
                        slave->mtd.flags &= ~MTD_WRITEABLE;
                        printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
                                parts[i].name);
                }
                if ((slave->mtd.flags & MTD_WRITEABLE) &&
                    (slave->mtd.size % slave->mtd.erasesize)) {
                        slave->mtd.flags &= ~MTD_WRITEABLE;
                        printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
                                parts[i].name);
                }

                /* copy oobinfo from master */
                memcpy(&slave->mtd.oobinfo, &master->oobinfo, sizeof(slave->mtd.oobinfo));

                if (parts[i].mtdp) {
                        /* store the object pointer (caller may or may not register it) */
                        *parts[i].mtdp = &slave->mtd;
                        slave->registered = 0;
                } else {
                        /* register our partition */
                        add_mtd_device(&slave->mtd);
                        slave->registered = 1;
                }
        }

        return 0;
}
EXPORT_SYMBOL(add_mtd_partitions);
EXPORT_SYMBOL(del_mtd_partitions);

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
static struct mtd_part_parser *get_partition_parser(const char *name)
{
        struct list_head *this;
        void *ret = NULL;
        spin_lock(&part_parser_lock);

        list_for_each(this, &part_parsers) {
                struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list);

                if (!strcmp(p->name, name) && try_module_get(p->owner)) {
                        ret = p;
                        break;
                }
        }
        spin_unlock(&part_parser_lock);

        return ret;
}

int register_mtd_parser(struct mtd_part_parser *p)
{
        spin_lock(&part_parser_lock);
        list_add(&p->list, &part_parsers);
        spin_unlock(&part_parser_lock);

        return 0;
}

int deregister_mtd_parser(struct mtd_part_parser *p)
{
        spin_lock(&part_parser_lock);
        list_del(&p->list);
        spin_unlock(&part_parser_lock);
        return 0;
}
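/*
 * Walk the NULL-terminated list of parser names in @types, trying each
 * registered parser in turn (loading it as a module if CONFIG_KMOD is set)
 * until one reports partitions.  Returns the number of partitions found
 * (> 0), with *pparts set by the successful parser, or a value <= 0 if no
 * parser produced a result.
 */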
int parse_mtd_partitions(struct mtd_info *master, const char **types,
                         struct mtd_partition **pparts, unsigned long origin)
{
        struct mtd_part_parser *parser;
        int ret = 0;

        for ( ; ret <= 0 && *types; types++) {
                parser = get_partition_parser(*types);
#ifdef CONFIG_KMOD
                if (!parser && !request_module("%s", *types))
                        parser = get_partition_parser(*types);
#endif
                if (!parser) {
                        printk(KERN_NOTICE "%s partition parsing not available\n",
                               *types);
                        continue;
                }
                ret = (*parser->parse_fn)(master, pparts, origin);
                if (ret > 0) {
                        printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
                               ret, parser->name, master->name);
                }
                put_partition_parser(parser);
        }
        return ret;
}

EXPORT_SYMBOL_GPL(parse_mtd_partitions);
EXPORT_SYMBOL_GPL(register_mtd_parser);
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
MODULE_DESCRIPTION("Generic support for partitioning of MTD devices");