/* vmu-flash.c
 * Driver for SEGA Dreamcast Visual Memory Unit
 *
 * Copyright (c) Adrian McMenamin 2002 - 2009
 * Copyright (c) Paul Mundt 2001
 *
 * Licensed under version 2 of the
 * GNU General Public Licence
 */
  10. #include <linux/init.h>
  11. #include <linux/sched.h>
  12. #include <linux/delay.h>
  13. #include <linux/maple.h>
  14. #include <linux/mtd/mtd.h>
  15. #include <linux/mtd/map.h>
/* Per-partition single-block read cache */
struct vmu_cache {
	unsigned char *buffer;		/* Cache */
	unsigned int block;		/* Which block was cached */
	unsigned long jiffies_atc;	/* When was it cached? */
	int valid;			/* Non-zero when buffer holds a usable block */
};
/* Stored in mtd_info->priv: ties an mtd device back to its maple
 * device and the partition it represents */
struct mdev_part {
	struct maple_device *mdev;
	int partition;
};
/* Geometry and state of one partition on the card */
struct vmupart {
	u16 user_blocks;	/* number of user-accessible blocks */
	u16 root_block;		/* block number of the root block */
	u16 numblocks;		/* total blocks (root_block + 1) */
	char *name;		/* mtd device name, "vmuP.U.N" */
	struct vmu_cache *pcache;	/* single-block read cache */
};
/* Per-device state, stored via maple_set_drvdata() */
struct memcard {
	u16 tempA;		/* scratch: user-block count from GETMINFO reply */
	u16 tempB;		/* scratch: root-block number from GETMINFO reply */
	u32 partitions;		/* number of partitions on the card */
	u32 blocklen;		/* bytes per block */
	u32 writecnt;		/* phases needed to write one block */
	u32 readcnt;		/* phases needed to read one block */
	u32 removeable;		/* whether the media is removable */
	int partition;		/* partition currently being probed */
	int read;
	unsigned char *blockread;	/* destination of the in-flight phased read */
	struct vmupart *parts;	/* array of 'partitions' entries */
	struct mtd_info *mtd;	/* array of 'partitions' mtd devices */
};
/* A (block number, byte offset) pair decoded from a linear mtd offset */
struct vmu_block {
	unsigned int num; /* block number */
	unsigned int ofs; /* block offset */
};
  51. static struct vmu_block *ofs_to_block(unsigned long src_ofs,
  52. struct mtd_info *mtd, int partition)
  53. {
  54. struct vmu_block *vblock;
  55. struct maple_device *mdev;
  56. struct memcard *card;
  57. struct mdev_part *mpart;
  58. int num;
  59. mpart = mtd->priv;
  60. mdev = mpart->mdev;
  61. card = maple_get_drvdata(mdev);
  62. if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
  63. goto failed;
  64. num = src_ofs / card->blocklen;
  65. if (num > card->parts[partition].numblocks)
  66. goto failed;
  67. vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
  68. if (!vblock)
  69. goto failed;
  70. vblock->num = num;
  71. vblock->ofs = src_ofs % card->blocklen;
  72. return vblock;
  73. failed:
  74. return NULL;
  75. }
/* Maple bus callback function for reads */
static void vmu_blockread(struct mapleq *mq)
{
	struct maple_device *mdev;
	struct memcard *card;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* copy the read in data */
	if (unlikely(!card->blockread))
		return;
	/*
	 * Skip the 12-byte maple reply header; one phase's worth of
	 * data (blocklen/readcnt bytes) follows it.
	 */
	memcpy(card->blockread, mq->recvbuf->buf + 12,
		card->blocklen/card->readcnt);
}
/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error = 0, x, wait;
	unsigned char *blockread = NULL;
	struct vmu_cache *pcache;
	__be32 sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	pcache = card->parts[partition].pcache;
	/* any previously cached block is stale once a fresh read starts */
	pcache->valid = 0;

	/* prepare the cache for this block */
	if (!pcache->buffer) {
		pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
		if (!pcache->buffer) {
			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
				" to lack of memory\n", mdev->port,
				mdev->unit);
			error = -ENOMEM;
			goto outB;
		}
	}

	/*
	 * Reads may be phased - again the hardware spec
	 * supports this - though may not be any devices in
	 * the wild that implement it, but we will here
	 */
	for (x = 0; x < card->readcnt; x++) {
		/* request word: partition | phase | block number */
		sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

		/* wait (up to 1s) for any outstanding maple transaction */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				dev_notice(&mdev->dev, "VMU at (%d, %d)"
					" is busy\n", mdev->port, mdev->unit);
				error = -EAGAIN;
				goto outB;
			}
		}

		atomic_set(&mdev->busy, 1);
		/* scratch buffer for this phase; vmu_blockread() fills it */
		blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
		if (!blockread) {
			error = -ENOMEM;
			atomic_set(&mdev->busy, 0);
			goto outB;
		}
		card->blockread = blockread;

		maple_getcond_callback(mdev, vmu_blockread, 0,
			MAPLE_FUNC_MEMCARD);
		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BREAD, 2, &sendbuf);
		/* Very long timeouts seem to be needed when box is stressed */
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			(atomic_read(&mdev->busy) == 0 ||
			atomic_read(&mdev->busy) == 2), HZ * 3);
		/*
		 * MTD layer does not handle hotplugging well
		 * so have to return errors when VMU is unplugged
		 * in the middle of a read (busy == 2)
		 */
		if (error || atomic_read(&mdev->busy) == 2) {
			if (atomic_read(&mdev->busy) == 2)
				error = -ENXIO;
			atomic_set(&mdev->busy, 0);
			card->blockread = NULL;
			goto outA;
		}
		if (wait == 0 || wait == -ERESTARTSYS) {
			/* timed out or interrupted: withdraw our packet
			 * from the maple queue by hand */
			card->blockread = NULL;
			atomic_set(&mdev->busy, 0);
			error = -EIO;
			list_del_init(&(mdev->mq->list));
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			if (wait == -ERESTARTSYS) {
				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
					" interrupted on block 0x%X\n",
					mdev->port, mdev->unit, num);
			} else
				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
					" timed out on block 0x%X\n",
					mdev->port, mdev->unit, num);
			goto outA;
		}

		/* copy this phase into the caller's buffer and the cache */
		memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
			card->blocklen/card->readcnt);
		memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
			card->blockread, card->blocklen/card->readcnt);
		card->blockread = NULL;
		/* stamp the cache for this block */
		pcache->block = num;
		pcache->jiffies_atc = jiffies;
		pcache->valid = 1;
		kfree(blockread);
	}

	return error;

outA:
	kfree(blockread);
outB:
	return error;
}
/* communicate with maple bus for phased writing */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error, locking, x, phaselen, wait;
	__be32 *sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* a block is written in 'writecnt' phases of 'phaselen' bytes */
	phaselen = card->blocklen/card->writecnt;

	/* 4 extra bytes carry the partition/phase/block header word */
	sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
	if (!sendbuf) {
		error = -ENOMEM;
		goto fail_nosendbuf;
	}
	for (x = 0; x < card->writecnt; x++) {
		sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
		memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
		/* wait until the device is not busy doing something else
		 * or 1 second - which ever is longer */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				error = -EBUSY;
				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
					"failed - device is busy\n",
					mdev->port, mdev->unit);
				goto fail_nolock;
			}
		}

		atomic_set(&mdev->busy, 1);
		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ/10);
		if (locking) {
			/* the packet could not be queued at all */
			error = -EIO;
			atomic_set(&mdev->busy, 0);
			goto fail_nolock;
		}
		if (atomic_read(&mdev->busy) == 2) {
			/* device unplugged mid-write: just clear busy */
			atomic_set(&mdev->busy, 0);
		} else if (wait == 0 || wait == -ERESTARTSYS) {
			/* timed out or interrupted: withdraw our packet */
			error = -EIO;
			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
				" 0x%X at phase %d failed: could not"
				" communicate with VMU", mdev->port,
				mdev->unit, num, x);
			atomic_set(&mdev->busy, 0);
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			list_del_init(&(mdev->mq->list));
			goto fail_nolock;
		}
	}
	kfree(sendbuf);

	/* success is signalled by returning the number of bytes written */
	return card->blocklen;

fail_nolock:
	kfree(sendbuf);
fail_nosendbuf:
	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
		mdev->unit);
	return error;
}
/* mtd function to simulate reading byte by byte */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
	struct mtd_info *mtd)
{
	struct vmu_block *vblock;
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	unsigned char *buf, ret;
	int partition, error;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	/*
	 * *retval is the real error channel: 0 on success, non-zero on
	 * failure.  NOTE(review): on failure a negative errno is squeezed
	 * into the unsigned char return value, so callers must test
	 * *retval rather than the returned byte.
	 */
	*retval = 0;

	buf = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buf) {
		*retval = 1;
		ret = -ENOMEM;
		goto finish;
	}

	vblock = ofs_to_block(ofs, mtd, partition);
	if (!vblock) {
		*retval = 3;
		ret = -ENOMEM;
		goto out_buf;
	}

	/* read the whole block (also populates the cache) */
	error = maple_vmu_read_block(vblock->num, buf, mtd);
	if (error) {
		ret = error;
		*retval = 2;
		goto out_vblock;
	}

	ret = buf[vblock->ofs];

out_vblock:
	kfree(vblock);
out_buf:
	kfree(buf);
finish:
	return ret;
}
  307. /* mtd higher order function to read flash */
  308. static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
  309. size_t *retlen, u_char *buf)
  310. {
  311. struct maple_device *mdev;
  312. struct memcard *card;
  313. struct mdev_part *mpart;
  314. struct vmu_cache *pcache;
  315. struct vmu_block *vblock;
  316. int index = 0, retval, partition, leftover, numblocks;
  317. unsigned char cx;
  318. if (len < 1)
  319. return -EIO;
  320. mpart = mtd->priv;
  321. mdev = mpart->mdev;
  322. partition = mpart->partition;
  323. card = maple_get_drvdata(mdev);
  324. numblocks = card->parts[partition].numblocks;
  325. if (from + len > numblocks * card->blocklen)
  326. len = numblocks * card->blocklen - from;
  327. if (len == 0)
  328. return -EIO;
  329. /* Have we cached this bit already? */
  330. pcache = card->parts[partition].pcache;
  331. do {
  332. vblock = ofs_to_block(from + index, mtd, partition);
  333. if (!vblock)
  334. return -ENOMEM;
  335. /* Have we cached this and is the cache valid and timely? */
  336. if (pcache->valid &&
  337. time_before(jiffies, pcache->jiffies_atc + HZ) &&
  338. (pcache->block == vblock->num)) {
  339. /* we have cached it, so do necessary copying */
  340. leftover = card->blocklen - vblock->ofs;
  341. if (vblock->ofs + len - index < card->blocklen) {
  342. /* only a bit of this block to copy */
  343. memcpy(buf + index,
  344. pcache->buffer + vblock->ofs,
  345. len - index);
  346. index = len;
  347. } else {
  348. /* otherwise copy remainder of whole block */
  349. memcpy(buf + index, pcache->buffer +
  350. vblock->ofs, leftover);
  351. index += leftover;
  352. }
  353. } else {
  354. /*
  355. * Not cached so read one byte -
  356. * but cache the rest of the block
  357. */
  358. cx = vmu_flash_read_char(from + index, &retval, mtd);
  359. if (retval) {
  360. *retlen = index;
  361. kfree(vblock);
  362. return cx;
  363. }
  364. memset(buf + index, cx, 1);
  365. index++;
  366. }
  367. kfree(vblock);
  368. } while (len > index);
  369. *retlen = index;
  370. return 0;
  371. }
/* mtd write: read-modify-write each block touched by the request.
 * Returns 0 on success with *retlen set, or a negative errno. */
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int index = 0, partition, error = 0, numblocks;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	unsigned char *buffer;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* simple sanity checks */
	if (len < 1) {
		error = -EIO;
		goto failed;
	}
	/* clamp the request to the end of the partition */
	numblocks = card->parts[partition].numblocks;
	if (to + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - to;
	if (len == 0) {
		error = -EIO;
		goto failed;
	}

	vblock = ofs_to_block(to, mtd, partition);
	if (!vblock) {
		error = -ENOMEM;
		goto failed;
	}

	/* whole-block scratch buffer for the read-modify-write cycle */
	buffer = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto fail_buffer;
	}

	do {
		/* Read in the block we are to write to */
		error = maple_vmu_read_block(vblock->num, buffer, mtd);
		if (error)
			goto fail_io;

		/* overlay the caller's bytes onto the block image */
		do {
			buffer[vblock->ofs] = buf[index];
			vblock->ofs++;
			index++;
			if (index >= len)
				break;
		} while (vblock->ofs < card->blocklen);

		/* write out new buffer */
		error = maple_vmu_write_block(vblock->num, buffer, mtd);
		/* invalidate the cache */
		pcache = card->parts[partition].pcache;
		pcache->valid = 0;

		/* maple_vmu_write_block() returns blocklen on success */
		if (error != card->blocklen)
			goto fail_io;

		/* advance to the start of the next block */
		vblock->num++;
		vblock->ofs = 0;
	} while (len > index);

	kfree(buffer);
	*retlen = index;
	kfree(vblock);
	return 0;

fail_io:
	kfree(buffer);
fail_buffer:
	kfree(vblock);
failed:
	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
	return error;
}
/* mtd sync hook: writes go straight to the device, so there is
 * nothing buffered to flush */
static void vmu_flash_sync(struct mtd_info *mtd)
{
	/* Do nothing here */
}
/* Maple bus callback function to recursively query hardware details */
static void vmu_queryblocks(struct mapleq *mq)
{
	struct maple_device *mdev;
	unsigned short *res;
	struct memcard *card;
	__be32 partnum;
	struct vmu_cache *pcache;
	struct mdev_part *mpart;
	struct mtd_info *mtd_cur;
	struct vmupart *part_cur;
	int error;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* pull the user-block count and root-block number out of the
	 * GETMINFO reply as 16-bit words at fixed offsets */
	res = (unsigned short *) (mq->recvbuf->buf);
	card->tempA = res[12];
	card->tempB = res[6];

	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
		"blocks with a root block at %d\n", card->partition,
		card->tempA, card->tempB);

	part_cur = &card->parts[card->partition];
	part_cur->user_blocks = card->tempA;
	part_cur->root_block = card->tempB;
	/* blocks run 0..root_block, so the root block is the last one */
	part_cur->numblocks = card->tempB + 1;
	/* 12 bytes fits "vmuP.U.NNN" plus the terminating NUL */
	part_cur->name = kmalloc(12, GFP_KERNEL);
	if (!part_cur->name)
		goto fail_name;

	sprintf(part_cur->name, "vmu%d.%d.%d",
		mdev->port, mdev->unit, card->partition);

	/* populate the mtd_info for this partition */
	mtd_cur = &card->mtd[card->partition];
	mtd_cur->name = part_cur->name;
	/* NOTE(review): magic mtd type value - presumably meant to be a
	 * flash-like type from mtd-abi.h; confirm against that header */
	mtd_cur->type = 8;
	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
	mtd_cur->size = part_cur->numblocks * card->blocklen;
	mtd_cur->erasesize = card->blocklen;
	mtd_cur->write = vmu_flash_write;
	mtd_cur->read = vmu_flash_read;
	mtd_cur->sync = vmu_flash_sync;
	mtd_cur->writesize = card->blocklen;

	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
	if (!mpart)
		goto fail_mpart;

	mpart->mdev = mdev;
	mpart->partition = card->partition;
	mtd_cur->priv = mpart;
	mtd_cur->owner = THIS_MODULE;

	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
	if (!pcache)
		goto fail_cache_create;
	part_cur->pcache = pcache;

	error = add_mtd_device(mtd_cur);
	if (error)
		goto fail_mtd_register;

	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);

	/*
	 * Set up a recursive call to the (probably theoretical)
	 * second or more partition
	 */
	if (++card->partition < card->partitions) {
		partnum = cpu_to_be32(card->partition << 24);
		maple_getcond_callback(mdev, vmu_queryblocks, 0,
			MAPLE_FUNC_MEMCARD);
		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_GETMINFO, 2, &partnum);
	}
	return;

fail_mtd_register:
	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
		"error is 0x%X\n", mdev->port, mdev->unit, error);
	/* 'error' is reused as a loop index from here on */
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->parts)[error]).pcache);
		((card->parts)[error]).pcache = NULL;
	}
fail_cache_create:
fail_mpart:
	/*
	 * NOTE(review): card->mtd is kmalloc'd (not zeroed) in
	 * vmu_connect(), so when we arrive here via fail_mpart the
	 * current partition's ->priv was never assigned and this kfree()
	 * acts on uninitialised memory - confirm and fix separately.
	 */
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->mtd)[error]).priv);
		((card->mtd)[error]).priv = NULL;
	}
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);
	kfree(part_cur->name);
fail_name:
	return;
}
  532. /* Handles very basic info about the flash, queries for details */
  533. static int __devinit vmu_connect(struct maple_device *mdev)
  534. {
  535. unsigned long test_flash_data, basic_flash_data;
  536. int c, error;
  537. struct memcard *card;
  538. u32 partnum = 0;
  539. test_flash_data = be32_to_cpu(mdev->devinfo.function);
  540. /* Need to count how many bits are set - to find out which
  541. * function_data element has details of the memory card:
  542. * using Brian Kernighan's/Peter Wegner's method */
  543. for (c = 0; test_flash_data; c++)
  544. test_flash_data &= test_flash_data - 1;
  545. basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
  546. card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
  547. if (!card) {
  548. error = ENOMEM;
  549. goto fail_nomem;
  550. }
  551. card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
  552. card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
  553. card->writecnt = basic_flash_data >> 12 & 0xF;
  554. card->readcnt = basic_flash_data >> 8 & 0xF;
  555. card->removeable = basic_flash_data >> 7 & 1;
  556. card->partition = 0;
  557. /*
  558. * Not sure there are actually any multi-partition devices in the
  559. * real world, but the hardware supports them, so, so will we
  560. */
  561. card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
  562. GFP_KERNEL);
  563. if (!card->parts) {
  564. error = -ENOMEM;
  565. goto fail_partitions;
  566. }
  567. card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
  568. GFP_KERNEL);
  569. if (!card->mtd) {
  570. error = -ENOMEM;
  571. goto fail_mtd_info;
  572. }
  573. maple_set_drvdata(mdev, card);
  574. /*
  575. * We want to trap meminfo not get cond
  576. * so set interval to zero, but rely on maple bus
  577. * driver to pass back the results of the meminfo
  578. */
  579. maple_getcond_callback(mdev, vmu_queryblocks, 0,
  580. MAPLE_FUNC_MEMCARD);
  581. /* Make sure we are clear to go */
  582. if (atomic_read(&mdev->busy) == 1) {
  583. wait_event_interruptible_timeout(mdev->maple_wait,
  584. atomic_read(&mdev->busy) == 0, HZ);
  585. if (atomic_read(&mdev->busy) == 1) {
  586. dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
  587. mdev->port, mdev->unit);
  588. error = -EAGAIN;
  589. goto fail_device_busy;
  590. }
  591. }
  592. atomic_set(&mdev->busy, 1);
  593. /*
  594. * Set up the minfo call: vmu_queryblocks will handle
  595. * the information passed back
  596. */
  597. error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
  598. MAPLE_COMMAND_GETMINFO, 2, &partnum);
  599. if (error) {
  600. dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
  601. " error is 0x%X\n", mdev->port, mdev->unit, error);
  602. goto fail_mtd_info;
  603. }
  604. return 0;
  605. fail_device_busy:
  606. kfree(card->mtd);
  607. fail_mtd_info:
  608. kfree(card->parts);
  609. fail_partitions:
  610. kfree(card);
  611. fail_nomem:
  612. return error;
  613. }
  614. static void __devexit vmu_disconnect(struct maple_device *mdev)
  615. {
  616. struct memcard *card;
  617. struct mdev_part *mpart;
  618. int x;
  619. mdev->callback = NULL;
  620. card = maple_get_drvdata(mdev);
  621. for (x = 0; x < card->partitions; x++) {
  622. mpart = ((card->mtd)[x]).priv;
  623. mpart->mdev = NULL;
  624. del_mtd_device(&((card->mtd)[x]));
  625. kfree(((card->parts)[x]).name);
  626. }
  627. kfree(card->parts);
  628. kfree(card->mtd);
  629. kfree(card);
  630. }
  631. /* Callback to handle eccentricities of both mtd subsystem
  632. * and general flakyness of Dreamcast VMUs
  633. */
  634. static int vmu_can_unload(struct maple_device *mdev)
  635. {
  636. struct memcard *card;
  637. int x;
  638. struct mtd_info *mtd;
  639. card = maple_get_drvdata(mdev);
  640. for (x = 0; x < card->partitions; x++) {
  641. mtd = &((card->mtd)[x]);
  642. if (mtd->usecount > 0)
  643. return 0;
  644. }
  645. return 1;
  646. }
#define ERRSTR "VMU at (%d, %d) file error -"

/* Decode a maple file-error reply word into a human-readable notice */
static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
	/* second 32-bit word of the reply carries the error code */
	enum maple_file_errors error = ((int *)recvbuf)[1];

	switch (error) {

	case MAPLE_FILEERR_INVALID_PARTITION:
		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_PHASE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " phase error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_BLOCK:
		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_WRITE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " write error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_BAD_CRC:
		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
			mdev->port, mdev->unit);
		break;

	default:
		/* unrecognised code: report it raw */
		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
			mdev->port, mdev->unit, error);
	}
}
  681. static int __devinit probe_maple_vmu(struct device *dev)
  682. {
  683. int error;
  684. struct maple_device *mdev = to_maple_dev(dev);
  685. struct maple_driver *mdrv = to_maple_driver(dev->driver);
  686. mdev->can_unload = vmu_can_unload;
  687. mdev->fileerr_handler = vmu_file_error;
  688. mdev->driver = mdrv;
  689. error = vmu_connect(mdev);
  690. if (error)
  691. return error;
  692. return 0;
  693. }
/* Bus remove: tear down all partitions and free the card state */
static int __devexit remove_maple_vmu(struct device *dev)
{
	struct maple_device *mdev = to_maple_dev(dev);

	vmu_disconnect(mdev);
	return 0;
}
/* Maple bus driver glue: matched on the MEMCARD function code */
static struct maple_driver vmu_flash_driver = {
	.function = MAPLE_FUNC_MEMCARD,
	.drv = {
		.name = "Dreamcast_visual_memory",
		.probe = probe_maple_vmu,
		.remove = __devexit_p(remove_maple_vmu),
	},
};
/* Module entry point: register the driver with the maple bus core */
static int __init vmu_flash_map_init(void)
{
	return maple_driver_register(&vmu_flash_driver);
}
/* Module exit: unhook the driver from the maple bus core */
static void __exit vmu_flash_map_exit(void)
{
	maple_driver_unregister(&vmu_flash_driver);
}
/* Module registration and metadata */
module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");