/*
 *  drivers/block/mg_disk.c
 *
 *  Support for the mGine m[g]flash IO mode.
 *  Based on legacy hd.c
 *
 * (c) 2008 mGine Co.,LTD
 * (c) 2008 unsik Kim <donari75@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mg_disk.h>
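
/*
 * Reserved area, in sectors.  CONFIG_MG_DISK_RES is presumably given in
 * KiB; shifting left by one converts it to 512-byte sectors.
 */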
#define MG_RES_SEC	(CONFIG_MG_DISK_RES << 1)

static void mg_request(struct request_queue *);

static void mg_dump_status(const char *msg, unsigned int stat,
		struct mg_host *host)
{
	char *name = MG_DISK_NAME;
	struct request *req;

	if (host->breq) {
		req = elv_next_request(host->breq);
		if (req)
			name = req->rq_disk->disk_name;
	}

	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
	if (stat & MG_REG_STATUS_BIT_BUSY)
		printk("Busy ");
	if (stat & MG_REG_STATUS_BIT_READY)
		printk("DriveReady ");
	if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
		printk("WriteFault ");
	if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
		printk("SeekComplete ");
	if (stat & MG_REG_STATUS_BIT_DATA_REQ)
		printk("DataRequest ");
	if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
		printk("CorrectedError ");
	if (stat & MG_REG_STATUS_BIT_ERROR)
		printk("Error ");
	printk("}\n");

	if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
		host->error = 0;
	} else {
		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
				host->error & 0xff);
		if (host->error & MG_REG_ERR_BBK)
			printk("BadSector ");
		if (host->error & MG_REG_ERR_UNC)
			printk("UncorrectableError ");
		if (host->error & MG_REG_ERR_IDNF)
			printk("SectorIdNotFound ");
		if (host->error & MG_REG_ERR_ABRT)
			printk("DriveStatusError ");
		if (host->error & MG_REG_ERR_AMNF)
			printk("AddrMarkNotFound ");
		printk("}");
		if (host->error &
				(MG_REG_ERR_BBK | MG_REG_ERR_UNC |
				 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
			if (host->breq) {
				req = elv_next_request(host->breq);
				if (req)
					printk(", sector=%u", (u32)req->sector);
			}
		}
		printk("\n");
	}
}
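
/*
 * Poll the status register until the drive reports the expected state
 * (busy, ready, or data-request), an error bit is raised, or @msec
 * milliseconds elapse.  Returns MG_ERR_NONE on success, otherwise the
 * code left in host->error (MG_ERR_TIMEOUT on expiry).
 */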
static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
	u8 status;
	unsigned long expire, cur_jiffies;
	struct mg_drv_data *prv_data = host->dev->platform_data;

	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
		cur_jiffies = jiffies;
		if (status & MG_REG_STATUS_BIT_BUSY) {
			if (expect == MG_REG_STATUS_BIT_BUSY)
				break;
		} else {
			/* Check the error condition! */
			if (status & MG_REG_STATUS_BIT_ERROR) {
				mg_dump_status("mg_wait", status, host);
				break;
			}

			if (expect == MG_STAT_READY)
				if (MG_READY_OK(status))
					break;

			if (expect == MG_REG_STATUS_BIT_DATA_REQ)
				if (status & MG_REG_STATUS_BIT_DATA_REQ)
					break;
		}
		if (!msec) {
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}
		if (prv_data->use_polling)
			msleep(1);

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));

	if (time_after_eq(cur_jiffies, expire) && msec)
		host->error = MG_ERR_TIMEOUT;

	return host->error;
}
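
/*
 * Wait up to @msec milliseconds for the RSTOUT GPIO to go high, i.e.
 * for the device's power-on-reset sequence to finish.
 */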
static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
{
	unsigned long expire;

	expire = jiffies + msecs_to_jiffies(msec);
	while (time_before(jiffies, expire)) {
		if (gpio_get_value(rstout) == 1)
			return MG_ERR_NONE;
		msleep(10);
	}

	return MG_ERR_RSTOUT;
}

static void mg_unexpected_intr(struct mg_host *host)
{
	u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	mg_dump_status("mg_unexpected_intr", status, host);
}
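
/*
 * IRQ entry point: consume the handler armed by mg_out() (or re-armed
 * by mg_read_intr()/mg_write_intr()), cancel its watchdog timer, and
 * run it under the host lock.  An interrupt with no handler armed is
 * reported via mg_unexpected_intr().
 */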
static irqreturn_t mg_irq(int irq, void *dev_id)
{
	struct mg_host *host = dev_id;
	void (*handler)(struct mg_host *) = host->mg_do_intr;

	spin_lock(&host->lock);

	host->mg_do_intr = NULL;
	del_timer(&host->timer);
	if (!handler)
		handler = mg_unexpected_intr;
	handler(host);

	spin_unlock(&host->lock);

	return IRQ_HANDLED;
}
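
/*
 * Issue the IDENTIFY command with interrupts masked, read the 512-byte
 * ID page by PIO, and derive capacity and CHS geometry from it.  When a
 * reserved area is configured (MG_RES_SEC), the geometry is shrunk so
 * the reserved sectors at the end of the media stay hidden from the
 * block layer.
 */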
static int mg_get_disk_id(struct mg_host *host)
{
	u32 i;
	s32 err;
	const u16 *id = host->id;
	struct mg_drv_data *prv_data = host->dev->platform_data;
	char fwrev[ATA_ID_FW_REV_LEN + 1];
	char model[ATA_ID_PROD_LEN + 1];
	char serial[ATA_ID_SERNO_LEN + 1];

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
	if (err)
		return err;

	for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
		host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
					MG_BUFF_OFFSET + i * 2));

	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
	if (err)
		return err;

	if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
		return MG_ERR_TRANSLATION;

	host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	host->cyls = id[ATA_ID_CYLS];
	host->heads = id[ATA_ID_HEADS];
	host->sectors = id[ATA_ID_SECTORS];

	if (MG_RES_SEC && host->heads && host->sectors) {
		/* modify cyls, n_sectors */
		host->cyls = (host->n_sectors - MG_RES_SEC) /
			host->heads / host->sectors;
		host->nres_sectors = host->n_sectors - host->cyls *
			host->heads * host->sectors;
		host->n_sectors -= host->nres_sectors;
	}

	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
	printk(KERN_INFO "mg_disk: model: %s\n", model);
	printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
	printk(KERN_INFO "mg_disk: serial: %s\n", serial);
	printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
			host->n_sectors, host->nres_sectors);

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return err;
}
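
/*
 * Full device reset: pulse the RST GPIO low-high (hard reset), then
 * toggle the soft-reset bit in the control register, checking after
 * each step that the drive reaches the expected busy/ready state.
 */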
static int mg_disk_init(struct mg_host *host)
{
	struct mg_drv_data *prv_data = host->dev->platform_data;
	s32 err;
	u8 init_status;

	/* hdd rst low */
	gpio_set_value(host->rst, 0);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* hdd rst high */
	gpio_set_value(host->rst, 1);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
	if (err)
		return err;

	/* soft reset on */
	outb(MG_REG_CTRL_RESET |
			(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			 MG_REG_CTRL_INTR_ENABLE),
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* soft reset off */
	outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			MG_REG_CTRL_INTR_ENABLE,
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
	if (err)
		return err;

	init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;

	if (init_status == 0xf)
		return MG_ERR_INIT_STAT;

	return err;
}

static void mg_bad_rw_intr(struct mg_host *host)
{
	struct request *req = elv_next_request(host->breq);
	if (req != NULL)
		if (++req->errors >= MG_MAX_ERRORS ||
				host->error == MG_ERR_TIMEOUT)
			end_request(req, 0);
}
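
/*
 * Program the taskfile: sector count plus a 28-bit LBA split across the
 * sector-number, cylinder and head registers, then write the command.
 * In interrupt mode this also arms @intr_addr as the completion handler
 * with a 3-second watchdog.  The reserved-sector offset is added here,
 * so callers always work in user-visible LBAs.
 */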
static unsigned int mg_out(struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt,
		unsigned int cmd,
		void (*intr_addr)(struct mg_host *))
{
	struct mg_drv_data *prv_data = host->dev->platform_data;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return host->error;

	if (!prv_data->use_polling) {
		host->mg_do_intr = intr_addr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}
	if (MG_RES_SEC)
		sect_num += MG_RES_SEC;

	outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
	outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
	outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
			MG_REG_CYL_LOW);
	outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
			MG_REG_CYL_HIGH);
	outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
			(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
	outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
	return MG_ERR_NONE;
}

static void mg_read(struct request *req)
{
	u32 remains, j;
	struct mg_host *host = req->rq_disk->private_data;

	remains = req->nr_sectors;

	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
			MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
			remains, req->sector, req->buffer);

	while (remains) {
		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
					MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
			*(u16 *)req->buffer =
				inw((unsigned long)host->dev_base +
						MG_BUFF_OFFSET + (j << 1));
			req->buffer += 2;
		}

		req->sector++;
		req->errors = 0;
		remains = --req->nr_sectors;
		--req->current_nr_sectors;

		if (req->current_nr_sectors <= 0) {
			MG_DBG("remain : %d sects\n", remains);
			end_request(req, 1);
			if (remains > 0)
				req = elv_next_request(host->breq);
		}

		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	}
}

static void mg_write(struct request *req)
{
	u32 remains, j;
	struct mg_host *host = req->rq_disk->private_data;

	remains = req->nr_sectors;

	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
			MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
			remains, req->sector, req->buffer);

	while (remains) {
		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
			outw(*(u16 *)req->buffer,
					(unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			req->buffer += 2;
		}

		req->sector++;
		remains = --req->nr_sectors;
		--req->current_nr_sectors;

		if (req->current_nr_sectors <= 0) {
			MG_DBG("remain : %d sects\n", remains);
			end_request(req, 1);
			if (remains > 0)
				req = elv_next_request(host->breq);
		}

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	}
}
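
/*
 * Interrupt-mode completion handlers: each IRQ signals that one sector
 * can be transferred through the device buffer.  The handler validates
 * the status register, moves one sector by PIO, ends the request
 * segment when it is exhausted, re-arms itself while sectors remain,
 * and confirms the transfer to the device.
 */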
static void mg_read_intr(struct mg_host *host)
{
	u32 i;
	struct request *req;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if (i & MG_REG_STATUS_BIT_DATA_REQ)
			goto ok_to_read;
	} while (0);
	mg_dump_status("mg_read_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_read:
	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* read 1 sector */
	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
		*(u16 *)req->buffer =
			inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
					(i << 1));
		req->buffer += 2;
	}

	/* manipulate request */
	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
			req->sector, req->nr_sectors - 1, req->buffer);

	req->sector++;
	req->errors = 0;
	i = --req->nr_sectors;
	--req->current_nr_sectors;

	/* let know if current segment done */
	if (req->current_nr_sectors <= 0)
		end_request(req, 1);

	/* set handler if read remains */
	if (i > 0) {
		host->mg_do_intr = mg_read_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send read confirm */
	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	/* goto next request */
	if (!i)
		mg_request(host->breq);
}

static void mg_write_intr(struct mg_host *host)
{
	u32 i, j;
	u16 *buff;
	struct request *req;

	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
			goto ok_to_write;
	} while (0);
	mg_dump_status("mg_write_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_write:
	/* manipulate request */
	req->sector++;
	i = --req->nr_sectors;
	--req->current_nr_sectors;
	req->buffer += MG_SECTOR_SIZE;

	/* let know if current segment or all done */
	if (!i || (req->bio && req->current_nr_sectors <= 0))
		end_request(req, 1);

	/* write 1 sector and set handler if remains */
	if (i > 0) {
		buff = (u16 *)req->buffer;
		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			buff++;
		}
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
				req->sector, req->nr_sectors, req->buffer);
		host->mg_do_intr = mg_write_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send write confirm */
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (!i)
		mg_request(host->breq);
}

void mg_times_out(unsigned long data)
{
	struct mg_host *host = (struct mg_host *)data;
	char *name;
	struct request *req;

	spin_lock_irq(&host->lock);

	req = elv_next_request(host->breq);
	if (!req)
		goto out_unlock;

	host->mg_do_intr = NULL;

	name = req->rq_disk->disk_name;
	printk(KERN_DEBUG "%s: timeout\n", name);

	host->error = MG_ERR_TIMEOUT;
	mg_bad_rw_intr(host);

	mg_request(host->breq);
out_unlock:
	spin_unlock_irq(&host->lock);
}
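
/*
 * Polling-mode request function: drain the queue synchronously,
 * handling each filesystem request with the blocking PIO helpers
 * mg_read()/mg_write() above.
 */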
static void mg_request_poll(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;

	while ((req = elv_next_request(q)) != NULL) {
		host = req->rq_disk->private_data;
		if (blk_fs_request(req)) {
			switch (rq_data_dir(req)) {
			case READ:
				mg_read(req);
				break;
			case WRITE:
				mg_write(req);
				break;
			default:
				printk(KERN_WARNING "%s:%d unknown command\n",
						__func__, __LINE__);
				end_request(req, 0);
				break;
			}
		}
	}
}
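
/*
 * Interrupt-mode issue path.  Reads simply arm mg_read_intr() and send
 * the command.  Writes must prime the device buffer with the first
 * sector before the first IRQ can fire, so interrupts are masked while
 * the command is issued, DRQ is polled, and one sector is pushed by
 * PIO; the confirm command then completes under mg_write_intr().
 */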
static unsigned int mg_issue_req(struct request *req,
		struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	u16 *buff;
	u32 i;

	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		break;
	case WRITE:
		/* TODO : handler */
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		del_timer(&host->timer);
		mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (host->error) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		buff = (u16 *)req->buffer;
		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (i << 1));
			buff++;
		}
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
		break;
	default:
		printk(KERN_WARNING "%s:%d unknown command\n",
				__func__, __LINE__);
		end_request(req, 0);
		break;
	}
	return MG_ERR_NONE;
}

/* This function is also called from IRQ context */
static void mg_request(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;
	u32 sect_num, sect_cnt;

	while (1) {
		req = elv_next_request(q);
		if (!req)
			return;

		host = req->rq_disk->private_data;

		/* check unwanted request call */
		if (host->mg_do_intr)
			return;

		del_timer(&host->timer);

		sect_num = req->sector;
		/* deal whole segments */
		sect_cnt = req->nr_sectors;

		/* sanity check */
		if (sect_num >= get_capacity(req->rq_disk) ||
				((sect_num + sect_cnt) >
				 get_capacity(req->rq_disk))) {
			printk(KERN_WARNING
					"%s: bad access: sector=%d, count=%d\n",
					req->rq_disk->disk_name,
					sect_num, sect_cnt);
			end_request(req, 0);
			continue;
		}

		if (!blk_fs_request(req))
			return;

		if (!mg_issue_req(req, host, sect_num, sect_cnt))
			return;
	}
}

static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mg_host *host = bdev->bd_disk->private_data;

	geo->cylinders = (unsigned short)host->cyls;
	geo->heads = (unsigned char)host->heads;
	geo->sectors = (unsigned char)host->sectors;
	return 0;
}

static struct block_device_operations mg_disk_ops = {
	.getgeo = mg_getgeo
};

static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash deep sleep */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
		if (!prv_data->use_polling)
			outb(MG_REG_CTRL_INTR_ENABLE,
					(unsigned long)host->dev_base +
					MG_REG_DRV_CTRL);
		return -EIO;
	}

	return 0;
}

static int mg_resume(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash wakeup */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return 0;
}
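
/*
 * Probe order: map the register window, claim the reset GPIOs, reset
 * the device (storage attribute only), hook the IRQ unless polling,
 * identify the media, then register the block device, queue and
 * gendisk.  The probe_err_* labels unwind these steps in reverse.
 */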
static int mg_probe(struct platform_device *plat_dev)
{
	struct mg_host *host;
	struct resource *rsc;
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	int err = 0;

	if (!prv_data) {
		printk(KERN_ERR "%s:%d fail (no driver_data)\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err;
	}

	/* alloc mg_host */
	host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err;
	}
	host->major = MG_DISK_MAJ;

	/* link each other */
	prv_data->host = host;
	host->dev = &plat_dev->dev;

	/* io remap */
	rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!rsc) {
		printk(KERN_ERR "%s:%d platform_get_resource fail\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err_2;
	}
	host->dev_base = ioremap(rsc->start, rsc->end - rsc->start + 1);
	if (!host->dev_base) {
		printk(KERN_ERR "%s:%d ioremap fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_2;
	}
	MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);

	/* get reset pin */
	rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
			MG_RST_PIN);
	if (!rsc) {
		printk(KERN_ERR "%s:%d get reset pin fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_3;
	}
	host->rst = rsc->start;

	/* init rst pin */
	err = gpio_request(host->rst, MG_RST_PIN);
	if (err)
		goto probe_err_3;
	gpio_direction_output(host->rst, 1);

	/* reset out pin */
	if (!(prv_data->dev_attr & MG_DEV_MASK)) {
		err = -EINVAL;
		goto probe_err_3a;
	}

	if (prv_data->dev_attr != MG_BOOT_DEV) {
		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
				MG_RSTOUT_PIN);
		if (!rsc) {
			printk(KERN_ERR "%s:%d get reset-out pin fail\n",
					__func__, __LINE__);
			err = -EIO;
			goto probe_err_3a;
		}
		host->rstout = rsc->start;
		err = gpio_request(host->rstout, MG_RSTOUT_PIN);
		if (err)
			goto probe_err_3a;
		gpio_direction_input(host->rstout);
	}

	/* disk reset */
	if (prv_data->dev_attr == MG_STORAGE_DEV) {
		/* If the POR sequence is not yet finished, wait */
		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
		if (err)
			goto probe_err_3b;
		err = mg_disk_init(host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (err code : %d)\n",
					__func__, __LINE__, err);
			err = -EIO;
			goto probe_err_3b;
		}
	}

	/* get irq resource */
	if (!prv_data->use_polling) {
		host->irq = platform_get_irq(plat_dev, 0);
		if (host->irq == -ENXIO) {
			err = host->irq;
			goto probe_err_3b;
		}
		err = request_irq(host->irq, mg_irq,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				MG_DEV_NAME, host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
					__func__, __LINE__, err);
			goto probe_err_3b;
		}
	}

	/* get disk id */
	err = mg_get_disk_id(host);
	if (err) {
		printk(KERN_ERR "%s:%d fail (err code : %d)\n",
				__func__, __LINE__, err);
		err = -EIO;
		goto probe_err_4;
	}

	err = register_blkdev(host->major, MG_DISK_NAME);
	if (err < 0) {
		printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
				__func__, __LINE__, err);
		goto probe_err_4;
	}
	if (!host->major)
		host->major = err;

	spin_lock_init(&host->lock);

	if (prv_data->use_polling)
		host->breq = blk_init_queue(mg_request_poll, &host->lock);
	else
		host->breq = blk_init_queue(mg_request, &host->lock);

	if (!host->breq) {
		err = -ENOMEM;
		printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
				__func__, __LINE__);
		goto probe_err_5;
	}

	/* mflash is a random-access device; the noop elevator fits best */
	elevator_exit(host->breq->elevator);
	err = elevator_init(host->breq, "noop");
	if (err) {
		printk(KERN_ERR "%s:%d (elevator_init) fail\n",
				__func__, __LINE__);
		goto probe_err_6;
	}
	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
	blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);

	init_timer(&host->timer);
	host->timer.function = mg_times_out;
	host->timer.data = (unsigned long)host;

	host->gd = alloc_disk(MG_DISK_MAX_PART);
	if (!host->gd) {
		printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err_7;
	}
	host->gd->major = host->major;
	host->gd->first_minor = 0;
	host->gd->fops = &mg_disk_ops;
	host->gd->queue = host->breq;
	host->gd->private_data = host;
	sprintf(host->gd->disk_name, MG_DISK_NAME"a");

	set_capacity(host->gd, host->n_sectors);

	add_disk(host->gd);

	return err;

probe_err_7:
	del_timer_sync(&host->timer);
probe_err_6:
	blk_cleanup_queue(host->breq);
probe_err_5:
	unregister_blkdev(host->major, MG_DISK_NAME);
probe_err_4:
	if (!prv_data->use_polling)
		free_irq(host->irq, host);
probe_err_3b:
	gpio_free(host->rstout);
probe_err_3a:
	gpio_free(host->rst);
probe_err_3:
	iounmap(host->dev_base);
probe_err_2:
	kfree(host);
probe_err:
	return err;
}

static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}

static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.suspend = mg_suspend,
	.resume = mg_resume,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
	}
};

/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/

static int __init mg_init(void)
{
	printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
	return platform_driver_register(&mg_disk_driver);
}

static void __exit mg_exit(void)
{
	printk(KERN_INFO "mflash driver : bye bye\n");
	platform_driver_unregister(&mg_disk_driver);
}

module_init(mg_init);
module_exit(mg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");