/* drivers/block/mg_disk.c */
/*
 * drivers/block/mg_disk.c
 *
 * Support for the mGine m[g]flash IO mode.
 * Based on legacy hd.c
 *
 * (c) 2008 mGine Co.,LTD
 * (c) 2008 unsik Kim <donari75@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/ioport.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mg_disk.h>
  25. #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
  26. static void mg_request(struct request_queue *);
/*
 * mg_dump_status - decode a status byte into a human-readable log line.
 * @msg:  caller context string, printed verbatim
 * @stat: raw value read from the status register
 * @host: host whose error register is consulted when the error bit is set
 *
 * When MG_REG_STATUS_BIT_ERROR is set, the error register is also read,
 * decoded bit by bit, and latched in host->error for the caller;
 * otherwise host->error is cleared.  Styled after legacy hd.c.
 */
static void mg_dump_status(const char *msg, unsigned int stat,
		struct mg_host *host)
{
	char *name = MG_DISK_NAME;
	struct request *req;

	/* Report against the request at the head of the queue, if any. */
	if (host->breq) {
		req = elv_next_request(host->breq);
		if (req)
			name = req->rq_disk->disk_name;
	}

	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
	if (stat & MG_REG_STATUS_BIT_BUSY)
		printk("Busy ");
	if (stat & MG_REG_STATUS_BIT_READY)
		printk("DriveReady ");
	if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
		printk("WriteFault ");
	if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
		printk("SeekComplete ");
	if (stat & MG_REG_STATUS_BIT_DATA_REQ)
		printk("DataRequest ");
	if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
		printk("CorrectedError ");
	if (stat & MG_REG_STATUS_BIT_ERROR)
		printk("Error ");
	printk("}\n");

	if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
		host->error = 0;
	} else {
		/* Latch the error register and decode its bits. */
		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
				host->error & 0xff);
		if (host->error & MG_REG_ERR_BBK)
			printk("BadSector ");
		if (host->error & MG_REG_ERR_UNC)
			printk("UncorrectableError ");
		if (host->error & MG_REG_ERR_IDNF)
			printk("SectorIdNotFound ");
		if (host->error & MG_REG_ERR_ABRT)
			printk("DriveStatusError ");
		if (host->error & MG_REG_ERR_AMNF)
			printk("AddrMarkNotFound ");
		printk("}");
		if (host->error &
				(MG_REG_ERR_BBK | MG_REG_ERR_UNC |
				 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
			/* Media-related error: name the offending sector. */
			if (host->breq) {
				req = elv_next_request(host->breq);
				if (req)
					printk(", sector=%ld", req->sector);
			}
		}
		printk("\n");
	}
}
/*
 * mg_wait - poll the status register until @expect is met or timeout.
 * @host:   host to poll
 * @expect: MG_REG_STATUS_BIT_BUSY (wait for BUSY to assert),
 *          MG_STAT_READY (wait for ready/seek-complete), or
 *          MG_REG_STATUS_BIT_DATA_REQ (wait for DRQ)
 * @msec:   timeout in milliseconds; 0 means a single non-blocking probe
 *
 * In polling mode the loop sleeps 1 ms between reads.  An error bit in
 * the status register aborts the wait and dumps diagnostics.
 *
 * Returns MG_ERR_NONE on success, otherwise the MG_ERR_* code that is
 * also latched in host->error.
 */
static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
	u8 status;
	unsigned long expire, cur_jiffies;
	struct mg_drv_data *prv_data = host->dev->platform_data;

	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
		cur_jiffies = jiffies;
		if (status & MG_REG_STATUS_BIT_BUSY) {
			if (expect == MG_REG_STATUS_BIT_BUSY)
				break;
		} else {
			/* Check the error condition! */
			if (status & MG_REG_STATUS_BIT_ERROR) {
				mg_dump_status("mg_wait", status, host);
				break;
			}

			if (expect == MG_STAT_READY)
				if (MG_READY_OK(status))
					break;

			if (expect == MG_REG_STATUS_BIT_DATA_REQ)
				if (status & MG_REG_STATUS_BIT_DATA_REQ)
					break;
		}
		if (!msec) {
			/* msec == 0: single probe, report and bail. */
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}
		if (prv_data->use_polling)
			msleep(1);

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));

	/* Loop fell through on the deadline rather than on a match. */
	if (time_after_eq(cur_jiffies, expire) && msec)
		host->error = MG_ERR_TIMEOUT;

	return host->error;
}
  120. static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
  121. {
  122. unsigned long expire;
  123. expire = jiffies + msecs_to_jiffies(msec);
  124. while (time_before(jiffies, expire)) {
  125. if (gpio_get_value(rstout) == 1)
  126. return MG_ERR_NONE;
  127. msleep(10);
  128. }
  129. return MG_ERR_RSTOUT;
  130. }
  131. static void mg_unexpected_intr(struct mg_host *host)
  132. {
  133. u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
  134. mg_dump_status("mg_unexpected_intr", status, host);
  135. }
  136. static irqreturn_t mg_irq(int irq, void *dev_id)
  137. {
  138. struct mg_host *host = dev_id;
  139. void (*handler)(struct mg_host *) = host->mg_do_intr;
  140. host->mg_do_intr = 0;
  141. del_timer(&host->timer);
  142. if (!handler)
  143. handler = mg_unexpected_intr;
  144. handler(host);
  145. return IRQ_HANDLED;
  146. }
/*
 * mg_get_disk_id - issue IDENTIFY and cache the device geometry.
 *
 * Reads the 256-word ID page into host->id, derives LBA capacity and
 * CHS values, shaves the MG_RES_SEC reserved sectors off the usable
 * size, and logs model/firmware/serial.  The device interrupt is masked
 * for the duration in interrupt mode, since the command is run polled.
 *
 * Returns 0 on success or an MG_ERR_* code.
 */
static int mg_get_disk_id(struct mg_host *host)
{
	u32 i;
	s32 err;
	const u16 *id = host->id;
	struct mg_drv_data *prv_data = host->dev->platform_data;
	char fwrev[ATA_ID_FW_REV_LEN + 1];
	char model[ATA_ID_PROD_LEN + 1];
	char serial[ATA_ID_SERNO_LEN + 1];

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
	if (err)
		return err;

	/* Drain the 512-byte ID page, one 16-bit word at a time. */
	for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
		host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
					MG_BUFF_OFFSET + i * 2));

	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
	if (err)
		return err;

	if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
		return MG_ERR_TRANSLATION;

	host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	host->cyls = id[ATA_ID_CYLS];
	host->heads = id[ATA_ID_HEADS];
	host->sectors = id[ATA_ID_SECTORS];

	if (MG_RES_SEC && host->heads && host->sectors) {
		/* modify cyls, n_sectors to exclude the reserved area */
		host->cyls = (host->n_sectors - MG_RES_SEC) /
			host->heads / host->sectors;
		host->nres_sectors = host->n_sectors - host->cyls *
			host->heads * host->sectors;
		host->n_sectors -= host->nres_sectors;
	}

	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
	printk(KERN_INFO "mg_disk: model: %s\n", model);
	printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
	printk(KERN_INFO "mg_disk: serial: %s\n", serial);
	printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
			host->n_sectors, host->nres_sectors);

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return err;
}
/*
 * mg_disk_init - hard + soft reset sequence for the mflash.
 *
 * Pulses the external reset GPIO (low asserts BUSY, high brings the
 * device to ready), then toggles the soft-reset bit in the control
 * register while preserving the platform's interrupt enable setting.
 * Finally sanity-checks the low nibble of the status register (0xf
 * there indicates a dead/absent device).
 *
 * Returns 0 on success or an MG_ERR_* code.
 */
static int mg_disk_init(struct mg_host *host)
{
	struct mg_drv_data *prv_data = host->dev->platform_data;
	s32 err;
	u8 init_status;

	/* hdd rst low */
	gpio_set_value(host->rst, 0);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* hdd rst high */
	gpio_set_value(host->rst, 1);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
	if (err)
		return err;

	/* soft reset on */
	outb(MG_REG_CTRL_RESET |
			(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			 MG_REG_CTRL_INTR_ENABLE),
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* soft reset off */
	outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			MG_REG_CTRL_INTR_ENABLE,
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
	if (err)
		return err;

	/* All-ones low nibble means the device never came out of reset. */
	init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;
	if (init_status == 0xf)
		return MG_ERR_INIT_STAT;

	return err;
}
  233. static void mg_bad_rw_intr(struct mg_host *host)
  234. {
  235. struct request *req = elv_next_request(host->breq);
  236. if (req != NULL)
  237. if (++req->errors >= MG_MAX_ERRORS ||
  238. host->error == MG_ERR_TIMEOUT)
  239. end_request(req, 0);
  240. }
/*
 * mg_out - program the task-file registers and issue a command.
 * @host:      target host
 * @sect_num:  starting LBA (the MG_RES_SEC reserved offset is added here)
 * @sect_cnt:  number of sectors
 * @cmd:       MG_CMD_* opcode
 * @intr_addr: completion handler to install in interrupt mode (NULL in
 *             polling mode); a 3 s watchdog timer is armed with it
 *
 * Register write order (count, LBA bytes, head, command) follows the
 * ATA task-file convention; the command register write must come last.
 *
 * Returns MG_ERR_NONE, or the error latched by the initial mg_wait().
 */
static unsigned int mg_out(struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt,
		unsigned int cmd,
		void (*intr_addr)(struct mg_host *))
{
	struct mg_drv_data *prv_data = host->dev->platform_data;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return host->error;

	if (!prv_data->use_polling) {
		host->mg_do_intr = intr_addr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* Skip the reserved sectors at the front of the media. */
	if (MG_RES_SEC)
		sect_num += MG_RES_SEC;

	outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
	outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
	outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
			MG_REG_CYL_LOW);
	outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
			MG_REG_CYL_HIGH);
	outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
			(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
	outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);

	return MG_ERR_NONE;
}
  267. static void mg_read(struct request *req)
  268. {
  269. u32 remains, j;
  270. struct mg_host *host = req->rq_disk->private_data;
  271. remains = req->nr_sectors;
  272. if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, 0) !=
  273. MG_ERR_NONE)
  274. mg_bad_rw_intr(host);
  275. MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
  276. remains, req->sector, req->buffer);
  277. while (remains) {
  278. if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
  279. MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
  280. mg_bad_rw_intr(host);
  281. return;
  282. }
  283. for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
  284. *(u16 *)req->buffer =
  285. inw((unsigned long)host->dev_base +
  286. MG_BUFF_OFFSET + (j << 1));
  287. req->buffer += 2;
  288. }
  289. req->sector++;
  290. req->errors = 0;
  291. remains = --req->nr_sectors;
  292. --req->current_nr_sectors;
  293. if (req->current_nr_sectors <= 0) {
  294. MG_DBG("remain : %d sects\n", remains);
  295. end_request(req, 1);
  296. if (remains > 0)
  297. req = elv_next_request(host->breq);
  298. }
  299. outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
  300. MG_REG_COMMAND);
  301. }
  302. }
/*
 * mg_write - polled PIO write of a whole request.
 *
 * Issues MG_CMD_WR, then per sector: wait for DRQ, push 512 bytes
 * through the data window, send a write-confirm.  Finished segments
 * are completed with end_request(); any failure aborts the request
 * via mg_bad_rw_intr().
 */
static void mg_write(struct request *req)
{
	u32 remains, j;
	struct mg_host *host = req->rq_disk->private_data;

	remains = req->nr_sectors;

	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, 0) !=
			MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
			remains, req->sector, req->buffer);

	while (remains) {
		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}

		/* Push one sector (256 words) into the data window. */
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
			outw(*(u16 *)req->buffer,
					(unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			req->buffer += 2;
		}

		req->sector++;
		remains = --req->nr_sectors;
		--req->current_nr_sectors;

		if (req->current_nr_sectors <= 0) {
			/* Segment written: complete it and fetch the next. */
			MG_DBG("remain : %d sects\n", remains);
			end_request(req, 1);
			if (remains > 0)
				req = elv_next_request(host->breq);
		}

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	}
}
/*
 * mg_read_intr - interrupt-mode read completion handler.
 *
 * Verifies the controller is not busy, is ready and has DRQ set, then
 * copies one sector out of the data window, advances the request,
 * re-installs itself (plus the 3 s watchdog) while sectors remain, and
 * sends a read-confirm.  On a bad status the request is failed and the
 * queue restarted.
 */
static void mg_read_intr(struct mg_host *host)
{
	u32 i;
	struct request *req;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if (i & MG_REG_STATUS_BIT_DATA_REQ)
			goto ok_to_read;
	} while (0);

	/* Bad status: report, fail the request, keep the queue moving. */
	mg_dump_status("mg_read_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_read:
	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* read 1 sector */
	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
		*(u16 *)req->buffer =
			inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
					(i << 1));
		req->buffer += 2;
	}

	/* manipulate request */
	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
			req->sector, req->nr_sectors - 1, req->buffer);
	req->sector++;
	req->errors = 0;
	i = --req->nr_sectors;
	--req->current_nr_sectors;

	/* let know if current segment done */
	if (req->current_nr_sectors <= 0)
		end_request(req, 1);

	/* set handler if read remains */
	if (i > 0) {
		host->mg_do_intr = mg_read_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send read confirm */
	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	/* goto next request */
	if (!i)
		mg_request(host->breq);
}
/*
 * mg_write_intr - interrupt-mode write completion handler.
 *
 * Unlike the read path, the interrupt signals that the *previous*
 * sector completed: the request is advanced first, then the next
 * sector (if any) is pushed from here and the handler re-armed.  On
 * the last sector only DRQ-less completion is accepted.  A bad status
 * fails the request and restarts the queue.
 */
static void mg_write_intr(struct mg_host *host)
{
	u32 i, j;
	u16 *buff;
	struct request *req;

	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
			goto ok_to_write;
	} while (0);

	/* Bad status: report, fail the request, keep the queue moving. */
	mg_dump_status("mg_write_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_write:
	/* manipulate request */
	req->sector++;
	i = --req->nr_sectors;
	--req->current_nr_sectors;
	req->buffer += MG_SECTOR_SIZE;

	/* let know if current segment or all done */
	if (!i || (req->bio && req->current_nr_sectors <= 0))
		end_request(req, 1);

	/* write 1 sector and set handler if remains */
	if (i > 0) {
		buff = (u16 *)req->buffer;
		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			buff++;
		}
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
				req->sector, req->nr_sectors, req->buffer);
		host->mg_do_intr = mg_write_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send write confirm */
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (!i)
		mg_request(host->breq);
}
/*
 * mg_times_out - watchdog for interrupt-mode transfers.
 * @data: the struct mg_host, cast through the timer's data field
 *
 * Timer callback (host->timer) armed by mg_out() and the interrupt
 * handlers.  Drops the pending completion handler, records a timeout
 * error, fails the current request and kicks the queue again.
 */
void mg_times_out(unsigned long data)
{
	struct mg_host *host = (struct mg_host *)data;
	char *name;
	struct request *req;

	req = elv_next_request(host->breq);
	if (!req)
		return;

	host->mg_do_intr = NULL;

	name = req->rq_disk->disk_name;
	printk(KERN_DEBUG "%s: timeout\n", name);

	host->error = MG_ERR_TIMEOUT;
	mg_bad_rw_intr(host);

	mg_request(host->breq);
}
/*
 * mg_request_poll - request_fn used when the platform selects polling.
 *
 * Services the queue synchronously: each fs request is handed in full
 * to mg_read()/mg_write().
 *
 * NOTE(review): a non-fs request is never completed here, yet
 * elv_next_request() keeps returning the queue head, so such a request
 * would spin this loop forever — confirm non-fs requests cannot reach
 * this queue.
 */
static void mg_request_poll(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;

	while ((req = elv_next_request(q)) != NULL) {
		host = req->rq_disk->private_data;
		if (blk_fs_request(req)) {
			switch (rq_data_dir(req)) {
			case READ:
				mg_read(req);
				break;
			case WRITE:
				mg_write(req);
				break;
			default:
				printk(KERN_WARNING "%s:%d unknown command\n",
						__func__, __LINE__);
				end_request(req, 0);
				break;
			}
		}
	}
}
/*
 * mg_issue_req - start one request in interrupt mode.
 * @req:      request being started
 * @host:     owning host
 * @sect_num: starting sector
 * @sect_cnt: sector count
 *
 * READ: program the command and let mg_read_intr() move all data.
 * WRITE: interrupts are masked while the first sector is pushed by PIO
 * from here (the "TODO : handler" below suggests the first DRQ is not
 * interrupt-driven — unverified), then mg_write_intr() takes over for
 * the remaining sectors.
 *
 * Returns MG_ERR_NONE when the transfer was started, otherwise
 * host->error after failing the request.
 */
static unsigned int mg_issue_req(struct request *req,
		struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	u16 *buff;
	u32 i;

	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		break;
	case WRITE:
		/* TODO : handler */
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		del_timer(&host->timer);
		mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (host->error) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		/* Push the first sector by PIO; the handler does the rest. */
		buff = (u16 *)req->buffer;
		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (i << 1));
			buff++;
		}
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
		break;
	default:
		printk(KERN_WARNING "%s:%d unknown command\n",
				__func__, __LINE__);
		end_request(req, 0);
		break;
	}
	return MG_ERR_NONE;
}
/* This function also called from IRQ context */
/*
 * mg_request - request_fn for interrupt mode.
 *
 * Pops requests, range-checks them against the disk capacity, and
 * starts them via mg_issue_req().  Bails out early when a transfer is
 * already in flight (host->mg_do_intr set) or once a request has been
 * successfully started; the completion handlers re-enter this function
 * to keep the queue moving.
 */
static void mg_request(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;
	u32 sect_num, sect_cnt;

	while (1) {
		req = elv_next_request(q);
		if (!req)
			return;

		host = req->rq_disk->private_data;

		/* check unwanted request call */
		if (host->mg_do_intr)
			return;

		del_timer(&host->timer);

		sect_num = req->sector;
		/* deal whole segments */
		sect_cnt = req->nr_sectors;

		/* sanity check */
		if (sect_num >= get_capacity(req->rq_disk) ||
				((sect_num + sect_cnt) >
				 get_capacity(req->rq_disk))) {
			printk(KERN_WARNING
					"%s: bad access: sector=%d, count=%d\n",
					req->rq_disk->disk_name,
					sect_num, sect_cnt);
			end_request(req, 0);
			continue;
		}

		if (!blk_fs_request(req))
			return;

		if (!mg_issue_req(req, host, sect_num, sect_cnt))
			return;
	}
}
  561. static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  562. {
  563. struct mg_host *host = bdev->bd_disk->private_data;
  564. geo->cylinders = (unsigned short)host->cyls;
  565. geo->heads = (unsigned char)host->heads;
  566. geo->sectors = (unsigned char)host->sectors;
  567. return 0;
  568. }
/* Block device operations: only geometry reporting is implemented. */
static struct block_device_operations mg_disk_ops = {
	.getgeo = mg_getgeo
};
/*
 * mg_suspend - put the mflash into deep sleep for system suspend.
 *
 * Waits for the device to go idle, masks its interrupt in interrupt
 * mode, then issues MG_CMD_SLEEP.  If the device fails to settle the
 * interrupt is unmasked again and -EIO returned.
 */
static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash deep sleep */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
		/* Sleep failed: restore the interrupt mask before erroring. */
		if (!prv_data->use_polling)
			outb(MG_REG_CTRL_INTR_ENABLE,
					(unsigned long)host->dev_base +
					MG_REG_DRV_CTRL);
		return -EIO;
	}

	return 0;
}
/*
 * mg_resume - wake the mflash after system resume.
 *
 * Issues MG_CMD_WAKEUP, waits for the device to come ready, then
 * re-enables its interrupt in interrupt mode.
 */
static int mg_resume(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash wakeup */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return 0;
}
  610. static int mg_probe(struct platform_device *plat_dev)
  611. {
  612. struct mg_host *host;
  613. struct resource *rsc;
  614. struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
  615. int err = 0;
  616. if (!prv_data) {
  617. printk(KERN_ERR "%s:%d fail (no driver_data)\n",
  618. __func__, __LINE__);
  619. err = -EINVAL;
  620. goto probe_err;
  621. }
  622. /* alloc mg_host */
  623. host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
  624. if (!host) {
  625. printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
  626. __func__, __LINE__);
  627. err = -ENOMEM;
  628. goto probe_err;
  629. }
  630. host->major = MG_DISK_MAJ;
  631. /* link each other */
  632. prv_data->host = host;
  633. host->dev = &plat_dev->dev;
  634. /* io remap */
  635. rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
  636. if (!rsc) {
  637. printk(KERN_ERR "%s:%d platform_get_resource fail\n",
  638. __func__, __LINE__);
  639. err = -EINVAL;
  640. goto probe_err_2;
  641. }
  642. host->dev_base = ioremap(rsc->start , rsc->end + 1);
  643. if (!host->dev_base) {
  644. printk(KERN_ERR "%s:%d ioremap fail\n",
  645. __func__, __LINE__);
  646. err = -EIO;
  647. goto probe_err_2;
  648. }
  649. MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);
  650. /* get reset pin */
  651. rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
  652. MG_RST_PIN);
  653. if (!rsc) {
  654. printk(KERN_ERR "%s:%d get reset pin fail\n",
  655. __func__, __LINE__);
  656. err = -EIO;
  657. goto probe_err_3;
  658. }
  659. host->rst = rsc->start;
  660. /* init rst pin */
  661. err = gpio_request(host->rst, MG_RST_PIN);
  662. if (err)
  663. goto probe_err_3;
  664. gpio_direction_output(host->rst, 1);
  665. /* reset out pin */
  666. if (!(prv_data->dev_attr & MG_DEV_MASK))
  667. goto probe_err_3a;
  668. if (prv_data->dev_attr != MG_BOOT_DEV) {
  669. rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
  670. MG_RSTOUT_PIN);
  671. if (!rsc) {
  672. printk(KERN_ERR "%s:%d get reset-out pin fail\n",
  673. __func__, __LINE__);
  674. err = -EIO;
  675. goto probe_err_3a;
  676. }
  677. host->rstout = rsc->start;
  678. err = gpio_request(host->rstout, MG_RSTOUT_PIN);
  679. if (err)
  680. goto probe_err_3a;
  681. gpio_direction_input(host->rstout);
  682. }
  683. /* disk reset */
  684. if (prv_data->dev_attr == MG_STORAGE_DEV) {
  685. /* If POR seq. not yet finised, wait */
  686. err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
  687. if (err)
  688. goto probe_err_3b;
  689. err = mg_disk_init(host);
  690. if (err) {
  691. printk(KERN_ERR "%s:%d fail (err code : %d)\n",
  692. __func__, __LINE__, err);
  693. err = -EIO;
  694. goto probe_err_3b;
  695. }
  696. }
  697. /* get irq resource */
  698. if (!prv_data->use_polling) {
  699. host->irq = platform_get_irq(plat_dev, 0);
  700. if (host->irq == -ENXIO) {
  701. err = host->irq;
  702. goto probe_err_3b;
  703. }
  704. err = request_irq(host->irq, mg_irq,
  705. IRQF_DISABLED | IRQF_TRIGGER_RISING,
  706. MG_DEV_NAME, host);
  707. if (err) {
  708. printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
  709. __func__, __LINE__, err);
  710. goto probe_err_3b;
  711. }
  712. }
  713. /* get disk id */
  714. err = mg_get_disk_id(host);
  715. if (err) {
  716. printk(KERN_ERR "%s:%d fail (err code : %d)\n",
  717. __func__, __LINE__, err);
  718. err = -EIO;
  719. goto probe_err_4;
  720. }
  721. err = register_blkdev(host->major, MG_DISK_NAME);
  722. if (err < 0) {
  723. printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
  724. __func__, __LINE__, err);
  725. goto probe_err_4;
  726. }
  727. if (!host->major)
  728. host->major = err;
  729. spin_lock_init(&host->lock);
  730. if (prv_data->use_polling)
  731. host->breq = blk_init_queue(mg_request_poll, &host->lock);
  732. else
  733. host->breq = blk_init_queue(mg_request, &host->lock);
  734. if (!host->breq) {
  735. err = -ENOMEM;
  736. printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
  737. __func__, __LINE__);
  738. goto probe_err_5;
  739. }
  740. /* mflash is random device, thanx for the noop */
  741. elevator_exit(host->breq->elevator);
  742. err = elevator_init(host->breq, "noop");
  743. if (err) {
  744. printk(KERN_ERR "%s:%d (elevator_init) fail\n",
  745. __func__, __LINE__);
  746. goto probe_err_6;
  747. }
  748. blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
  749. blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
  750. init_timer(&host->timer);
  751. host->timer.function = mg_times_out;
  752. host->timer.data = (unsigned long)host;
  753. host->gd = alloc_disk(MG_DISK_MAX_PART);
  754. if (!host->gd) {
  755. printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
  756. __func__, __LINE__);
  757. err = -ENOMEM;
  758. goto probe_err_7;
  759. }
  760. host->gd->major = host->major;
  761. host->gd->first_minor = 0;
  762. host->gd->fops = &mg_disk_ops;
  763. host->gd->queue = host->breq;
  764. host->gd->private_data = host;
  765. sprintf(host->gd->disk_name, MG_DISK_NAME"a");
  766. set_capacity(host->gd, host->n_sectors);
  767. add_disk(host->gd);
  768. return err;
  769. probe_err_7:
  770. del_timer_sync(&host->timer);
  771. probe_err_6:
  772. blk_cleanup_queue(host->breq);
  773. probe_err_5:
  774. unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
  775. probe_err_4:
  776. if (!prv_data->use_polling)
  777. free_irq(host->irq, host);
  778. probe_err_3b:
  779. gpio_free(host->rstout);
  780. probe_err_3a:
  781. gpio_free(host->rst);
  782. probe_err_3:
  783. iounmap(host->dev_base);
  784. probe_err_2:
  785. kfree(host);
  786. probe_err:
  787. return err;
  788. }
/*
 * mg_remove - tear down everything mg_probe() set up, in reverse order:
 * timer, gendisk, queue, blkdev registration, irq, GPIOs, mapping, host.
 * Always returns 0.
 */
static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}
/* Platform driver glue; .name must match the platform device's name. */
static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.suspend = mg_suspend,
	.resume = mg_resume,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
	}
};
/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/
  837. static int __init mg_init(void)
  838. {
  839. printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
  840. return platform_driver_register(&mg_disk_driver);
  841. }
  842. static void __exit mg_exit(void)
  843. {
  844. printk(KERN_INFO "mflash driver : bye bye\n");
  845. platform_driver_unregister(&mg_disk_driver);
  846. }
module_init(mg_init);
module_exit(mg_exit);

/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");