/* pata_octeon_cf.c */
  1. /*
  2. * Driver for the Octeon bootbus compact flash.
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 2005 - 2009 Cavium Networks
  9. * Copyright (C) 2008 Wind River Systems
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/libata.h>
  14. #include <linux/irq.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/workqueue.h>
  17. #include <scsi/scsi_host.h>
  18. #include <asm/octeon/octeon.h>
  19. /*
  20. * The Octeon bootbus compact flash interface is connected in at least
  21. * 3 different configurations on various evaluation boards:
  22. *
  23. * -- 8 bits no irq, no DMA
  24. * -- 16 bits no irq, no DMA
  25. * -- 16 bits True IDE mode with DMA, but no irq.
  26. *
  27. * In the last case the DMA engine can generate an interrupt when the
  28. * transfer is complete. For the first two cases only PIO is supported.
  29. *
  30. */
  31. #define DRV_NAME "pata_octeon_cf"
  32. #define DRV_VERSION "2.1"
/* Per-port driver state, stored in ata_port->private_data. */
struct octeon_cf_port {
	struct workqueue_struct *wq;	/* workqueue running delayed_finish */
	struct delayed_work delayed_finish; /* re-polls for not-busy after DMA irq */
	struct ata_port *ap;		/* back-pointer to the owning port */
	int dma_finished;		/* nonzero once the DMA engine has completed */
};
/* SCSI host template: standard PIO defaults; DMA is driven by the bootbus engine. */
static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};
  42. /**
  43. * Convert nanosecond based time to setting used in the
  44. * boot bus timing register, based on timing multiple
  45. */
  46. static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
  47. {
  48. unsigned int val;
  49. /*
  50. * Compute # of eclock periods to get desired duration in
  51. * nanoseconds.
  52. */
  53. val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
  54. 1000 * tim_mult);
  55. return val;
  56. }
  57. static void octeon_cf_set_boot_reg_cfg(int cs)
  58. {
  59. union cvmx_mio_boot_reg_cfgx reg_cfg;
  60. reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
  61. reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
  62. reg_cfg.s.tim_mult = 2; /* Timing mutiplier 2x */
  63. reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
  64. reg_cfg.s.sam = 0; /* Don't combine write and output enable */
  65. reg_cfg.s.we_ext = 0; /* No write enable extension */
  66. reg_cfg.s.oe_ext = 0; /* No read enable extension */
  67. reg_cfg.s.en = 1; /* Enable this region */
  68. reg_cfg.s.orbit = 0; /* Don't combine with previous region */
  69. reg_cfg.s.ale = 0; /* Don't do address multiplexing */
  70. cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
  71. }
/**
 * Called after libata determines the needed PIO mode. This
 * function programs the Octeon bootbus regions to support the
 * timing requirements of the PIO mode.
 *
 * @ap: ATA port information
 * @dev: ATA device
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int cs = ocd->base_region;
	int T;
	struct ata_timing timing;
	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t1;
	int t2;
	int t2i;

	/*
	 * T is derived from 2 * 10^12 / Hz, i.e. the clock period in
	 * picoseconds scaled for the 2x timing multiplier programmed
	 * in octeon_cf_set_boot_reg_cfg().
	 */
	T = (int)(2000000000000LL / octeon_get_clock_rate());

	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
		BUG();

	/*
	 * NOTE(review): t1 and t2i are computed below but never
	 * programmed into reg_tim — only t2, trh and pause are used.
	 * Kept as-is; confirm before removing.
	 */
	t1 = timing.setup;
	if (t1)
		t1--;
	t2 = timing.active;
	if (t2)
		t2--;
	t2i = timing.act8b;
	if (t2i)
		t2i--;

	/* 20 ns hold time, converted to ticks (minus-one notation). */
	trh = ns_to_tim_reg(2, 20);
	if (trh)
		trh--;

	/* Remainder of the cycle after setup, active and hold. */
	pause = timing.cycle - timing.active - timing.setup - trh;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cs);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		octeon_cf_set_boot_reg_cfg(cs + 1);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(2, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
}
  150. static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
  151. {
  152. struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
  153. union cvmx_mio_boot_dma_timx dma_tim;
  154. unsigned int oe_a;
  155. unsigned int oe_n;
  156. unsigned int dma_ackh;
  157. unsigned int dma_arq;
  158. unsigned int pause;
  159. unsigned int T0, Tkr, Td;
  160. unsigned int tim_mult;
  161. const struct ata_timing *timing;
  162. timing = ata_timing_find_mode(dev->dma_mode);
  163. T0 = timing->cycle;
  164. Td = timing->active;
  165. Tkr = timing->recover;
  166. dma_ackh = timing->dmack_hold;
  167. dma_tim.u64 = 0;
  168. /* dma_tim.s.tim_mult = 0 --> 4x */
  169. tim_mult = 4;
  170. /* not spec'ed, value in eclocks, not affected by tim_mult */
  171. dma_arq = 8;
  172. pause = 25 - dma_arq * 1000 /
  173. (octeon_get_clock_rate() / 1000000); /* Tz */
  174. oe_a = Td;
  175. /* Tkr from cf spec, lengthened to meet T0 */
  176. oe_n = max(T0 - oe_a, Tkr);
  177. dma_tim.s.dmack_pi = 1;
  178. dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
  179. dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
  180. /*
  181. * This is tI, C.F. spec. says 0, but Sony CF card requires
  182. * more, we use 20 nS.
  183. */
  184. dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);;
  185. dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
  186. dma_tim.s.dmarq = dma_arq;
  187. dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);
  188. dma_tim.s.rd_dly = 0; /* Sample right on edge */
  189. /* writes only */
  190. dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
  191. dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
  192. pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
  193. ns_to_tim_reg(tim_mult, 60));
  194. pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
  195. "%d, dmarq: %d, pause: %d\n",
  196. dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
  197. dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
  198. cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
  199. dma_tim.u64);
  200. }
  201. /**
  202. * Handle an 8 bit I/O request.
  203. *
  204. * @dev: Device to access
  205. * @buffer: Data buffer
  206. * @buflen: Length of the buffer.
  207. * @rw: True to write.
  208. */
  209. static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
  210. unsigned char *buffer,
  211. unsigned int buflen,
  212. int rw)
  213. {
  214. struct ata_port *ap = dev->link->ap;
  215. void __iomem *data_addr = ap->ioaddr.data_addr;
  216. unsigned long words;
  217. int count;
  218. words = buflen;
  219. if (rw) {
  220. count = 16;
  221. while (words--) {
  222. iowrite8(*buffer, data_addr);
  223. buffer++;
  224. /*
  225. * Every 16 writes do a read so the bootbus
  226. * FIFO doesn't fill up.
  227. */
  228. if (--count == 0) {
  229. ioread8(ap->ioaddr.altstatus_addr);
  230. count = 16;
  231. }
  232. }
  233. } else {
  234. ioread8_rep(data_addr, buffer, words);
  235. }
  236. return buflen;
  237. }
/**
 * Handle a 16 bit I/O request.
 *
 * @dev: Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw: True to write.
 *
 * Returns the number of bytes transferred (always @buflen).
 */
static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		/* @rw is "true to write", so rw == READ selects the read path. */
		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}
	return buflen;
}
/**
 * Read the taskfile for 16bit non-True IDE only.
 *
 * In this mode the taskfile registers are packed two per 16-bit
 * bootbus word starting at ioaddr.data_addr, so each __raw_readw()
 * fetches a pair of registers and the bytes are split out below.
 */
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
	u16 blob;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 0xc);		/* feature in high byte */
	tf->feature = blob >> 8;

	blob = __raw_readw(base + 2);		/* nsect (low) / lbal (high) */
	tf->nsect = blob & 0xff;
	tf->lbal = blob >> 8;

	blob = __raw_readw(base + 4);		/* lbam (low) / lbah (high) */
	tf->lbam = blob & 0xff;
	tf->lbah = blob >> 8;

	blob = __raw_readw(base + 6);		/* device (low) / command (high) */
	tf->device = blob & 0xff;
	tf->command = blob >> 8;

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ap->ioaddr.ctl_addr)) {
			/* Set HOB to read the previously-written values. */
			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);

			blob = __raw_readw(base + 0xc);
			tf->hob_feature = blob >> 8;

			blob = __raw_readw(base + 2);
			tf->hob_nsect = blob & 0xff;
			tf->hob_lbal = blob >> 8;

			blob = __raw_readw(base + 4);
			tf->hob_lbam = blob & 0xff;
			tf->hob_lbah = blob >> 8;

			/* Restore the normal (non-HOB) register view. */
			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			WARN_ON(1);
		}
	}
}
  327. static u8 octeon_cf_check_status16(struct ata_port *ap)
  328. {
  329. u16 blob;
  330. void __iomem *base = ap->ioaddr.data_addr;
  331. blob = __raw_readw(base + 6);
  332. return blob >> 8;
  333. }
/*
 * Soft reset for 16bit non-True IDE mode: pulse SRST via the device
 * control register (at base + 0xe in this packed mapping), then wait
 * for the device and classify it by signature.
 */
static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	DPRINTK("about to softreset\n");
	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/**
 * Load the taskfile for 16bit non-True IDE only. The device_addr is
 * not loaded, we do this as part of octeon_cf_exec_command16.
 *
 * Registers are packed two per 16-bit word, so each __raw_writew()
 * stores a pair of taskfile bytes.
 */
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	/* Only rewrite the control register when it changes. */
	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	/* For LBA48 the HOB bytes must be written before the low bytes. */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}
	ata_wait_idle(ap);
}
  396. static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
  397. {
  398. /* There is only one device, do nothing. */
  399. return;
  400. }
  401. /*
  402. * Issue ATA command to host controller. The device_addr is also sent
  403. * as it must be written in a combined write with the command.
  404. */
  405. static void octeon_cf_exec_command16(struct ata_port *ap,
  406. const struct ata_taskfile *tf)
  407. {
  408. /* The base of the registers is at ioaddr.data_addr. */
  409. void __iomem *base = ap->ioaddr.data_addr;
  410. u16 blob;
  411. if (tf->flags & ATA_TFLAG_DEVICE) {
  412. VPRINTK("device 0x%X\n", tf->device);
  413. blob = tf->device;
  414. } else {
  415. blob = 0;
  416. }
  417. DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
  418. blob |= (tf->command << 8);
  419. __raw_writew(blob, base + 6);
  420. ata_wait_idle(ap);
  421. }
  422. static u8 octeon_cf_irq_on(struct ata_port *ap)
  423. {
  424. return 0;
  425. }
  426. static void octeon_cf_irq_clear(struct ata_port *ap)
  427. {
  428. return;
  429. }
  430. static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
  431. {
  432. struct ata_port *ap = qc->ap;
  433. struct octeon_cf_port *cf_port;
  434. cf_port = ap->private_data;
  435. DPRINTK("ENTER\n");
  436. /* issue r/w command */
  437. qc->cursg = qc->sg;
  438. cf_port->dma_finished = 0;
  439. ap->ops->sff_exec_command(ap, &qc->tf);
  440. DPRINTK("EXIT\n");
  441. }
/**
 * Start a DMA transfer that was already setup
 *
 * @qc: Information about the DMA
 *
 * Programs the bootbus DMA engine for the current scatterlist entry
 * (qc->cursg) and enables its completion interrupt.
 */
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	VPRINTK("%d scatterlists\n", qc->n_elem);

	/* Get the scatter list entry we need to DMA into */
	sg = qc->cursg;
	BUG_ON(!sg);

	/*
	 * Clear the DMA complete status.
	 */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Enable the interrupt. */
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Set the direction of the DMA */
	mio_boot_dma_cfg.u64 = 0;
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ. Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors. Instead of stopping and restarting the DMA, we'll
	 * let the hardware do it. If the DMA is really stopped early
	 * due to an error condition, a later timeout will force us to
	 * stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16bit words and minus one notation */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* We need to swap the high and low bytes of every 16 bits */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	VPRINTK("%s %d bytes address=%p\n",
		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);

	/* Writing the config register starts the transfer. */
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
		       mio_boot_dma_cfg.u64);
}
/**
 * Finish a completed DMA: shut down the engine, check for a short
 * transfer, and advance the libata HSM.
 *
 * Returns 1 if the command was handled, 0 if the port was not
 * waiting for DMA completion.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	struct octeon_cf_port *cf_port;
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	cf_port = ap->private_data;

	dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
	/* size counts down; 0xfffff (i.e. -1 in minus-one notation) means done. */
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete. */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine. */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);

	/* Disable the interrupt. */
	dma_int.u64 = 0;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);

	/* Clear the DMA complete status */
	dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}
  533. /*
  534. * Check if any queued commands have more DMAs, if so start the next
  535. * transfer, else do end of transfer handling.
  536. */
  537. static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
  538. {
  539. struct ata_host *host = dev_instance;
  540. struct octeon_cf_port *cf_port;
  541. int i;
  542. unsigned int handled = 0;
  543. unsigned long flags;
  544. spin_lock_irqsave(&host->lock, flags);
  545. DPRINTK("ENTER\n");
  546. for (i = 0; i < host->n_ports; i++) {
  547. u8 status;
  548. struct ata_port *ap;
  549. struct ata_queued_cmd *qc;
  550. union cvmx_mio_boot_dma_intx dma_int;
  551. union cvmx_mio_boot_dma_cfgx dma_cfg;
  552. struct octeon_cf_data *ocd;
  553. ap = host->ports[i];
  554. ocd = ap->dev->platform_data;
  555. if (!ap || (ap->flags & ATA_FLAG_DISABLED))
  556. continue;
  557. ocd = ap->dev->platform_data;
  558. cf_port = ap->private_data;
  559. dma_int.u64 =
  560. cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
  561. dma_cfg.u64 =
  562. cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
  563. qc = ata_qc_from_tag(ap, ap->link.active_tag);
  564. if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
  565. (qc->flags & ATA_QCFLAG_ACTIVE)) {
  566. if (dma_int.s.done && !dma_cfg.s.en) {
  567. if (!sg_is_last(qc->cursg)) {
  568. qc->cursg = sg_next(qc->cursg);
  569. handled = 1;
  570. octeon_cf_dma_start(qc);
  571. continue;
  572. } else {
  573. cf_port->dma_finished = 1;
  574. }
  575. }
  576. if (!cf_port->dma_finished)
  577. continue;
  578. status = ioread8(ap->ioaddr.altstatus_addr);
  579. if (status & (ATA_BUSY | ATA_DRQ)) {
  580. /*
  581. * We are busy, try to handle it
  582. * later. This is the DMA finished
  583. * interrupt, and it could take a
  584. * little while for the card to be
  585. * ready for more commands.
  586. */
  587. /* Clear DMA irq. */
  588. dma_int.u64 = 0;
  589. dma_int.s.done = 1;
  590. cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
  591. dma_int.u64);
  592. queue_delayed_work(cf_port->wq,
  593. &cf_port->delayed_finish, 1);
  594. handled = 1;
  595. } else {
  596. handled |= octeon_cf_dma_finished(ap, qc);
  597. }
  598. }
  599. }
  600. spin_unlock_irqrestore(&host->lock, flags);
  601. DPRINTK("EXIT\n");
  602. return IRQ_RETVAL(handled);
  603. }
/*
 * Delayed-work handler: re-poll the device after a DMA-done interrupt
 * found it still busy, and complete the command once it is ready.
 */
static void octeon_cf_delayed_finish(struct work_struct *work)
{
	struct octeon_cf_port *cf_port = container_of(work,
						      struct octeon_cf_port,
						      delayed_finish.work);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously. The hsm_task_state is
	 * protected by host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		queue_delayed_work(cf_port->wq,
				   &cf_port->delayed_finish, 1);
		goto out;
	}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
	    (qc->flags & ATA_QCFLAG_ACTIVE))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
  636. static void octeon_cf_dev_config(struct ata_device *dev)
  637. {
  638. /*
  639. * A maximum of 2^20 - 1 16 bit transfers are possible with
  640. * the bootbus DMA. So we need to throttle max_sectors to
  641. * (2^12 - 1 == 4095) to assure that this can never happen.
  642. */
  643. dev->max_sectors = min(dev->max_sectors, 4095U);
  644. }
/*
 * Trap if driver tries to do standard bmdma commands. They are not
 * supported.
 */
static void unreachable_qc(struct ata_queued_cmd *qc)
{
	BUG();
}
/* Port-level counterpart of unreachable_qc(); must never be called. */
static u8 unreachable_port(struct ata_port *ap)
{
	BUG();
}
  657. /*
  658. * We don't do ATAPI DMA so return 0.
  659. */
  660. static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
  661. {
  662. return 0;
  663. }
/*
 * Issue a queued command. ATA DMA is driven by the bootbus DMA
 * engine; everything else is delegated to the generic SFF path.
 */
static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		octeon_cf_dma_setup(qc);	    /* set up dma */
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();	/* does not return; the fall-through is unreachable */

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}
/*
 * Port operations. The sff_data_xfer / taskfile hooks are patched at
 * probe time depending on the 8-bit / 16-bit / True IDE configuration.
 */
static struct ata_port_operations octeon_cf_ops = {
	.inherits		= &ata_sff_port_ops,
	.check_atapi_dma	= octeon_cf_check_atapi_dma,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= octeon_cf_qc_issue,
	.sff_dev_select		= octeon_cf_dev_select,
	.sff_irq_on		= octeon_cf_irq_on,
	.sff_irq_clear		= octeon_cf_irq_clear,
	/* bmdma hooks must never run: DMA goes through the bootbus engine. */
	.bmdma_setup		= unreachable_qc,
	.bmdma_start		= unreachable_qc,
	.bmdma_stop		= unreachable_qc,
	.bmdma_status		= unreachable_port,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= octeon_cf_set_piomode,
	.set_dmamode		= octeon_cf_set_dmamode,
	.dev_config		= octeon_cf_dev_config,
};
  700. static int __devinit octeon_cf_probe(struct platform_device *pdev)
  701. {
  702. struct resource *res_cs0, *res_cs1;
  703. void __iomem *cs0;
  704. void __iomem *cs1 = NULL;
  705. struct ata_host *host;
  706. struct ata_port *ap;
  707. struct octeon_cf_data *ocd;
  708. int irq = 0;
  709. irq_handler_t irq_handler = NULL;
  710. void __iomem *base;
  711. struct octeon_cf_port *cf_port;
  712. res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  713. if (!res_cs0)
  714. return -EINVAL;
  715. ocd = pdev->dev.platform_data;
  716. cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
  717. res_cs0->end - res_cs0->start + 1);
  718. if (!cs0)
  719. return -ENOMEM;
  720. /* Determine from availability of DMA if True IDE mode or not */
  721. if (ocd->dma_engine >= 0) {
  722. res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  723. if (!res_cs1)
  724. return -EINVAL;
  725. cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
  726. res_cs0->end - res_cs1->start + 1);
  727. if (!cs1)
  728. return -ENOMEM;
  729. }
  730. cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
  731. if (!cf_port)
  732. return -ENOMEM;
  733. /* allocate host */
  734. host = ata_host_alloc(&pdev->dev, 1);
  735. if (!host)
  736. goto free_cf_port;
  737. ap = host->ports[0];
  738. ap->private_data = cf_port;
  739. cf_port->ap = ap;
  740. ap->ops = &octeon_cf_ops;
  741. ap->pio_mask = ATA_PIO6;
  742. ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
  743. | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
  744. base = cs0 + ocd->base_region_bias;
  745. if (!ocd->is16bit) {
  746. ap->ioaddr.cmd_addr = base;
  747. ata_sff_std_ports(&ap->ioaddr);
  748. ap->ioaddr.altstatus_addr = base + 0xe;
  749. ap->ioaddr.ctl_addr = base + 0xe;
  750. octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
  751. } else if (cs1) {
  752. /* Presence of cs1 indicates True IDE mode. */
  753. ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
  754. ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
  755. ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
  756. ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1;
  757. ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1;
  758. ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1;
  759. ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1;
  760. ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1;
  761. ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1;
  762. ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1;
  763. ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1;
  764. ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
  765. ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
  766. octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
  767. ap->mwdma_mask = ATA_MWDMA4;
  768. irq = platform_get_irq(pdev, 0);
  769. irq_handler = octeon_cf_interrupt;
  770. /* True IDE mode needs delayed work to poll for not-busy. */
  771. cf_port->wq = create_singlethread_workqueue(DRV_NAME);
  772. if (!cf_port->wq)
  773. goto free_cf_port;
  774. INIT_DELAYED_WORK(&cf_port->delayed_finish,
  775. octeon_cf_delayed_finish);
  776. } else {
  777. /* 16 bit but not True IDE */
  778. octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
  779. octeon_cf_ops.softreset = octeon_cf_softreset16;
  780. octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
  781. octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
  782. octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
  783. octeon_cf_ops.sff_exec_command = octeon_cf_exec_command16;
  784. ap->ioaddr.data_addr = base + ATA_REG_DATA;
  785. ap->ioaddr.nsect_addr = base + ATA_REG_NSECT;
  786. ap->ioaddr.lbal_addr = base + ATA_REG_LBAL;
  787. ap->ioaddr.ctl_addr = base + 0xe;
  788. ap->ioaddr.altstatus_addr = base + 0xe;
  789. }
  790. ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
  791. dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
  792. (ocd->is16bit) ? 16 : 8,
  793. (cs1) ? ", True IDE" : "");
  794. return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
  795. free_cf_port:
  796. kfree(cf_port);
  797. return -ENOMEM;
  798. }
/* Platform driver; matched against devices named "pata_octeon_cf". */
static struct platform_driver octeon_cf_driver = {
	.probe		= octeon_cf_probe,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init octeon_cf_init(void)
{
	return platform_driver_register(&octeon_cf_driver);
}
/* Module metadata and initialization hook. */
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(octeon_cf_init);