  1. /*
  2. * sata_inic162x.c - Driver for Initio 162x SATA controllers
  3. *
  4. * Copyright 2006 SUSE Linux Products GmbH
  5. * Copyright 2006 Tejun Heo <teheo@novell.com>
  6. *
  7. * This file is released under GPL v2.
  8. *
  9. * This controller is eccentric and easily locks up if something isn't
  10. * right. Documentation is available at initio's website but it only
  11. * documents registers (not programming model).
  12. *
  13. * - ATA disks work.
  14. * - Hotplug works.
  15. * - ATAPI read works but burning doesn't. This thing is really
  16. * peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
  17. * ATAPI DMA WRITE should be programmed. If you've got a clue, be
  18. * my guest.
  19. * - Both STR and STD work.
  20. */
  21. #include <linux/kernel.h>
  22. #include <linux/module.h>
  23. #include <linux/pci.h>
  24. #include <scsi/scsi_host.h>
  25. #include <linux/libata.h>
  26. #include <linux/blkdev.h>
  27. #include <scsi/scsi_device.h>
  28. #define DRV_NAME "sata_inic162x"
  29. #define DRV_VERSION "0.3"
enum {
	/* BAR 5 holds the controller's MMIO register space */
	MMIO_BAR		= 5,

	NR_PORTS		= 2,

	/* host-global registers (offsets into BAR 5) */
	HOST_ACTRL		= 0x08,
	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	/* each port occupies a PORT_SIZE window inside BAR 5 */
	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF_DATA		= 0x00,
	PORT_TF_FEATURE		= 0x01,
	PORT_TF_NSECT		= 0x02,
	PORT_TF_LBAL		= 0x03,
	PORT_TF_LBAM		= 0x04,
	PORT_TF_LBAH		= 0x05,
	PORT_TF_DEVICE		= 0x06,
	PORT_TF_COMMAND		= 0x07,
	PORT_TF_ALT_STAT	= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,
	PORT_CPB_CPBLAR		= 0x18,
	PORT_CPB_PTQFIFO	= 0x1c,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,
	PORT_IDMA_STAT		= 0x16,

	PORT_RPQ_FIFO		= 0x1e,
	PORT_RPQ_CNT		= 0x1f,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_FTHD0		= (1 << 10), /* fifo threshold 0 */
	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */

	/* PORT_IDMA_STAT bits */
	IDMA_STAT_PERR		= (1 << 0),  /* PCI ERROR MODE */
	IDMA_STAT_CPBERR	= (1 << 1),  /* ADMA CPB error */
	IDMA_STAT_LGCY		= (1 << 3),  /* ADMA legacy */
	IDMA_STAT_UIRQ		= (1 << 4),  /* ADMA unsolicited irq */
	IDMA_STAT_STPD		= (1 << 5),  /* ADMA stopped */
	IDMA_STAT_PSD		= (1 << 6),  /* ADMA pause */
	IDMA_STAT_DONE		= (1 << 7),  /* ADMA done */

	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,

	/* CPB Control Flags */
	CPB_CTL_VALID		= (1 << 0),  /* CPB valid */
	CPB_CTL_QUEUED		= (1 << 1),  /* queued command */
	CPB_CTL_DATA		= (1 << 2),  /* data, rsvd in datasheet */
	CPB_CTL_IEN		= (1 << 3),  /* PCI interrupt enable */
	CPB_CTL_DEVDIR		= (1 << 4),  /* device direction control */

	/* CPB Response Flags */
	CPB_RESP_DONE		= (1 << 0),  /* ATA command complete */
	CPB_RESP_REL		= (1 << 1),  /* ATA release */
	CPB_RESP_IGNORED	= (1 << 2),  /* CPB ignored */
	CPB_RESP_ATA_ERR	= (1 << 3),  /* ATA command error */
	CPB_RESP_SPURIOUS	= (1 << 4),  /* ATA spurious interrupt error */
	CPB_RESP_UNDERFLOW	= (1 << 5),  /* APRD deficiency length error */
	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
	CPB_RESP_CPB_ERR	= (1 << 7),  /* CPB error flag */

	/* PRD Control Flags */
	PRD_DRAIN		= (1 << 1),  /* ignore data excess */
	PRD_CDB			= (1 << 2),  /* atapi packet command pointer */
	PRD_DIRECT_INTR		= (1 << 3),  /* direct interrupt */
	PRD_DMA			= (1 << 4),  /* data transfer method */
	PRD_WRITE		= (1 << 5),  /* data dir, rsvd in datasheet */
	PRD_IOM			= (1 << 6),  /* io/memory transfer */
	PRD_END			= (1 << 7),  /* APRD chain end */
};
/* per-host private data */
struct inic_host_priv {
	u16	cached_hctl;	/* HOST_CTL value saved at probe, restored on resume */
};
/* per-port private data */
struct inic_port_priv {
	u8	dfl_prdctl;		/* default PORT_PRD_CTL value (DMAEN/WR/START off) */
	u8	cached_prdctl;		/* last PORT_PRD_CTL value written for a command */
	u8	cached_pirq_mask;	/* last PORT_IRQ_MASK written, avoids redundant MMIO */
};
/* SCSI host template — stock BMDMA defaults are sufficient */
static struct scsi_host_template inic_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
/* maps libata SCR indices to this controller's SCR register ordering */
static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};
  146. static void __iomem *inic_port_base(struct ata_port *ap)
  147. {
  148. return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
  149. }
  150. static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
  151. {
  152. void __iomem *port_base = inic_port_base(ap);
  153. struct inic_port_priv *pp = ap->private_data;
  154. writeb(mask, port_base + PORT_IRQ_MASK);
  155. pp->cached_pirq_mask = mask;
  156. }
  157. static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
  158. {
  159. struct inic_port_priv *pp = ap->private_data;
  160. if (pp->cached_pirq_mask != mask)
  161. __inic_set_pirq_mask(ap, mask);
  162. }
/*
 * Bring one port's IDMA engine back to a sane idle state: assert IDMA
 * reset with the ATA IRQ masked, release the reset, clear any pending
 * port IRQs, then drop out of IDMA mode with ATA IRQs re-enabled.
 * The write order below matters; do not reorder.
 */
static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	u16 ctl;

	ctl = readw(idma_ctl);
	/* base value with reset, NIEN and GO all cleared */
	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

	/* mask IRQ and assert reset */
	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
	readw(idma_ctl); /* flush */

	/* give it some time */
	msleep(1);

	/* release reset */
	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	/* reenable ATA IRQ, turn off IDMA mode */
	writew(ctl, idma_ctl);
}
  181. static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
  182. {
  183. void __iomem *scr_addr = ap->ioaddr.scr_addr;
  184. void __iomem *addr;
  185. if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
  186. return -EINVAL;
  187. addr = scr_addr + scr_map[sc_reg] * 4;
  188. *val = readl(scr_addr + scr_map[sc_reg] * 4);
  189. /* this controller has stuck DIAG.N, ignore it */
  190. if (sc_reg == SCR_ERROR)
  191. *val &= ~SERR_PHYRDY_CHG;
  192. return 0;
  193. }
  194. static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
  195. {
  196. void __iomem *scr_addr = ap->ioaddr.scr_addr;
  197. if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
  198. return -EINVAL;
  199. writel(val, scr_addr + scr_map[sc_reg] * 4);
  200. return 0;
  201. }
/*
 * In TF mode, inic162x is very similar to an SFF device.  TF registers
 * function the same.  The DMA engine behaves similarly, using the same
 * PRD format as BMDMA, but different command register, interrupt and
 * event notification methods are used.  The following inic_bmdma_*()
 * functions do the impedance matching.
 */
/*
 * Prepare the port's DMA engine for @qc and issue the taskfile.
 * Programs the transfer length, enables DMA with the proper direction
 * bit, then writes the ATA command.
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);
	int rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* make sure device sees PRD table writes */
	wmb();

	/* load transfer length */
	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

	/* turn on DMA and specify data direction */
	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
	/* NOTE: PRD_CTL_WR is set for READs — presumably the bit is from
	 * the DMA engine's point of view (writing into host memory);
	 * confirm against the Initio datasheet before changing. */
	if (!rw)
		pp->cached_prdctl |= PRD_CTL_WR;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
  227. static void inic_bmdma_start(struct ata_queued_cmd *qc)
  228. {
  229. struct ata_port *ap = qc->ap;
  230. struct inic_port_priv *pp = ap->private_data;
  231. void __iomem *port_base = inic_port_base(ap);
  232. /* start host DMA transaction */
  233. pp->cached_prdctl |= PRD_CTL_START;
  234. writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
  235. }
  236. static void inic_bmdma_stop(struct ata_queued_cmd *qc)
  237. {
  238. struct ata_port *ap = qc->ap;
  239. struct inic_port_priv *pp = ap->private_data;
  240. void __iomem *port_base = inic_port_base(ap);
  241. /* stop DMA engine */
  242. writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
  243. }
/*
 * BMDMA status callback.  Always reports "interrupt seen": by the time
 * libata asks, the event has already been verified by inic_host_intr().
 */
static u8 inic_bmdma_status(struct ata_port *ap)
{
	/* event is already verified by the interrupt handler */
	return ATA_DMA_INTR;
}
/*
 * Per-port interrupt handler.  Acks the port IRQ status, hands normal
 * completions to libata's SFF interrupt path and escalates error bits
 * (PIRQ_ERR) to EH — freezing on hotplug events, aborting otherwise.
 */
static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 irq_stat;

	/* fetch and clear irq */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);

	if (likely(!(irq_stat & PIRQ_ERR))) {
		struct ata_queued_cmd *qc =
			ata_qc_from_tag(ap, ap->link.active_tag);

		/* no active command or polled command: just ack the device */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			ap->ops->sff_check_status(ap); /* clear ATA interrupt */
			return;
		}

		if (likely(ata_sff_host_intr(ap, qc)))
			return;

		/* SFF path didn't take it — ack and warn */
		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
		ata_port_printk(ap, KERN_WARNING, "unhandled "
				"interrupt, irq_stat=%x\n", irq_stat);
		return;
	}

	/* error */
	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else
		ata_port_abort(ap);
}
  279. static irqreturn_t inic_interrupt(int irq, void *dev_instance)
  280. {
  281. struct ata_host *host = dev_instance;
  282. void __iomem *mmio_base = host->iomap[MMIO_BAR];
  283. u16 host_irq_stat;
  284. int i, handled = 0;;
  285. host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
  286. if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
  287. goto out;
  288. spin_lock(&host->lock);
  289. for (i = 0; i < NR_PORTS; i++) {
  290. struct ata_port *ap = host->ports[i];
  291. if (!(host_irq_stat & (HIRQ_PORT0 << i)))
  292. continue;
  293. if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
  294. inic_host_intr(ap);
  295. handled++;
  296. } else {
  297. if (ata_ratelimit())
  298. dev_printk(KERN_ERR, host->dev, "interrupt "
  299. "from disabled port %d (0x%x)\n",
  300. i, host_irq_stat);
  301. }
  302. }
  303. spin_unlock(&host->lock);
  304. out:
  305. return IRQ_RETVAL(handled);
  306. }
/*
 * Issue @qc, working around two controller quirks: the IRQ mask must be
 * chosen per command direction to detect completion reliably, and
 * IDENTIFY commands issued to an uninitialized port lock the controller.
 */
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* ATA IRQ doesn't wait for DMA transfer completion and vice
	 * versa.  Mask IRQ selectively to detect command completion.
	 * Without it, ATA DMA read command can cause data corruption.
	 *
	 * Something similar might be needed for ATAPI writes.  I
	 * tried a lot of combinations but couldn't find the solution.
	 */
	if (qc->tf.protocol == ATA_PROT_DMA &&
	    !(qc->tf.flags & ATA_TFLAG_WRITE))
		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
	else
		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	/* Issuing a command to yet uninitialized port locks up the
	 * controller.  Most of the time, this happens for the first
	 * command after reset which are ATA and ATAPI IDENTIFYs.
	 * Fast fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ap->ops->sff_check_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_sff_qc_issue(qc);
}
  335. static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
  336. {
  337. void __iomem *port_base = inic_port_base(ap);
  338. tf->feature = readb(port_base + PORT_TF_FEATURE);
  339. tf->nsect = readb(port_base + PORT_TF_NSECT);
  340. tf->lbal = readb(port_base + PORT_TF_LBAL);
  341. tf->lbam = readb(port_base + PORT_TF_LBAM);
  342. tf->lbah = readb(port_base + PORT_TF_LBAH);
  343. tf->device = readb(port_base + PORT_TF_DEVICE);
  344. tf->command = readb(port_base + PORT_TF_COMMAND);
  345. }
/*
 * Fill @qc->result_tf after command completion.  Only status and error
 * are trustworthy on this hardware; returns true iff the device flagged
 * an error (so libata only uses the result TF in that case).
 */
static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *rtf = &qc->result_tf;
	struct ata_taskfile tf;

	/* FIXME: Except for status and error, result TF access
	 * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
	 * None works regardless of which command interface is used.
	 * For now return true iff status indicates device error.
	 * This means that we're reporting bogus sector for RW
	 * failures.  Eeekk....
	 */
	inic_tf_read(qc->ap, &tf);

	if (!(tf.command & ATA_ERR))
		return false;

	rtf->command = tf.command;
	rtf->feature = tf.feature;
	return true;
}
/*
 * Freeze the port for EH: mask all port IRQs, then ack any interrupt
 * already latched in the device and the port IRQ status register.
 */
static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);

	ap->ops->sff_check_status(ap);	/* clear pending ATA interrupt */
	writeb(0xff, port_base + PORT_IRQ_STAT);
}
/*
 * Thaw the port after EH: ack stale interrupts first, then restore the
 * normal (non-DMA-read) IRQ mask.
 */
static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	ap->ops->sff_check_status(ap);	/* clear pending ATA interrupt */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
}
  378. static int inic_check_ready(struct ata_link *link)
  379. {
  380. void __iomem *port_base = inic_port_base(link->ap);
  381. return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
  382. }
  383. /*
  384. * SRST and SControl hardreset don't give valid signature on this
  385. * controller. Only controller specific hardreset mechanism works.
  386. */
/*
 * Controller-specific hardreset: pulse IDMA_CTL_RST_ATA on the port,
 * resume the link, then wait for readiness and classify the device via
 * the shadow taskfile.  Sets *@class to the detected device class
 * (ATA_DEV_NONE when the link stays offline).  Returns 0 or -errno.
 */
static int inic_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	u16 val;
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	/* pulse the ATA hardreset bit */
	val = readw(idma_ctl);
	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	msleep(1);
	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

	rc = sata_link_resume(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_WARNING, "failed to resume "
				"link after reset (errno=%d)\n", rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_link_online(link)) {
		struct ata_taskfile tf;

		/* wait for link to become ready */
		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
		/* link occupied, -ENODEV too is an error */
		if (rc) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"after hardreset (errno=%d)\n", rc);
			return rc;
		}

		inic_tf_read(ap, &tf);
		*class = ata_dev_classify(&tf);
	}

	return 0;
}
/*
 * EH entry point: quiesce the PIO state machine and DMA engine before
 * handing off to the standard libata error handler.
 */
static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;
	unsigned long flags;

	/* reset PIO HSM and stop DMA engine */
	inic_reset_port(port_base);

	spin_lock_irqsave(ap->lock, flags);
	ap->hsm_task_state = HSM_ST_IDLE;
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
	spin_unlock_irqrestore(ap->lock, flags);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_std_error_handler(ap);
}
  439. static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
  440. {
  441. /* make DMA engine forget about the failed command */
  442. if (qc->flags & ATA_QCFLAG_FAILED)
  443. inic_reset_port(inic_port_base(qc->ap));
  444. }
/*
 * Per-device fixups: clamp max_sectors to the LBA28 limit and refuse
 * devices that require LBA48 addressing, which this driver cannot do.
 */
static void inic_dev_config(struct ata_device *dev)
{
	/* inic can only handle up to LBA28 max sectors */
	if (dev->max_sectors > ATA_MAX_SECTORS)
		dev->max_sectors = ATA_MAX_SECTORS;

	/* devices larger than 2^28 sectors need LBA48 — disable them */
	if (dev->n_sectors >= 1 << 28) {
		ata_dev_printk(dev, KERN_ERR,
	"ERROR: This driver doesn't support LBA48 yet and may cause\n"
	"                data corruption on such devices.  Disabling.\n");
		ata_dev_disable(dev);
	}
}
  457. static void init_port(struct ata_port *ap)
  458. {
  459. void __iomem *port_base = inic_port_base(ap);
  460. /* Setup PRD address */
  461. writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
  462. }
  463. static int inic_port_resume(struct ata_port *ap)
  464. {
  465. init_port(ap);
  466. return 0;
  467. }
/*
 * Port init: allocate per-port private data, snapshot the default
 * PRD_CTL value (with DMAEN/WR/START cleared), allocate libata port
 * resources and program the PRD address.  Returns 0 or -errno.
 */
static int inic_port_start(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp;
	u8 tmp;
	int rc;

	/* alloc and initialize private data */
	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* default PRD_CTL value, DMAEN, WR and START off */
	tmp = readb(port_base + PORT_PRD_CTL);
	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
	pp->dfl_prdctl = tmp;

	/* Alloc resources */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	init_port(ap);

	return 0;
}
/* libata operations — SFF base with inic-specific BMDMA/EH overrides */
static struct ata_port_operations inic_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.bmdma_setup		= inic_bmdma_setup,
	.bmdma_start		= inic_bmdma_start,
	.bmdma_stop		= inic_bmdma_stop,
	.bmdma_status		= inic_bmdma_status,
	.qc_issue		= inic_qc_issue,
	.qc_fill_rtf		= inic_qc_fill_rtf,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.softreset		= ATA_OP_NULL,	/* softreset is broken */
	.hardreset		= inic_hardreset,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,
	.dev_config		= inic_dev_config,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.port_resume		= inic_port_resume,
	.port_start		= inic_port_start,
};
static struct ata_port_info inic_port_info = {
	/* For some reason, ATAPI_PROT_PIO is broken on this
	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
	 * manages to report the wrong ireason and ignoring ireason
	 * results in machine lock up.  Tell libata to always prefer
	 * DMA.
	 */
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= 0x1f,	/* pio0-4 */
	.mwdma_mask		= 0x07,	/* mwdma0-2 */
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &inic_port_ops
};
/*
 * Soft-reset the whole controller and bring it to a known state:
 * all port IRQs masked, ports reset, global IRQ unmasked.
 * @hctl is the cached HOST_CTL value to restore (known bits cleared).
 * Returns 0 on success, -EIO if the soft reset never self-clears.
 */
static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}
  554. #ifdef CONFIG_PM
/*
 * PCI resume hook: restore PCI state, re-run controller init (only for
 * a full suspend — not e.g. freeze) and resume the libata host.
 */
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct inic_host_priv *hpriv = host->private_data;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* hardware state was lost only on a real suspend */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
  572. #endif
/*
 * PCI probe: allocate the host, map BARs 0-5, set up per-port ioaddr
 * from the legacy BARs plus the BAR 5 SCR window, configure 32-bit DMA
 * with a reduced max segment size (65536-byte PRD entries hard-lock the
 * machine), initialize the controller and activate the host.
 */
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* map BARs 0-5 (mask 0x3f) */
	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *port = &ap->ioaddr;
		unsigned int offset = i * PORT_SIZE;

		/* TF registers live in the even legacy BAR, ctl in the odd */
		port->cmd_addr = iomap[2 * i];
		port->altstatus_addr =
		port->ctl_addr = (void __iomem *)
			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;

		ata_sff_std_ports(port);

		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
		  (unsigned long long)pci_resource_start(pdev, 2 * i),
		  (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
				      ATA_PCI_CTL_OFS);
	}

	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

	/* Set dma_mask.  This devices doesn't support 64bit addressing. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
		return rc;
	}

	/*
	 * This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * 65536 byte PRD entry is fed.  Reduce maximum segment size.
	 */
	rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to set the maximum segment size.\n");
		return rc;
	}

	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}
  649. static const struct pci_device_id inic_pci_tbl[] = {
  650. { PCI_VDEVICE(INIT, 0x1622), },
  651. { },
  652. };
static struct pci_driver inic_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe 		= inic_init_one,
	.remove		= ata_pci_remove_one,
};
  663. static int __init inic_init(void)
  664. {
  665. return pci_register_driver(&inic_pci_driver);
  666. }
  667. static void __exit inic_exit(void)
  668. {
  669. pci_unregister_driver(&inic_pci_driver);
  670. }
/* module metadata */
MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);