scc_pata.c

/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat <alan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/init.h>

#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA	0x01b4

#define SCC_PATA_NAME		"scc IDE"

#define TDVHSEL_MASTER		0x00000001
#define TDVHSEL_SLAVE		0x00000004

#define MODE_JCUSFEN		0x00000080

#define CCKCTRL_ATARESET	0x00040000
#define CCKCTRL_BUFCNT		0x00020000
#define CCKCTRL_CRST		0x00010000
#define CCKCTRL_OCLKEN		0x00000100
#define CCKCTRL_ATACLKOEN	0x00000002
#define CCKCTRL_LCLKEN		0x00000001

#define QCHCD_IOS_SS		0x00000001

#define QCHSD_STPDIAG		0x00020000

#define INTMASK_MSK		0xD1000012
#define INTSTS_SERROR		0x80000000
#define INTSTS_PRERR		0x40000000
#define INTSTS_RERR		0x10000000
#define INTSTS_ICERR		0x01000000
#define INTSTS_BMSINT		0x00000010
#define INTSTS_BMHE		0x00000008
#define INTSTS_IOIRQS		0x00000004
#define INTSTS_INTRQ		0x00000002
#define INTSTS_ACTEINT		0x00000001

#define ECMODE_VALUE		0x01

static struct scc_ports {
	unsigned long ctl, dma;
	ide_hwif_t *hwif;  /* for removing port from system */
} scc_ports[MAX_HWIFS];

/* PIO transfer mode table */
/* JCHST */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}	/* 133MHz */
};

/* JCHHT */
static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
};

/* JCHCT */
static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},	/* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}	/* 133MHz */
};

/* DMA transfer mode table */
/* JCHDCTM/JCHDCTS */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},	/* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}	/* 133MHz */
};

/* JCSTWTM/JCSTWTS */
static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},	/* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCTSS */
static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},	/* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}	/* 133MHz */
};

/* JCENVT */
static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}	/* 133MHz */
};
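
/*
 * Note on the tables above: the first index selects the ATA clock
 * (0 = 100MHz, 1 = 133MHz, as indicated by the CCKCTRL_ATACLKOEN bit),
 * and the second index is the transfer mode number (the PIO mode in
 * scc_set_pio_mode(), or speed - XFER_UDMA_0 in scc_set_dma_mode()).
 */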

static u8 scc_ide_inb(unsigned long port)
{
	u32 data = in_be32((void*)port);
	return (u8)data;
}

static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	out_be32((void *)hwif->io_ports.command_addr, cmd);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

static u8 scc_read_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.status_addr);
}

static u8 scc_read_altstatus(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
}

static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)(hwif->dma_base + 4));
}

static void scc_set_irq(ide_hwif_t *hwif, int on)
{
	u8 ctl = ATA_DEVCTL_OBS;

	if (on == 4) { /* hack for SRST */
		ctl |= 4;
		on &= ~4;
	}

	ctl |= on ? 0 : 2;

	out_be32((void *)hwif->io_ports.ctl_addr, ctl);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

static void scc_ide_outb(u8 addr, unsigned long port)
{
	out_be32((void*)port, addr);
}

static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}

static void
scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}
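
/*
 * The SCC exposes the task file and data registers as 32-bit big-endian
 * MMIO registers spaced 4 bytes apart (scc_ide_setup_pci_device() maps
 * io_ports_array[i] to dma_base + 0x20 + i * 4), so the byte and word
 * accessors above all go through in_be32()/out_be32() and only the low
 * 8 or 16 bits carry device data.
 */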

/**
 * scc_set_pio_mode - set host controller for PIO mode
 * @drive: drive
 * @pio: PIO mode number
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long piosht_port = ctl_base + 0x000;
	unsigned long pioct_port = ctl_base + 0x004;
	unsigned long reg;
	int offset;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1; /* 133MHz */
	} else {
		offset = 0; /* 100MHz */
	}

	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
	out_be32((void __iomem *)piosht_port, reg);
	reg = JCHCTtbl[offset][pio];
	out_be32((void __iomem *)pioct_port, reg);
}

/**
 * scc_set_dma_mode - set host controller for DMA mode
 * @drive: drive
 * @speed: DMA mode
 *
 * Load the timing settings for this device mode into the
 * controller.
 */

static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long mdmact_port = ctl_base + 0x008;
	unsigned long mcrcst_port = ctl_base + 0x00c;
	unsigned long sdmact_port = ctl_base + 0x010;
	unsigned long scrcst_port = ctl_base + 0x014;
	unsigned long udenvt_port = ctl_base + 0x018;
	unsigned long tdvhsel_port = ctl_base + 0x020;
	int is_slave = (&hwif->drives[1] == drive);
	int offset, idx;
	unsigned long reg;
	unsigned long jcactsel;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1; /* 133MHz */
	} else {
		offset = 0; /* 100MHz */
	}

	idx = speed - XFER_UDMA_0;

	jcactsel = JCACTSELtbl[offset][idx];
	if (is_slave) {
		out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
		jcactsel = jcactsel << 2;
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
	} else {
		out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
	}
	reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
	out_be32((void __iomem *)udenvt_port, reg);
}

static void scc_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = (drive->select.b.unit & 0x01);
	u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	scc_ide_outb(dma_stat, hwif->dma_base + 4);
}
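
/*
 * In the bus-master DMA status register at dma_base + 4, bits 5 and 6
 * are the per-drive "DMA capable" flags of the usual SFF-8038i layout,
 * which is why scc_dma_host_set() toggles bit (5 + unit) for drive 0/1.
 */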

/**
 * scc_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

static int scc_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

	/* specify r/w */
	out_be32((void __iomem *)hwif->dma_base, reading);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));

	/* clear INTR & ERROR flags */
	out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
	drive->waiting_for_dma = 1;

	return 0;
}
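
/*
 * The bus-master DMA block follows the conventional SFF-8038i register
 * layout throughout this driver: dma_base + 0 is the command register,
 * dma_base + 4 the status register and dma_base + 8 the PRD table
 * address, as programmed by scc_dma_setup() above and by
 * scc_dma_start()/__scc_dma_end() below.
 */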

static void scc_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd = scc_ide_inb(hwif->dma_base);

	/* start DMA */
	scc_ide_outb(dma_cmd | 1, hwif->dma_base);
	hwif->dma = 1;
	wmb();
}

static int __scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat, dma_cmd;

	drive->waiting_for_dma = 0;
	/* get DMA command mode */
	dma_cmd = scc_ide_inb(hwif->dma_base);
	/* stop DMA */
	scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
	/* get DMA status */
	dma_stat = scc_ide_inb(hwif->dma_base + 4);
	/* clear the INTR & ERROR bits */
	scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	hwif->dma = 0;
	wmb();
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}
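
/*
 * In the status byte tested above, bit 0 is "DMA active", bit 1 "error"
 * and bit 2 "interrupt"; a clean completion leaves only bit 2 set, which
 * is the (dma_stat & 7) != 4 check at the end of __scc_dma_end().
 */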

/**
 * scc_dma_end - Stop DMA
 * @drive: IDE drive
 *
 * Check and clear INT Status register.
 * Then call __scc_dma_end().
 */

static int scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	void __iomem *dma_base = (void __iomem *)hwif->dma_base;
	unsigned long intsts_port = hwif->dma_base + 0x014;
	u32 reg;
	int dma_stat, data_loss = 0;
	static int retry = 0;

	/* errata A308 workaround: Step5 (check data loss) */
	/* We don't check non ide_disk because it is limited to UDMA4 */
	if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	      & ERR_STAT) &&
	    drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
		reg = in_be32((void __iomem *)intsts_port);
		if (!(reg & INTSTS_ACTEINT)) {
			printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
			       drive->name);
			data_loss = 1;
			if (retry++) {
				struct request *rq = HWGROUP(drive)->rq;
				int unit;
				/* ERROR_RESET and drive->crc_count are needed
				 * to reduce DMA transfer mode in retry process.
				 */
				if (rq)
					rq->errors |= ERROR_RESET;
				for (unit = 0; unit < MAX_DRIVES; unit++) {
					ide_drive_t *drive = &hwif->drives[unit];
					drive->crc_count++;
				}
			}
		}
	}

	while (1) {
		reg = in_be32((void __iomem *)intsts_port);

		if (reg & INTSTS_SERROR) {
			printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_PRERR) {
			u32 maea0, maec0;
			unsigned long ctl_base = hwif->config_data;

			maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
			maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

			printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

			out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_RERR) {
			printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_ICERR) {
			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);

			printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
			continue;
		}

		if (reg & INTSTS_BMSINT) {
			printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);

			ide_do_reset(drive);
			continue;
		}

		if (reg & INTSTS_BMHE) {
			out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
			continue;
		}

		if (reg & INTSTS_ACTEINT) {
			out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
			continue;
		}

		if (reg & INTSTS_IOIRQS) {
			out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
			continue;
		}
		break;
	}

	dma_stat = __scc_dma_end(drive);
	if (data_loss)
		dma_stat |= 2; /* emulate DMA error (to retry command) */
	return dma_stat;
}

/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

	/* SCC errata A252,A308 workaround: Step4 */
	if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	     & ERR_STAT) &&
	    (int_stat & INTSTS_INTRQ))
		return 1;

	/* SCC errata A308 workaround: Step5 (polling IOIRQS) */
	if (int_stat & INTSTS_IOIRQS)
		return 1;

	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
		       drive->name, __func__);

	return 0;
}

static u8 scc_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mask = hwif->ultra_mask;

	/* errata A308 workaround: limit non ide_disk drive to UDMA4 */
	if ((drive->media != ide_disk) && (mask & 0xE0)) {
		printk(KERN_INFO "%s: limit %s to UDMA4\n",
		       SCC_PATA_NAME, drive->name);
		mask = ATA_UDMA4;
	}

	return mask;
}

/**
 * setup_mmio_scc - map CTRL/BMID region
 * @dev: PCI device we are configuring
 * @name: device name
 *
 */

static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
	unsigned long ctl_base = pci_resource_start(dev, 0);
	unsigned long dma_base = pci_resource_start(dev, 1);
	unsigned long ctl_size = pci_resource_len(dev, 0);
	unsigned long dma_size = pci_resource_len(dev, 1);
	void __iomem *ctl_addr;
	void __iomem *dma_addr;
	int i, ret;

	for (i = 0; i < MAX_HWIFS; i++) {
		if (scc_ports[i].ctl == 0)
			break;
	}
	if (i >= MAX_HWIFS)
		return -ENOMEM;

	ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
	if (ret < 0) {
		printk(KERN_ERR "%s: can't reserve resources\n", name);
		return ret;
	}

	if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
		goto fail_0;

	if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
		goto fail_1;

	pci_set_master(dev);
	scc_ports[i].ctl = (unsigned long)ctl_addr;
	scc_ports[i].dma = (unsigned long)dma_addr;
	pci_set_drvdata(dev, (void *) &scc_ports[i]);

	return 1;

 fail_1:
	iounmap(ctl_addr);
 fail_0:
	return -ENOMEM;
}

static int scc_ide_setup_pci_device(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	ide_hwif_t *hwif = NULL;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
	int i;

	hwif = ide_find_port_slot(d);
	if (hwif == NULL)
		return -ENOMEM;

	memset(&hw, 0, sizeof(hw));
	for (i = 0; i <= 8; i++)
		hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;
	hw.chipset = ide_pci;

	idx[0] = hwif->index;

	ide_device_add(idx, d, hws);

	return 0;
}

/**
 * init_setup_scc - set up an SCC PATA Controller
 * @dev: PCI device
 * @d: IDE port info
 *
 * Perform the initial set up for this device.
 */

static int __devinit init_setup_scc(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	unsigned long ctl_base;
	unsigned long dma_base;
	unsigned long cckctrl_port;
	unsigned long intmask_port;
	unsigned long mode_port;
	unsigned long ecmode_port;
	unsigned long dma_status_port;
	u32 reg = 0;
	struct scc_ports *ports;
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto end;

	rc = setup_mmio_scc(dev, d->name);
	if (rc < 0)
		goto end;

	ports = pci_get_drvdata(dev);
	ctl_base = ports->ctl;
	dma_base = ports->dma;
	cckctrl_port = ctl_base + 0xff0;
	intmask_port = dma_base + 0x010;
	mode_port = ctl_base + 0x024;
	ecmode_port = ctl_base + 0xf00;
	dma_status_port = dma_base + 0x004;

	/* controller initialization */
	reg = 0;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_ATACLKOEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_CRST;
	out_be32((void*)cckctrl_port, reg);

	for (;;) {
		reg = in_be32((void*)cckctrl_port);
		if (reg & CCKCTRL_CRST)
			break;
		udelay(5000);
	}

	reg |= CCKCTRL_ATARESET;
	out_be32((void*)cckctrl_port, reg);

	out_be32((void*)ecmode_port, ECMODE_VALUE);
	out_be32((void*)mode_port, MODE_JCUSFEN);
	out_be32((void*)intmask_port, INTMASK_MSK);

	rc = scc_ide_setup_pci_device(dev, d);

 end:
	return rc;
}

static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		out_be32((void *)io_ports->data_addr,
			 (tf->hob_data << 8) | tf->data);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		scc_ide_outb(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		scc_ide_outb(tf->lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		scc_ide_outb((tf->device & HIHI) | drive->select.all,
			     io_ports->device_addr);
}

static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = (u16)in_be32((void *)io_ports->data_addr);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

	if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
		tf->feature = scc_ide_inb(io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect = scc_ide_inb(io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal = scc_ide_inb(io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam = scc_ide_inb(io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah = scc_ide_inb(io_ports->lbah_addr);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = scc_ide_inb(io_ports->device_addr);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
	}
}
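
/*
 * scc_tf_read() uses bit 7 of the device control value to switch between
 * the two halves of the LBA48 task file: it clears the bit before reading
 * the current registers and sets it before reading the hob_* fields,
 * following the ATA HOB (high order byte) convention.
 */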

static void scc_input_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_insl(data_addr, buf, len / 4);
		if ((len & 3) >= 2)
			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_insw(data_addr, buf, len / 2);
}

static void scc_output_data(ide_drive_t *drive, struct request *rq,
			    void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_outsl(data_addr, buf, len / 4);
		if ((len & 3) >= 2)
			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_outsw(data_addr, buf, len / 2);
}

/**
 * init_mmio_iops_scc - set up the iops for MMIO
 * @hwif: interface to set up
 *
 */

static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	struct scc_ports *ports = pci_get_drvdata(dev);
	unsigned long dma_base = ports->dma;

	ide_set_hwifdata(hwif, ports);

	hwif->dma_base = dma_base;
	hwif->config_data = ports->ctl;
}

/**
 * init_iops_scc - set up iops
 * @hwif: interface to set up
 *
 * Do the basic setup for the SCC hardware interface
 * and then do the MMIO setup.
 */

static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	hwif->hwif_data = NULL;
	if (pci_get_drvdata(dev) == NULL)
		return;
	init_mmio_iops_scc(hwif);
}

static u8 __devinit scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}

/**
 * init_hwif_scc - set up hwif
 * @hwif: interface to set up
 *
 * We do the basic set up of the interface structure. The SCC
 * requires several custom handlers so we override the default
 * ide DMA handlers appropriately.
 */

static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
	struct scc_ports *ports = ide_get_hwifdata(hwif);

	ports->hwif = hwif;

	/* PTERADD */
	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

	if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
		hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
	else
		hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
}

static const struct ide_tp_ops scc_tp_ops = {
	.exec_command = scc_exec_command,
	.read_status = scc_read_status,
	.read_altstatus = scc_read_altstatus,
	.read_sff_dma_status = scc_read_sff_dma_status,

	.set_irq = scc_set_irq,

	.tf_load = scc_tf_load,
	.tf_read = scc_tf_read,

	.input_data = scc_input_data,
	.output_data = scc_output_data,
};

static const struct ide_port_ops scc_port_ops = {
	.set_pio_mode = scc_set_pio_mode,
	.set_dma_mode = scc_set_dma_mode,
	.udma_filter = scc_udma_filter,
	.cable_detect = scc_cable_detect,
};

static const struct ide_dma_ops scc_dma_ops = {
	.dma_host_set = scc_dma_host_set,
	.dma_setup = scc_dma_setup,
	.dma_exec_cmd = ide_dma_exec_cmd,
	.dma_start = scc_dma_start,
	.dma_end = scc_dma_end,
	.dma_test_irq = scc_dma_test_irq,
	.dma_lost_irq = ide_dma_lost_irq,
	.dma_timeout = ide_dma_timeout,
};

#define DECLARE_SCC_DEV(name_str)			\
	{						\
		.name		= name_str,		\
		.init_iops	= init_iops_scc,	\
		.init_hwif	= init_hwif_scc,	\
		.tp_ops		= &scc_tp_ops,		\
		.port_ops	= &scc_port_ops,	\
		.dma_ops	= &scc_dma_ops,		\
		.host_flags	= IDE_HFLAG_SINGLE,	\
		.pio_mask	= ATA_PIO4,		\
	}
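
/*
 * DECLARE_SCC_DEV() builds a struct ide_port_info initializer that wires
 * in the custom tp_ops/port_ops/dma_ops defined above; IDE_HFLAG_SINGLE
 * marks this as a single-port host and ATA_PIO4 advertises PIO modes 0-4.
 */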

static const struct ide_port_info scc_chipsets[] __devinitdata = {
	/* 0 */ DECLARE_SCC_DEV("sccIDE"),
};

/**
 * scc_init_one - pci layer discovery entry
 * @dev: PCI device
 * @id: ident table entry
 *
 * Called by the PCI code when it finds an SCC PATA controller.
 * We then use the IDE PCI generic helper to do most of the work.
 */

static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}

/**
 * scc_remove - pci layer remove entry
 * @dev: PCI device
 *
 * Called by the PCI code when it removes an SCC PATA controller.
 */

static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	ide_hwif_t *hwif = ports->hwif;

	if (hwif->dmatable_cpu) {
		pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}

	ide_unregister(hwif);

	iounmap((void*)ports->dma);
	iounmap((void*)ports->ctl);
	pci_release_selected_regions(dev, (1 << 2) - 1);
	memset(ports, 0, sizeof(*ports));
}

static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver driver = {
	.name = "SCC IDE",
	.id_table = scc_pci_tbl,
	.probe = scc_init_one,
	.remove = scc_remove,
};

static int scc_ide_init(void)
{
	return ide_pci_register_driver(&driver);
}

module_init(scc_ide_init);

/* -- No exit code?
static void scc_ide_exit(void)
{
	ide_pci_unregister_driver(&driver);
}
module_exit(scc_ide_exit);
*/

MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");