sata_mv.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827
  1. /*
  2. * sata_mv.c - Marvell SATA support
  3. *
  4. * Copyright 2005: EMC Corporation, all rights reserved.
  5. *
  6. * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; version 2 of the License.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/kernel.h>
  23. #include <linux/module.h>
  24. #include <linux/pci.h>
  25. #include <linux/init.h>
  26. #include <linux/blkdev.h>
  27. #include <linux/delay.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/sched.h>
  30. #include <linux/dma-mapping.h>
  31. #include "scsi.h"
  32. #include <scsi/scsi_host.h>
  33. #include <linux/libata.h>
  34. #include <asm/io.h>
/* driver identity: used in printk tags, PCI region ownership, sysfs */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.12"
/* Register offsets, bit definitions, host flags, and chip variants.
 * The chip's MMIO space is carved into 64KB major areas (PCI regs,
 * one area per SATA host controller) and 8KB minor areas (arbiter,
 * one per port).
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_Q_CT			= 32,	/* request/response queue depth */
	MV_CRQB_SZ		= 32,	/* command request block size */
	MV_CRPB_SZ		= 8,	/* command response block size */

	MV_DMA_BOUNDARY		= 0xffffffffU,
	/* mask a port MMIO address down to its SATAHC base */
	SATAHC_MASK		= (~(MV_SATAHC_REG_SZ - 1)),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags (stored alongside ATA_FLAG_* in ap->flags) */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_FLAG_BDMA		= (1 << 28),	/* Basic DMA */

	/* chip variants, indexed by mv_pci_tbl driver_data */
	chip_504x		= 0,
	chip_508x		= 1,
	chip_604x		= 2,
	chip_608x		= 3,

	/* PCI interface registers */
	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	/* interrupts we leave masked in mv_host_init() */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,
	HC_IRQ_CAUSE_OFS	= 0x14,
	CRBP_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers (taskfile access in PIO mode) */
	SHD_PIO_DATA_OFS	= 0x100,
	SHD_FEA_ERR_OFS		= 0x104,
	SHD_SECT_CNT_OFS	= 0x108,
	SHD_LBA_L_OFS		= 0x10C,
	SHD_LBA_M_OFS		= 0x110,
	SHD_LBA_H_OFS		= 0x114,
	SHD_DEV_HD_OFS		= 0x118,
	SHD_CMD_STA_OFS		= 0x11C,
	SHD_CTL_AST_OFS		= 0x120,

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300, /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	/* Port registers */
	EDMA_CFG_OFS		= 0,

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	/* NOTE(review): (1 << 31) overflows signed int in an enum —
	 * presumably intended as the unsigned top bit; confirm vs datasheet.
	 */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	/* errors that trigger a PHY reset in mv_err_intr() */
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	/* BDMA is 6xxx part only */
	BDMA_CMD_OFS		= 0x224,
	BDMA_START		= (1 << 0),

	MV_UNDEF		= 0,	/* "not yet implemented" marker */
};
/* per-port private data: empty placeholder until EDMA queue support lands */
struct mv_port_priv {
};
/* per-host private data: empty placeholder, allocated in mv_init_one() */
struct mv_host_priv {
};
/* forward declarations for the libata callbacks and init entry points */
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static int mv_master_reset(void __iomem *mmio_base);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
/* SCSI host template: generic ata_scsi_* entry points throughout.
 * sg_tablesize and use_clustering are MV_UNDEF (0) for now — real
 * values await EDMA command support.
 */
static Scsi_Host_Template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_UNDEF,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= MV_UNDEF,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
	.ordered_flush		= 1,
};
/* libata port operations: taskfile access goes through the generic
 * ata_* helpers (the shadow block registers are memory-mapped, see
 * mv_port_init()); interrupt, SCR access, and PHY reset are
 * Marvell-specific.
 */
static struct ata_port_operations mv_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.eng_timeout		= ata_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_host_stop,
};
/* per-chip-variant capabilities, indexed by chip_504x..chip_608x */
static struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC |
				   MV_FLAG_BDMA),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
};
/* supported PCI IDs; driver_data selects the mv_port_info[] entry */
static struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
	{}			/* terminate list */
};
/* PCI driver glue; removal is handled entirely by the generic helper */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
  256. /*
  257. * Functions
  258. */
/* Write a register, then read it back so the PCI posted write is
 * forced out to the device before we continue ("write and flush").
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
  264. static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
  265. {
  266. return ((void __iomem *)((unsigned long)port_mmio &
  267. (unsigned long)SATAHC_MASK));
  268. }
  269. static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
  270. {
  271. return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
  272. }
  273. static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
  274. {
  275. return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
  276. MV_SATAHC_ARBTR_REG_SZ +
  277. ((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
  278. }
  279. static inline void __iomem *mv_ap_base(struct ata_port *ap)
  280. {
  281. return mv_port_base(ap->host_set->mmio_base, ap->port_no);
  282. }
  283. static inline int mv_get_hc_count(unsigned long flags)
  284. {
  285. return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1);
  286. }
  287. static inline int mv_is_edma_active(struct ata_port *ap)
  288. {
  289. void __iomem *port_mmio = mv_ap_base(ap);
  290. return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
  291. }
  292. static inline int mv_port_bdma_capable(struct ata_port *ap)
  293. {
  294. return (ap->flags & MV_FLAG_BDMA);
  295. }
/* libata ->irq_clear hook: intentionally empty — interrupt cause
 * registers are cleared directly in mv_host_intr()/mv_err_intr().
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
  299. static unsigned int mv_scr_offset(unsigned int sc_reg_in)
  300. {
  301. unsigned int ofs;
  302. switch (sc_reg_in) {
  303. case SCR_STATUS:
  304. case SCR_CONTROL:
  305. case SCR_ERROR:
  306. ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
  307. break;
  308. case SCR_ACTIVE:
  309. ofs = SATA_ACTIVE_OFS; /* active is not with the others */
  310. break;
  311. default:
  312. ofs = 0xffffffffU;
  313. break;
  314. }
  315. return ofs;
  316. }
  317. static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
  318. {
  319. unsigned int ofs = mv_scr_offset(sc_reg_in);
  320. if (0xffffffffU != ofs) {
  321. return readl(mv_ap_base(ap) + ofs);
  322. } else {
  323. return (u32) ofs;
  324. }
  325. }
  326. static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  327. {
  328. unsigned int ofs = mv_scr_offset(sc_reg_in);
  329. if (0xffffffffU != ofs) {
  330. writelfl(val, mv_ap_base(ap) + ofs);
  331. }
  332. }
  333. static int mv_master_reset(void __iomem *mmio_base)
  334. {
  335. void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
  336. int i, rc = 0;
  337. u32 t;
  338. VPRINTK("ENTER\n");
  339. /* Following procedure defined in PCI "main command and status
  340. * register" table.
  341. */
  342. t = readl(reg);
  343. writel(t | STOP_PCI_MASTER, reg);
  344. for (i = 0; i < 100; i++) {
  345. msleep(10);
  346. t = readl(reg);
  347. if (PCI_MASTER_EMPTY & t) {
  348. break;
  349. }
  350. }
  351. if (!(PCI_MASTER_EMPTY & t)) {
  352. printk(KERN_ERR DRV_NAME "PCI master won't flush\n");
  353. rc = 1; /* broken HW? */
  354. goto done;
  355. }
  356. /* set reset */
  357. i = 5;
  358. do {
  359. writel(t | GLOB_SFT_RST, reg);
  360. t = readl(reg);
  361. udelay(1);
  362. } while (!(GLOB_SFT_RST & t) && (i-- > 0));
  363. if (!(GLOB_SFT_RST & t)) {
  364. printk(KERN_ERR DRV_NAME "can't set global reset\n");
  365. rc = 1; /* broken HW? */
  366. goto done;
  367. }
  368. /* clear reset */
  369. i = 5;
  370. do {
  371. writel(t & ~GLOB_SFT_RST, reg);
  372. t = readl(reg);
  373. udelay(1);
  374. } while ((GLOB_SFT_RST & t) && (i-- > 0));
  375. if (GLOB_SFT_RST & t) {
  376. printk(KERN_ERR DRV_NAME "can't clear global reset\n");
  377. rc = 1; /* broken HW? */
  378. }
  379. done:
  380. VPRINTK("EXIT, rc = %i\n", rc);
  381. return rc;
  382. }
/* mv_err_intr - handle an EDMA error interrupt on one port
 * @ap: port that raised the error (must not be NULL)
 *
 * Reads the EDMA error cause, clears SError first if it contributed,
 * then clears the EDMA error cause register, and finally resets the
 * PHY for fatal errors.  Called from mv_host_intr() with the
 * host_set lock held.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio;
	u32 edma_err_cause, serr = 0;

	/* bug here b/c we got an err int on a port we don't know about,
	 * so there's no way to clear it
	 */
	BUG_ON(NULL == ap);
	port_mmio = mv_ap_base(ap);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		/* read-then-write-back clears the SError bits */
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n",
		ap->port_no, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_phy_reset(ap);
	}
}
/* mv_host_intr - service all pending interrupts for one SATAHC
 * @host_set: libata host set
 * @relevant: this HC's slice of the main IRQ cause register
 * @hc: host controller index (0 or 1)
 *
 * Clears the HC-level IRQ cause, then walks the HC's four ports:
 * reads ATA status (which also clears the ATA INTRQ), dispatches
 * error interrupts to mv_err_intr(), and completes the active
 * command on each signalled port.  Called with host_set lock held.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port;
	u8 ata_status;

	/* HC1's ports are numbered 4-7 in the host set */
	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		ap = host_set->ports[port];
		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		ata_status = 0xffU;

		if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) {
			BUG_ON(NULL == ap);
			/* rcv'd new resp, basic DMA complete, or ATA IRQ */
			/* This is needed to clear the ATA INTRQ.
			 * FIXME: don't read the status reg in EDMA mode!
			 */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
		}

		/* error bits come in pairs per port... */
		shift = port * 2;
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			/* FIXME: smart to OR in ATA_ERR? */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr) | ATA_ERR;
		}

		if (ap) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (NULL != qc) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				BUG_ON(0xffU == ata_status);
				/* mark qc status appropriately */
				ata_qc_complete(qc, ata_status);
			}
		}
	}
	VPRINTK("EXIT\n");
}
/* mv_interrupt - top-level (shared) interrupt handler
 * @irq: IRQ number (unused beyond the standard signature)
 * @dev_instance: the ata_host_set registered at probe time
 * @regs: saved CPU registers (unused)
 *
 * Reads the main IRQ cause register, bails out on "nothing pending"
 * or an all-ones read (HW removal / PCI fault), then dispatches each
 * host controller's slice of the cause bits to mv_host_intr() under
 * the host_set lock.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio;
	u32 irq_stat;

	mmio = host_set->mmio_base;
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled = 1;
		}
	}
	if (PCI_ERR & irq_stat) {
		/* FIXME: these are all masked by default, but still need
		 * to recover from them properly.
		 */
	}

	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
/* mv_phy_reset - reset the SATA PHY on one port and classify the device
 * @ap: port to reset
 *
 * Quiesces any active EDMA (or BDMA on 6xxx parts), pulses ATA_RST
 * in the EDMA command register, brings up communications via
 * __sata_phy_reset(), then reads the signature from the shadow
 * registers to classify the attached device.  Disables the port if
 * no device is present.
 */
static void mv_phy_reset(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	u32 edma = 0, bdma;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	edma = readl(port_mmio + EDMA_CMD_OFS);
	if (EDMA_EN & edma) {
		/* disable EDMA if active */
		edma &= ~EDMA_EN;
		writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
		udelay(1);
	} else if (mv_port_bdma_capable(ap) &&
		   (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
		/* disable BDMA if active */
		writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
	}

	writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS);

	VPRINTK("Done.  Now calling __sata_phy_reset()\n");

	/* proceed to init communications via the scr_control reg */
	__sata_phy_reset(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
		VPRINTK("Port disabled pre-sig.  Exiting.\n");
		return;
	}

	/* read device signature from the shadow registers; only these
	 * four fields of tf are consumed by ata_dev_classify()
	 */
	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}
	VPRINTK("EXIT\n");
}
/* mv_port_init - set up one port's ioaddr block and unmask its errors
 * @port: libata ioports structure to fill in
 * @base: the port's MMIO register base (as an unsigned long)
 *
 * Points the taskfile shadow-register addresses at the memory-mapped
 * shadow block so the generic ata_tf_* helpers work, then unmasks
 * all EDMA error interrupts for the port.
 */
static void mv_port_init(struct ata_ioports *port, unsigned long base)
{
	/* PIO related setup */
	port->data_addr = base + SHD_PIO_DATA_OFS;
	port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS;
	port->nsect_addr = base + SHD_SECT_CNT_OFS;
	port->lbal_addr = base + SHD_LBA_L_OFS;
	port->lbam_addr = base + SHD_LBA_M_OFS;
	port->lbah_addr = base + SHD_LBA_H_OFS;
	port->device_addr = base + SHD_DEV_HD_OFS;
	port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS;
	port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS;

	/* unused */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* unmask all EDMA error interrupts */
	writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl((void __iomem *)base + EDMA_CFG_OFS),
		readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS),
		readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS));
}
/* mv_host_init - bring the whole controller to a known, enabled state
 * @probe_ent: probe entry with mmio_base and host_flags filled in
 *
 * Performs the global soft reset, initializes each port's ioaddr
 * block, then unmasks the main-cause and PCI interrupt registers.
 *
 * Returns 0 on success, 1 if the master reset failed.
 */
static int mv_host_init(struct ata_probe_ent *probe_ent)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	void __iomem *port_mmio;

	if (mv_master_reset(probe_ent->mmio_base)) {
		rc = 1;
		goto done;
	}

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++) {
		port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc,
			readl(mv_hc_base(mmio, hc) + HC_CFG_OFS),
			readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS));
	}

	/* unmask everything except the IRQs we deliberately leave masked */
	writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
	writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/* mv_init_one - PCI probe entry point
 * @pdev: PCI device being probed
 * @ent: matching entry from mv_pci_tbl; driver_data is the chip index
 *
 * Enables the device, claims its regions, maps BAR0, allocates the
 * probe entry and host-private data, initializes the adapter via
 * mv_host_init(), and registers with libata.  Unwinds via the
 * goto-cleanup chain on any failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0;	/* set once regions are owned elsewhere */
	int rc;

	if (!printed_version++) {
		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
	}
	VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;	/* don't disable the device below */
		goto err_out;
	}

	pci_intx(pdev, 1);

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR),
				    pci_resource_len(pdev, MV_PRIMARY_BAR));
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	/* copy the chosen chip variant's capabilities */
	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_host_init(probe_ent);
	if (rc) {
		goto err_out_hpriv;
	}
/*	mv_print_info(probe_ent); */

	{
		/* debug dump of the first 0x40 bytes of PCI config space */
		int b, w;
		u32 dw[4];	/* hold a line of 16b */
		VPRINTK("PCI config space:\n");
		for (b = 0; b < 0x40; ) {
			for (w = 0; w < 4; w++) {
				(void) pci_read_config_dword(pdev,b,&dw[w]);
				b += sizeof(*dw);
			}
			VPRINTK("%08x %08x %08x %08x\n",
				dw[0],dw[1],dw[2],dw[3]);
		}
	}

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);	/* libata copied what it needs */

	return 0;

err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	iounmap(mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}
  682. static int __init mv_init(void)
  683. {
  684. return pci_module_init(&mv_pci_driver);
  685. }
/* Module exit: unregister from the PCI core; per-device teardown runs
 * through ata_pci_remove_one().
 */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
/* module metadata and entry/exit registration */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);

module_init(mv_init);
module_exit(mv_exit);