/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "0.25"
enum {
        /* BARs are enumerated in terms of pci_resource_start() */
        MV_PRIMARY_BAR          = 0,    /* offset 0x10: memory space */
        MV_IO_BAR               = 2,    /* offset 0x18: IO space */
        MV_MISC_BAR             = 3,    /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ    = 0x10000,      /* 64KB */
        MV_MINOR_REG_AREA_SZ    = 0x2000,       /* 8KB */

        MV_PCI_REG_BASE         = 0,
        MV_IRQ_COAL_REG_BASE    = 0x18000,      /* 6xxx part only */
        MV_SATAHC0_REG_BASE     = 0x20000,

        MV_PCI_REG_SZ           = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ        = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,        /* arbiter */
        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,

        MV_USE_Q_DEPTH          = ATA_DEF_QUEUE,

        MV_MAX_Q_DEPTH          = 32,
        MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT            = 176,
        MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
        MV_PORT_PRIV_DMA_SZ     = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

        /* Our DMA boundary is determined by an ePRD being unable to handle
         * anything larger than 64KB
         */
        MV_DMA_BOUNDARY         = 0xffffU,

        MV_PORTS_PER_HC         = 4,
        /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
        MV_PORT_HC_SHIFT        = 2,
        /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
        MV_PORT_MASK            = 3,

        /* Host Flags */
        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
        MV_FLAG_GLBL_SFT_RST    = (1 << 28),  /* Global Soft Reset support */
        MV_COMMON_FLAGS         = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
        MV_6XXX_FLAGS           = (MV_FLAG_IRQ_COALESCE |
                                   MV_FLAG_GLBL_SFT_RST),

        chip_504x               = 0,
        chip_508x               = 1,
        chip_604x               = 2,
        chip_608x               = 3,

        CRQB_FLAG_READ          = (1 << 0),
        CRQB_TAG_SHIFT          = 1,
        CRQB_CMD_ADDR_SHIFT     = 8,
        CRQB_CMD_CS             = (0x2 << 11),
        CRQB_CMD_LAST           = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT  = 8,

        EPRD_FLAG_END_OF_TBL    = (1 << 31),

        /* PCI interface registers */

        PCI_COMMAND_OFS         = 0xc00,

        PCI_MAIN_CMD_STS_OFS    = 0xd30,
        STOP_PCI_MASTER         = (1 << 2),
        PCI_MASTER_EMPTY        = (1 << 3),
        GLOB_SFT_RST            = (1 << 4),

        PCI_IRQ_CAUSE_OFS       = 0x1d58,
        PCI_IRQ_MASK_OFS        = 0x1d5c,
        PCI_UNMASK_ALL_IRQS     = 0x7fffff,     /* bits 22-0 */

        HC_MAIN_IRQ_CAUSE_OFS   = 0x1d60,
        HC_MAIN_IRQ_MASK_OFS    = 0x1d64,
        PORT0_ERR               = (1 << 0),     /* shift by port # */
        PORT0_DONE              = (1 << 1),     /* shift by port # */
        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
        HC_SHIFT                = 9,            /* bits 9-17 = HC1's ports */
        PCI_ERR                 = (1 << 18),
        TRAN_LO_DONE            = (1 << 19),    /* 6xxx: IRQ coalescing */
        TRAN_HI_DONE            = (1 << 20),    /* 6xxx: IRQ coalescing */
        PORTS_0_7_COAL_DONE     = (1 << 21),    /* 6xxx: IRQ coalescing */
        GPIO_INT                = (1 << 22),
        SELF_INT                = (1 << 23),
        TWSI_INT                = (1 << 24),
        HC_MAIN_RSVD            = (0x7f << 25), /* bits 31-25 */
        HC_MAIN_MASKED_IRQS     = (TRAN_LO_DONE | TRAN_HI_DONE |
                                   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                                   HC_MAIN_RSVD),

        /* SATAHC registers */
        HC_CFG_OFS              = 0,

        HC_IRQ_CAUSE_OFS        = 0x14,
        CRPB_DMA_DONE           = (1 << 0),     /* shift by port # */
        HC_IRQ_COAL             = (1 << 4),     /* IRQ coalescing */
        DEV_IRQ                 = (1 << 8),     /* shift by port # */

        /* Shadow block registers */
        SHD_BLK_OFS             = 0x100,
        SHD_CTL_AST_OFS         = 0x20,         /* ofs from SHD_BLK_OFS */

        /* SATA registers */
        SATA_STATUS_OFS         = 0x300,  /* ctrl, err regs follow status */
        SATA_ACTIVE_OFS         = 0x350,

        /* Port registers */
        EDMA_CFG_OFS            = 0,
        EDMA_CFG_Q_DEPTH        = 0,            /* queueing disabled */
        EDMA_CFG_NCQ            = (1 << 5),
        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */

        EDMA_ERR_IRQ_CAUSE_OFS  = 0x8,
        EDMA_ERR_IRQ_MASK_OFS   = 0xc,
        EDMA_ERR_D_PAR          = (1 << 0),
        EDMA_ERR_PRD_PAR        = (1 << 1),
        EDMA_ERR_DEV            = (1 << 2),
        EDMA_ERR_DEV_DCON       = (1 << 3),
        EDMA_ERR_DEV_CON        = (1 << 4),
        EDMA_ERR_SERR           = (1 << 5),
        EDMA_ERR_SELF_DIS       = (1 << 7),
        EDMA_ERR_BIST_ASYNC     = (1 << 8),
        EDMA_ERR_CRBQ_PAR       = (1 << 9),
        EDMA_ERR_CRPB_PAR       = (1 << 10),
        EDMA_ERR_INTRL_PAR      = (1 << 11),
        EDMA_ERR_IORDY          = (1 << 12),
        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),
        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),
        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),
        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21),
        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26),
        EDMA_ERR_TRANS_PROTO    = (1 << 31),
        EDMA_ERR_FATAL          = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
                                   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
                                   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
                                   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
                                   EDMA_ERR_LNK_DATA_RX |
                                   EDMA_ERR_LNK_DATA_TX |
                                   EDMA_ERR_TRANS_PROTO),

        EDMA_REQ_Q_BASE_HI_OFS  = 0x10,
        EDMA_REQ_Q_IN_PTR_OFS   = 0x14,         /* also contains BASE_LO */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        EDMA_REQ_Q_OUT_PTR_OFS  = 0x18,
        EDMA_REQ_Q_PTR_SHIFT    = 5,

        EDMA_RSP_Q_BASE_HI_OFS  = 0x1c,
        EDMA_RSP_Q_IN_PTR_OFS   = 0x20,
        EDMA_RSP_Q_OUT_PTR_OFS  = 0x24,         /* also contains BASE_LO */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
        EDMA_RSP_Q_PTR_SHIFT    = 3,

        EDMA_CMD_OFS            = 0x28,
        EDMA_EN                 = (1 << 0),
        EDMA_DS                 = (1 << 1),
        ATA_RST                 = (1 << 2),

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI          = (1 << 0),

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN      = (1 << 0),
        MV_PP_FLAG_EDMA_DS_ACT  = (1 << 1),
};
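
/* Note on the main IRQ cause layout: each port owns an ERR/DONE bit pair
 * at shift = port * 2, and ports 4-7 skip one extra bit (bit 8); see the
 * shift computation in mv_host_intr().  That is why HC0_IRQ_PEND covers
 * nine bits rather than eight.
 */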
/* Command ReQuest Block: 32B */
struct mv_crqb {
        u32 sg_addr;            /* ePRD (SG) table base, low 32 bits */
        u32 sg_addr_hi;         /* ePRD (SG) table base, high 32 bits */
        u16 ctrl_flags;         /* direction and tag (CRQB_* flags) */
        u16 ata_cmd[11];        /* packed taskfile register writes */
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        u16 id;
        u16 flags;              /* ATA status in bits 15:8 */
        u32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        u32 addr;               /* buffer address, low 32 bits */
        u32 flags_size;         /* byte count; EPRD_FLAG_END_OF_TBL on last */
        u32 addr_hi;            /* buffer address, high 32 bits */
        u32 reserved;
};

struct mv_port_priv {
        struct mv_crqb *crqb;
        dma_addr_t crqb_dma;
        struct mv_crpb *crpb;
        dma_addr_t crpb_dma;
        struct mv_sg *sg_tbl;
        dma_addr_t sg_tbl_dma;

        unsigned req_producer;          /* copy of req_in_ptr */
        unsigned rsp_consumer;          /* copy of rsp_out_ptr */
        u32 pp_flags;
};

struct mv_host_priv {
        u32 hp_flags;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
                                struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static struct scsi_host_template mv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .eh_strategy_handler    = ata_scsi_error,
        .can_queue              = MV_USE_Q_DEPTH,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT,
        .max_sectors            = ATA_MAX_SECTORS,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .bios_param             = ata_std_bios_param,
        .ordered_flush          = 1,
};
static const struct ata_port_operations mv_ops = {
        .port_disable           = ata_port_disable,

        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .phy_reset              = mv_phy_reset,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,

        .eng_timeout            = mv_eng_timeout,

        .irq_handler            = mv_interrupt,
        .irq_clear              = mv_irq_clear,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
        .host_stop              = mv_host_stop,
};

static struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .sht            = &mv_sht,
                .host_flags     = MV_COMMON_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = 0,    /* 0x7f (udma0-6 disabled for now) */
                .port_ops       = &mv_ops,
        },
        {  /* chip_508x */
                .sht            = &mv_sht,
                .host_flags     = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = 0,    /* 0x7f (udma0-6 disabled for now) */
                .port_ops       = &mv_ops,
        },
        {  /* chip_604x */
                .sht            = &mv_sht,
                .host_flags     = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = 0x7f, /* udma0-6 */
                .port_ops       = &mv_ops,
        },
        {  /* chip_608x */
                .sht            = &mv_sht,
                .host_flags     = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                                   MV_FLAG_DUAL_HC),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = 0x7f, /* udma0-6 */
                .port_ops       = &mv_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
        {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

        {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
        {}                      /* terminate list */
};

static struct pci_driver mv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = mv_pci_tbl,
        .probe                  = mv_init_one,
        .remove                 = ata_pci_remove_one,
};
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
                MV_SATAHC_ARBTR_REG_SZ +
                ((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
}
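
/* Example: for port 5 the math above selects HC 1 (5 >> 2) and hard port
 * 1 (5 & 3), giving base + 0x20000 + 0x10000 + 0x2000 + 0x2000, i.e. the
 * port registers live at base + 0x34000.
 */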
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long hp_flags)
{
        return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
        /* Intentionally empty: interrupt causes are read and write-cleared
         * in mv_host_intr() and mv_err_intr() instead.
         */
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with an
 *      assert.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
        if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
                writelfl(EDMA_EN, base + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
        assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
}
/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with an
 *      assert.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 reg;
        int i;

        if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
                /* Disable EDMA if active.  The disable bit auto clears.
                 */
                writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        } else {
                assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
        }

        /* now properly wait for the eDMA to stop */
        for (i = 1000; i > 0; i--) {
                reg = readl(port_mmio + EDMA_CMD_OFS);
                if (!(EDMA_EN & reg)) {
                        break;
                }
                udelay(100);
        }

        if (EDMA_EN & reg) {
                printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
                /* FIXME: Consider doing a reset here to recover */
        }
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;

        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;

        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base;
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (0 > port) {
                start_hc = start_port = 0;
                num_ports = 8;          /* should be benign for 4 port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                start_port + num_ports - 1);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base+0xc00, 0x3c);
        mv_dump_mem(mmio_base+0xd00, 0x34);
        mv_dump_mem(mmio_base+0xf00, 0x4);
        mv_dump_mem(mmio_base+0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base+0x300, 0x60);
        }
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE_OFS;  /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (0xffffffffU != ofs) {
                return readl(mv_ap_base(ap) + ofs);
        } else {
                return (u32) ofs;
        }
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (0xffffffffU != ofs) {
                writelfl(val, mv_ap_base(ap) + ofs);
        }
}
/**
 *      mv_global_soft_reset - Perform the 6xxx global soft reset
 *      @mmio_base: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_global_soft_reset(void __iomem *mmio_base)
{
        void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
        int i, rc = 0;
        u32 t;

        /* Following procedure defined in PCI "main command and status
         * register" table.
         */
        t = readl(reg);
        writel(t | STOP_PCI_MASTER, reg);

        for (i = 0; i < 1000; i++) {
                udelay(1);
                t = readl(reg);
                if (PCI_MASTER_EMPTY & t) {
                        break;
                }
        }
        if (!(PCI_MASTER_EMPTY & t)) {
                printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
                rc = 1;
                goto done;
        }

        /* set reset */
        i = 5;
        do {
                writel(t | GLOB_SFT_RST, reg);
                t = readl(reg);
                udelay(1);
        } while (!(GLOB_SFT_RST & t) && (i-- > 0));

        if (!(GLOB_SFT_RST & t)) {
                printk(KERN_ERR DRV_NAME ": can't set global reset\n");
                rc = 1;
                goto done;
        }

        /* clear reset and *reenable the PCI master* (not mentioned in spec) */
        i = 5;
        do {
                writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
                t = readl(reg);
                udelay(1);
        } while ((GLOB_SFT_RST & t) && (i-- > 0));

        if (GLOB_SFT_RST & t) {
                printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
                rc = 1;
        }
done:
        return rc;
}
/**
 *      mv_host_stop - Host specific cleanup/stop routine.
 *      @host_set: host data structure
 *
 *      Disable ints, cleanup host memory, call general purpose
 *      host_stop.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
        struct mv_host_priv *hpriv = host_set->private_data;
        struct pci_dev *pdev = to_pci_dev(host_set->dev);

        if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
                pci_disable_msi(pdev);
        } else {
                pci_intx(pdev, 0);
        }
        kfree(hpriv);
        ata_host_stop(host_set);
}

static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
        /* crqb is the base of the single coherent chunk allocated in
         * mv_port_start(), so it (not crpb) must be handed back here.
         */
        dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crqb, pp->crqb_dma);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host_set->dev;
        struct mv_port_priv *pp;
        void __iomem *port_mmio = mv_ap_base(ap);
        void *mem;
        dma_addr_t mem_dma;
        int rc = -ENOMEM;

        pp = kmalloc(sizeof(*pp), GFP_KERNEL);
        if (!pp)
                goto err_out;
        memset(pp, 0, sizeof(*pp));

        mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
                                 GFP_KERNEL);
        if (!mem)
                goto err_out_pp;
        memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

        rc = ata_pad_alloc(ap, dev);
        if (rc)
                goto err_out_priv;

        /* First item in chunk of DMA memory:
         * 32-slot command request table (CRQB), 32 bytes each in size
         */
        pp->crqb = mem;
        pp->crqb_dma = mem_dma;
        mem += MV_CRQB_Q_SZ;
        mem_dma += MV_CRQB_Q_SZ;

        /* Second item:
         * 32-slot command response table (CRPB), 8 bytes each in size
         */
        pp->crpb = mem;
        pp->crpb_dma = mem_dma;
        mem += MV_CRPB_Q_SZ;
        mem_dma += MV_CRPB_Q_SZ;

        /* Third item:
         * Table of scatter-gather descriptors (ePRD), 16 bytes each
         */
        pp->sg_tbl = mem;
        pp->sg_tbl_dma = mem_dma;

        writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
                 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);

        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
        writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

        pp->req_producer = pp->rsp_consumer = 0;

        /* Don't turn on EDMA here...do it before DMA commands only.  Else
         * we'll be unable to send non-data, PIO, etc due to restricted access
         * to shadow regs.
         */
        ap->private_data = pp;
        return 0;

err_out_priv:
        /* pp->crqb is not yet set up here, so hand the chunk back directly */
        dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, mem, mem_dma);
err_out_pp:
        kfree(pp);
err_out:
        return rc;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
        struct device *dev = ap->host_set->dev;
        struct mv_port_priv *pp = ap->private_data;
        unsigned long flags;

        spin_lock_irqsave(&ap->host_set->lock, flags);
        mv_stop_dma(ap);
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        ap->private_data = NULL;
        ata_pad_free(ap, dev);
        mv_priv_free(pp, dev);
        kfree(pp);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
        struct mv_port_priv *pp = qc->ap->private_data;
        unsigned int i = 0;
        struct scatterlist *sg;

        ata_for_each_sg(sg, qc) {
                u32 sg_len;
                dma_addr_t addr;

                addr = sg_dma_address(sg);
                sg_len = sg_dma_len(sg);

                pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
                pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
                assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
                pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
                if (ata_sg_is_last(sg, qc))
                        pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

                i++;
        }
}
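
/* Bump a queue index, wrapping at MV_MAX_Q_DEPTH, and return the new
 * value; used for both the request producer and the response consumer.
 */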
static inline unsigned mv_inc_q_index(unsigned *index)
{
        *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
        return *index;
}
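
/* Pack one shadow register write into a 16-bit CRQB command word: the
 * data byte in bits 7:0, the register address at CRQB_CMD_ADDR_SHIFT,
 * plus the chip-select bits and, on the final word only, CRQB_CMD_LAST.
 */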
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
        *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                (last ? CRQB_CMD_LAST : 0);
}
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        u16 *cw;
        struct ata_taskfile *tf;
        u16 flags = 0;

        if (ATA_PROT_DMA != qc->tf.protocol) {
                return;
        }

        /* the req producer index should be the same as we remember it */
        assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
                 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               pp->req_producer);

        /* Fill in command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
                flags |= CRQB_FLAG_READ;
        }
        assert(MV_MAX_Q_DEPTH > qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;

        pp->crqb[pp->req_producer].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        pp->crqb[pp->req_producer].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

        cw = &pp->crqb[pp->req_producer].ata_cmd[0];
        tf = &qc->tf;

        /* Sadly, the CRQB cannot accommodate all registers--there are
         * only 11 bytes...so we must pick and choose required
         * registers based on the command.  So, we drop feature and
         * hob_feature for [RW] DMA commands, but they are needed for
         * NCQ.  NCQ will drop hob_nsect.
         */
        switch (tf->command) {
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
                break;
#ifdef LIBATA_NCQ               /* FIXME: remove this line when NCQ added */
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                break;
#endif                          /* FIXME: remove this line when NCQ added */
        default:
                /* The only other commands EDMA supports in non-queued and
                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
                 * of which are defined/used by Linux.  If we get here, this
                 * driver needs work.
                 *
                 * FIXME: modify libata to give qc_prep a return value and
                 * return error here.
                 */
                BUG_ON(tf->command);
                break;
        }
        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */

        if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
                return;
        }
        mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_qc_issue(struct ata_queued_cmd *qc)
{
        void __iomem *port_mmio = mv_ap_base(qc->ap);
        struct mv_port_priv *pp = qc->ap->private_data;
        u32 in_ptr;

        if (ATA_PROT_DMA != qc->tf.protocol) {
                /* We're about to send a non-EDMA capable command to the
                 * port.  Turn off EDMA so there won't be problems accessing
                 * shadow block, etc registers.
                 */
                mv_stop_dma(qc->ap);
                return ata_qc_issue_prot(qc);
        }

        in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        /* the req producer index should be the same as we remember it */
        assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               pp->req_producer);
        /* until we do queuing, the queue should be empty at this point */
        assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
                 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

        mv_inc_q_index(&pp->req_producer);      /* now incr producer index */

        mv_start_dma(port_mmio, pp);

        /* and write the request in pointer to kick the EDMA to life */
        in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
        in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
        writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        return 0;
}
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We assert indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 out_ptr;

        out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

        /* the response consumer index should be the same as we remember it */
        assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               pp->rsp_consumer);

        /* increment our consumer index... */
        pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

        /* and, until we do NCQ, there should only be 1 CRPB waiting */
        assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
                 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               pp->rsp_consumer);

        /* write out our inc'd consumer index so EDMA knows we're caught up */
        out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
        out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
        writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

        /* Return ATA status register for completed CRPB */
        return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
}
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 edma_err_cause, serr = 0;

        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        if (EDMA_ERR_SERR & edma_err_cause) {
                serr = scr_read(ap, SCR_ERROR);
                scr_write_flush(ap, SCR_ERROR, serr);
        }
        if (EDMA_ERR_SELF_DIS & edma_err_cause) {
                struct mv_port_priv *pp = ap->private_data;
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        }
        DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
                "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

        /* Clear EDMA now that SERR cleanup done */
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* check for fatal here and recover if needed */
        if (EDMA_ERR_FATAL & edma_err_cause) {
                mv_phy_reset(ap);
        }
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host_set: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
                         unsigned int hc)
{
        void __iomem *mmio = host_set->mmio_base;
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        struct ata_port *ap;
        struct ata_queued_cmd *qc;
        u32 hc_irq_cause;
        int shift, port, port0, hard_port, handled;
        unsigned int err_mask;
        u8 ata_status = 0;

        if (hc == 0) {
                port0 = 0;
        } else {
                port0 = MV_PORTS_PER_HC;
        }

        /* we'll need the HC success int register in most cases */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        if (hc_irq_cause) {
                writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
                hc, relevant, hc_irq_cause);

        for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
                ap = host_set->ports[port];
                hard_port = port & MV_PORT_MASK;        /* range 0-3 */
                handled = 0;    /* ensure ata_status is set if handled++ */

                if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
                        /* new CRPB on the queue; just one at a time until NCQ
                         */
                        ata_status = mv_get_crpb_status(ap);
                        handled++;
                } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
                        /* received ATA IRQ; read the status reg to clear INTRQ
                         */
                        ata_status = readb((void __iomem *)
                                           ap->ioaddr.status_addr);
                        handled++;
                }

                err_mask = ac_err_mask(ata_status);

                shift = port << 1;              /* (port * 2) */
                if (port >= MV_PORTS_PER_HC) {
                        shift++;        /* skip bit 8 in the HC Main IRQ reg */
                }
                if ((PORT0_ERR << shift) & relevant) {
                        mv_err_intr(ap);
                        err_mask |= AC_ERR_OTHER;
                        handled++;
                }

                if (handled && ap) {
                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (NULL != qc) {
                                VPRINTK("port %u IRQ found for qc, "
                                        "ata_status 0x%x\n", port, ata_status);
                                /* mark qc status appropriately */
                                ata_qc_complete(qc, err_mask);
                        }
                }
        }
        VPRINTK("EXIT\n");
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
                                struct pt_regs *regs)
{
        struct ata_host_set *host_set = dev_instance;
        unsigned int hc, handled = 0, n_hcs;
        void __iomem *mmio = host_set->mmio_base;
        u32 irq_stat;

        irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

        /* check the cases where we either have nothing pending or have read
         * a bogus register value which can indicate HW removal or PCI fault
         */
        if (!irq_stat || (0xffffffffU == irq_stat)) {
                return IRQ_NONE;
        }

        n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
        spin_lock(&host_set->lock);

        for (hc = 0; hc < n_hcs; hc++) {
                u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
                if (relevant) {
                        mv_host_intr(host_set, relevant, hc);
                        handled++;
                }
        }
        if (PCI_ERR & irq_stat) {
                printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
                       readl(mmio + PCI_IRQ_CAUSE_OFS));

                DPRINTK("All regs @ PCI error\n");
                mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

                writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
                handled++;
        }
        spin_unlock(&host_set->lock);

        return IRQ_RETVAL(handled);
}
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct ata_taskfile tf;
        struct ata_device *dev = &ap->device[0];
        unsigned long timeout;

        VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

        mv_stop_dma(ap);

        writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
        udelay(25);             /* allow reset propagation */

        /* Spec never mentions clearing the bit.  Marvell's driver does
         * clear the bit, however.
         */
        writelfl(0, port_mmio + EDMA_CMD_OFS);

        VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
                "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
                mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

        /* proceed to init communications via the scr_control reg */
        scr_write_flush(ap, SCR_CONTROL, 0x301);
        mdelay(1);
        scr_write_flush(ap, SCR_CONTROL, 0x300);
        timeout = jiffies + (HZ * 1);
        do {
                mdelay(10);
                if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
                        break;
        } while (time_before(jiffies, timeout));

        VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
                "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
                mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

        if (sata_dev_present(ap)) {
                ata_port_probe(ap);
        } else {
                printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
                       ap->id, scr_read(ap, SCR_STATUS));
                ata_port_disable(ap);
                return;
        }
        ap->cbl = ATA_CBL_SATA;

        tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
        tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
        tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
        tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

        dev->class = ata_dev_classify(&tf);
        if (!ata_dev_present(dev)) {
                VPRINTK("Port disabled post-sig: No device present.\n");
                ata_port_disable(ap);
        }
        VPRINTK("EXIT\n");
}
/**
 *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *      @ap: ATA channel to manipulate
 *
 *      Intent is to clear all pending error conditions, reset the
 *      chip/bus, fail the command, and move on.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        unsigned long flags;

        printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n", ap->id);
        DPRINTK("All regs @ start of eng_timeout\n");
        mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
                         to_pci_dev(ap->host_set->dev));

        qc = ata_qc_from_tag(ap, ap->active_tag);
        printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
               ap->host_set->mmio_base, ap, qc,
               qc ? qc->scsicmd : NULL,
               qc ? &qc->scsicmd->cmnd : NULL);

        mv_err_intr(ap);
        mv_phy_reset(ap);

        if (!qc) {
                printk(KERN_ERR "ata%u: BUG: timeout without command\n",
                       ap->id);
        } else {
                /* hack alert!  We cannot use the supplied completion
                 * function from inside the ->eh_strategy_handler() thread.
                 * libata is the only user of ->eh_strategy_handler() in
                 * any kernel, so the default scsi_done() assumes it is
                 * not being called from the SCSI EH.
                 */
                spin_lock_irqsave(&ap->host_set->lock, flags);
                qc->scsidone = scsi_finish_command;
                ata_qc_complete(qc, AC_ERR_OTHER);
                spin_unlock_irqrestore(&ap->host_set->lock, flags);
        }
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
        unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
        unsigned serr_ofs;

        /* PIO related setup
         */
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
        port->error_addr =
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
        port->status_addr =
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
        /* special case: control/altstatus doesn't have ATA_REG_ address */
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

        /* unused: */
        port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

        /* Clear any currently outstanding port interrupt conditions */
        serr_ofs = mv_scr_offset(SCR_ERROR);
        writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* unmask all EDMA error interrupts */
        writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                readl(port_mmio + EDMA_CFG_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
/**
 *      mv_host_init - Perform some early initialization of the host.
 *      @probe_ent: early data struct representing the host
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_host_init(struct ata_probe_ent *probe_ent)
{
        int rc = 0, n_hc, port, hc;
        void __iomem *mmio = probe_ent->mmio_base;
        void __iomem *port_mmio;

        if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
            mv_global_soft_reset(probe_ent->mmio_base)) {
                rc = 1;
                goto done;
        }

        n_hc = mv_get_hc_count(probe_ent->host_flags);
        probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

        for (port = 0; port < probe_ent->n_ports; port++) {
                port_mmio = mv_port_base(mmio, port);
                mv_port_init(&probe_ent->port[port], port_mmio);
        }

        for (hc = 0; hc < n_hc; hc++) {
                void __iomem *hc_mmio = mv_hc_base(mmio, hc);

                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
                        "(before clear)=0x%08x\n", hc,
                        readl(hc_mmio + HC_CFG_OFS),
                        readl(hc_mmio + HC_IRQ_CAUSE_OFS));

                /* Clear any currently outstanding hc interrupt conditions */
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        /* Clear any currently outstanding host interrupt conditions */
        writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

        /* and unmask interrupt generation for host regs */
        writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
        writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                "PCI int cause/mask=0x%08x/0x%08x\n",
                readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
                readl(mmio + PCI_IRQ_CAUSE_OFS),
                readl(mmio + PCI_IRQ_MASK_OFS));
done:
        return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @probe_ent: early data struct representing the host
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
        struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
        struct mv_host_priv *hpriv = probe_ent->private_data;
        u8 rev_id, scc;
        const char *scc_s;

        /* Use this to determine the HW stepping of the chip so we know
         * what errata to work around
         */
        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
        if (scc == 0)
                scc_s = "SCSI";
        else if (scc == 0x01)
                scc_s = "RAID";
        else
                scc_s = "unknown";

        dev_printk(KERN_INFO, &pdev->dev,
                   "%u slots %u ports %s mode IRQ via %s\n",
                   (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
                   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version = 0;
        struct ata_probe_ent *probe_ent = NULL;
        struct mv_host_priv *hpriv;
        unsigned int board_idx = (unsigned int)ent->driver_data;
        void __iomem *mmio_base;
        int pci_dev_busy = 0, rc;

        if (!printed_version++)
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pci_enable_device(pdev);
        if (rc) {
                return rc;
        }

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                pci_dev_busy = 1;
                goto err_out;
        }

        probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
        if (probe_ent == NULL) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        memset(probe_ent, 0, sizeof(*probe_ent));
        probe_ent->dev = pci_dev_to_dev(pdev);
        INIT_LIST_HEAD(&probe_ent->node);

        mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
        if (mmio_base == NULL) {
                rc = -ENOMEM;
                goto err_out_free_ent;
        }

        hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv) {
                rc = -ENOMEM;
                goto err_out_iounmap;
        }
        memset(hpriv, 0, sizeof(*hpriv));

        probe_ent->sht = mv_port_info[board_idx].sht;
        probe_ent->host_flags = mv_port_info[board_idx].host_flags;
        probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
        probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
        probe_ent->port_ops = mv_port_info[board_idx].port_ops;

        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = SA_SHIRQ;
        probe_ent->mmio_base = mmio_base;
        probe_ent->private_data = hpriv;

        /* initialize adapter */
        rc = mv_host_init(probe_ent);
        if (rc) {
                goto err_out_hpriv;
        }

        /* Enable interrupts */
        if (pci_enable_msi(pdev) == 0) {
                hpriv->hp_flags |= MV_HP_FLAG_MSI;
        } else {
                pci_intx(pdev, 1);
        }

        mv_dump_pci_cfg(pdev, 0x68);
        mv_print_info(probe_ent);

        if (ata_device_add(probe_ent) == 0) {
                rc = -ENODEV;           /* No devices discovered */
                goto err_out_dev_add;
        }

        kfree(probe_ent);
        return 0;

err_out_dev_add:
        if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
                pci_disable_msi(pdev);
        } else {
                pci_intx(pdev, 0);
        }
err_out_hpriv:
        kfree(hpriv);
err_out_iounmap:
        pci_iounmap(pdev, mmio_base);
err_out_free_ent:
        kfree(probe_ent);
err_out_regions:
        pci_release_regions(pdev);
err_out:
        if (!pci_dev_busy) {
                pci_disable_device(pdev);
        }
        return rc;
}
static int __init mv_init(void)
{
        return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
        pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(mv_init);
module_exit(mv_exit);