sata_mv.c 64 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444
  1. /*
  2. * sata_mv.c - Marvell SATA support
  3. *
  4. * Copyright 2005: EMC Corporation, all rights reserved.
  5. * Copyright 2005 Red Hat, Inc. All rights reserved.
  6. *
  7. * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; version 2 of the License.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. /*
  24. sata_mv TODO list:
  25. 1) Needs a full errata audit for all chipsets. I implemented most
  26. of the errata workarounds found in the Marvell vendor driver, but
  27. I distinctly remember a couple workarounds (one related to PCI-X)
  28. are still needed.
  29. 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane
  30. probing/error handling in general. MUST HAVE.
  31. 3) Add hotplug support (easy, once new-EH support appears)
  32. 4) Add NCQ support (easy to intermediate, once new-EH support appears)
  33. 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
  34. 6) Add port multiplier support (intermediate)
  35. 7) Test and verify 3.0 Gbps support
  36. 8) Develop a low-power-consumption strategy, and implement it.
  37. 9) [Experiment, low priority] See if ATAPI can be supported using
  38. "unknown FIS" or "vendor-specific FIS" support, or something creative
  39. like that.
  40. 10) [Experiment, low priority] Investigate interrupt coalescing.
Quite often, especially with PCI Message Signalled Interrupts (MSI),
the overhead reduced by interrupt mitigation is not worth the
added latency cost.
  44. 11) [Experiment, Marvell value added] Is it possible to use target
  45. mode to cross-connect two Linux boxes with Marvell cards? If so,
  46. creating LibATA target mode support would be very interesting.
  47. Target mode, for those without docs, is the ability to directly
  48. connect two SATA controllers.
  49. 13) Verify that 7042 is fully supported. I only have a 6042.
  50. */
  51. #include <linux/kernel.h>
  52. #include <linux/module.h>
  53. #include <linux/pci.h>
  54. #include <linux/init.h>
  55. #include <linux/blkdev.h>
  56. #include <linux/delay.h>
  57. #include <linux/interrupt.h>
  58. #include <linux/dma-mapping.h>
  59. #include <linux/device.h>
  60. #include <scsi/scsi_host.h>
  61. #include <scsi/scsi_cmnd.h>
  62. #include <linux/libata.h>
  63. #define DRV_NAME "sata_mv"
  64. #define DRV_VERSION "0.81"
  65. enum {
  66. /* BAR's are enumerated in terms of pci_resource_start() terms */
  67. MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
  68. MV_IO_BAR = 2, /* offset 0x18: IO space */
  69. MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
  70. MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
  71. MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
  72. MV_PCI_REG_BASE = 0,
  73. MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
  74. MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
  75. MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
  76. MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
  77. MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
  78. MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
  79. MV_SATAHC0_REG_BASE = 0x20000,
  80. MV_FLASH_CTL = 0x1046c,
  81. MV_GPIO_PORT_CTL = 0x104f0,
  82. MV_RESET_CFG = 0x180d8,
  83. MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  84. MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  85. MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
  86. MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
  87. MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
  88. MV_MAX_Q_DEPTH = 32,
  89. MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
  90. /* CRQB needs alignment on a 1KB boundary. Size == 1KB
  91. * CRPB needs alignment on a 256B boundary. Size == 256B
  92. * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
  93. * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
  94. */
  95. MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
  96. MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
  97. MV_MAX_SG_CT = 176,
  98. MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
  99. MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
  100. MV_PORTS_PER_HC = 4,
  101. /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
  102. MV_PORT_HC_SHIFT = 2,
  103. /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
  104. MV_PORT_MASK = 3,
  105. /* Host Flags */
  106. MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
  107. MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
  108. MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  109. ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
  110. ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
  111. MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
  112. CRQB_FLAG_READ = (1 << 0),
  113. CRQB_TAG_SHIFT = 1,
  114. CRQB_CMD_ADDR_SHIFT = 8,
  115. CRQB_CMD_CS = (0x2 << 11),
  116. CRQB_CMD_LAST = (1 << 15),
  117. CRPB_FLAG_STATUS_SHIFT = 8,
  118. EPRD_FLAG_END_OF_TBL = (1 << 31),
  119. /* PCI interface registers */
  120. PCI_COMMAND_OFS = 0xc00,
  121. PCI_MAIN_CMD_STS_OFS = 0xd30,
  122. STOP_PCI_MASTER = (1 << 2),
  123. PCI_MASTER_EMPTY = (1 << 3),
  124. GLOB_SFT_RST = (1 << 4),
  125. MV_PCI_MODE = 0xd00,
  126. MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
  127. MV_PCI_DISC_TIMER = 0xd04,
  128. MV_PCI_MSI_TRIGGER = 0xc38,
  129. MV_PCI_SERR_MASK = 0xc28,
  130. MV_PCI_XBAR_TMOUT = 0x1d04,
  131. MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
  132. MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
  133. MV_PCI_ERR_ATTRIBUTE = 0x1d48,
  134. MV_PCI_ERR_COMMAND = 0x1d50,
  135. PCI_IRQ_CAUSE_OFS = 0x1d58,
  136. PCI_IRQ_MASK_OFS = 0x1d5c,
  137. PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
  138. HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
  139. HC_MAIN_IRQ_MASK_OFS = 0x1d64,
  140. PORT0_ERR = (1 << 0), /* shift by port # */
  141. PORT0_DONE = (1 << 1), /* shift by port # */
  142. HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
  143. HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
  144. PCI_ERR = (1 << 18),
  145. TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
  146. TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
  147. PORTS_0_3_COAL_DONE = (1 << 8),
  148. PORTS_4_7_COAL_DONE = (1 << 17),
  149. PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
  150. GPIO_INT = (1 << 22),
  151. SELF_INT = (1 << 23),
  152. TWSI_INT = (1 << 24),
  153. HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
  154. HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
  155. HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
  156. PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
  157. HC_MAIN_RSVD),
  158. HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
  159. HC_MAIN_RSVD_5),
  160. /* SATAHC registers */
  161. HC_CFG_OFS = 0,
  162. HC_IRQ_CAUSE_OFS = 0x14,
  163. CRPB_DMA_DONE = (1 << 0), /* shift by port # */
  164. HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
  165. DEV_IRQ = (1 << 8), /* shift by port # */
  166. /* Shadow block registers */
  167. SHD_BLK_OFS = 0x100,
  168. SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
  169. /* SATA registers */
  170. SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
  171. SATA_ACTIVE_OFS = 0x350,
  172. PHY_MODE3 = 0x310,
  173. PHY_MODE4 = 0x314,
  174. PHY_MODE2 = 0x330,
  175. MV5_PHY_MODE = 0x74,
  176. MV5_LT_MODE = 0x30,
  177. MV5_PHY_CTL = 0x0C,
  178. SATA_INTERFACE_CTL = 0x050,
  179. MV_M2_PREAMP_MASK = 0x7e0,
  180. /* Port registers */
  181. EDMA_CFG_OFS = 0,
  182. EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
  183. EDMA_CFG_NCQ = (1 << 5),
  184. EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
  185. EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
  186. EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
  187. EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
  188. EDMA_ERR_IRQ_MASK_OFS = 0xc,
  189. EDMA_ERR_D_PAR = (1 << 0),
  190. EDMA_ERR_PRD_PAR = (1 << 1),
  191. EDMA_ERR_DEV = (1 << 2),
  192. EDMA_ERR_DEV_DCON = (1 << 3),
  193. EDMA_ERR_DEV_CON = (1 << 4),
  194. EDMA_ERR_SERR = (1 << 5),
  195. EDMA_ERR_SELF_DIS = (1 << 7),
  196. EDMA_ERR_BIST_ASYNC = (1 << 8),
  197. EDMA_ERR_CRBQ_PAR = (1 << 9),
  198. EDMA_ERR_CRPB_PAR = (1 << 10),
  199. EDMA_ERR_INTRL_PAR = (1 << 11),
  200. EDMA_ERR_IORDY = (1 << 12),
  201. EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
  202. EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
  203. EDMA_ERR_LNK_DATA_RX = (0xf << 17),
  204. EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
  205. EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
  206. EDMA_ERR_TRANS_PROTO = (1 << 31),
  207. EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
  208. EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
  209. EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
  210. EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
  211. EDMA_ERR_LNK_DATA_RX |
  212. EDMA_ERR_LNK_DATA_TX |
  213. EDMA_ERR_TRANS_PROTO),
  214. EDMA_REQ_Q_BASE_HI_OFS = 0x10,
  215. EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
  216. EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
  217. EDMA_REQ_Q_PTR_SHIFT = 5,
  218. EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
  219. EDMA_RSP_Q_IN_PTR_OFS = 0x20,
  220. EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
  221. EDMA_RSP_Q_PTR_SHIFT = 3,
  222. EDMA_CMD_OFS = 0x28,
  223. EDMA_EN = (1 << 0),
  224. EDMA_DS = (1 << 1),
  225. ATA_RST = (1 << 2),
  226. EDMA_IORDY_TMOUT = 0x34,
  227. EDMA_ARB_CFG = 0x38,
  228. /* Host private flags (hp_flags) */
  229. MV_HP_FLAG_MSI = (1 << 0),
  230. MV_HP_ERRATA_50XXB0 = (1 << 1),
  231. MV_HP_ERRATA_50XXB2 = (1 << 2),
  232. MV_HP_ERRATA_60X1B2 = (1 << 3),
  233. MV_HP_ERRATA_60X1C0 = (1 << 4),
  234. MV_HP_ERRATA_XX42A0 = (1 << 5),
  235. MV_HP_50XX = (1 << 6),
  236. MV_HP_GEN_IIE = (1 << 7),
  237. /* Port private flags (pp_flags) */
  238. MV_PP_FLAG_EDMA_EN = (1 << 0),
  239. MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
  240. };
  241. #define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
  242. #define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
  243. #define IS_GEN_I(hpriv) IS_50XX(hpriv)
  244. #define IS_GEN_II(hpriv) IS_60XX(hpriv)
  245. #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
  246. enum {
  247. MV_DMA_BOUNDARY = 0xffffffffU,
  248. EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
  249. EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
  250. };
  251. enum chip_type {
  252. chip_504x,
  253. chip_508x,
  254. chip_5080,
  255. chip_604x,
  256. chip_608x,
  257. chip_6042,
  258. chip_7042,
  259. };
  260. /* Command ReQuest Block: 32B */
  261. struct mv_crqb {
  262. __le32 sg_addr;
  263. __le32 sg_addr_hi;
  264. __le16 ctrl_flags;
  265. __le16 ata_cmd[11];
  266. };
  267. struct mv_crqb_iie {
  268. __le32 addr;
  269. __le32 addr_hi;
  270. __le32 flags;
  271. __le32 len;
  272. __le32 ata_cmd[4];
  273. };
  274. /* Command ResPonse Block: 8B */
  275. struct mv_crpb {
  276. __le16 id;
  277. __le16 flags;
  278. __le32 tmstmp;
  279. };
  280. /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
  281. struct mv_sg {
  282. __le32 addr;
  283. __le32 flags_size;
  284. __le32 addr_hi;
  285. __le32 reserved;
  286. };
  287. struct mv_port_priv {
  288. struct mv_crqb *crqb;
  289. dma_addr_t crqb_dma;
  290. struct mv_crpb *crpb;
  291. dma_addr_t crpb_dma;
  292. struct mv_sg *sg_tbl;
  293. dma_addr_t sg_tbl_dma;
  294. u32 pp_flags;
  295. };
  296. struct mv_port_signal {
  297. u32 amps;
  298. u32 pre;
  299. };
  300. struct mv_host_priv;
  301. struct mv_hw_ops {
  302. void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
  303. unsigned int port);
  304. void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
  305. void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
  306. void __iomem *mmio);
  307. int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
  308. unsigned int n_hc);
  309. void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
  310. void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
  311. };
  312. struct mv_host_priv {
  313. u32 hp_flags;
  314. struct mv_port_signal signal[8];
  315. const struct mv_hw_ops *ops;
  316. };
  317. static void mv_irq_clear(struct ata_port *ap);
  318. static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
  319. static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  320. static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
  321. static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  322. static void mv_phy_reset(struct ata_port *ap);
  323. static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
  324. static int mv_port_start(struct ata_port *ap);
  325. static void mv_port_stop(struct ata_port *ap);
  326. static void mv_qc_prep(struct ata_queued_cmd *qc);
  327. static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
  328. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
  329. static void mv_eng_timeout(struct ata_port *ap);
  330. static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
  331. static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  332. unsigned int port);
  333. static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  334. static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
  335. void __iomem *mmio);
  336. static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  337. unsigned int n_hc);
  338. static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  339. static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
  340. static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  341. unsigned int port);
  342. static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  343. static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
  344. void __iomem *mmio);
  345. static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  346. unsigned int n_hc);
  347. static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  348. static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
  349. static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
  350. unsigned int port_no);
  351. static void mv_stop_and_reset(struct ata_port *ap);
  352. static struct scsi_host_template mv_sht = {
  353. .module = THIS_MODULE,
  354. .name = DRV_NAME,
  355. .ioctl = ata_scsi_ioctl,
  356. .queuecommand = ata_scsi_queuecmd,
  357. .can_queue = MV_USE_Q_DEPTH,
  358. .this_id = ATA_SHT_THIS_ID,
  359. .sg_tablesize = MV_MAX_SG_CT,
  360. .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
  361. .emulated = ATA_SHT_EMULATED,
  362. .use_clustering = 1,
  363. .proc_name = DRV_NAME,
  364. .dma_boundary = MV_DMA_BOUNDARY,
  365. .slave_configure = ata_scsi_slave_config,
  366. .slave_destroy = ata_scsi_slave_destroy,
  367. .bios_param = ata_std_bios_param,
  368. };
  369. static const struct ata_port_operations mv5_ops = {
  370. .port_disable = ata_port_disable,
  371. .tf_load = ata_tf_load,
  372. .tf_read = ata_tf_read,
  373. .check_status = ata_check_status,
  374. .exec_command = ata_exec_command,
  375. .dev_select = ata_std_dev_select,
  376. .phy_reset = mv_phy_reset,
  377. .cable_detect = ata_cable_sata,
  378. .qc_prep = mv_qc_prep,
  379. .qc_issue = mv_qc_issue,
  380. .data_xfer = ata_data_xfer,
  381. .eng_timeout = mv_eng_timeout,
  382. .irq_clear = mv_irq_clear,
  383. .irq_on = ata_irq_on,
  384. .irq_ack = ata_irq_ack,
  385. .scr_read = mv5_scr_read,
  386. .scr_write = mv5_scr_write,
  387. .port_start = mv_port_start,
  388. .port_stop = mv_port_stop,
  389. };
  390. static const struct ata_port_operations mv6_ops = {
  391. .port_disable = ata_port_disable,
  392. .tf_load = ata_tf_load,
  393. .tf_read = ata_tf_read,
  394. .check_status = ata_check_status,
  395. .exec_command = ata_exec_command,
  396. .dev_select = ata_std_dev_select,
  397. .phy_reset = mv_phy_reset,
  398. .cable_detect = ata_cable_sata,
  399. .qc_prep = mv_qc_prep,
  400. .qc_issue = mv_qc_issue,
  401. .data_xfer = ata_data_xfer,
  402. .eng_timeout = mv_eng_timeout,
  403. .irq_clear = mv_irq_clear,
  404. .irq_on = ata_irq_on,
  405. .irq_ack = ata_irq_ack,
  406. .scr_read = mv_scr_read,
  407. .scr_write = mv_scr_write,
  408. .port_start = mv_port_start,
  409. .port_stop = mv_port_stop,
  410. };
  411. static const struct ata_port_operations mv_iie_ops = {
  412. .port_disable = ata_port_disable,
  413. .tf_load = ata_tf_load,
  414. .tf_read = ata_tf_read,
  415. .check_status = ata_check_status,
  416. .exec_command = ata_exec_command,
  417. .dev_select = ata_std_dev_select,
  418. .phy_reset = mv_phy_reset,
  419. .cable_detect = ata_cable_sata,
  420. .qc_prep = mv_qc_prep_iie,
  421. .qc_issue = mv_qc_issue,
  422. .data_xfer = ata_data_xfer,
  423. .eng_timeout = mv_eng_timeout,
  424. .irq_clear = mv_irq_clear,
  425. .irq_on = ata_irq_on,
  426. .irq_ack = ata_irq_ack,
  427. .scr_read = mv_scr_read,
  428. .scr_write = mv_scr_write,
  429. .port_start = mv_port_start,
  430. .port_stop = mv_port_stop,
  431. };
  432. static const struct ata_port_info mv_port_info[] = {
  433. { /* chip_504x */
  434. .flags = MV_COMMON_FLAGS,
  435. .pio_mask = 0x1f, /* pio0-4 */
  436. .udma_mask = 0x7f, /* udma0-6 */
  437. .port_ops = &mv5_ops,
  438. },
  439. { /* chip_508x */
  440. .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
  441. .pio_mask = 0x1f, /* pio0-4 */
  442. .udma_mask = 0x7f, /* udma0-6 */
  443. .port_ops = &mv5_ops,
  444. },
  445. { /* chip_5080 */
  446. .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
  447. .pio_mask = 0x1f, /* pio0-4 */
  448. .udma_mask = 0x7f, /* udma0-6 */
  449. .port_ops = &mv5_ops,
  450. },
  451. { /* chip_604x */
  452. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
  453. .pio_mask = 0x1f, /* pio0-4 */
  454. .udma_mask = 0x7f, /* udma0-6 */
  455. .port_ops = &mv6_ops,
  456. },
  457. { /* chip_608x */
  458. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  459. MV_FLAG_DUAL_HC),
  460. .pio_mask = 0x1f, /* pio0-4 */
  461. .udma_mask = 0x7f, /* udma0-6 */
  462. .port_ops = &mv6_ops,
  463. },
  464. { /* chip_6042 */
  465. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
  466. .pio_mask = 0x1f, /* pio0-4 */
  467. .udma_mask = 0x7f, /* udma0-6 */
  468. .port_ops = &mv_iie_ops,
  469. },
  470. { /* chip_7042 */
  471. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
  472. .pio_mask = 0x1f, /* pio0-4 */
  473. .udma_mask = 0x7f, /* udma0-6 */
  474. .port_ops = &mv_iie_ops,
  475. },
  476. };
  477. static const struct pci_device_id mv_pci_tbl[] = {
  478. { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
  479. { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
  480. { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
  481. { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
  482. { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
  483. { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
  484. { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
  485. { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
  486. { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
  487. { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
  488. /* Adaptec 1430SA */
  489. { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
  490. { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
  491. /* add Marvell 7042 support */
  492. { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
  493. { } /* terminate list */
  494. };
  495. static struct pci_driver mv_pci_driver = {
  496. .name = DRV_NAME,
  497. .id_table = mv_pci_tbl,
  498. .probe = mv_init_one,
  499. .remove = ata_pci_remove_one,
  500. };
  501. static const struct mv_hw_ops mv5xxx_ops = {
  502. .phy_errata = mv5_phy_errata,
  503. .enable_leds = mv5_enable_leds,
  504. .read_preamp = mv5_read_preamp,
  505. .reset_hc = mv5_reset_hc,
  506. .reset_flash = mv5_reset_flash,
  507. .reset_bus = mv5_reset_bus,
  508. };
  509. static const struct mv_hw_ops mv6xxx_ops = {
  510. .phy_errata = mv6_phy_errata,
  511. .enable_leds = mv6_enable_leds,
  512. .read_preamp = mv6_read_preamp,
  513. .reset_hc = mv6_reset_hc,
  514. .reset_flash = mv6_reset_flash,
  515. .reset_bus = mv_reset_pci_bus,
  516. };
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
  521. /* move to PCI layer or libata core? */
  522. static int pci_go_64(struct pci_dev *pdev)
  523. {
  524. int rc;
  525. if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
  526. rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
  527. if (rc) {
  528. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  529. if (rc) {
  530. dev_printk(KERN_ERR, &pdev->dev,
  531. "64-bit DMA enable failed\n");
  532. return rc;
  533. }
  534. }
  535. } else {
  536. rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  537. if (rc) {
  538. dev_printk(KERN_ERR, &pdev->dev,
  539. "32-bit DMA enable failed\n");
  540. return rc;
  541. }
  542. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  543. if (rc) {
  544. dev_printk(KERN_ERR, &pdev->dev,
  545. "32-bit consistent DMA enable failed\n");
  546. return rc;
  547. }
  548. }
  549. return rc;
  550. }
  551. /*
  552. * Functions
  553. */
  554. static inline void writelfl(unsigned long data, void __iomem *addr)
  555. {
  556. writel(data, addr);
  557. (void) readl(addr); /* flush to avoid PCI posted write */
  558. }
  559. static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
  560. {
  561. return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
  562. }
  563. static inline unsigned int mv_hc_from_port(unsigned int port)
  564. {
  565. return port >> MV_PORT_HC_SHIFT;
  566. }
  567. static inline unsigned int mv_hardport_from_port(unsigned int port)
  568. {
  569. return port & MV_PORT_MASK;
  570. }
  571. static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
  572. unsigned int port)
  573. {
  574. return mv_hc_base(base, mv_hc_from_port(port));
  575. }
  576. static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
  577. {
  578. return mv_hc_base_from_port(base, port) +
  579. MV_SATAHC_ARBTR_REG_SZ +
  580. (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
  581. }
  582. static inline void __iomem *mv_ap_base(struct ata_port *ap)
  583. {
  584. return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
  585. }
  586. static inline int mv_get_hc_count(unsigned long port_flags)
  587. {
  588. return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
  589. }
/* Intentionally empty: libata requires the hook, but this driver clears
 * interrupt causes elsewhere, so there is no per-call work to do here. */
static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		/* Kick the engine and remember that we did so. */
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	/* Cross-check our cached state against the hardware enable bit. */
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* Already stopped per our cache; hardware should agree. */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop (1000 x 100us = 100ms max) */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	/* 'reg' holds the last value read; EDMA_EN still set means timeout */
	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, four u32s per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
/* Hex-dump the first @bytes of @pdev's PCI config space (no-op unless
 * ATA_DEBUG is defined; config-read errors are deliberately ignored).
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev,b,&dw);
			printk("%08x ",dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug dump of PCI, HC and per-port register blocks.  Pass a negative
 * @port to dump every port/HC; pass NULL @pdev to skip PCI config space.
 * Compiled out unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port: dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
  721. static unsigned int mv_scr_offset(unsigned int sc_reg_in)
  722. {
  723. unsigned int ofs;
  724. switch (sc_reg_in) {
  725. case SCR_STATUS:
  726. case SCR_CONTROL:
  727. case SCR_ERROR:
  728. ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
  729. break;
  730. case SCR_ACTIVE:
  731. ofs = SATA_ACTIVE_OFS; /* active is not with the others */
  732. break;
  733. default:
  734. ofs = 0xffffffffU;
  735. break;
  736. }
  737. return ofs;
  738. }
  739. static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
  740. {
  741. unsigned int ofs = mv_scr_offset(sc_reg_in);
  742. if (0xffffffffU != ofs)
  743. return readl(mv_ap_base(ap) + ofs);
  744. else
  745. return (u32) ofs;
  746. }
  747. static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  748. {
  749. unsigned int ofs = mv_scr_offset(sc_reg_in);
  750. if (0xffffffffU != ofs)
  751. writelfl(val, mv_ap_base(ap) + ofs);
  752. }
/* Program the EDMA configuration register for the controller generation
 * (Gen I / II / IIE), leaving NCQ disabled in all cases.
 */
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	/* devm/dmam managed allocations: freed automatically on detach,
	 * so the early-return error paths below leak nothing.
	 */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	/* Tell the chip where the request/response queues live.  The
	 * (x >> 16) >> 16 form extracts the high 32 bits without a
	 * 64-bit shift warning on 32-bit dma_addr_t builds.
	 */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	/* DMA memory itself is devm/dmam managed (see mv_port_start),
	 * so only the engine needs stopping here, under the host lock.
	 */
	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/* ePRD entry: 32-bit lo/hi address halves plus length.
		 * (x >> 16) >> 16 avoids a >32-bit shift on 32-bit
		 * dma_addr_t builds.
		 */
		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		/* hardware stops walking the table at this flag */
		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	/* returns the number of ePRD entries written */
	return n_sg;
}
  896. static inline unsigned mv_inc_q_index(unsigned index)
  897. {
  898. return (index + 1) & MV_MAX_Q_DEPTH_MASK;
  899. }
  900. static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
  901. {
  902. u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
  903. (last ? CRQB_CMD_LAST : 0);
  904. *cmdw = cpu_to_le16(tmp);
  905. }
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the normal shadow registers */
	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this command's ePRD table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining register writes, high-order bytes first; the command
	 * byte is flagged as the last entry in the CRQB
	 */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the normal shadow registers */
	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Gen IIE uses a fixed-layout CRQB overlaid on the same queue slot */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* taskfile registers are packed into four 32-bit words rather
	 * than the Gen I/II per-register command words
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* ATA status lives in the upper byte of the CRPB flags word */
	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
					>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		/* read-then-write-back clears the pending SERROR bits */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		/* hardware disabled eDMA on its own; sync our cached flag */
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA:check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		/* NOTE(review): ap was already dereferenced above, so this
		 * NULL test is redundant; and the DISABLED check runs only
		 * after the CRPB/status reads above have already advanced
		 * hardware state -- confirm this ordering is intentional.
		 */
		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		/* main IRQ reg has two bits per port; bit 8 is skipped for
		 * ports on the second HC
		 */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
/**
 * mv_interrupt -
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 * @regs: unused
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* dispatch each HC whose bits are set in the main cause register */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
  1306. static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
  1307. {
  1308. void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
  1309. unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
  1310. return hc_mmio + ofs;
  1311. }
  1312. static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
  1313. {
  1314. unsigned int ofs;
  1315. switch (sc_reg_in) {
  1316. case SCR_STATUS:
  1317. case SCR_ERROR:
  1318. case SCR_CONTROL:
  1319. ofs = sc_reg_in * sizeof(u32);
  1320. break;
  1321. default:
  1322. ofs = 0xffffffffU;
  1323. break;
  1324. }
  1325. return ofs;
  1326. }
  1327. static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
  1328. {
  1329. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1330. void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
  1331. unsigned int ofs = mv5_scr_offset(sc_reg_in);
  1332. if (ofs != 0xffffffffU)
  1333. return readl(addr + ofs);
  1334. else
  1335. return (u32) ofs;
  1336. }
  1337. static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  1338. {
  1339. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1340. void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
  1341. unsigned int ofs = mv5_scr_offset(sc_reg_in);
  1342. if (ofs != 0xffffffffU)
  1343. writelfl(val, addr + ofs);
  1344. }
/* 50xx PCI bus reset.  On everything except the very first 5080
 * stepping (rev 0), set bit 0 of the expansion-ROM BAR control
 * register before performing the common PCI bus reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
/* Restore the flash-interface control register to its default value. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
/* Capture the factory PHY pre-emphasis and amplitude settings for port
 * @idx so mv5_phy_errata() can re-apply them after masking.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
/* Configure GPIO/LED behavior on 50xx parts. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): '|= ~(1 << 0)' sets every bit EXCEPT bit 0, which
	 * looks like it may have been intended as '&= ~(1 << 0)' (undoing
	 * the bit-0 set in mv5_reset_bus).  Verify against the datasheet
	 * before changing -- left as-is to preserve behavior.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
/* Apply 50xx PHY errata workarounds for @port, then re-apply the saved
 * pre-emphasis/amplitude signal settings captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (bits 12:11) and amplitude (bits 7:5) fields */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: adjust LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* replace the signal-tuning fields with the saved factory values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Quiesce and reinitialize one port on a 50xx chip: stop eDMA, pulse a
 * channel reset, then zero/re-default the per-port EDMA registers.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one host controller's shared registers on a 50xx chip. */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* preserve bits in 0x1c1c1c1c, force 0x03030303 pattern */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
  1440. #undef ZERO
  1441. static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  1442. unsigned int n_hc)
  1443. {
  1444. unsigned int hc, port;
  1445. for (hc = 0; hc < n_hc; hc++) {
  1446. for (port = 0; port < MV_PORTS_PER_HC; port++)
  1447. mv5_reset_hc_port(hpriv, mmio,
  1448. (hc * MV_PORTS_PER_HC) + port);
  1449. mv5_reset_one_hc(hpriv, mmio, hc);
  1450. }
  1451. return 0;
  1452. }
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the chip's PCI interface: restore PCI mode/timeout defaults and
 * zero all interrupt masks, cause and error-capture registers.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear mode bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
  1473. #undef ZERO
/*
 * mv6_reset_flash - 60xx variant of the flash-controller reset
 * @hpriv: host private data
 * @mmio: base address of the HBA
 *
 * Performs the common 5xxx flash reset, then additionally sets bits
 * 5 and 6 in MV_GPIO_PORT_CTL while keeping only the low two bits of
 * the previous value (bit semantics not visible here — TODO confirm).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data (unused; kept for the reset_hc op)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused by this variant)
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      Returns 0 on success, 1 if the PCI master would not flush or
 *      the global reset bit could not be set/cleared.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for in-flight PCI master transactions to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset: retry up to 5 times until the bit reads back set */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}

done:
	return rc;
}
  1541. static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
  1542. void __iomem *mmio)
  1543. {
  1544. void __iomem *port_mmio;
  1545. u32 tmp;
  1546. tmp = readl(mmio + MV_RESET_CFG);
  1547. if ((tmp & (1 << 0)) == 0) {
  1548. hpriv->signal[idx].amps = 0x7 << 8;
  1549. hpriv->signal[idx].pre = 0x1 << 5;
  1550. return;
  1551. }
  1552. port_mmio = mv_port_base(mmio, idx);
  1553. tmp = readl(port_mmio + PHY_MODE2);
  1554. hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
  1555. hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
  1556. }
/*
 * mv6_enable_leds - turn on activity LED driving (60xx)
 * @hpriv: host private data (unused)
 * @mmio: base address of the HBA
 *
 * Writes bits 5 and 6 of MV_GPIO_PORT_CTL — presumably the LED enable
 * bits, mirroring what mv6_reset_flash() sets; TODO(review) confirm.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
/*
 * mv6_phy_errata - apply 60xx PHY errata workarounds to one port
 * @hpriv: host private data (hp_flags select which errata apply)
 * @mmio: base address of the HBA
 * @port: port number to fix up
 *
 * Applies the PHY_MODE2/3/4 adjustments required for the 60X1B2 and
 * 60X1C0 steppings, then restores the pre-emphasis and amplitude
 * values previously captured into hpriv->signal[] by read_preamp.
 * The exact write/delay ordering follows the errata sequence; do not
 * reorder.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 (with bit 16 cleared), then clear both —
		 * errata sequence; bit meanings undocumented here
		 */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: register 0x310 must be preserved across the
		 * PHY_MODE4 update (saved here, restored below)
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/*
 * mv_channel_reset - pulse ATA_RST on one channel, then run PHY errata
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @port_no: port/channel number to reset
 *
 * Asserts ATA_RST in the EDMA command register, configures the SATA
 * interface for gen2i on 60xx parts while reset is held, waits for the
 * reset to propagate, releases it, and re-applies the per-generation
 * PHY errata fixes.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
/*
 * mv_stop_and_reset - full recovery sequence for one port
 * @ap: ATA port to recover
 *
 * Stops EDMA, pulses the channel reset, then performs a polling
 * (non-sleeping) COMRESET — usable from the timeout/error path.
 */
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);
	mv_channel_reset(hpriv, mmio, ap->port_no);
	__mv_phy_reset(ap, 0);	/* can_sleep=0: caller may hold the host lock */
}
  1638. static inline void __msleep(unsigned int msec, int can_sleep)
  1639. {
  1640. if (can_sleep)
  1641. msleep(msec);
  1642. else
  1643. mdelay(msec);
  1644. }
/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @can_sleep: nonzero if msleep() may be used, zero for mdelay()
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);	/* assert COMRESET */
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);	/* release it */
	__msleep(20, can_sleep);

	/* poll SStatus for up to 200ms until the low bits indicate the
	 * link settled — presumably DET==3 (up) or DET==0 (no device);
	 * TODO(review): confirm DET decoding against SATA spec
	 */
	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	/* read the signature taskfile from the shadow registers and
	 * classify the attached device
	 */
	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	/* clear any latched EDMA error causes; EDMA stays disabled */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
/* process-context wrapper: same as __mv_phy_reset but allowed to sleep */
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
  1731. /**
  1732. * mv_eng_timeout - Routine called by libata when SCSI times out I/O
  1733. * @ap: ATA channel to manipulate
  1734. *
  1735. * Intent is to clear all pending error conditions, reset the
  1736. * chip/bus, fail the command, and move on.
  1737. *
  1738. * LOCKING:
  1739. * This routine holds the host lock while failing the command.
  1740. */
  1741. static void mv_eng_timeout(struct ata_port *ap)
  1742. {
  1743. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1744. struct ata_queued_cmd *qc;
  1745. unsigned long flags;
  1746. ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
  1747. DPRINTK("All regs @ start of eng_timeout\n");
  1748. mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
  1749. qc = ata_qc_from_tag(ap, ap->active_tag);
  1750. printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
  1751. mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
  1752. spin_lock_irqsave(&ap->host->lock, flags);
  1753. mv_err_intr(ap, 0);
  1754. mv_stop_and_reset(ap);
  1755. spin_unlock_irqrestore(&ap->host->lock, flags);
  1756. WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
  1757. if (qc->flags & ATA_QCFLAG_ACTIVE) {
  1758. qc->err_mask |= AC_ERR_TIMEOUT;
  1759. ata_eh_qc_complete(qc);
  1760. }
  1761. }
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the shadow block exposes the taskfile
	 * registers as consecutive 32-bit slots, indexed by ATA_REG_*
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions
	 * (SError is write-1-to-clear, hence the read-back-and-write)
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
/*
 * mv_chip_id - select generation ops and errata flags for this board
 * @host: ATA host being initialized
 * @board_idx: chip_* board table index from the PCI ID entry
 *
 * Reads the PCI revision ID and, based on board type and stepping,
 * installs the 5xxx or 6xxx hardware ops in hpriv->ops and records
 * the applicable errata workarounds in hpriv->hp_flags.  Unknown
 * steppings conservatively get the latest known errata set.
 *
 * Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		/* Gen IIE parts use the 6xxx ops plus the IIE flag */
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: keep everything quiet during setup */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture PHY settings before the resets, so phy_errata can
	 * restore them afterwards
	 */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			/* same gen2i interface setup as mv_channel_reset() */
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
  1961. /**
  1962. * mv_print_info - Dump key info to kernel log for perusal.
  1963. * @host: ATA host to print info about
  1964. *
  1965. * FIXME: complete this.
  1966. *
  1967. * LOCKING:
  1968. * Inherited from caller.
  1969. */
  1970. static void mv_print_info(struct ata_host *host)
  1971. {
  1972. struct pci_dev *pdev = to_pci_dev(host->dev);
  1973. struct mv_host_priv *hpriv = host->private_data;
  1974. u8 rev_id, scc;
  1975. const char *scc_s;
  1976. /* Use this to determine the HW stepping of the chip so we know
  1977. * what errata to workaround
  1978. */
  1979. pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
  1980. pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
  1981. if (scc == 0)
  1982. scc_s = "SCSI";
  1983. else if (scc == 0x01)
  1984. scc_s = "RAID";
  1985. else
  1986. scc_s = "unknown";
  1987. dev_printk(KERN_INFO, &pdev->dev,
  1988. "%u slots %u ports %s mode IRQ via %s\n",
  1989. (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
  1990. scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
  1991. }
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ata_host and driver-private data, maps BARs,
 *      initializes the adapter, and activates it.  All resources are
 *      devm/pcim-managed, so early error returns need no cleanup.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* BAR busy: keep device enabled */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: prefer MSI, fall back to legacy INTx */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);

	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 &mv_sht);
}
/* module load entry point: register the PCI driver with the core */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
/* module unload entry point: unregister the PCI driver */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
/* Module metadata */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);	/* enables autoload by PCI ID */
MODULE_VERSION(DRV_VERSION);

/* "msi" parameter: mode 0444 = visible in sysfs, not writable at runtime */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);