  1. /*
  2. * sata_mv.c - Marvell SATA support
  3. *
  4. * Copyright 2005: EMC Corporation, all rights reserved.
  5. * Copyright 2005 Red Hat, Inc. All rights reserved.
  6. *
  7. * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; version 2 of the License.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. /*
  24. sata_mv TODO list:
  25. 1) Needs a full errata audit for all chipsets. I implemented most
  26. of the errata workarounds found in the Marvell vendor driver, but
  27. I distinctly remember that a couple of workarounds (one related to
  28. PCI-X) are still needed.
  29. 4) Add NCQ support (easy to intermediate, once new-EH support appears)
  30. 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
  31. 6) Add port multiplier support (intermediate)
  32. 8) Develop a low-power-consumption strategy, and implement it.
  33. 9) [Experiment, low priority] See if ATAPI can be supported using
  34. "unknown FIS" or "vendor-specific FIS" support, or something creative
  35. like that.
  36. 10) [Experiment, low priority] Investigate interrupt coalescing.
  37. Quite often, especially with PCI Message Signalled Interrupts (MSI),
  38. the overhead reduced by interrupt mitigation is not worth the
  39. latency cost.
  40. 11) [Experiment, Marvell value added] Is it possible to use target
  41. mode to cross-connect two Linux boxes with Marvell cards? If so,
  42. creating LibATA target mode support would be very interesting.
  43. Target mode, for those without docs, is the ability to directly
  44. connect two SATA controllers.
  45. 13) Verify that 7042 is fully supported. I only have a 6042.
  46. */
  47. #include <linux/kernel.h>
  48. #include <linux/module.h>
  49. #include <linux/pci.h>
  50. #include <linux/init.h>
  51. #include <linux/blkdev.h>
  52. #include <linux/delay.h>
  53. #include <linux/interrupt.h>
  54. #include <linux/dma-mapping.h>
  55. #include <linux/device.h>
  56. #include <scsi/scsi_host.h>
  57. #include <scsi/scsi_cmnd.h>
  58. #include <scsi/scsi_device.h>
  59. #include <linux/libata.h>
  60. #define DRV_NAME "sata_mv"
  61. #define DRV_VERSION "1.01"
  62. enum {
  63. /* BARs are enumerated in pci_resource_start() terms */
  64. MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
  65. MV_IO_BAR = 2, /* offset 0x18: IO space */
  66. MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
  67. MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
  68. MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
  69. MV_PCI_REG_BASE = 0,
  70. MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
  71. MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
  72. MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
  73. MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
  74. MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
  75. MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
  76. MV_SATAHC0_REG_BASE = 0x20000,
  77. MV_FLASH_CTL = 0x1046c,
  78. MV_GPIO_PORT_CTL = 0x104f0,
  79. MV_RESET_CFG = 0x180d8,
  80. MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  81. MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  82. MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
  83. MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
  84. MV_MAX_Q_DEPTH = 32,
  85. MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
  86. /* CRQB needs alignment on a 1KB boundary. Size == 1KB
  87. * CRPB needs alignment on a 256B boundary. Size == 256B
  88. * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
  89. * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
  90. */
  91. MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
  92. MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
  93. MV_MAX_SG_CT = 176,
  94. MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
  95. MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
  96. MV_PORTS_PER_HC = 4,
  97. /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
  98. MV_PORT_HC_SHIFT = 2,
  99. /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port; see the worked example after this enum */
  100. MV_PORT_MASK = 3,
  101. /* Host Flags */
  102. MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
  103. MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
  104. MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  105. ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
  106. ATA_FLAG_PIO_POLLING,
  107. MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
  108. CRQB_FLAG_READ = (1 << 0),
  109. CRQB_TAG_SHIFT = 1,
  110. CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
  111. CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
  112. CRQB_CMD_ADDR_SHIFT = 8,
  113. CRQB_CMD_CS = (0x2 << 11),
  114. CRQB_CMD_LAST = (1 << 15),
  115. CRPB_FLAG_STATUS_SHIFT = 8,
  116. CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
  117. CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
  118. EPRD_FLAG_END_OF_TBL = (1 << 31),
  119. /* PCI interface registers */
  120. PCI_COMMAND_OFS = 0xc00,
  121. PCI_MAIN_CMD_STS_OFS = 0xd30,
  122. STOP_PCI_MASTER = (1 << 2),
  123. PCI_MASTER_EMPTY = (1 << 3),
  124. GLOB_SFT_RST = (1 << 4),
  125. MV_PCI_MODE = 0xd00,
  126. MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
  127. MV_PCI_DISC_TIMER = 0xd04,
  128. MV_PCI_MSI_TRIGGER = 0xc38,
  129. MV_PCI_SERR_MASK = 0xc28,
  130. MV_PCI_XBAR_TMOUT = 0x1d04,
  131. MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
  132. MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
  133. MV_PCI_ERR_ATTRIBUTE = 0x1d48,
  134. MV_PCI_ERR_COMMAND = 0x1d50,
  135. PCI_IRQ_CAUSE_OFS = 0x1d58,
  136. PCI_IRQ_MASK_OFS = 0x1d5c,
  137. PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
  138. HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
  139. HC_MAIN_IRQ_MASK_OFS = 0x1d64,
  140. PORT0_ERR = (1 << 0), /* shift by port # */
  141. PORT0_DONE = (1 << 1), /* shift by port # */
  142. HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
  143. HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
  144. PCI_ERR = (1 << 18),
  145. TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
  146. TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
  147. PORTS_0_3_COAL_DONE = (1 << 8),
  148. PORTS_4_7_COAL_DONE = (1 << 17),
  149. PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
  150. GPIO_INT = (1 << 22),
  151. SELF_INT = (1 << 23),
  152. TWSI_INT = (1 << 24),
  153. HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
  154. HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
  155. HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
  156. PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
  157. HC_MAIN_RSVD),
  158. HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
  159. HC_MAIN_RSVD_5),
  160. /* SATAHC registers */
  161. HC_CFG_OFS = 0,
  162. HC_IRQ_CAUSE_OFS = 0x14,
  163. CRPB_DMA_DONE = (1 << 0), /* shift by port # */
  164. HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
  165. DEV_IRQ = (1 << 8), /* shift by port # */
  166. /* Shadow block registers */
  167. SHD_BLK_OFS = 0x100,
  168. SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
  169. /* SATA registers */
  170. SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
  171. SATA_ACTIVE_OFS = 0x350,
  172. PHY_MODE3 = 0x310,
  173. PHY_MODE4 = 0x314,
  174. PHY_MODE2 = 0x330,
  175. MV5_PHY_MODE = 0x74,
  176. MV5_LT_MODE = 0x30,
  177. MV5_PHY_CTL = 0x0C,
  178. SATA_INTERFACE_CTL = 0x050,
  179. MV_M2_PREAMP_MASK = 0x7e0,
  180. /* Port registers */
  181. EDMA_CFG_OFS = 0,
  182. EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
  183. EDMA_CFG_NCQ = (1 << 5),
  184. EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
  185. EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
  186. EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
  187. EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
  188. EDMA_ERR_IRQ_MASK_OFS = 0xc,
  189. EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
  190. EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
  191. EDMA_ERR_DEV = (1 << 2), /* device error */
  192. EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
  193. EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
  194. EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
  195. EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
  196. EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
  197. EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
  198. EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
  199. EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
  200. EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
  201. EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
  202. EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
  203. EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
  204. EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
  205. EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
  206. EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
  207. EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
  208. EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
  209. EDMA_ERR_OVERRUN_5 = (1 << 5),
  210. EDMA_ERR_UNDERRUN_5 = (1 << 6),
  211. EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
  212. EDMA_ERR_PRD_PAR |
  213. EDMA_ERR_DEV_DCON |
  214. EDMA_ERR_DEV_CON |
  215. EDMA_ERR_SERR |
  216. EDMA_ERR_SELF_DIS |
  217. EDMA_ERR_CRQB_PAR |
  218. EDMA_ERR_CRPB_PAR |
  219. EDMA_ERR_INTRL_PAR |
  220. EDMA_ERR_IORDY |
  221. EDMA_ERR_LNK_CTRL_RX_2 |
  222. EDMA_ERR_LNK_DATA_RX |
  223. EDMA_ERR_LNK_DATA_TX |
  224. EDMA_ERR_TRANS_PROTO,
  225. EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
  226. EDMA_ERR_PRD_PAR |
  227. EDMA_ERR_DEV_DCON |
  228. EDMA_ERR_DEV_CON |
  229. EDMA_ERR_OVERRUN_5 |
  230. EDMA_ERR_UNDERRUN_5 |
  231. EDMA_ERR_SELF_DIS_5 |
  232. EDMA_ERR_CRQB_PAR |
  233. EDMA_ERR_CRPB_PAR |
  234. EDMA_ERR_INTRL_PAR |
  235. EDMA_ERR_IORDY,
  236. EDMA_REQ_Q_BASE_HI_OFS = 0x10,
  237. EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
  238. EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
  239. EDMA_REQ_Q_PTR_SHIFT = 5,
  240. EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
  241. EDMA_RSP_Q_IN_PTR_OFS = 0x20,
  242. EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
  243. EDMA_RSP_Q_PTR_SHIFT = 3,
  244. EDMA_CMD_OFS = 0x28, /* EDMA command register */
  245. EDMA_EN = (1 << 0), /* enable EDMA */
  246. EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
  247. ATA_RST = (1 << 2), /* reset trans/link/phy */
  248. EDMA_IORDY_TMOUT = 0x34,
  249. EDMA_ARB_CFG = 0x38,
  250. /* Host private flags (hp_flags) */
  251. MV_HP_FLAG_MSI = (1 << 0),
  252. MV_HP_ERRATA_50XXB0 = (1 << 1),
  253. MV_HP_ERRATA_50XXB2 = (1 << 2),
  254. MV_HP_ERRATA_60X1B2 = (1 << 3),
  255. MV_HP_ERRATA_60X1C0 = (1 << 4),
  256. MV_HP_ERRATA_XX42A0 = (1 << 5),
  257. MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
  258. MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
  259. MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
  260. /* Port private flags (pp_flags) */
  261. MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
  262. MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
  263. };
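/*
 * Worked example for the constants above (derived only from the values in
 * this enum, as a sanity check rather than from any datasheet): SATA port 5
 * lives on host controller 5 >> MV_PORT_HC_SHIFT == 1, as hard port
 * 5 & MV_PORT_MASK == 1, so its register window sits at
 *
 *	MV_SATAHC0_REG_BASE + 1 * MV_SATAHC_REG_SZ	(second HC)
 *	+ MV_SATAHC_ARBTR_REG_SZ			(skip the arbiter block)
 *	+ 1 * MV_PORT_REG_SZ				(second hard port)
 *	== 0x20000 + 0x10000 + 0x2000 + 0x2000 == 0x34000
 *
 * relative to BAR 0; this is exactly what mv_port_base() computes further
 * down.  Likewise, the per-port DMA buffer works out to
 *
 *	32 CRQBs * 32 B + 32 CRPBs * 8 B + 176 ePRDs * 16 B
 *	== 1024 + 256 + 2816 == 4096 B == MV_PORT_PRIV_DMA_SZ
 */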
  264. #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
  265. #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
  266. #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
  267. enum {
  268. /* DMA boundary 0xffff is required by the s/g splitting
  269. * we need on /length/ in mv_fill_sg().
  270. */
  271. MV_DMA_BOUNDARY = 0xffffU,
  272. /* mask of register bits containing lower 32 bits
  273. * of EDMA request queue DMA address
  274. */
  275. EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
  276. /* ditto, for response queue */
  277. EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
  278. };
  279. enum chip_type {
  280. chip_504x,
  281. chip_508x,
  282. chip_5080,
  283. chip_604x,
  284. chip_608x,
  285. chip_6042,
  286. chip_7042,
  287. };
  288. /* Command ReQuest Block: 32B */
  289. struct mv_crqb {
  290. __le32 sg_addr;
  291. __le32 sg_addr_hi;
  292. __le16 ctrl_flags;
  293. __le16 ata_cmd[11];
  294. };
  295. struct mv_crqb_iie {
  296. __le32 addr;
  297. __le32 addr_hi;
  298. __le32 flags;
  299. __le32 len;
  300. __le32 ata_cmd[4];
  301. };
  302. /* Command ResPonse Block: 8B */
  303. struct mv_crpb {
  304. __le16 id;
  305. __le16 flags;
  306. __le32 tmstmp;
  307. };
  308. /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
  309. struct mv_sg {
  310. __le32 addr;
  311. __le32 flags_size;
  312. __le32 addr_hi;
  313. __le32 reserved;
  314. };
  315. struct mv_port_priv {
  316. struct mv_crqb *crqb;
  317. dma_addr_t crqb_dma;
  318. struct mv_crpb *crpb;
  319. dma_addr_t crpb_dma;
  320. struct mv_sg *sg_tbl;
  321. dma_addr_t sg_tbl_dma;
  322. unsigned int req_idx;
  323. unsigned int resp_idx;
  324. u32 pp_flags;
  325. };
  326. struct mv_port_signal {
  327. u32 amps;
  328. u32 pre;
  329. };
  330. struct mv_host_priv;
  331. struct mv_hw_ops {
  332. void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
  333. unsigned int port);
  334. void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
  335. void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
  336. void __iomem *mmio);
  337. int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
  338. unsigned int n_hc);
  339. void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
  340. void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
  341. };
  342. struct mv_host_priv {
  343. u32 hp_flags;
  344. struct mv_port_signal signal[8];
  345. const struct mv_hw_ops *ops;
  346. };
  347. static void mv_irq_clear(struct ata_port *ap);
  348. static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
  349. static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  350. static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
  351. static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  352. static int mv_port_start(struct ata_port *ap);
  353. static void mv_port_stop(struct ata_port *ap);
  354. static void mv_qc_prep(struct ata_queued_cmd *qc);
  355. static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
  356. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
  357. static void mv_error_handler(struct ata_port *ap);
  358. static void mv_post_int_cmd(struct ata_queued_cmd *qc);
  359. static void mv_eh_freeze(struct ata_port *ap);
  360. static void mv_eh_thaw(struct ata_port *ap);
  361. static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
  362. static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  363. unsigned int port);
  364. static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  365. static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
  366. void __iomem *mmio);
  367. static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  368. unsigned int n_hc);
  369. static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  370. static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
  371. static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  372. unsigned int port);
  373. static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  374. static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
  375. void __iomem *mmio);
  376. static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  377. unsigned int n_hc);
  378. static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  379. static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
  380. static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
  381. unsigned int port_no);
  382. static struct scsi_host_template mv5_sht = {
  383. .module = THIS_MODULE,
  384. .name = DRV_NAME,
  385. .ioctl = ata_scsi_ioctl,
  386. .queuecommand = ata_scsi_queuecmd,
  387. .can_queue = ATA_DEF_QUEUE,
  388. .this_id = ATA_SHT_THIS_ID,
  389. .sg_tablesize = MV_MAX_SG_CT / 2,
  390. .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
  391. .emulated = ATA_SHT_EMULATED,
  392. .use_clustering = 1,
  393. .proc_name = DRV_NAME,
  394. .dma_boundary = MV_DMA_BOUNDARY,
  395. .slave_configure = ata_scsi_slave_config,
  396. .slave_destroy = ata_scsi_slave_destroy,
  397. .bios_param = ata_std_bios_param,
  398. };
  399. static struct scsi_host_template mv6_sht = {
  400. .module = THIS_MODULE,
  401. .name = DRV_NAME,
  402. .ioctl = ata_scsi_ioctl,
  403. .queuecommand = ata_scsi_queuecmd,
  404. .can_queue = ATA_DEF_QUEUE,
  405. .this_id = ATA_SHT_THIS_ID,
  406. .sg_tablesize = MV_MAX_SG_CT / 2,
  407. .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
  408. .emulated = ATA_SHT_EMULATED,
  409. .use_clustering = 1,
  410. .proc_name = DRV_NAME,
  411. .dma_boundary = MV_DMA_BOUNDARY,
  412. .slave_configure = ata_scsi_slave_config,
  413. .slave_destroy = ata_scsi_slave_destroy,
  414. .bios_param = ata_std_bios_param,
  415. };
  416. static const struct ata_port_operations mv5_ops = {
  417. .tf_load = ata_tf_load,
  418. .tf_read = ata_tf_read,
  419. .check_status = ata_check_status,
  420. .exec_command = ata_exec_command,
  421. .dev_select = ata_std_dev_select,
  422. .cable_detect = ata_cable_sata,
  423. .qc_prep = mv_qc_prep,
  424. .qc_issue = mv_qc_issue,
  425. .data_xfer = ata_data_xfer,
  426. .irq_clear = mv_irq_clear,
  427. .irq_on = ata_irq_on,
  428. .error_handler = mv_error_handler,
  429. .post_internal_cmd = mv_post_int_cmd,
  430. .freeze = mv_eh_freeze,
  431. .thaw = mv_eh_thaw,
  432. .scr_read = mv5_scr_read,
  433. .scr_write = mv5_scr_write,
  434. .port_start = mv_port_start,
  435. .port_stop = mv_port_stop,
  436. };
  437. static const struct ata_port_operations mv6_ops = {
  438. .tf_load = ata_tf_load,
  439. .tf_read = ata_tf_read,
  440. .check_status = ata_check_status,
  441. .exec_command = ata_exec_command,
  442. .dev_select = ata_std_dev_select,
  443. .cable_detect = ata_cable_sata,
  444. .qc_prep = mv_qc_prep,
  445. .qc_issue = mv_qc_issue,
  446. .data_xfer = ata_data_xfer,
  447. .irq_clear = mv_irq_clear,
  448. .irq_on = ata_irq_on,
  449. .error_handler = mv_error_handler,
  450. .post_internal_cmd = mv_post_int_cmd,
  451. .freeze = mv_eh_freeze,
  452. .thaw = mv_eh_thaw,
  453. .scr_read = mv_scr_read,
  454. .scr_write = mv_scr_write,
  455. .port_start = mv_port_start,
  456. .port_stop = mv_port_stop,
  457. };
  458. static const struct ata_port_operations mv_iie_ops = {
  459. .tf_load = ata_tf_load,
  460. .tf_read = ata_tf_read,
  461. .check_status = ata_check_status,
  462. .exec_command = ata_exec_command,
  463. .dev_select = ata_std_dev_select,
  464. .cable_detect = ata_cable_sata,
  465. .qc_prep = mv_qc_prep_iie,
  466. .qc_issue = mv_qc_issue,
  467. .data_xfer = ata_data_xfer,
  468. .irq_clear = mv_irq_clear,
  469. .irq_on = ata_irq_on,
  470. .error_handler = mv_error_handler,
  471. .post_internal_cmd = mv_post_int_cmd,
  472. .freeze = mv_eh_freeze,
  473. .thaw = mv_eh_thaw,
  474. .scr_read = mv_scr_read,
  475. .scr_write = mv_scr_write,
  476. .port_start = mv_port_start,
  477. .port_stop = mv_port_stop,
  478. };
  479. static const struct ata_port_info mv_port_info[] = {
  480. { /* chip_504x */
  481. .flags = MV_COMMON_FLAGS,
  482. .pio_mask = 0x1f, /* pio0-4 */
  483. .udma_mask = ATA_UDMA6,
  484. .port_ops = &mv5_ops,
  485. },
  486. { /* chip_508x */
  487. .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
  488. .pio_mask = 0x1f, /* pio0-4 */
  489. .udma_mask = ATA_UDMA6,
  490. .port_ops = &mv5_ops,
  491. },
  492. { /* chip_5080 */
  493. .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
  494. .pio_mask = 0x1f, /* pio0-4 */
  495. .udma_mask = ATA_UDMA6,
  496. .port_ops = &mv5_ops,
  497. },
  498. { /* chip_604x */
  499. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
  500. .pio_mask = 0x1f, /* pio0-4 */
  501. .udma_mask = ATA_UDMA6,
  502. .port_ops = &mv6_ops,
  503. },
  504. { /* chip_608x */
  505. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  506. MV_FLAG_DUAL_HC,
  507. .pio_mask = 0x1f, /* pio0-4 */
  508. .udma_mask = ATA_UDMA6,
  509. .port_ops = &mv6_ops,
  510. },
  511. { /* chip_6042 */
  512. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
  513. .pio_mask = 0x1f, /* pio0-4 */
  514. .udma_mask = ATA_UDMA6,
  515. .port_ops = &mv_iie_ops,
  516. },
  517. { /* chip_7042 */
  518. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
  519. .pio_mask = 0x1f, /* pio0-4 */
  520. .udma_mask = ATA_UDMA6,
  521. .port_ops = &mv_iie_ops,
  522. },
  523. };
  524. static const struct pci_device_id mv_pci_tbl[] = {
  525. { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
  526. { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
  527. { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
  528. { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
  529. /* RocketRAID 1740/174x have different identifiers */
  530. { PCI_VDEVICE(TTI, 0x1740), chip_508x },
  531. { PCI_VDEVICE(TTI, 0x1742), chip_508x },
  532. { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
  533. { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
  534. { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
  535. { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
  536. { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
  537. { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
  538. /* Adaptec 1430SA */
  539. { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
  540. { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
  541. /* add Marvell 7042 support */
  542. { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
  543. { } /* terminate list */
  544. };
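/*
 * Each entry's final field (chip_504x, chip_7042, ...) is stored in
 * ->driver_data and is used as an index into mv_port_info[] above.  Roughly
 * (a sketch of the lookup, not the literal probe code):
 *
 *	const struct ata_port_info *pi = &mv_port_info[ent->driver_data];
 */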
  545. static struct pci_driver mv_pci_driver = {
  546. .name = DRV_NAME,
  547. .id_table = mv_pci_tbl,
  548. .probe = mv_init_one,
  549. .remove = ata_pci_remove_one,
  550. };
  551. static const struct mv_hw_ops mv5xxx_ops = {
  552. .phy_errata = mv5_phy_errata,
  553. .enable_leds = mv5_enable_leds,
  554. .read_preamp = mv5_read_preamp,
  555. .reset_hc = mv5_reset_hc,
  556. .reset_flash = mv5_reset_flash,
  557. .reset_bus = mv5_reset_bus,
  558. };
  559. static const struct mv_hw_ops mv6xxx_ops = {
  560. .phy_errata = mv6_phy_errata,
  561. .enable_leds = mv6_enable_leds,
  562. .read_preamp = mv6_read_preamp,
  563. .reset_hc = mv6_reset_hc,
  564. .reset_flash = mv6_reset_flash,
  565. .reset_bus = mv_reset_pci_bus,
  566. };
  567. /*
  568. * module options
  569. */
  570. static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
  571. /* move to PCI layer or libata core? */
  572. static int pci_go_64(struct pci_dev *pdev)
  573. {
  574. int rc;
  575. if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
  576. rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
  577. if (rc) {
  578. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  579. if (rc) {
  580. dev_printk(KERN_ERR, &pdev->dev,
  581. "64-bit DMA enable failed\n");
  582. return rc;
  583. }
  584. }
  585. } else {
  586. rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  587. if (rc) {
  588. dev_printk(KERN_ERR, &pdev->dev,
  589. "32-bit DMA enable failed\n");
  590. return rc;
  591. }
  592. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  593. if (rc) {
  594. dev_printk(KERN_ERR, &pdev->dev,
  595. "32-bit consistent DMA enable failed\n");
  596. return rc;
  597. }
  598. }
  599. return rc;
  600. }
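/*
 * Illustrative only, not the driver's actual probe path: pci_go_64() is
 * meant to be called from the PCI probe routine once the device has been
 * enabled, roughly as sketched below (example_probe() is a made-up name).
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);	/* managed (devres) enable */
	if (rc)
		return rc;
	return pci_go_64(pdev);		/* choose 64- or 32-bit DMA masks */
}
#endif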
  601. /*
  602. * Functions
  603. */
  604. static inline void writelfl(unsigned long data, void __iomem *addr)
  605. {
  606. writel(data, addr);
  607. (void) readl(addr); /* flush to avoid PCI posted write */
  608. }
  609. static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
  610. {
  611. return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
  612. }
  613. static inline unsigned int mv_hc_from_port(unsigned int port)
  614. {
  615. return port >> MV_PORT_HC_SHIFT;
  616. }
  617. static inline unsigned int mv_hardport_from_port(unsigned int port)
  618. {
  619. return port & MV_PORT_MASK;
  620. }
  621. static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
  622. unsigned int port)
  623. {
  624. return mv_hc_base(base, mv_hc_from_port(port));
  625. }
  626. static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
  627. {
  628. return mv_hc_base_from_port(base, port) +
  629. MV_SATAHC_ARBTR_REG_SZ +
  630. (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
  631. }
  632. static inline void __iomem *mv_ap_base(struct ata_port *ap)
  633. {
  634. return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
  635. }
  636. static inline int mv_get_hc_count(unsigned long port_flags)
  637. {
  638. return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
  639. }
  640. static void mv_irq_clear(struct ata_port *ap)
  641. {
  642. }
  643. static void mv_set_edma_ptrs(void __iomem *port_mmio,
  644. struct mv_host_priv *hpriv,
  645. struct mv_port_priv *pp)
  646. {
  647. u32 index;
  648. /*
  649. * initialize request queue
  650. */
  651. index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
  652. WARN_ON(pp->crqb_dma & 0x3ff);
  653. writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
  654. writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
  655. port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
  656. if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
  657. writelfl((pp->crqb_dma & 0xffffffff) | index,
  658. port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
  659. else
  660. writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
  661. /*
  662. * initialize response queue
  663. */
  664. index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
  665. WARN_ON(pp->crpb_dma & 0xff);
  666. writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
  667. if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
  668. writelfl((pp->crpb_dma & 0xffffffff) | index,
  669. port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
  670. else
  671. writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
  672. writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
  673. port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
  674. }
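/*
 * A worked example of the pointer packing above, derived from the constants
 * rather than from documentation: with the CRQB ring at DMA address
 * 0x1f3ac00 (1 KB aligned) and req_idx == 5, the request in-pointer register
 * is written as
 *
 *	(0x1f3ac00 & EDMA_REQ_Q_BASE_LO_MASK) | (5 << EDMA_REQ_Q_PTR_SHIFT)
 *	== 0x1f3ac00 | 0xa0 == 0x1f3aca0
 *
 * i.e. bits 31:10 carry the ring base and bits 9:5 the slot index (each CRQB
 * is 32 bytes).  The response queue works the same way with 8-byte CRPBs:
 * shift 3, index in bits 7:3, 256-byte-aligned base in bits 31:8.
 */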
  675. /**
  676. * mv_start_dma - Enable eDMA engine
  677. * @base: port base address
  678. * @pp: port private data
  679. *
  680. * Verify the local cache of the eDMA state is accurate with a
  681. * WARN_ON.
  682. *
  683. * LOCKING:
  684. * Inherited from caller.
  685. */
  686. static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
  687. struct mv_port_priv *pp)
  688. {
  689. if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
  690. /* clear EDMA event indicators, if any */
  691. writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
  692. mv_set_edma_ptrs(base, hpriv, pp);
  693. writelfl(EDMA_EN, base + EDMA_CMD_OFS);
  694. pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
  695. }
  696. WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
  697. }
  698. /**
  699. * __mv_stop_dma - Disable eDMA engine
  700. * @ap: ATA channel to manipulate
  701. *
  702. * Verify the local cache of the eDMA state is accurate with a
  703. * WARN_ON.
  704. *
  705. * LOCKING:
  706. * Inherited from caller.
  707. */
  708. static int __mv_stop_dma(struct ata_port *ap)
  709. {
  710. void __iomem *port_mmio = mv_ap_base(ap);
  711. struct mv_port_priv *pp = ap->private_data;
  712. u32 reg;
  713. int i, err = 0;
  714. if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
  715. /* Disable EDMA if active. The disable bit auto clears.
  716. */
  717. writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
  718. pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
  719. } else {
  720. WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
  721. }
  722. /* now properly wait for the eDMA to stop */
  723. for (i = 1000; i > 0; i--) {
  724. reg = readl(port_mmio + EDMA_CMD_OFS);
  725. if (!(reg & EDMA_EN))
  726. break;
  727. udelay(100);
  728. }
  729. if (reg & EDMA_EN) {
  730. ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
  731. err = -EIO;
  732. }
  733. return err;
  734. }
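/* Note: the polling loop above gives the EDMA engine up to
 * 1000 iterations * udelay(100), i.e. roughly 100 ms, to clear EDMA_EN
 * before reporting -EIO.
 */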
  735. static int mv_stop_dma(struct ata_port *ap)
  736. {
  737. unsigned long flags;
  738. int rc;
  739. spin_lock_irqsave(&ap->host->lock, flags);
  740. rc = __mv_stop_dma(ap);
  741. spin_unlock_irqrestore(&ap->host->lock, flags);
  742. return rc;
  743. }
  744. #ifdef ATA_DEBUG
  745. static void mv_dump_mem(void __iomem *start, unsigned bytes)
  746. {
  747. int b, w;
  748. for (b = 0; b < bytes; ) {
  749. DPRINTK("%p: ", start + b);
  750. for (w = 0; b < bytes && w < 4; w++) {
  751. printk("%08x ",readl(start + b));
  752. b += sizeof(u32);
  753. }
  754. printk("\n");
  755. }
  756. }
  757. #endif
  758. static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
  759. {
  760. #ifdef ATA_DEBUG
  761. int b, w;
  762. u32 dw;
  763. for (b = 0; b < bytes; ) {
  764. DPRINTK("%02x: ", b);
  765. for (w = 0; b < bytes && w < 4; w++) {
  766. (void) pci_read_config_dword(pdev,b,&dw);
  767. printk("%08x ",dw);
  768. b += sizeof(u32);
  769. }
  770. printk("\n");
  771. }
  772. #endif
  773. }
  774. static void mv_dump_all_regs(void __iomem *mmio_base, int port,
  775. struct pci_dev *pdev)
  776. {
  777. #ifdef ATA_DEBUG
  778. void __iomem *hc_base = mv_hc_base(mmio_base,
  779. port >> MV_PORT_HC_SHIFT);
  780. void __iomem *port_base;
  781. int start_port, num_ports, p, start_hc, num_hcs, hc;
  782. if (0 > port) {
  783. start_hc = start_port = 0;
  784. num_ports = 8; /* should be benign for 4 port devs */
  785. num_hcs = 2;
  786. } else {
  787. start_hc = port >> MV_PORT_HC_SHIFT;
  788. start_port = port;
  789. num_ports = num_hcs = 1;
  790. }
  791. DPRINTK("All registers for port(s) %u-%u:\n", start_port,
  792. num_ports > 1 ? num_ports - 1 : start_port);
  793. if (NULL != pdev) {
  794. DPRINTK("PCI config space regs:\n");
  795. mv_dump_pci_cfg(pdev, 0x68);
  796. }
  797. DPRINTK("PCI regs:\n");
  798. mv_dump_mem(mmio_base+0xc00, 0x3c);
  799. mv_dump_mem(mmio_base+0xd00, 0x34);
  800. mv_dump_mem(mmio_base+0xf00, 0x4);
  801. mv_dump_mem(mmio_base+0x1d00, 0x6c);
  802. for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
  803. hc_base = mv_hc_base(mmio_base, hc);
  804. DPRINTK("HC regs (HC %i):\n", hc);
  805. mv_dump_mem(hc_base, 0x1c);
  806. }
  807. for (p = start_port; p < start_port + num_ports; p++) {
  808. port_base = mv_port_base(mmio_base, p);
  809. DPRINTK("EDMA regs (port %i):\n",p);
  810. mv_dump_mem(port_base, 0x54);
  811. DPRINTK("SATA regs (port %i):\n",p);
  812. mv_dump_mem(port_base+0x300, 0x60);
  813. }
  814. #endif
  815. }
  816. static unsigned int mv_scr_offset(unsigned int sc_reg_in)
  817. {
  818. unsigned int ofs;
  819. switch (sc_reg_in) {
  820. case SCR_STATUS:
  821. case SCR_CONTROL:
  822. case SCR_ERROR:
  823. ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
  824. break;
  825. case SCR_ACTIVE:
  826. ofs = SATA_ACTIVE_OFS; /* active is not with the others */
  827. break;
  828. default:
  829. ofs = 0xffffffffU;
  830. break;
  831. }
  832. return ofs;
  833. }
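/*
 * Resulting offsets, assuming libata's usual SCR numbering (SCR_STATUS == 0,
 * SCR_ERROR == 1, SCR_CONTROL == 2, SCR_ACTIVE == 3):
 *
 *	SCR_STATUS  -> 0x300
 *	SCR_ERROR   -> 0x304
 *	SCR_CONTROL -> 0x308
 *	SCR_ACTIVE  -> 0x350	(kept apart from the other three)
 */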
  834. static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
  835. {
  836. unsigned int ofs = mv_scr_offset(sc_reg_in);
  837. if (ofs != 0xffffffffU) {
  838. *val = readl(mv_ap_base(ap) + ofs);
  839. return 0;
  840. } else
  841. return -EINVAL;
  842. }
  843. static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  844. {
  845. unsigned int ofs = mv_scr_offset(sc_reg_in);
  846. if (ofs != 0xffffffffU) {
  847. writelfl(val, mv_ap_base(ap) + ofs);
  848. return 0;
  849. } else
  850. return -EINVAL;
  851. }
  852. static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
  853. void __iomem *port_mmio)
  854. {
  855. u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
  856. /* set up non-NCQ EDMA configuration */
  857. cfg &= ~(1 << 9); /* disable eQue */
  858. if (IS_GEN_I(hpriv)) {
  859. cfg &= ~0x1f; /* clear queue depth */
  860. cfg |= (1 << 8); /* enab config burst size mask */
  861. }
  862. else if (IS_GEN_II(hpriv)) {
  863. cfg &= ~0x1f; /* clear queue depth */
  864. cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
  865. cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
  866. }
  867. else if (IS_GEN_IIE(hpriv)) {
  868. cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
  869. cfg |= (1 << 22); /* enab 4-entry host queue cache */
  870. cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
  871. cfg |= (1 << 18); /* enab early completion */
  872. cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
  873. cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
  874. cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
  875. }
  876. writelfl(cfg, port_mmio + EDMA_CFG_OFS);
  877. }
  878. /**
  879. * mv_port_start - Port specific init/start routine.
  880. * @ap: ATA channel to manipulate
  881. *
  882. * Allocate and point to DMA memory, init port private memory,
  883. * zero indices.
  884. *
  885. * LOCKING:
  886. * Inherited from caller.
  887. */
  888. static int mv_port_start(struct ata_port *ap)
  889. {
  890. struct device *dev = ap->host->dev;
  891. struct mv_host_priv *hpriv = ap->host->private_data;
  892. struct mv_port_priv *pp;
  893. void __iomem *port_mmio = mv_ap_base(ap);
  894. void *mem;
  895. dma_addr_t mem_dma;
  896. unsigned long flags;
  897. int rc;
  898. pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
  899. if (!pp)
  900. return -ENOMEM;
  901. mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
  902. GFP_KERNEL);
  903. if (!mem)
  904. return -ENOMEM;
  905. memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
  906. rc = ata_pad_alloc(ap, dev);
  907. if (rc)
  908. return rc;
  909. /* First item in chunk of DMA memory:
  910. * 32-slot command request table (CRQB), 32 bytes each in size
  911. */
  912. pp->crqb = mem;
  913. pp->crqb_dma = mem_dma;
  914. mem += MV_CRQB_Q_SZ;
  915. mem_dma += MV_CRQB_Q_SZ;
  916. /* Second item:
  917. * 32-slot command response table (CRPB), 8 bytes each in size
  918. */
  919. pp->crpb = mem;
  920. pp->crpb_dma = mem_dma;
  921. mem += MV_CRPB_Q_SZ;
  922. mem_dma += MV_CRPB_Q_SZ;
  923. /* Third item:
  924. * Table of scatter-gather descriptors (ePRD), 16 bytes each
  925. */
  926. pp->sg_tbl = mem;
  927. pp->sg_tbl_dma = mem_dma;
  928. spin_lock_irqsave(&ap->host->lock, flags);
  929. mv_edma_cfg(ap, hpriv, port_mmio);
  930. mv_set_edma_ptrs(port_mmio, hpriv, pp);
  931. spin_unlock_irqrestore(&ap->host->lock, flags);
  932. /* Don't turn on EDMA here...do it before DMA commands only. Else
  933. * we'll be unable to send non-data, PIO, etc due to restricted access
  934. * to shadow regs.
  935. */
  936. ap->private_data = pp;
  937. return 0;
  938. }
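/*
 * For reference, the single dmam_alloc_coherent() buffer carved up above
 * lays out as follows (offsets derived from the MV_*_SZ constants; a
 * summary, not an additional requirement):
 *
 *	0x000	CRQB ring	32 slots * 32 B    = 1024 B
 *	0x400	CRPB ring	32 slots *  8 B    =  256 B
 *	0x500	ePRD table	176 entries * 16 B = 2816 B
 *	-----						------
 *	total					     4096 B == MV_PORT_PRIV_DMA_SZ
 */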
  939. /**
  940. * mv_port_stop - Port specific cleanup/stop routine.
  941. * @ap: ATA channel to manipulate
  942. *
  943. * Stop DMA, cleanup port memory.
  944. *
  945. * LOCKING:
  946. * This routine uses the host lock to protect the DMA stop.
  947. */
  948. static void mv_port_stop(struct ata_port *ap)
  949. {
  950. mv_stop_dma(ap);
  951. }
  952. /**
  953. * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
  954. * @qc: queued command whose SG list to source from
  955. *
  956. * Populate the SG list and mark the last entry.
  957. *
  958. * LOCKING:
  959. * Inherited from caller.
  960. */
  961. static void mv_fill_sg(struct ata_queued_cmd *qc)
  962. {
  963. struct mv_port_priv *pp = qc->ap->private_data;
  964. struct scatterlist *sg;
  965. struct mv_sg *mv_sg, *last_sg = NULL;
  966. mv_sg = pp->sg_tbl;
  967. ata_for_each_sg(sg, qc) {
  968. dma_addr_t addr = sg_dma_address(sg);
  969. u32 sg_len = sg_dma_len(sg);
  970. while (sg_len) {
  971. u32 offset = addr & 0xffff;
  972. u32 len = sg_len;
  973. if ((offset + sg_len > 0x10000))
  974. len = 0x10000 - offset;
  975. mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
  976. mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
  977. mv_sg->flags_size = cpu_to_le32(len & 0xffff);
  978. sg_len -= len;
  979. addr += len;
  980. last_sg = mv_sg;
  981. mv_sg++;
  982. }
  983. }
  984. if (likely(last_sg))
  985. last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
  986. }
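/*
 * Worked example of the 64 KB clipping above (made-up addresses, purely for
 * illustration): an S/G segment of 0x3000 bytes at DMA address 0x1234f000
 * starts at offset 0xf000 within its 64 KB region, so the first ePRD is
 * clipped to len == 0x10000 - 0xf000 == 0x1000, and a second ePRD covers the
 * remaining 0x2000 bytes starting at 0x12350000.  The length lives in the
 * low 16 bits of flags_size, which is why no single ePRD ever describes more
 * than 64 KB and why MV_DMA_BOUNDARY is 0xffff (see the dma_boundary field
 * in the SCSI host templates above).
 */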
  987. static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
  988. {
  989. u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
  990. (last ? CRQB_CMD_LAST : 0);
  991. *cmdw = cpu_to_le16(tmp);
  992. }
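/*
 * Worked example of the packing above (assuming the usual libata values
 * ATA_CMD_WRITE == 0xCA and ATA_REG_CMD == 0x07): the final word written by
 * mv_qc_prep() below,
 *
 *	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);
 *
 * comes out as 0xCA | (0x07 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST == 0x97ca:
 * register value in bits 7:0, register address in bits 10:8, the CRQB_CMD_CS
 * field in bits 12:11, and the "last command word" marker in bit 15.
 */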
  993. /**
  994. * mv_qc_prep - Host specific command preparation.
  995. * @qc: queued command to prepare
  996. *
  997. * This routine simply redirects to the general purpose routine
  998. * if command is not DMA. Else, it handles prep of the CRQB
  999. * (command request block), does some sanity checking, and calls
  1000. * the SG load routine.
  1001. *
  1002. * LOCKING:
  1003. * Inherited from caller.
  1004. */
  1005. static void mv_qc_prep(struct ata_queued_cmd *qc)
  1006. {
  1007. struct ata_port *ap = qc->ap;
  1008. struct mv_port_priv *pp = ap->private_data;
  1009. __le16 *cw;
  1010. struct ata_taskfile *tf;
  1011. u16 flags = 0;
  1012. unsigned in_index;
  1013. if (qc->tf.protocol != ATA_PROT_DMA)
  1014. return;
  1015. /* Fill in command request block
  1016. */
  1017. if (!(qc->tf.flags & ATA_TFLAG_WRITE))
  1018. flags |= CRQB_FLAG_READ;
  1019. WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
  1020. flags |= qc->tag << CRQB_TAG_SHIFT;
  1021. flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
  1022. /* get current queue index from software */
  1023. in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
  1024. pp->crqb[in_index].sg_addr =
  1025. cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
  1026. pp->crqb[in_index].sg_addr_hi =
  1027. cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
  1028. pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
  1029. cw = &pp->crqb[in_index].ata_cmd[0];
  1030. tf = &qc->tf;
  1031. /* Sadly, the CRQB cannot accommodate all registers--there are
  1032. * only 11 bytes...so we must pick and choose required
  1033. * registers based on the command. So, we drop feature and
  1034. * hob_feature for [RW] DMA commands, but they are needed for
  1035. * NCQ. NCQ will drop hob_nsect.
  1036. */
  1037. switch (tf->command) {
  1038. case ATA_CMD_READ:
  1039. case ATA_CMD_READ_EXT:
  1040. case ATA_CMD_WRITE:
  1041. case ATA_CMD_WRITE_EXT:
  1042. case ATA_CMD_WRITE_FUA_EXT:
  1043. mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
  1044. break;
  1045. #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
  1046. case ATA_CMD_FPDMA_READ:
  1047. case ATA_CMD_FPDMA_WRITE:
  1048. mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
  1049. mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
  1050. break;
  1051. #endif /* FIXME: remove this line when NCQ added */
  1052. default:
  1053. /* The only other commands EDMA supports in non-queued and
  1054. * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
  1055. * of which are defined/used by Linux. If we get here, this
  1056. * driver needs work.
  1057. *
  1058. * FIXME: modify libata to give qc_prep a return value and
  1059. * return error here.
  1060. */
  1061. BUG_ON(tf->command);
  1062. break;
  1063. }
  1064. mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
  1065. mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
  1066. mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
  1067. mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
  1068. mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
  1069. mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
  1070. mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
  1071. mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
  1072. mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
  1073. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  1074. return;
  1075. mv_fill_sg(qc);
  1076. }
  1077. /**
  1078. * mv_qc_prep_iie - Host specific command preparation.
  1079. * @qc: queued command to prepare
  1080. *
  1081. * This routine simply redirects to the general purpose routine
  1082. * if command is not DMA. Else, it handles prep of the CRQB
  1083. * (command request block), does some sanity checking, and calls
  1084. * the SG load routine.
  1085. *
  1086. * LOCKING:
  1087. * Inherited from caller.
  1088. */
  1089. static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
  1090. {
  1091. struct ata_port *ap = qc->ap;
  1092. struct mv_port_priv *pp = ap->private_data;
  1093. struct mv_crqb_iie *crqb;
  1094. struct ata_taskfile *tf;
  1095. unsigned in_index;
  1096. u32 flags = 0;
  1097. if (qc->tf.protocol != ATA_PROT_DMA)
  1098. return;
  1099. /* Fill in Gen IIE command request block
  1100. */
  1101. if (!(qc->tf.flags & ATA_TFLAG_WRITE))
  1102. flags |= CRQB_FLAG_READ;
  1103. WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
  1104. flags |= qc->tag << CRQB_TAG_SHIFT;
  1105. flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
  1106. what we use as our tag */
  1107. /* get current queue index from software */
  1108. in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
  1109. crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
  1110. crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
  1111. crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
  1112. crqb->flags = cpu_to_le32(flags);
  1113. tf = &qc->tf;
  1114. crqb->ata_cmd[0] = cpu_to_le32(
  1115. (tf->command << 16) |
  1116. (tf->feature << 24)
  1117. );
  1118. crqb->ata_cmd[1] = cpu_to_le32(
  1119. (tf->lbal << 0) |
  1120. (tf->lbam << 8) |
  1121. (tf->lbah << 16) |
  1122. (tf->device << 24)
  1123. );
  1124. crqb->ata_cmd[2] = cpu_to_le32(
  1125. (tf->hob_lbal << 0) |
  1126. (tf->hob_lbam << 8) |
  1127. (tf->hob_lbah << 16) |
  1128. (tf->hob_feature << 24)
  1129. );
  1130. crqb->ata_cmd[3] = cpu_to_le32(
  1131. (tf->nsect << 0) |
  1132. (tf->hob_nsect << 8)
  1133. );
  1134. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  1135. return;
  1136. mv_fill_sg(qc);
  1137. }
  1138. /**
  1139. * mv_qc_issue - Initiate a command to the host
  1140. * @qc: queued command to start
  1141. *
  1142. * This routine simply redirects to the general purpose routine
  1143. * if command is not DMA. Else, it sanity checks our local
  1144. * caches of the request producer/consumer indices then enables
  1145. * DMA and bumps the request producer index.
  1146. *
  1147. * LOCKING:
  1148. * Inherited from caller.
  1149. */
  1150. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
  1151. {
  1152. struct ata_port *ap = qc->ap;
  1153. void __iomem *port_mmio = mv_ap_base(ap);
  1154. struct mv_port_priv *pp = ap->private_data;
  1155. struct mv_host_priv *hpriv = ap->host->private_data;
  1156. u32 in_index;
  1157. if (qc->tf.protocol != ATA_PROT_DMA) {
  1158. /* We're about to send a non-EDMA capable command to the
  1159. * port. Turn off EDMA so there won't be problems accessing
  1160. * shadow block, etc registers.
  1161. */
  1162. __mv_stop_dma(ap);
  1163. return ata_qc_issue_prot(qc);
  1164. }
  1165. mv_start_dma(port_mmio, hpriv, pp);
  1166. in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
  1167. /* until we do queuing, the queue should be empty at this point */
  1168. WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
  1169. >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
  1170. pp->req_idx++;
  1171. in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
  1172. /* and write the request in pointer to kick the EDMA to life */
  1173. writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
  1174. port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
  1175. return 0;
  1176. }
  1177. /**
  1178. * mv_err_intr - Handle error interrupts on the port
  1179. * @ap: ATA channel to manipulate
  1180. * @qc: affected queued command (may be NULL)
  1181. *
  1182. * In most cases, just clear the interrupt and move on. However,
  1183. * some cases require an eDMA reset, which is done right before
  1184. * the COMRESET in mv_phy_reset(). The SERR case requires a
  1185. * clear of pending errors in the SATA SERROR register. Finally,
  1186. * if the port disabled DMA, update our cached copy to match.
  1187. *
  1188. * LOCKING:
  1189. * Inherited from caller.
  1190. */
  1191. static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
  1192. {
  1193. void __iomem *port_mmio = mv_ap_base(ap);
  1194. u32 edma_err_cause, eh_freeze_mask, serr = 0;
  1195. struct mv_port_priv *pp = ap->private_data;
  1196. struct mv_host_priv *hpriv = ap->host->private_data;
  1197. unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
  1198. unsigned int action = 0, err_mask = 0;
  1199. struct ata_eh_info *ehi = &ap->link.eh_info;
  1200. ata_ehi_clear_desc(ehi);
  1201. if (!edma_enabled) {
  1202. /* just a guess: do we need to do this? should we
  1203. * expand this, and do it in all cases?
  1204. */
  1205. sata_scr_read(&ap->link, SCR_ERROR, &serr);
  1206. sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
  1207. }
  1208. edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  1209. ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
  1210. /*
  1211. * all generations share these EDMA error cause bits
  1212. */
  1213. if (edma_err_cause & EDMA_ERR_DEV)
  1214. err_mask |= AC_ERR_DEV;
  1215. if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
  1216. EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
  1217. EDMA_ERR_INTRL_PAR)) {
  1218. err_mask |= AC_ERR_ATA_BUS;
  1219. action |= ATA_EH_HARDRESET;
  1220. ata_ehi_push_desc(ehi, "parity error");
  1221. }
  1222. if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
  1223. ata_ehi_hotplugged(ehi);
  1224. ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
  1225. "dev disconnect" : "dev connect");
  1226. }
  1227. if (IS_GEN_I(hpriv)) {
  1228. eh_freeze_mask = EDMA_EH_FREEZE_5;
  1229. if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
  1230. struct mv_port_priv *pp = ap->private_data;
  1231. pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
  1232. ata_ehi_push_desc(ehi, "EDMA self-disable");
  1233. }
  1234. } else {
  1235. eh_freeze_mask = EDMA_EH_FREEZE;
  1236. if (edma_err_cause & EDMA_ERR_SELF_DIS) {
  1237. struct mv_port_priv *pp = ap->private_data;
  1238. pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
  1239. ata_ehi_push_desc(ehi, "EDMA self-disable");
  1240. }
  1241. if (edma_err_cause & EDMA_ERR_SERR) {
  1242. sata_scr_read(&ap->link, SCR_ERROR, &serr);
  1243. sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1244. err_mask |= AC_ERR_ATA_BUS;
  1245. action |= ATA_EH_HARDRESET;
  1246. }
  1247. }
  1248. /* Clear EDMA now that SERR cleanup done */
  1249. writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  1250. if (!err_mask) {
  1251. err_mask = AC_ERR_OTHER;
  1252. action |= ATA_EH_HARDRESET;
  1253. }
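/*
 * Hand everything off to libata EH: accumulate SError and the requested
 * recovery actions in the EH info, attach the error mask to the failing
 * command if we have one (otherwise to the port), and then either freeze
 * the port (for errors in the freeze mask) or just abort the outstanding
 * commands.
 */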
  1254. ehi->serror |= serr;
  1255. ehi->action |= action;
  1256. if (qc)
  1257. qc->err_mask |= err_mask;
  1258. else
  1259. ehi->err_mask |= err_mask;
  1260. if (edma_err_cause & eh_freeze_mask)
  1261. ata_port_freeze(ap);
  1262. else
  1263. ata_port_abort(ap);
  1264. }
  1265. static void mv_intr_pio(struct ata_port *ap)
  1266. {
  1267. struct ata_queued_cmd *qc;
  1268. u8 ata_status;
  1269. /* ignore spurious intr if drive still BUSY */
  1270. ata_status = readb(ap->ioaddr.status_addr);
  1271. if (unlikely(ata_status & ATA_BUSY))
  1272. return;
  1273. /* get active ATA command */
  1274. qc = ata_qc_from_tag(ap, ap->link.active_tag);
  1275. if (unlikely(!qc)) /* no active tag */
  1276. return;
  1277. if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
  1278. return;
  1279. /* and finally, complete the ATA command */
  1280. qc->err_mask |= ac_err_mask(ata_status);
  1281. ata_qc_complete(qc);
  1282. }
  1283. static void mv_intr_edma(struct ata_port *ap)
  1284. {
  1285. void __iomem *port_mmio = mv_ap_base(ap);
  1286. struct mv_host_priv *hpriv = ap->host->private_data;
  1287. struct mv_port_priv *pp = ap->private_data;
  1288. struct ata_queued_cmd *qc;
  1289. u32 out_index, in_index;
  1290. bool work_done = false;
  1291. /* get h/w response queue pointer */
  1292. in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
  1293. >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
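/*
 * The response queue is a ring shared with the hardware: EDMA writes a
 * CRPB for each completed command and advances the in-pointer read above,
 * while this loop consumes entries at the software out-pointer until the
 * two indices meet.
 */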
  1294. while (1) {
  1295. u16 status;
  1296. unsigned int tag;
  1297. /* get s/w response queue last-read pointer, and compare */
  1298. out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
  1299. if (in_index == out_index)
  1300. break;
  1301. /* 50xx: get active ATA command */
  1302. if (IS_GEN_I(hpriv))
  1303. tag = ap->link.active_tag;
  1304. /* Gen II/IIE: get active ATA command via tag, to enable
  1305. * support for queueing. this works transparently for
  1306. * queued and non-queued modes.
  1307. */
  1308. else if (IS_GEN_II(hpriv))
  1309. tag = (le16_to_cpu(pp->crpb[out_index].id)
  1310. >> CRPB_IOID_SHIFT_6) & 0x3f;
  1311. else /* IS_GEN_IIE */
  1312. tag = (le16_to_cpu(pp->crpb[out_index].id)
  1313. >> CRPB_IOID_SHIFT_7) & 0x3f;
  1314. qc = ata_qc_from_tag(ap, tag);
  1315. /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
  1316. * bits (WARNING: might not necessarily be associated
  1317. * with this command), which -should- be clear
  1318. * if all is well
  1319. */
  1320. status = le16_to_cpu(pp->crpb[out_index].flags);
  1321. if (unlikely(status & 0xff)) {
  1322. mv_err_intr(ap, qc);
  1323. return;
  1324. }
  1325. /* and finally, complete the ATA command */
  1326. if (qc) {
  1327. qc->err_mask |=
  1328. ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
  1329. ata_qc_complete(qc);
  1330. }
  1331. /* advance software response queue pointer, to
  1332. * indicate (after the loop completes) to hardware
  1333. * that we have consumed a response queue entry.
  1334. */
  1335. work_done = true;
  1336. pp->resp_idx++;
  1337. }
  1338. if (work_done)
  1339. writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
  1340. (out_index << EDMA_RSP_Q_PTR_SHIFT),
  1341. port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
  1342. }
  1343. /**
  1344. * mv_host_intr - Handle all interrupts on the given host controller
  1345. * @host: host specific structure
  1346. * @relevant: port error bits relevant to this host controller
  1347. * @hc: which host controller we're to look at
  1348. *
  1349. * Read then write clear the HC interrupt status then walk each
  1350. * port connected to the HC and see if it needs servicing. Port
1351. * success ints are reported in the HC interrupt status reg; the
  1352. * port error ints are reported in the higher level main
  1353. * interrupt status register and thus are passed in via the
  1354. * 'relevant' argument.
  1355. *
  1356. * LOCKING:
  1357. * Inherited from caller.
  1358. */
  1359. static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
  1360. {
  1361. void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
  1362. void __iomem *hc_mmio = mv_hc_base(mmio, hc);
  1363. u32 hc_irq_cause;
  1364. int port, port0;
  1365. if (hc == 0)
  1366. port0 = 0;
  1367. else
  1368. port0 = MV_PORTS_PER_HC;
  1369. /* we'll need the HC success int register in most cases */
  1370. hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
  1371. if (!hc_irq_cause)
  1372. return;
  1373. writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
  1374. VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1375. hc, relevant, hc_irq_cause);
  1376. for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
  1377. struct ata_port *ap = host->ports[port];
  1378. struct mv_port_priv *pp = ap->private_data;
  1379. int have_err_bits, hard_port, shift;
  1380. if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
  1381. continue;
  1382. shift = port << 1; /* (port * 2) */
  1383. if (port >= MV_PORTS_PER_HC) {
  1384. shift++; /* skip bit 8 in the HC Main IRQ reg */
  1385. }
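/*
 * Each port owns an err/done bit pair in the main cause register, so its
 * error bit normally sits at (PORT0_ERR << (port * 2)); ports behind the
 * second host controller are shifted up one more to skip bit 8 between
 * the two groups (so, assuming PORT0_ERR is bit 0, port 5's error bit
 * would be bit 11).
 */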
  1386. have_err_bits = ((PORT0_ERR << shift) & relevant);
  1387. if (unlikely(have_err_bits)) {
  1388. struct ata_queued_cmd *qc;
  1389. qc = ata_qc_from_tag(ap, ap->link.active_tag);
  1390. if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
  1391. continue;
  1392. mv_err_intr(ap, qc);
  1393. continue;
  1394. }
  1395. hard_port = mv_hardport_from_port(port); /* range 0..3 */
  1396. if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
  1397. if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
  1398. mv_intr_edma(ap);
  1399. } else {
  1400. if ((DEV_IRQ << hard_port) & hc_irq_cause)
  1401. mv_intr_pio(ap);
  1402. }
  1403. }
  1404. VPRINTK("EXIT\n");
  1405. }
  1406. static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
  1407. {
  1408. struct ata_port *ap;
  1409. struct ata_queued_cmd *qc;
  1410. struct ata_eh_info *ehi;
  1411. unsigned int i, err_mask, printed = 0;
  1412. u32 err_cause;
  1413. err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
  1414. dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
  1415. err_cause);
  1416. DPRINTK("All regs @ PCI error\n");
  1417. mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
  1418. writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
  1419. for (i = 0; i < host->n_ports; i++) {
  1420. ap = host->ports[i];
  1421. if (!ata_link_offline(&ap->link)) {
  1422. ehi = &ap->link.eh_info;
  1423. ata_ehi_clear_desc(ehi);
  1424. if (!printed++)
  1425. ata_ehi_push_desc(ehi,
  1426. "PCI err cause 0x%08x", err_cause);
  1427. err_mask = AC_ERR_HOST_BUS;
  1428. ehi->action = ATA_EH_HARDRESET;
  1429. qc = ata_qc_from_tag(ap, ap->link.active_tag);
  1430. if (qc)
  1431. qc->err_mask |= err_mask;
  1432. else
  1433. ehi->err_mask |= err_mask;
  1434. ata_port_freeze(ap);
  1435. }
  1436. }
  1437. }
  1438. /**
  1439. * mv_interrupt - Main interrupt event handler
  1440. * @irq: unused
  1441. * @dev_instance: private data; in this case the host structure
  1442. *
  1443. * Read the read only register to determine if any host
  1444. * controllers have pending interrupts. If so, call lower level
  1445. * routine to handle. Also check for PCI errors which are only
  1446. * reported here.
  1447. *
  1448. * LOCKING:
  1449. * This routine holds the host lock while processing pending
  1450. * interrupts.
  1451. */
  1452. static irqreturn_t mv_interrupt(int irq, void *dev_instance)
  1453. {
  1454. struct ata_host *host = dev_instance;
  1455. unsigned int hc, handled = 0, n_hcs;
  1456. void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
  1457. u32 irq_stat;
  1458. irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
  1459. /* check the cases where we either have nothing pending or have read
  1460. * a bogus register value which can indicate HW removal or PCI fault
  1461. */
  1462. if (!irq_stat || (0xffffffffU == irq_stat))
  1463. return IRQ_NONE;
  1464. n_hcs = mv_get_hc_count(host->ports[0]->flags);
  1465. spin_lock(&host->lock);
  1466. if (unlikely(irq_stat & PCI_ERR)) {
  1467. mv_pci_error(host, mmio);
  1468. handled = 1;
  1469. goto out_unlock; /* skip all other HC irq handling */
  1470. }
  1471. for (hc = 0; hc < n_hcs; hc++) {
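/*
 * Each host controller owns its own slice of the main cause register
 * (HC0_IRQ_PEND shifted up by hc * HC_SHIFT), so 'relevant' below
 * carries only this HC's port interrupt bits.
 */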
  1472. u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
  1473. if (relevant) {
  1474. mv_host_intr(host, relevant, hc);
  1475. handled = 1;
  1476. }
  1477. }
  1478. out_unlock:
  1479. spin_unlock(&host->lock);
  1480. return IRQ_RETVAL(handled);
  1481. }
  1482. static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
  1483. {
  1484. void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
  1485. unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
  1486. return hc_mmio + ofs;
  1487. }
  1488. static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
  1489. {
  1490. unsigned int ofs;
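/*
 * On Gen-I parts the SStatus/SError/SControl registers appear as
 * consecutive 32-bit words at the start of the per-port PHY block, so
 * the offset is simply the SCR index times sizeof(u32); any other SCR
 * is rejected by returning an all-ones offset.
 */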
  1491. switch (sc_reg_in) {
  1492. case SCR_STATUS:
  1493. case SCR_ERROR:
  1494. case SCR_CONTROL:
  1495. ofs = sc_reg_in * sizeof(u32);
  1496. break;
  1497. default:
  1498. ofs = 0xffffffffU;
  1499. break;
  1500. }
  1501. return ofs;
  1502. }
  1503. static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
  1504. {
  1505. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1506. void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
  1507. unsigned int ofs = mv5_scr_offset(sc_reg_in);
  1508. if (ofs != 0xffffffffU) {
  1509. *val = readl(addr + ofs);
  1510. return 0;
  1511. } else
  1512. return -EINVAL;
  1513. }
  1514. static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  1515. {
  1516. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1517. void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
  1518. unsigned int ofs = mv5_scr_offset(sc_reg_in);
  1519. if (ofs != 0xffffffffU) {
  1520. writelfl(val, addr + ofs);
  1521. return 0;
  1522. } else
  1523. return -EINVAL;
  1524. }
  1525. static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
  1526. {
  1527. int early_5080;
  1528. early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
  1529. if (!early_5080) {
  1530. u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
  1531. tmp |= (1 << 0);
  1532. writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
  1533. }
  1534. mv_reset_pci_bus(pdev, mmio);
  1535. }
  1536. static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
  1537. {
  1538. writel(0x0fcfffff, mmio + MV_FLASH_CTL);
  1539. }
  1540. static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
  1541. void __iomem *mmio)
  1542. {
  1543. void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
  1544. u32 tmp;
  1545. tmp = readl(phy_mmio + MV5_PHY_MODE);
  1546. hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
  1547. hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
  1548. }
  1549. static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
  1550. {
  1551. u32 tmp;
  1552. writel(0, mmio + MV_GPIO_PORT_CTL);
  1553. /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
  1554. tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
  1555. tmp |= ~(1 << 0);
  1556. writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
  1557. }
  1558. static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  1559. unsigned int port)
  1560. {
  1561. void __iomem *phy_mmio = mv5_phy_base(mmio, port);
  1562. const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
  1563. u32 tmp;
  1564. int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
  1565. if (fix_apm_sq) {
  1566. tmp = readl(phy_mmio + MV5_LT_MODE);
  1567. tmp |= (1 << 19);
  1568. writel(tmp, phy_mmio + MV5_LT_MODE);
  1569. tmp = readl(phy_mmio + MV5_PHY_CTL);
  1570. tmp &= ~0x3;
  1571. tmp |= 0x1;
  1572. writel(tmp, phy_mmio + MV5_PHY_CTL);
  1573. }
  1574. tmp = readl(phy_mmio + MV5_PHY_MODE);
  1575. tmp &= ~mask;
  1576. tmp |= hpriv->signal[port].pre;
  1577. tmp |= hpriv->signal[port].amps;
  1578. writel(tmp, phy_mmio + MV5_PHY_MODE);
  1579. }
  1580. #undef ZERO
  1581. #define ZERO(reg) writel(0, port_mmio + (reg))
  1582. static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
  1583. unsigned int port)
  1584. {
  1585. void __iomem *port_mmio = mv_port_base(mmio, port);
  1586. writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
  1587. mv_channel_reset(hpriv, mmio, port);
  1588. ZERO(0x028); /* command */
  1589. writel(0x11f, port_mmio + EDMA_CFG_OFS);
  1590. ZERO(0x004); /* timer */
  1591. ZERO(0x008); /* irq err cause */
  1592. ZERO(0x00c); /* irq err mask */
  1593. ZERO(0x010); /* rq bah */
  1594. ZERO(0x014); /* rq inp */
  1595. ZERO(0x018); /* rq outp */
  1596. ZERO(0x01c); /* respq bah */
  1597. ZERO(0x024); /* respq outp */
  1598. ZERO(0x020); /* respq inp */
  1599. ZERO(0x02c); /* test control */
  1600. writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
  1601. }
  1602. #undef ZERO
  1603. #define ZERO(reg) writel(0, hc_mmio + (reg))
  1604. static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  1605. unsigned int hc)
  1606. {
  1607. void __iomem *hc_mmio = mv_hc_base(mmio, hc);
  1608. u32 tmp;
  1609. ZERO(0x00c);
  1610. ZERO(0x010);
  1611. ZERO(0x014);
  1612. ZERO(0x018);
  1613. tmp = readl(hc_mmio + 0x20);
  1614. tmp &= 0x1c1c1c1c;
  1615. tmp |= 0x03030303;
  1616. writel(tmp, hc_mmio + 0x20);
  1617. }
  1618. #undef ZERO
  1619. static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  1620. unsigned int n_hc)
  1621. {
  1622. unsigned int hc, port;
  1623. for (hc = 0; hc < n_hc; hc++) {
  1624. for (port = 0; port < MV_PORTS_PER_HC; port++)
  1625. mv5_reset_hc_port(hpriv, mmio,
  1626. (hc * MV_PORTS_PER_HC) + port);
  1627. mv5_reset_one_hc(hpriv, mmio, hc);
  1628. }
  1629. return 0;
  1630. }
  1631. #undef ZERO
  1632. #define ZERO(reg) writel(0, mmio + (reg))
  1633. static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
  1634. {
  1635. u32 tmp;
  1636. tmp = readl(mmio + MV_PCI_MODE);
  1637. tmp &= 0xff00ffff;
  1638. writel(tmp, mmio + MV_PCI_MODE);
  1639. ZERO(MV_PCI_DISC_TIMER);
  1640. ZERO(MV_PCI_MSI_TRIGGER);
  1641. writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
  1642. ZERO(HC_MAIN_IRQ_MASK_OFS);
  1643. ZERO(MV_PCI_SERR_MASK);
  1644. ZERO(PCI_IRQ_CAUSE_OFS);
  1645. ZERO(PCI_IRQ_MASK_OFS);
  1646. ZERO(MV_PCI_ERR_LOW_ADDRESS);
  1647. ZERO(MV_PCI_ERR_HIGH_ADDRESS);
  1648. ZERO(MV_PCI_ERR_ATTRIBUTE);
  1649. ZERO(MV_PCI_ERR_COMMAND);
  1650. }
  1651. #undef ZERO
  1652. static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
  1653. {
  1654. u32 tmp;
  1655. mv5_reset_flash(hpriv, mmio);
  1656. tmp = readl(mmio + MV_GPIO_PORT_CTL);
  1657. tmp &= 0x3;
  1658. tmp |= (1 << 5) | (1 << 6);
  1659. writel(tmp, mmio + MV_GPIO_PORT_CTL);
  1660. }
  1661. /**
  1662. * mv6_reset_hc - Perform the 6xxx global soft reset
  1663. * @mmio: base address of the HBA
  1664. *
  1665. * This routine only applies to 6xxx parts.
  1666. *
  1667. * LOCKING:
  1668. * Inherited from caller.
  1669. */
  1670. static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  1671. unsigned int n_hc)
  1672. {
  1673. void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
  1674. int i, rc = 0;
  1675. u32 t;
  1676. /* Following procedure defined in PCI "main command and status
  1677. * register" table.
  1678. */
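/*
 * In outline: stop the PCI master and wait for it to drain, assert the
 * global soft reset bit and confirm it latched, then clear it again while
 * also re-enabling the PCI master. Each step is retried a small, bounded
 * number of times with udelay() in between.
 */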
  1679. t = readl(reg);
  1680. writel(t | STOP_PCI_MASTER, reg);
  1681. for (i = 0; i < 1000; i++) {
  1682. udelay(1);
  1683. t = readl(reg);
  1684. if (PCI_MASTER_EMPTY & t) {
  1685. break;
  1686. }
  1687. }
  1688. if (!(PCI_MASTER_EMPTY & t)) {
  1689. printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
  1690. rc = 1;
  1691. goto done;
  1692. }
  1693. /* set reset */
  1694. i = 5;
  1695. do {
  1696. writel(t | GLOB_SFT_RST, reg);
  1697. t = readl(reg);
  1698. udelay(1);
  1699. } while (!(GLOB_SFT_RST & t) && (i-- > 0));
  1700. if (!(GLOB_SFT_RST & t)) {
  1701. printk(KERN_ERR DRV_NAME ": can't set global reset\n");
  1702. rc = 1;
  1703. goto done;
  1704. }
  1705. /* clear reset and *reenable the PCI master* (not mentioned in spec) */
  1706. i = 5;
  1707. do {
  1708. writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
  1709. t = readl(reg);
  1710. udelay(1);
  1711. } while ((GLOB_SFT_RST & t) && (i-- > 0));
  1712. if (GLOB_SFT_RST & t) {
  1713. printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
  1714. rc = 1;
  1715. }
  1716. done:
  1717. return rc;
  1718. }
  1719. static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
  1720. void __iomem *mmio)
  1721. {
  1722. void __iomem *port_mmio;
  1723. u32 tmp;
  1724. tmp = readl(mmio + MV_RESET_CFG);
  1725. if ((tmp & (1 << 0)) == 0) {
  1726. hpriv->signal[idx].amps = 0x7 << 8;
  1727. hpriv->signal[idx].pre = 0x1 << 5;
  1728. return;
  1729. }
  1730. port_mmio = mv_port_base(mmio, idx);
  1731. tmp = readl(port_mmio + PHY_MODE2);
  1732. hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
  1733. hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
  1734. }
  1735. static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
  1736. {
  1737. writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
  1738. }
  1739. static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  1740. unsigned int port)
  1741. {
  1742. void __iomem *port_mmio = mv_port_base(mmio, port);
  1743. u32 hp_flags = hpriv->hp_flags;
  1744. int fix_phy_mode2 =
  1745. hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
  1746. int fix_phy_mode4 =
  1747. hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
  1748. u32 m2, tmp;
  1749. if (fix_phy_mode2) {
  1750. m2 = readl(port_mmio + PHY_MODE2);
  1751. m2 &= ~(1 << 16);
  1752. m2 |= (1 << 31);
  1753. writel(m2, port_mmio + PHY_MODE2);
  1754. udelay(200);
  1755. m2 = readl(port_mmio + PHY_MODE2);
  1756. m2 &= ~((1 << 16) | (1 << 31));
  1757. writel(m2, port_mmio + PHY_MODE2);
  1758. udelay(200);
  1759. }
  1760. /* who knows what this magic does */
  1761. tmp = readl(port_mmio + PHY_MODE3);
  1762. tmp &= ~0x7F800000;
  1763. tmp |= 0x2A800000;
  1764. writel(tmp, port_mmio + PHY_MODE3);
  1765. if (fix_phy_mode4) {
  1766. u32 m4;
  1767. m4 = readl(port_mmio + PHY_MODE4);
  1768. if (hp_flags & MV_HP_ERRATA_60X1B2)
  1769. tmp = readl(port_mmio + 0x310);
  1770. m4 = (m4 & ~(1 << 1)) | (1 << 0);
  1771. writel(m4, port_mmio + PHY_MODE4);
  1772. if (hp_flags & MV_HP_ERRATA_60X1B2)
  1773. writel(tmp, port_mmio + 0x310);
  1774. }
  1775. /* Revert values of pre-emphasis and signal amps to the saved ones */
  1776. m2 = readl(port_mmio + PHY_MODE2);
  1777. m2 &= ~MV_M2_PREAMP_MASK;
  1778. m2 |= hpriv->signal[port].amps;
  1779. m2 |= hpriv->signal[port].pre;
  1780. m2 &= ~(1 << 16);
  1781. /* according to mvSata 3.6.1, some IIE values are fixed */
  1782. if (IS_GEN_IIE(hpriv)) {
  1783. m2 &= ~0xC30FF01F;
  1784. m2 |= 0x0000900F;
  1785. }
  1786. writel(m2, port_mmio + PHY_MODE2);
  1787. }
  1788. static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
  1789. unsigned int port_no)
  1790. {
  1791. void __iomem *port_mmio = mv_port_base(mmio, port_no);
  1792. writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
  1793. if (IS_GEN_II(hpriv)) {
  1794. u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
  1795. ifctl |= (1 << 7); /* enable gen2i speed */
  1796. ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
  1797. writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
  1798. }
  1799. udelay(25); /* allow reset propagation */
  1800. /* Spec never mentions clearing the bit. Marvell's driver does
  1801. * clear the bit, however.
  1802. */
  1803. writelfl(0, port_mmio + EDMA_CMD_OFS);
  1804. hpriv->ops->phy_errata(hpriv, mmio, port_no);
  1805. if (IS_GEN_I(hpriv))
  1806. mdelay(1);
  1807. }
  1808. /**
  1809. * mv_phy_reset - Perform eDMA reset followed by COMRESET
  1810. * @ap: ATA channel to manipulate
  1811. *
  1812. * Part of this is taken from __sata_phy_reset and modified to
  1813. * not sleep since this routine gets called from interrupt level.
  1814. *
  1815. * LOCKING:
1816. * Inherited from caller. This is coded to be safe to call at
  1817. * interrupt level, i.e. it does not sleep.
  1818. */
  1819. static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
  1820. unsigned long deadline)
  1821. {
  1822. struct mv_port_priv *pp = ap->private_data;
  1823. struct mv_host_priv *hpriv = ap->host->private_data;
  1824. void __iomem *port_mmio = mv_ap_base(ap);
  1825. int retry = 5;
  1826. u32 sstatus;
  1827. VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
  1828. #ifdef DEBUG
  1829. {
  1830. u32 sstatus, serror, scontrol;
  1831. mv_scr_read(ap, SCR_STATUS, &sstatus);
  1832. mv_scr_read(ap, SCR_ERROR, &serror);
  1833. mv_scr_read(ap, SCR_CONTROL, &scontrol);
  1834. DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
  1835. "SCtrl 0x%08x\n", status, serror, scontrol);
  1836. }
  1837. #endif
  1838. /* Issue COMRESET via SControl */
  1839. comreset_retry:
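/*
 * Writing 0x301/0x300 to SControl toggles its DET field: DET = 1 starts a
 * COMRESET on the link and DET = 0 releases it. SStatus is then polled
 * until the link reports either established (3) or no device (0), or the
 * deadline expires.
 */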
  1840. sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
  1841. msleep(1);
  1842. sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
  1843. msleep(20);
  1844. do {
  1845. sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
  1846. if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
  1847. break;
  1848. msleep(1);
  1849. } while (time_before(jiffies, deadline));
  1850. /* work around errata */
  1851. if (IS_GEN_II(hpriv) &&
  1852. (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
  1853. (retry-- > 0))
  1854. goto comreset_retry;
  1855. #ifdef DEBUG
  1856. {
  1857. u32 sstatus, serror, scontrol;
  1858. mv_scr_read(ap, SCR_STATUS, &sstatus);
  1859. mv_scr_read(ap, SCR_ERROR, &serror);
  1860. mv_scr_read(ap, SCR_CONTROL, &scontrol);
  1861. DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
  1862. "SCtrl 0x%08x\n", sstatus, serror, scontrol);
  1863. }
  1864. #endif
  1865. if (ata_link_offline(&ap->link)) {
  1866. *class = ATA_DEV_NONE;
  1867. return;
  1868. }
  1869. /* even after SStatus reflects that device is ready,
  1870. * it seems to take a while for link to be fully
  1871. * established (and thus Status no longer 0x80/0x7F),
  1872. * so we poll a bit for that, here.
  1873. */
  1874. retry = 20;
  1875. while (1) {
  1876. u8 drv_stat = ata_check_status(ap);
  1877. if ((drv_stat != 0x80) && (drv_stat != 0x7f))
  1878. break;
  1879. msleep(500);
  1880. if (retry-- <= 0)
  1881. break;
  1882. if (time_after(jiffies, deadline))
  1883. break;
  1884. }
  1885. /* FIXME: if we passed the deadline, the following
  1886. * code probably produces an invalid result
  1887. */
  1888. /* finally, read device signature from TF registers */
  1889. *class = ata_dev_try_classify(ap->link.device, 1, NULL);
  1890. writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  1891. WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
  1892. VPRINTK("EXIT\n");
  1893. }
  1894. static int mv_prereset(struct ata_link *link, unsigned long deadline)
  1895. {
  1896. struct ata_port *ap = link->ap;
  1897. struct mv_port_priv *pp = ap->private_data;
  1898. struct ata_eh_context *ehc = &link->eh_context;
  1899. int rc;
  1900. rc = mv_stop_dma(ap);
  1901. if (rc)
  1902. ehc->i.action |= ATA_EH_HARDRESET;
  1903. if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
  1904. pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
  1905. ehc->i.action |= ATA_EH_HARDRESET;
  1906. }
  1907. /* if we're about to do hardreset, nothing more to do */
  1908. if (ehc->i.action & ATA_EH_HARDRESET)
  1909. return 0;
  1910. if (ata_link_online(link))
  1911. rc = ata_wait_ready(ap, deadline);
  1912. else
  1913. rc = -ENODEV;
  1914. return rc;
  1915. }
  1916. static int mv_hardreset(struct ata_link *link, unsigned int *class,
  1917. unsigned long deadline)
  1918. {
  1919. struct ata_port *ap = link->ap;
  1920. struct mv_host_priv *hpriv = ap->host->private_data;
  1921. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1922. mv_stop_dma(ap);
  1923. mv_channel_reset(hpriv, mmio, ap->port_no);
  1924. mv_phy_reset(ap, class, deadline);
  1925. return 0;
  1926. }
  1927. static void mv_postreset(struct ata_link *link, unsigned int *classes)
  1928. {
  1929. struct ata_port *ap = link->ap;
  1930. u32 serr;
  1931. /* print link status */
  1932. sata_print_link_status(link);
  1933. /* clear SError */
  1934. sata_scr_read(link, SCR_ERROR, &serr);
  1935. sata_scr_write_flush(link, SCR_ERROR, serr);
  1936. /* bail out if no device is present */
  1937. if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
  1938. DPRINTK("EXIT, no device\n");
  1939. return;
  1940. }
  1941. /* set up device control */
  1942. iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
  1943. }
  1944. static void mv_error_handler(struct ata_port *ap)
  1945. {
  1946. ata_do_eh(ap, mv_prereset, ata_std_softreset,
  1947. mv_hardreset, mv_postreset);
  1948. }
  1949. static void mv_post_int_cmd(struct ata_queued_cmd *qc)
  1950. {
  1951. mv_stop_dma(qc->ap);
  1952. }
  1953. static void mv_eh_freeze(struct ata_port *ap)
  1954. {
  1955. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1956. unsigned int hc = (ap->port_no > 3) ? 1 : 0;
  1957. u32 tmp, mask;
  1958. unsigned int shift;
  1959. /* FIXME: handle coalescing completion events properly */
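/*
 * Build the same per-port bit pair used in mv_host_intr(): two mask bits
 * per port, plus one extra shift for ports behind the second host
 * controller, then clear those bits in the main IRQ mask so the port
 * stops asserting error/done events while frozen.
 */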
  1960. shift = ap->port_no * 2;
  1961. if (hc > 0)
  1962. shift++;
  1963. mask = 0x3 << shift;
  1964. /* disable assertion of portN err, done events */
  1965. tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
  1966. writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
  1967. }
  1968. static void mv_eh_thaw(struct ata_port *ap)
  1969. {
  1970. void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
  1971. unsigned int hc = (ap->port_no > 3) ? 1 : 0;
  1972. void __iomem *hc_mmio = mv_hc_base(mmio, hc);
  1973. void __iomem *port_mmio = mv_ap_base(ap);
  1974. u32 tmp, mask, hc_irq_cause;
  1975. unsigned int shift, hc_port_no = ap->port_no;
  1976. /* FIXME: handle coalescing completion events properly */
  1977. shift = ap->port_no * 2;
  1978. if (hc > 0) {
  1979. shift++;
  1980. hc_port_no -= 4;
  1981. }
  1982. mask = 0x3 << shift;
  1983. /* clear EDMA errors on this port */
  1984. writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  1985. /* clear pending irq events */
  1986. hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
  1987. hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
  1988. hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
  1989. writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
  1990. /* enable assertion of portN err, done events */
  1991. tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
  1992. writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
  1993. }
  1994. /**
  1995. * mv_port_init - Perform some early initialization on a single port.
  1996. * @port: libata data structure storing shadow register addresses
  1997. * @port_mmio: base address of the port
  1998. *
  1999. * Initialize shadow register mmio addresses, clear outstanding
  2000. * interrupts on the port, and unmask interrupts for the future
  2001. * start of the port.
  2002. *
  2003. * LOCKING:
  2004. * Inherited from caller.
  2005. */
  2006. static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
  2007. {
  2008. void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
  2009. unsigned serr_ofs;
  2010. /* PIO related setup
  2011. */
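/*
 * The chip exposes the ATA taskfile "shadow" registers as consecutive
 * 32-bit slots starting at SHD_BLK_OFS, which is why each ATA_REG_* index
 * below is scaled by sizeof(u32); control/altstatus lives in its own slot
 * at SHD_CTL_AST_OFS.
 */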
  2012. port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
  2013. port->error_addr =
  2014. port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
  2015. port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
  2016. port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
  2017. port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
  2018. port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
  2019. port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
  2020. port->status_addr =
  2021. port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
  2022. /* special case: control/altstatus doesn't have ATA_REG_ address */
  2023. port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
  2024. /* unused: */
  2025. port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
  2026. /* Clear any currently outstanding port interrupt conditions */
  2027. serr_ofs = mv_scr_offset(SCR_ERROR);
  2028. writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
  2029. writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  2030. /* unmask all EDMA error interrupts */
  2031. writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
  2032. VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
  2033. readl(port_mmio + EDMA_CFG_OFS),
  2034. readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
  2035. readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
  2036. }
  2037. static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
  2038. {
  2039. struct pci_dev *pdev = to_pci_dev(host->dev);
  2040. struct mv_host_priv *hpriv = host->private_data;
  2041. u32 hp_flags = hpriv->hp_flags;
2042. switch (board_idx) {
  2043. case chip_5080:
  2044. hpriv->ops = &mv5xxx_ops;
  2045. hp_flags |= MV_HP_GEN_I;
  2046. switch (pdev->revision) {
  2047. case 0x1:
  2048. hp_flags |= MV_HP_ERRATA_50XXB0;
  2049. break;
  2050. case 0x3:
  2051. hp_flags |= MV_HP_ERRATA_50XXB2;
  2052. break;
  2053. default:
  2054. dev_printk(KERN_WARNING, &pdev->dev,
  2055. "Applying 50XXB2 workarounds to unknown rev\n");
  2056. hp_flags |= MV_HP_ERRATA_50XXB2;
  2057. break;
  2058. }
  2059. break;
  2060. case chip_504x:
  2061. case chip_508x:
  2062. hpriv->ops = &mv5xxx_ops;
  2063. hp_flags |= MV_HP_GEN_I;
  2064. switch (pdev->revision) {
  2065. case 0x0:
  2066. hp_flags |= MV_HP_ERRATA_50XXB0;
  2067. break;
  2068. case 0x3:
  2069. hp_flags |= MV_HP_ERRATA_50XXB2;
  2070. break;
  2071. default:
  2072. dev_printk(KERN_WARNING, &pdev->dev,
  2073. "Applying B2 workarounds to unknown rev\n");
  2074. hp_flags |= MV_HP_ERRATA_50XXB2;
  2075. break;
  2076. }
  2077. break;
  2078. case chip_604x:
  2079. case chip_608x:
  2080. hpriv->ops = &mv6xxx_ops;
  2081. hp_flags |= MV_HP_GEN_II;
  2082. switch (pdev->revision) {
  2083. case 0x7:
  2084. hp_flags |= MV_HP_ERRATA_60X1B2;
  2085. break;
  2086. case 0x9:
  2087. hp_flags |= MV_HP_ERRATA_60X1C0;
  2088. break;
  2089. default:
  2090. dev_printk(KERN_WARNING, &pdev->dev,
  2091. "Applying B2 workarounds to unknown rev\n");
  2092. hp_flags |= MV_HP_ERRATA_60X1B2;
  2093. break;
  2094. }
  2095. break;
  2096. case chip_7042:
  2097. case chip_6042:
  2098. hpriv->ops = &mv6xxx_ops;
  2099. hp_flags |= MV_HP_GEN_IIE;
  2100. switch (pdev->revision) {
  2101. case 0x0:
  2102. hp_flags |= MV_HP_ERRATA_XX42A0;
  2103. break;
  2104. case 0x1:
  2105. hp_flags |= MV_HP_ERRATA_60X1C0;
  2106. break;
  2107. default:
  2108. dev_printk(KERN_WARNING, &pdev->dev,
  2109. "Applying 60X1C0 workarounds to unknown rev\n");
  2110. hp_flags |= MV_HP_ERRATA_60X1C0;
  2111. break;
  2112. }
  2113. break;
  2114. default:
  2115. printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
  2116. return 1;
  2117. }
  2118. hpriv->hp_flags = hp_flags;
  2119. return 0;
  2120. }
  2121. /**
  2122. * mv_init_host - Perform some early initialization of the host.
  2123. * @host: ATA host to initialize
  2124. * @board_idx: controller index
  2125. *
  2126. * If possible, do an early global reset of the host. Then do
  2127. * our port init and clear/unmask all/relevant host interrupts.
  2128. *
  2129. * LOCKING:
  2130. * Inherited from caller.
  2131. */
  2132. static int mv_init_host(struct ata_host *host, unsigned int board_idx)
  2133. {
  2134. int rc = 0, n_hc, port, hc;
  2135. struct pci_dev *pdev = to_pci_dev(host->dev);
  2136. void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
  2137. struct mv_host_priv *hpriv = host->private_data;
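/*
 * Rough order of operations below: mask all chip interrupts, identify the
 * chip generation and errata, record the per-port PHY preamp/amplitude
 * settings, soft-reset the host controllers, then bring up flash, bus and
 * LED state and apply PHY errata before the per-port and per-HC interrupt
 * housekeeping.
 */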
  2138. /* global interrupt mask */
  2139. writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
  2140. rc = mv_chip_id(host, board_idx);
  2141. if (rc)
  2142. goto done;
  2143. n_hc = mv_get_hc_count(host->ports[0]->flags);
  2144. for (port = 0; port < host->n_ports; port++)
  2145. hpriv->ops->read_preamp(hpriv, port, mmio);
  2146. rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
  2147. if (rc)
  2148. goto done;
  2149. hpriv->ops->reset_flash(hpriv, mmio);
  2150. hpriv->ops->reset_bus(pdev, mmio);
  2151. hpriv->ops->enable_leds(hpriv, mmio);
  2152. for (port = 0; port < host->n_ports; port++) {
  2153. if (IS_GEN_II(hpriv)) {
  2154. void __iomem *port_mmio = mv_port_base(mmio, port);
  2155. u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
  2156. ifctl |= (1 << 7); /* enable gen2i speed */
  2157. ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
  2158. writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
  2159. }
  2160. hpriv->ops->phy_errata(hpriv, mmio, port);
  2161. }
  2162. for (port = 0; port < host->n_ports; port++) {
  2163. struct ata_port *ap = host->ports[port];
  2164. void __iomem *port_mmio = mv_port_base(mmio, port);
  2165. unsigned int offset = port_mmio - mmio;
  2166. mv_port_init(&ap->ioaddr, port_mmio);
  2167. ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
  2168. ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
  2169. }
  2170. for (hc = 0; hc < n_hc; hc++) {
  2171. void __iomem *hc_mmio = mv_hc_base(mmio, hc);
  2172. VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
  2173. "(before clear)=0x%08x\n", hc,
  2174. readl(hc_mmio + HC_CFG_OFS),
  2175. readl(hc_mmio + HC_IRQ_CAUSE_OFS));
  2176. /* Clear any currently outstanding hc interrupt conditions */
  2177. writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
  2178. }
  2179. /* Clear any currently outstanding host interrupt conditions */
  2180. writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
  2181. /* and unmask interrupt generation for host regs */
  2182. writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
  2183. if (IS_GEN_I(hpriv))
  2184. writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
  2185. else
  2186. writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
  2187. VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
  2188. "PCI int cause/mask=0x%08x/0x%08x\n",
  2189. readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
  2190. readl(mmio + HC_MAIN_IRQ_MASK_OFS),
  2191. readl(mmio + PCI_IRQ_CAUSE_OFS),
  2192. readl(mmio + PCI_IRQ_MASK_OFS));
  2193. done:
  2194. return rc;
  2195. }
  2196. /**
  2197. * mv_print_info - Dump key info to kernel log for perusal.
  2198. * @host: ATA host to print info about
  2199. *
  2200. * FIXME: complete this.
  2201. *
  2202. * LOCKING:
  2203. * Inherited from caller.
  2204. */
  2205. static void mv_print_info(struct ata_host *host)
  2206. {
  2207. struct pci_dev *pdev = to_pci_dev(host->dev);
  2208. struct mv_host_priv *hpriv = host->private_data;
  2209. u8 scc;
  2210. const char *scc_s, *gen;
2211. /* Read the PCI device class so the printout below can report whether
2212. * the controller identifies itself as SCSI or RAID
2213. */
  2214. pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
  2215. if (scc == 0)
  2216. scc_s = "SCSI";
  2217. else if (scc == 0x01)
  2218. scc_s = "RAID";
  2219. else
  2220. scc_s = "?";
  2221. if (IS_GEN_I(hpriv))
  2222. gen = "I";
  2223. else if (IS_GEN_II(hpriv))
  2224. gen = "II";
  2225. else if (IS_GEN_IIE(hpriv))
  2226. gen = "IIE";
  2227. else
  2228. gen = "?";
  2229. dev_printk(KERN_INFO, &pdev->dev,
  2230. "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
  2231. gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
  2232. scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
  2233. }
  2234. /**
  2235. * mv_init_one - handle a positive probe of a Marvell host
  2236. * @pdev: PCI device found
  2237. * @ent: PCI device ID entry for the matched host
  2238. *
  2239. * LOCKING:
  2240. * Inherited from caller.
  2241. */
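/*
 * All resources below are acquired through managed (devm_ / pcim_)
 * interfaces, so error paths can simply return and the driver core will
 * release whatever was obtained.
 */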
  2242. static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  2243. {
  2244. static int printed_version = 0;
  2245. unsigned int board_idx = (unsigned int)ent->driver_data;
  2246. const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
  2247. struct ata_host *host;
  2248. struct mv_host_priv *hpriv;
  2249. int n_ports, rc;
  2250. if (!printed_version++)
  2251. dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
  2252. /* allocate host */
  2253. n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
  2254. host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
  2255. hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
  2256. if (!host || !hpriv)
  2257. return -ENOMEM;
  2258. host->private_data = hpriv;
  2259. /* acquire resources */
  2260. rc = pcim_enable_device(pdev);
  2261. if (rc)
  2262. return rc;
  2263. rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
  2264. if (rc == -EBUSY)
  2265. pcim_pin_device(pdev);
  2266. if (rc)
  2267. return rc;
  2268. host->iomap = pcim_iomap_table(pdev);
  2269. rc = pci_go_64(pdev);
  2270. if (rc)
  2271. return rc;
  2272. /* initialize adapter */
  2273. rc = mv_init_host(host, board_idx);
  2274. if (rc)
  2275. return rc;
  2276. /* Enable interrupts */
  2277. if (msi && pci_enable_msi(pdev))
  2278. pci_intx(pdev, 1);
  2279. mv_dump_pci_cfg(pdev, 0x68);
  2280. mv_print_info(host);
  2281. pci_set_master(pdev);
  2282. pci_try_set_mwi(pdev);
  2283. return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
  2284. IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
  2285. }
  2286. static int __init mv_init(void)
  2287. {
  2288. return pci_register_driver(&mv_pci_driver);
  2289. }
  2290. static void __exit mv_exit(void)
  2291. {
  2292. pci_unregister_driver(&mv_pci_driver);
  2293. }
  2294. MODULE_AUTHOR("Brett Russ");
  2295. MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
  2296. MODULE_LICENSE("GPL");
  2297. MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
  2298. MODULE_VERSION(DRV_VERSION);
  2299. module_param(msi, int, 0444);
  2300. MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
  2301. module_init(mv_init);
  2302. module_exit(mv_exit);