sata_mv.c

  1. /*
  2. * sata_mv.c - Marvell SATA support
  3. *
  4. * Copyright 2008: Marvell Corporation, all rights reserved.
  5. * Copyright 2005: EMC Corporation, all rights reserved.
  6. * Copyright 2005 Red Hat, Inc. All rights reserved.
  7. *
  8. * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; version 2 of the License.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  22. *
  23. */
  24. /*
  25. sata_mv TODO list:
  26. 1) Needs a full errata audit for all chipsets. I implemented most
  27. of the errata workarounds found in the Marvell vendor driver, but
  28. I distinctly remember a couple workarounds (one related to PCI-X)
  29. are still needed.
  30. 2) Improve/fix IRQ and error handling sequences.
  31. 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
  32. 4) Think about TCQ support here, and for libata in general
  33. with controllers that support it via host-queuing hardware
  34. (a software-only implementation could be a nightmare).
  35. 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
  36. 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
  37. 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
  38. 8) Develop a low-power-consumption strategy, and implement it.
  39. 9) [Experiment, low priority] See if ATAPI can be supported using
  40. "unknown FIS" or "vendor-specific FIS" support, or something creative
  41. like that.
  42. 10) [Experiment, low priority] Investigate interrupt coalescing.
  43. Quite often, especially with PCI Message Signalled Interrupts (MSI),
  44. the overhead reduced by interrupt mitigation is not
  45. worth the latency cost.
  46. 11) [Experiment, Marvell value added] Is it possible to use target
  47. mode to cross-connect two Linux boxes with Marvell cards? If so,
  48. creating LibATA target mode support would be very interesting.
  49. Target mode, for those without docs, is the ability to directly
  50. connect two SATA controllers.
  51. */
  52. #include <linux/kernel.h>
  53. #include <linux/module.h>
  54. #include <linux/pci.h>
  55. #include <linux/init.h>
  56. #include <linux/blkdev.h>
  57. #include <linux/delay.h>
  58. #include <linux/interrupt.h>
  59. #include <linux/dmapool.h>
  60. #include <linux/dma-mapping.h>
  61. #include <linux/device.h>
  62. #include <linux/platform_device.h>
  63. #include <linux/ata_platform.h>
  64. #include <linux/mbus.h>
  65. #include <scsi/scsi_host.h>
  66. #include <scsi/scsi_cmnd.h>
  67. #include <scsi/scsi_device.h>
  68. #include <linux/libata.h>
  69. #define DRV_NAME "sata_mv"
  70. #define DRV_VERSION "1.20"
  71. enum {
  72. /* BAR's are enumerated in terms of pci_resource_start() terms */
  73. MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
  74. MV_IO_BAR = 2, /* offset 0x18: IO space */
  75. MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
  76. MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
  77. MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
  78. MV_PCI_REG_BASE = 0,
  79. MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
  80. MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
  81. MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
  82. MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
  83. MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
  84. MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
  85. MV_SATAHC0_REG_BASE = 0x20000,
  86. MV_FLASH_CTL = 0x1046c,
  87. MV_GPIO_PORT_CTL = 0x104f0,
  88. MV_RESET_CFG = 0x180d8,
  89. MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  90. MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  91. MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
  92. MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
  93. MV_MAX_Q_DEPTH = 32,
  94. MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
  95. /* CRQB needs alignment on a 1KB boundary. Size == 1KB
  96. * CRPB needs alignment on a 256B boundary. Size == 256B
  97. * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
  98. */
  99. MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
  100. MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
  101. MV_MAX_SG_CT = 256,
  102. MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
  103. /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
  104. MV_PORT_HC_SHIFT = 2,
  105. MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
  106. /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
  107. MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
  108. /* Host Flags */
  109. MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
  110. MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
  111. /* SoC integrated controllers, no PCI interface */
  112. MV_FLAG_SOC = (1 << 28),
  113. MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  114. ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
  115. ATA_FLAG_PIO_POLLING,
  116. MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
  117. CRQB_FLAG_READ = (1 << 0),
  118. CRQB_TAG_SHIFT = 1,
  119. CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
  120. CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
  121. CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
  122. CRQB_CMD_ADDR_SHIFT = 8,
  123. CRQB_CMD_CS = (0x2 << 11),
  124. CRQB_CMD_LAST = (1 << 15),
  125. CRPB_FLAG_STATUS_SHIFT = 8,
  126. CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
  127. CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
  128. EPRD_FLAG_END_OF_TBL = (1 << 31),
  129. /* PCI interface registers */
  130. PCI_COMMAND_OFS = 0xc00,
  131. PCI_MAIN_CMD_STS_OFS = 0xd30,
  132. STOP_PCI_MASTER = (1 << 2),
  133. PCI_MASTER_EMPTY = (1 << 3),
  134. GLOB_SFT_RST = (1 << 4),
  135. MV_PCI_MODE = 0xd00,
  136. MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
  137. MV_PCI_DISC_TIMER = 0xd04,
  138. MV_PCI_MSI_TRIGGER = 0xc38,
  139. MV_PCI_SERR_MASK = 0xc28,
  140. MV_PCI_XBAR_TMOUT = 0x1d04,
  141. MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
  142. MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
  143. MV_PCI_ERR_ATTRIBUTE = 0x1d48,
  144. MV_PCI_ERR_COMMAND = 0x1d50,
  145. PCI_IRQ_CAUSE_OFS = 0x1d58,
  146. PCI_IRQ_MASK_OFS = 0x1d5c,
  147. PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
  148. PCIE_IRQ_CAUSE_OFS = 0x1900,
  149. PCIE_IRQ_MASK_OFS = 0x1910,
  150. PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
  151. HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
  152. HC_MAIN_IRQ_MASK_OFS = 0x1d64,
  153. HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
  154. HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
  155. ERR_IRQ = (1 << 0), /* shift by port # */
  156. DONE_IRQ = (1 << 1), /* shift by port # */
  157. HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
  158. HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
  159. PCI_ERR = (1 << 18),
  160. TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
  161. TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
  162. PORTS_0_3_COAL_DONE = (1 << 8),
  163. PORTS_4_7_COAL_DONE = (1 << 17),
  164. PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
  165. GPIO_INT = (1 << 22),
  166. SELF_INT = (1 << 23),
  167. TWSI_INT = (1 << 24),
  168. HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
  169. HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
  170. HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
  171. HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
  172. PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
  173. PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
  174. HC_MAIN_RSVD),
  175. HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
  176. HC_MAIN_RSVD_5),
  177. HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
  178. /* SATAHC registers */
  179. HC_CFG_OFS = 0,
  180. HC_IRQ_CAUSE_OFS = 0x14,
  181. DMA_IRQ = (1 << 0), /* shift by port # */
  182. HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
  183. DEV_IRQ = (1 << 8), /* shift by port # */
  184. /* Shadow block registers */
  185. SHD_BLK_OFS = 0x100,
  186. SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
  187. /* SATA registers */
  188. SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
  189. SATA_ACTIVE_OFS = 0x350,
  190. SATA_FIS_IRQ_CAUSE_OFS = 0x364,
  191. LTMODE_OFS = 0x30c,
  192. LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
  193. PHY_MODE3 = 0x310,
  194. PHY_MODE4 = 0x314,
  195. PHY_MODE2 = 0x330,
  196. SATA_IFCTL_OFS = 0x344,
  197. SATA_IFSTAT_OFS = 0x34c,
  198. VENDOR_UNIQUE_FIS_OFS = 0x35c,
  199. FIS_CFG_OFS = 0x360,
  200. FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
  201. MV5_PHY_MODE = 0x74,
  202. MV5_LT_MODE = 0x30,
  203. MV5_PHY_CTL = 0x0C,
  204. SATA_INTERFACE_CFG = 0x050,
  205. MV_M2_PREAMP_MASK = 0x7e0,
  206. /* Port registers */
  207. EDMA_CFG_OFS = 0,
  208. EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
  209. EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
  210. EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
  211. EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
  212. EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
  213. EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
  214. EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
  215. EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
  216. EDMA_ERR_IRQ_MASK_OFS = 0xc,
  217. EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
  218. EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
  219. EDMA_ERR_DEV = (1 << 2), /* device error */
  220. EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
  221. EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
  222. EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
  223. EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
  224. EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
  225. EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
  226. EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
  227. EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
  228. EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
  229. EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
  230. EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
  231. EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
  232. EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
  233. EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
  234. EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
  235. EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
  236. EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
  237. EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
  238. EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
  239. EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
  240. EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
  241. EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
  242. EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
  243. EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
  244. EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
  245. EDMA_ERR_OVERRUN_5 = (1 << 5),
  246. EDMA_ERR_UNDERRUN_5 = (1 << 6),
  247. EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
  248. EDMA_ERR_LNK_CTRL_RX_1 |
  249. EDMA_ERR_LNK_CTRL_RX_3 |
  250. EDMA_ERR_LNK_CTRL_TX |
  251. /* temporary, until we fix hotplug: */
  252. (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
  253. EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
  254. EDMA_ERR_PRD_PAR |
  255. EDMA_ERR_DEV_DCON |
  256. EDMA_ERR_DEV_CON |
  257. EDMA_ERR_SERR |
  258. EDMA_ERR_SELF_DIS |
  259. EDMA_ERR_CRQB_PAR |
  260. EDMA_ERR_CRPB_PAR |
  261. EDMA_ERR_INTRL_PAR |
  262. EDMA_ERR_IORDY |
  263. EDMA_ERR_LNK_CTRL_RX_2 |
  264. EDMA_ERR_LNK_DATA_RX |
  265. EDMA_ERR_LNK_DATA_TX |
  266. EDMA_ERR_TRANS_PROTO,
  267. EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
  268. EDMA_ERR_PRD_PAR |
  269. EDMA_ERR_DEV_DCON |
  270. EDMA_ERR_DEV_CON |
  271. EDMA_ERR_OVERRUN_5 |
  272. EDMA_ERR_UNDERRUN_5 |
  273. EDMA_ERR_SELF_DIS_5 |
  274. EDMA_ERR_CRQB_PAR |
  275. EDMA_ERR_CRPB_PAR |
  276. EDMA_ERR_INTRL_PAR |
  277. EDMA_ERR_IORDY,
  278. EDMA_REQ_Q_BASE_HI_OFS = 0x10,
  279. EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
  280. EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
  281. EDMA_REQ_Q_PTR_SHIFT = 5,
  282. EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
  283. EDMA_RSP_Q_IN_PTR_OFS = 0x20,
  284. EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
  285. EDMA_RSP_Q_PTR_SHIFT = 3,
  286. EDMA_CMD_OFS = 0x28, /* EDMA command register */
  287. EDMA_EN = (1 << 0), /* enable EDMA */
  288. EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
  289. ATA_RST = (1 << 2), /* reset trans/link/phy */
  290. EDMA_IORDY_TMOUT = 0x34,
  291. EDMA_ARB_CFG = 0x38,
  292. GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
  293. /* Host private flags (hp_flags) */
  294. MV_HP_FLAG_MSI = (1 << 0),
  295. MV_HP_ERRATA_50XXB0 = (1 << 1),
  296. MV_HP_ERRATA_50XXB2 = (1 << 2),
  297. MV_HP_ERRATA_60X1B2 = (1 << 3),
  298. MV_HP_ERRATA_60X1C0 = (1 << 4),
  299. MV_HP_ERRATA_XX42A0 = (1 << 5),
  300. MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
  301. MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
  302. MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
  303. MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
  304. /* Port private flags (pp_flags) */
  305. MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
  306. MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
  307. };
  308. #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
  309. #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
  310. #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
  311. #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
  312. #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
  313. #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
  314. enum {
  315. /* DMA boundary 0xffff is required by the s/g splitting
  316. * we need on /length/ in mv_fill_sg().
  317. */
  318. MV_DMA_BOUNDARY = 0xffffU,
  319. /* mask of register bits containing lower 32 bits
  320. * of EDMA request queue DMA address
  321. */
  322. EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
  323. /* ditto, for response queue */
  324. EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
  325. };
  326. enum chip_type {
  327. chip_504x,
  328. chip_508x,
  329. chip_5080,
  330. chip_604x,
  331. chip_608x,
  332. chip_6042,
  333. chip_7042,
  334. chip_soc,
  335. };
  336. /* Command ReQuest Block: 32B */
  337. struct mv_crqb {
  338. __le32 sg_addr;
  339. __le32 sg_addr_hi;
  340. __le16 ctrl_flags;
  341. __le16 ata_cmd[11];
  342. };
  343. struct mv_crqb_iie {
  344. __le32 addr;
  345. __le32 addr_hi;
  346. __le32 flags;
  347. __le32 len;
  348. __le32 ata_cmd[4];
  349. };
  350. /* Command ResPonse Block: 8B */
  351. struct mv_crpb {
  352. __le16 id;
  353. __le16 flags;
  354. __le32 tmstmp;
  355. };
  356. /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
  357. struct mv_sg {
  358. __le32 addr;
  359. __le32 flags_size;
  360. __le32 addr_hi;
  361. __le32 reserved;
  362. };
  363. struct mv_port_priv {
  364. struct mv_crqb *crqb;
  365. dma_addr_t crqb_dma;
  366. struct mv_crpb *crpb;
  367. dma_addr_t crpb_dma;
  368. struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
  369. dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
  370. unsigned int req_idx;
  371. unsigned int resp_idx;
  372. u32 pp_flags;
  373. };
  374. struct mv_port_signal {
  375. u32 amps;
  376. u32 pre;
  377. };
  378. struct mv_host_priv {
  379. u32 hp_flags;
  380. struct mv_port_signal signal[8];
  381. const struct mv_hw_ops *ops;
  382. int n_ports;
  383. void __iomem *base;
  384. void __iomem *main_cause_reg_addr;
  385. void __iomem *main_mask_reg_addr;
  386. u32 irq_cause_ofs;
  387. u32 irq_mask_ofs;
  388. u32 unmask_all_irqs;
  389. /*
  390. * These consistent DMA memory pools give us guaranteed
  391. * alignment for hardware-accessed data structures,
  392. * and less memory waste in accomplishing the alignment.
  393. */
  394. struct dma_pool *crqb_pool;
  395. struct dma_pool *crpb_pool;
  396. struct dma_pool *sg_tbl_pool;
  397. };
  398. struct mv_hw_ops {
  399. void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
  400. unsigned int port);
  401. void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
  402. void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
  403. void __iomem *mmio);
  404. int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
  405. unsigned int n_hc);
  406. void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
  407. void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
  408. };
  409. static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
  410. static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  411. static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
  412. static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  413. static int mv_port_start(struct ata_port *ap);
  414. static void mv_port_stop(struct ata_port *ap);
  415. static void mv_qc_prep(struct ata_queued_cmd *qc);
  416. static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
  417. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
  418. static int mv_hardreset(struct ata_link *link, unsigned int *class,
  419. unsigned long deadline);
  420. static void mv_eh_freeze(struct ata_port *ap);
  421. static void mv_eh_thaw(struct ata_port *ap);
  422. static void mv6_dev_config(struct ata_device *dev);
  423. static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  424. unsigned int port);
  425. static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  426. static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
  427. void __iomem *mmio);
  428. static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  429. unsigned int n_hc);
  430. static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  431. static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
  432. static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  433. unsigned int port);
  434. static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  435. static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
  436. void __iomem *mmio);
  437. static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  438. unsigned int n_hc);
  439. static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  440. static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
  441. void __iomem *mmio);
  442. static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
  443. void __iomem *mmio);
  444. static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
  445. void __iomem *mmio, unsigned int n_hc);
  446. static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
  447. void __iomem *mmio);
  448. static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
  449. static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
  450. static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
  451. unsigned int port_no);
  452. static int mv_stop_edma(struct ata_port *ap);
  453. static int mv_stop_edma_engine(void __iomem *port_mmio);
  454. static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
  455. static void mv_pmp_select(struct ata_port *ap, int pmp);
  456. static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
  457. unsigned long deadline);
  458. static int mv_softreset(struct ata_link *link, unsigned int *class,
  459. unsigned long deadline);
  460. /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  461. * because we have to allow room for worst case splitting of
  462. * PRDs for 64K boundaries in mv_fill_sg().
  463. */
  464. static struct scsi_host_template mv5_sht = {
  465. ATA_BASE_SHT(DRV_NAME),
  466. .sg_tablesize = MV_MAX_SG_CT / 2,
  467. .dma_boundary = MV_DMA_BOUNDARY,
  468. };
  469. static struct scsi_host_template mv6_sht = {
  470. ATA_NCQ_SHT(DRV_NAME),
  471. .can_queue = MV_MAX_Q_DEPTH - 1,
  472. .sg_tablesize = MV_MAX_SG_CT / 2,
  473. .dma_boundary = MV_DMA_BOUNDARY,
  474. };
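/*
 * Added note (not in the original source): the halving above is simple
 * arithmetic. Advertising .sg_tablesize = MV_MAX_SG_CT / 2 = 128 entries
 * leaves room for each scatterlist entry to be split once by mv_fill_sg(),
 * since 128 * 2 = 256 = MV_MAX_SG_CT ePRD slots in the worst case.
 */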
  475. static struct ata_port_operations mv5_ops = {
  476. .inherits = &ata_sff_port_ops,
  477. .qc_prep = mv_qc_prep,
  478. .qc_issue = mv_qc_issue,
  479. .freeze = mv_eh_freeze,
  480. .thaw = mv_eh_thaw,
  481. .hardreset = mv_hardreset,
  482. .error_handler = ata_std_error_handler, /* avoid SFF EH */
  483. .post_internal_cmd = ATA_OP_NULL,
  484. .scr_read = mv5_scr_read,
  485. .scr_write = mv5_scr_write,
  486. .port_start = mv_port_start,
  487. .port_stop = mv_port_stop,
  488. };
  489. static struct ata_port_operations mv6_ops = {
  490. .inherits = &mv5_ops,
  491. .qc_defer = sata_pmp_qc_defer_cmd_switch,
  492. .dev_config = mv6_dev_config,
  493. .scr_read = mv_scr_read,
  494. .scr_write = mv_scr_write,
  495. .pmp_hardreset = mv_pmp_hardreset,
  496. .pmp_softreset = mv_softreset,
  497. .softreset = mv_softreset,
  498. .error_handler = sata_pmp_error_handler,
  499. };
  500. static struct ata_port_operations mv_iie_ops = {
  501. .inherits = &mv6_ops,
  502. .qc_defer = ata_std_qc_defer, /* FIS-based switching */
  503. .dev_config = ATA_OP_NULL,
  504. .qc_prep = mv_qc_prep_iie,
  505. };
  506. static const struct ata_port_info mv_port_info[] = {
  507. { /* chip_504x */
  508. .flags = MV_COMMON_FLAGS,
  509. .pio_mask = 0x1f, /* pio0-4 */
  510. .udma_mask = ATA_UDMA6,
  511. .port_ops = &mv5_ops,
  512. },
  513. { /* chip_508x */
  514. .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
  515. .pio_mask = 0x1f, /* pio0-4 */
  516. .udma_mask = ATA_UDMA6,
  517. .port_ops = &mv5_ops,
  518. },
  519. { /* chip_5080 */
  520. .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
  521. .pio_mask = 0x1f, /* pio0-4 */
  522. .udma_mask = ATA_UDMA6,
  523. .port_ops = &mv5_ops,
  524. },
  525. { /* chip_604x */
  526. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  527. ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
  528. ATA_FLAG_NCQ,
  529. .pio_mask = 0x1f, /* pio0-4 */
  530. .udma_mask = ATA_UDMA6,
  531. .port_ops = &mv6_ops,
  532. },
  533. { /* chip_608x */
  534. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  535. ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
  536. ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
  537. .pio_mask = 0x1f, /* pio0-4 */
  538. .udma_mask = ATA_UDMA6,
  539. .port_ops = &mv6_ops,
  540. },
  541. { /* chip_6042 */
  542. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  543. ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
  544. ATA_FLAG_NCQ,
  545. .pio_mask = 0x1f, /* pio0-4 */
  546. .udma_mask = ATA_UDMA6,
  547. .port_ops = &mv_iie_ops,
  548. },
  549. { /* chip_7042 */
  550. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  551. ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
  552. ATA_FLAG_NCQ,
  553. .pio_mask = 0x1f, /* pio0-4 */
  554. .udma_mask = ATA_UDMA6,
  555. .port_ops = &mv_iie_ops,
  556. },
  557. { /* chip_soc */
  558. .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  559. ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
  560. ATA_FLAG_NCQ | MV_FLAG_SOC,
  561. .pio_mask = 0x1f, /* pio0-4 */
  562. .udma_mask = ATA_UDMA6,
  563. .port_ops = &mv_iie_ops,
  564. },
  565. };
  566. static const struct pci_device_id mv_pci_tbl[] = {
  567. { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
  568. { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
  569. { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
  570. { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
  571. /* RocketRAID 1740/174x have different identifiers */
  572. { PCI_VDEVICE(TTI, 0x1740), chip_508x },
  573. { PCI_VDEVICE(TTI, 0x1742), chip_508x },
  574. { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
  575. { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
  576. { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
  577. { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
  578. { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
  579. { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
  580. /* Adaptec 1430SA */
  581. { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
  582. /* Marvell 7042 support */
  583. { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
  584. /* Highpoint RocketRAID PCIe series */
  585. { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
  586. { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
  587. { } /* terminate list */
  588. };
  589. static const struct mv_hw_ops mv5xxx_ops = {
  590. .phy_errata = mv5_phy_errata,
  591. .enable_leds = mv5_enable_leds,
  592. .read_preamp = mv5_read_preamp,
  593. .reset_hc = mv5_reset_hc,
  594. .reset_flash = mv5_reset_flash,
  595. .reset_bus = mv5_reset_bus,
  596. };
  597. static const struct mv_hw_ops mv6xxx_ops = {
  598. .phy_errata = mv6_phy_errata,
  599. .enable_leds = mv6_enable_leds,
  600. .read_preamp = mv6_read_preamp,
  601. .reset_hc = mv6_reset_hc,
  602. .reset_flash = mv6_reset_flash,
  603. .reset_bus = mv_reset_pci_bus,
  604. };
  605. static const struct mv_hw_ops mv_soc_ops = {
  606. .phy_errata = mv6_phy_errata,
  607. .enable_leds = mv_soc_enable_leds,
  608. .read_preamp = mv_soc_read_preamp,
  609. .reset_hc = mv_soc_reset_hc,
  610. .reset_flash = mv_soc_reset_flash,
  611. .reset_bus = mv_soc_reset_bus,
  612. };
  613. /*
  614. * Functions
  615. */
  616. static inline void writelfl(unsigned long data, void __iomem *addr)
  617. {
  618. writel(data, addr);
  619. (void) readl(addr); /* flush to avoid PCI posted write */
  620. }
  621. static inline unsigned int mv_hc_from_port(unsigned int port)
  622. {
  623. return port >> MV_PORT_HC_SHIFT;
  624. }
  625. static inline unsigned int mv_hardport_from_port(unsigned int port)
  626. {
  627. return port & MV_PORT_MASK;
  628. }
  629. /*
  630. * Consolidate some rather tricky bit shift calculations.
  631. * This is hot-path stuff, so not a function.
  632. * Simple code, with two return values, so macro rather than inline.
  633. *
  634. * port is the sole input, in range 0..7.
  635. * shift is one output, for use with the main_cause and main_mask registers.
  636. * hardport is the other output, in range 0..3
  637. *
  638. * Note that port and hardport may be the same variable in some cases.
  639. */
  640. #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
  641. { \
  642. shift = mv_hc_from_port(port) * HC_SHIFT; \
  643. hardport = mv_hardport_from_port(port); \
  644. shift += hardport * 2; \
  645. }
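/*
 * Worked example (added commentary, not in the original driver): for
 * port 5, mv_hc_from_port(5) == 1 and mv_hardport_from_port(5) == 1, so
 * MV_PORT_TO_SHIFT_AND_HARDPORT(5, shift, hardport) leaves hardport == 1
 * and shift == 1 * HC_SHIFT + 1 * 2 == 11; the per-port ERR_IRQ/DONE_IRQ
 * pair for that port is then presumably located as (bit << shift) in the
 * main cause/mask registers.
 */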
  646. static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
  647. {
  648. return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
  649. }
  650. static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
  651. unsigned int port)
  652. {
  653. return mv_hc_base(base, mv_hc_from_port(port));
  654. }
  655. static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
  656. {
  657. return mv_hc_base_from_port(base, port) +
  658. MV_SATAHC_ARBTR_REG_SZ +
  659. (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
  660. }
  661. static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
  662. {
  663. void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
  664. unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
  665. return hc_mmio + ofs;
  666. }
  667. static inline void __iomem *mv_host_base(struct ata_host *host)
  668. {
  669. struct mv_host_priv *hpriv = host->private_data;
  670. return hpriv->base;
  671. }
  672. static inline void __iomem *mv_ap_base(struct ata_port *ap)
  673. {
  674. return mv_port_base(mv_host_base(ap->host), ap->port_no);
  675. }
  676. static inline int mv_get_hc_count(unsigned long port_flags)
  677. {
  678. return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
  679. }
  680. static void mv_set_edma_ptrs(void __iomem *port_mmio,
  681. struct mv_host_priv *hpriv,
  682. struct mv_port_priv *pp)
  683. {
  684. u32 index;
  685. /*
  686. * initialize request queue
  687. */
  688. pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
  689. index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
  690. WARN_ON(pp->crqb_dma & 0x3ff);
  691. writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
  692. writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
  693. port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
  694. if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
  695. writelfl((pp->crqb_dma & 0xffffffff) | index,
  696. port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
  697. else
  698. writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
  699. /*
  700. * initialize response queue
  701. */
  702. pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
  703. index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
  704. WARN_ON(pp->crpb_dma & 0xff);
  705. writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
  706. if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
  707. writelfl((pp->crpb_dma & 0xffffffff) | index,
  708. port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
  709. else
  710. writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
  711. writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
  712. port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
  713. }
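/*
 * Worked example (added commentary): with EDMA_REQ_Q_PTR_SHIFT == 5 and a
 * queue depth of 32, a software req_idx of 3 becomes index == 3 << 5 ==
 * 0x60, i.e. the pointer occupies bits 5-9 of the IN/OUT pointer register,
 * while bits 10-31 (EDMA_REQ_Q_BASE_LO_MASK == 0xfffffc00) hold the low
 * part of the CRQB queue's DMA base address, hence the 1KB alignment
 * check WARN_ON(pp->crqb_dma & 0x3ff) above.
 */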
  714. /**
  715. * mv_start_dma - Enable eDMA engine
  716. * @base: port base address
  717. * @pp: port private data
  718. *
  719. * Verify the local cache of the eDMA state is accurate with a
  720. * WARN_ON.
  721. *
  722. * LOCKING:
  723. * Inherited from caller.
  724. */
  725. static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
  726. struct mv_port_priv *pp, u8 protocol)
  727. {
  728. int want_ncq = (protocol == ATA_PROT_NCQ);
  729. if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
  730. int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
  731. if (want_ncq != using_ncq)
  732. mv_stop_edma(ap);
  733. }
  734. if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
  735. struct mv_host_priv *hpriv = ap->host->private_data;
  736. int hardport = mv_hardport_from_port(ap->port_no);
  737. void __iomem *hc_mmio = mv_hc_base_from_port(
  738. mv_host_base(ap->host), hardport);
  739. u32 hc_irq_cause, ipending;
  740. /* clear EDMA event indicators, if any */
  741. writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  742. /* clear EDMA interrupt indicator, if any */
  743. hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
  744. ipending = (DEV_IRQ | DMA_IRQ) << hardport;
  745. if (hc_irq_cause & ipending) {
  746. writelfl(hc_irq_cause & ~ipending,
  747. hc_mmio + HC_IRQ_CAUSE_OFS);
  748. }
  749. mv_edma_cfg(ap, want_ncq);
  750. /* clear FIS IRQ Cause */
  751. writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
  752. mv_set_edma_ptrs(port_mmio, hpriv, pp);
  753. writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
  754. pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
  755. }
  756. }
  757. /**
  758. * mv_stop_edma_engine - Disable eDMA engine
  759. * @port_mmio: io base address
  760. *
  761. * LOCKING:
  762. * Inherited from caller.
  763. */
  764. static int mv_stop_edma_engine(void __iomem *port_mmio)
  765. {
  766. int i;
  767. /* Disable eDMA. The disable bit auto clears. */
  768. writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
  769. /* Wait for the chip to confirm eDMA is off. */
  770. for (i = 10000; i > 0; i--) {
  771. u32 reg = readl(port_mmio + EDMA_CMD_OFS);
  772. if (!(reg & EDMA_EN))
  773. return 0;
  774. udelay(10);
  775. }
  776. return -EIO;
  777. }
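/*
 * Added note: the polling loop above bounds the wait at roughly
 * 10000 * udelay(10) == 100 ms before giving up and returning -EIO.
 */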
  778. static int mv_stop_edma(struct ata_port *ap)
  779. {
  780. void __iomem *port_mmio = mv_ap_base(ap);
  781. struct mv_port_priv *pp = ap->private_data;
  782. if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
  783. return 0;
  784. pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
  785. if (mv_stop_edma_engine(port_mmio)) {
  786. ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
  787. return -EIO;
  788. }
  789. return 0;
  790. }
  791. #ifdef ATA_DEBUG
  792. static void mv_dump_mem(void __iomem *start, unsigned bytes)
  793. {
  794. int b, w;
  795. for (b = 0; b < bytes; ) {
  796. DPRINTK("%p: ", start + b);
  797. for (w = 0; b < bytes && w < 4; w++) {
  798. printk("%08x ", readl(start + b));
  799. b += sizeof(u32);
  800. }
  801. printk("\n");
  802. }
  803. }
  804. #endif
  805. static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
  806. {
  807. #ifdef ATA_DEBUG
  808. int b, w;
  809. u32 dw;
  810. for (b = 0; b < bytes; ) {
  811. DPRINTK("%02x: ", b);
  812. for (w = 0; b < bytes && w < 4; w++) {
  813. (void) pci_read_config_dword(pdev, b, &dw);
  814. printk("%08x ", dw);
  815. b += sizeof(u32);
  816. }
  817. printk("\n");
  818. }
  819. #endif
  820. }
  821. static void mv_dump_all_regs(void __iomem *mmio_base, int port,
  822. struct pci_dev *pdev)
  823. {
  824. #ifdef ATA_DEBUG
  825. void __iomem *hc_base = mv_hc_base(mmio_base,
  826. port >> MV_PORT_HC_SHIFT);
  827. void __iomem *port_base;
  828. int start_port, num_ports, p, start_hc, num_hcs, hc;
  829. if (0 > port) {
  830. start_hc = start_port = 0;
  831. num_ports = 8; /* should be benign for 4-port devices */
  832. num_hcs = 2;
  833. } else {
  834. start_hc = port >> MV_PORT_HC_SHIFT;
  835. start_port = port;
  836. num_ports = num_hcs = 1;
  837. }
  838. DPRINTK("All registers for port(s) %u-%u:\n", start_port,
  839. num_ports > 1 ? num_ports - 1 : start_port);
  840. if (NULL != pdev) {
  841. DPRINTK("PCI config space regs:\n");
  842. mv_dump_pci_cfg(pdev, 0x68);
  843. }
  844. DPRINTK("PCI regs:\n");
  845. mv_dump_mem(mmio_base+0xc00, 0x3c);
  846. mv_dump_mem(mmio_base+0xd00, 0x34);
  847. mv_dump_mem(mmio_base+0xf00, 0x4);
  848. mv_dump_mem(mmio_base+0x1d00, 0x6c);
  849. for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
  850. hc_base = mv_hc_base(mmio_base, hc);
  851. DPRINTK("HC regs (HC %i):\n", hc);
  852. mv_dump_mem(hc_base, 0x1c);
  853. }
  854. for (p = start_port; p < start_port + num_ports; p++) {
  855. port_base = mv_port_base(mmio_base, p);
  856. DPRINTK("EDMA regs (port %i):\n", p);
  857. mv_dump_mem(port_base, 0x54);
  858. DPRINTK("SATA regs (port %i):\n", p);
  859. mv_dump_mem(port_base+0x300, 0x60);
  860. }
  861. #endif
  862. }
  863. static unsigned int mv_scr_offset(unsigned int sc_reg_in)
  864. {
  865. unsigned int ofs;
  866. switch (sc_reg_in) {
  867. case SCR_STATUS:
  868. case SCR_CONTROL:
  869. case SCR_ERROR:
  870. ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
  871. break;
  872. case SCR_ACTIVE:
  873. ofs = SATA_ACTIVE_OFS; /* active is not with the others */
  874. break;
  875. default:
  876. ofs = 0xffffffffU;
  877. break;
  878. }
  879. return ofs;
  880. }
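/*
 * Resulting mapping (added note, assuming the usual libata numbering
 * SCR_STATUS == 0, SCR_ERROR == 1, SCR_CONTROL == 2):
 *   SCR_STATUS  -> 0x300, SCR_ERROR -> 0x304, SCR_CONTROL -> 0x308,
 *   SCR_ACTIVE  -> SATA_ACTIVE_OFS (0x350), anything else -> -EINVAL
 * in the mv_scr_read()/mv_scr_write() callers below.
 */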
  881. static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
  882. {
  883. unsigned int ofs = mv_scr_offset(sc_reg_in);
  884. if (ofs != 0xffffffffU) {
  885. *val = readl(mv_ap_base(ap) + ofs);
  886. return 0;
  887. } else
  888. return -EINVAL;
  889. }
  890. static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  891. {
  892. unsigned int ofs = mv_scr_offset(sc_reg_in);
  893. if (ofs != 0xffffffffU) {
  894. writelfl(val, mv_ap_base(ap) + ofs);
  895. return 0;
  896. } else
  897. return -EINVAL;
  898. }
  899. static void mv6_dev_config(struct ata_device *adev)
  900. {
  901. /*
  902. * Deal with Gen-II ("mv6") hardware quirks/restrictions:
  903. *
  904. * Gen-II does not support NCQ over a port multiplier
  905. * (no FIS-based switching).
  906. *
  907. * We don't have hob_nsect when doing NCQ commands on Gen-II.
  908. * See mv_qc_prep() for more info.
  909. */
  910. if (adev->flags & ATA_DFLAG_NCQ) {
  911. if (sata_pmp_attached(adev->link->ap)) {
  912. adev->flags &= ~ATA_DFLAG_NCQ;
  913. ata_dev_printk(adev, KERN_INFO,
  914. "NCQ disabled for command-based switching\n");
  915. } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
  916. adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
  917. ata_dev_printk(adev, KERN_INFO,
  918. "max_sectors limited to %u for NCQ\n",
  919. adev->max_sectors);
  920. }
  921. }
  922. }
  923. static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
  924. {
  925. u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
  926. /*
  927. * Various bit settings required for operation
  928. * in FIS-based switching (fbs) mode on GenIIe:
  929. */
  930. old_fcfg = readl(port_mmio + FIS_CFG_OFS);
  931. old_ltmode = readl(port_mmio + LTMODE_OFS);
  932. if (enable_fbs) {
  933. new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
  934. new_ltmode = old_ltmode | LTMODE_BIT8;
  935. } else { /* disable fbs */
  936. new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
  937. new_ltmode = old_ltmode & ~LTMODE_BIT8;
  938. }
  939. if (new_fcfg != old_fcfg)
  940. writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
  941. if (new_ltmode != old_ltmode)
  942. writelfl(new_ltmode, port_mmio + LTMODE_OFS);
  943. }
  944. static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
  945. {
  946. u32 cfg;
  947. struct mv_port_priv *pp = ap->private_data;
  948. struct mv_host_priv *hpriv = ap->host->private_data;
  949. void __iomem *port_mmio = mv_ap_base(ap);
  950. /* set up non-NCQ EDMA configuration */
  951. cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
  952. if (IS_GEN_I(hpriv))
  953. cfg |= (1 << 8); /* enab config burst size mask */
  954. else if (IS_GEN_II(hpriv))
  955. cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
  956. else if (IS_GEN_IIE(hpriv)) {
  957. cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
  958. cfg |= (1 << 22); /* enab 4-entry host queue cache */
  959. cfg |= (1 << 18); /* enab early completion */
  960. cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
  961. if (want_ncq && sata_pmp_attached(ap)) {
  962. cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
  963. mv_config_fbs(port_mmio, 1);
  964. } else {
  965. mv_config_fbs(port_mmio, 0);
  966. }
  967. }
  968. if (want_ncq) {
  969. cfg |= EDMA_CFG_NCQ;
  970. pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
  971. } else
  972. pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
  973. writelfl(cfg, port_mmio + EDMA_CFG_OFS);
  974. }
  975. static void mv_port_free_dma_mem(struct ata_port *ap)
  976. {
  977. struct mv_host_priv *hpriv = ap->host->private_data;
  978. struct mv_port_priv *pp = ap->private_data;
  979. int tag;
  980. if (pp->crqb) {
  981. dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
  982. pp->crqb = NULL;
  983. }
  984. if (pp->crpb) {
  985. dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
  986. pp->crpb = NULL;
  987. }
  988. /*
  989. * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
  990. * For later hardware, we have one unique sg_tbl per NCQ tag.
  991. */
  992. for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
  993. if (pp->sg_tbl[tag]) {
  994. if (tag == 0 || !IS_GEN_I(hpriv))
  995. dma_pool_free(hpriv->sg_tbl_pool,
  996. pp->sg_tbl[tag],
  997. pp->sg_tbl_dma[tag]);
  998. pp->sg_tbl[tag] = NULL;
  999. }
  1000. }
  1001. }
  1002. /**
  1003. * mv_port_start - Port specific init/start routine.
  1004. * @ap: ATA channel to manipulate
  1005. *
  1006. * Allocate and point to DMA memory, init port private memory,
  1007. * zero indices.
  1008. *
  1009. * LOCKING:
  1010. * Inherited from caller.
  1011. */
  1012. static int mv_port_start(struct ata_port *ap)
  1013. {
  1014. struct device *dev = ap->host->dev;
  1015. struct mv_host_priv *hpriv = ap->host->private_data;
  1016. struct mv_port_priv *pp;
  1017. int tag;
  1018. pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
  1019. if (!pp)
  1020. return -ENOMEM;
  1021. ap->private_data = pp;
  1022. pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
  1023. if (!pp->crqb)
  1024. return -ENOMEM;
  1025. memset(pp->crqb, 0, MV_CRQB_Q_SZ);
  1026. pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
  1027. if (!pp->crpb)
  1028. goto out_port_free_dma_mem;
  1029. memset(pp->crpb, 0, MV_CRPB_Q_SZ);
  1030. /*
  1031. * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
  1032. * For later hardware, we need one unique sg_tbl per NCQ tag.
  1033. */
  1034. for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
  1035. if (tag == 0 || !IS_GEN_I(hpriv)) {
  1036. pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
  1037. GFP_KERNEL, &pp->sg_tbl_dma[tag]);
  1038. if (!pp->sg_tbl[tag])
  1039. goto out_port_free_dma_mem;
  1040. } else {
  1041. pp->sg_tbl[tag] = pp->sg_tbl[0];
  1042. pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
  1043. }
  1044. }
  1045. return 0;
  1046. out_port_free_dma_mem:
  1047. mv_port_free_dma_mem(ap);
  1048. return -ENOMEM;
  1049. }
  1050. /**
  1051. * mv_port_stop - Port specific cleanup/stop routine.
  1052. * @ap: ATA channel to manipulate
  1053. *
  1054. * Stop DMA, cleanup port memory.
  1055. *
  1056. * LOCKING:
  1057. * This routine uses the host lock to protect the DMA stop.
  1058. */
  1059. static void mv_port_stop(struct ata_port *ap)
  1060. {
  1061. mv_stop_edma(ap);
  1062. mv_port_free_dma_mem(ap);
  1063. }
  1064. /**
  1065. * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
  1066. * @qc: queued command whose SG list to source from
  1067. *
  1068. * Populate the SG list and mark the last entry.
  1069. *
  1070. * LOCKING:
  1071. * Inherited from caller.
  1072. */
  1073. static void mv_fill_sg(struct ata_queued_cmd *qc)
  1074. {
  1075. struct mv_port_priv *pp = qc->ap->private_data;
  1076. struct scatterlist *sg;
  1077. struct mv_sg *mv_sg, *last_sg = NULL;
  1078. unsigned int si;
  1079. mv_sg = pp->sg_tbl[qc->tag];
  1080. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  1081. dma_addr_t addr = sg_dma_address(sg);
  1082. u32 sg_len = sg_dma_len(sg);
  1083. while (sg_len) {
  1084. u32 offset = addr & 0xffff;
  1085. u32 len = sg_len;
  1086. if ((offset + sg_len > 0x10000))
  1087. len = 0x10000 - offset;
  1088. mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
  1089. mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
  1090. mv_sg->flags_size = cpu_to_le32(len & 0xffff);
  1091. sg_len -= len;
  1092. addr += len;
  1093. last_sg = mv_sg;
  1094. mv_sg++;
  1095. }
  1096. }
  1097. if (likely(last_sg))
  1098. last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
  1099. }
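/*
 * Worked example (added commentary): a segment at bus address 0x1234f000
 * with length 0x3000 has offset == 0xf000, so the loop above emits two
 * ePRDs (0x1000 bytes at 0x1234f000, then 0x2000 bytes at 0x12350000),
 * and only the very last ePRD of the command gets EPRD_FLAG_END_OF_TBL.
 */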
  1100. static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
  1101. {
  1102. u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
  1103. (last ? CRQB_CMD_LAST : 0);
  1104. *cmdw = cpu_to_le16(tmp);
  1105. }
  1106. /**
  1107. * mv_qc_prep - Host specific command preparation.
  1108. * @qc: queued command to prepare
  1109. *
  1110. * This routine simply redirects to the general purpose routine
  1111. * if command is not DMA. Else, it handles prep of the CRQB
  1112. * (command request block), does some sanity checking, and calls
  1113. * the SG load routine.
  1114. *
  1115. * LOCKING:
  1116. * Inherited from caller.
  1117. */
  1118. static void mv_qc_prep(struct ata_queued_cmd *qc)
  1119. {
  1120. struct ata_port *ap = qc->ap;
  1121. struct mv_port_priv *pp = ap->private_data;
  1122. __le16 *cw;
  1123. struct ata_taskfile *tf;
  1124. u16 flags = 0;
  1125. unsigned in_index;
  1126. if ((qc->tf.protocol != ATA_PROT_DMA) &&
  1127. (qc->tf.protocol != ATA_PROT_NCQ))
  1128. return;
  1129. /* Fill in command request block
  1130. */
  1131. if (!(qc->tf.flags & ATA_TFLAG_WRITE))
  1132. flags |= CRQB_FLAG_READ;
  1133. WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
  1134. flags |= qc->tag << CRQB_TAG_SHIFT;
  1135. flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
  1136. /* get current queue index from software */
  1137. in_index = pp->req_idx;
  1138. pp->crqb[in_index].sg_addr =
  1139. cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
  1140. pp->crqb[in_index].sg_addr_hi =
  1141. cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
  1142. pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
  1143. cw = &pp->crqb[in_index].ata_cmd[0];
  1144. tf = &qc->tf;
  1145. * Sadly, the CRQB cannot accommodate all registers--there are
  1146. * only 11 bytes...so we must pick and choose required
  1147. * registers based on the command. So, we drop feature and
  1148. * hob_feature for [RW] DMA commands, but they are needed for
  1149. * NCQ. NCQ will drop hob_nsect.
  1150. */
  1151. switch (tf->command) {
  1152. case ATA_CMD_READ:
  1153. case ATA_CMD_READ_EXT:
  1154. case ATA_CMD_WRITE:
  1155. case ATA_CMD_WRITE_EXT:
  1156. case ATA_CMD_WRITE_FUA_EXT:
  1157. mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
  1158. break;
  1159. case ATA_CMD_FPDMA_READ:
  1160. case ATA_CMD_FPDMA_WRITE:
  1161. mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
  1162. mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
  1163. break;
  1164. default:
  1165. /* The only other commands EDMA supports in non-queued and
  1166. * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
  1167. * of which are defined/used by Linux. If we get here, this
  1168. * driver needs work.
  1169. *
  1170. * FIXME: modify libata to give qc_prep a return value and
  1171. * return error here.
  1172. */
  1173. BUG_ON(tf->command);
  1174. break;
  1175. }
  1176. mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
  1177. mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
  1178. mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
  1179. mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
  1180. mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
  1181. mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
  1182. mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
  1183. mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
  1184. mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
  1185. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  1186. return;
  1187. mv_fill_sg(qc);
  1188. }
  1189. /**
  1190. * mv_qc_prep_iie - Host specific command preparation.
  1191. * @qc: queued command to prepare
  1192. *
  1193. * This routine simply redirects to the general purpose routine
  1194. * if command is not DMA. Else, it handles prep of the CRQB
  1195. * (command request block), does some sanity checking, and calls
  1196. * the SG load routine.
  1197. *
  1198. * LOCKING:
  1199. * Inherited from caller.
  1200. */
  1201. static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
  1202. {
  1203. struct ata_port *ap = qc->ap;
  1204. struct mv_port_priv *pp = ap->private_data;
  1205. struct mv_crqb_iie *crqb;
  1206. struct ata_taskfile *tf;
  1207. unsigned in_index;
  1208. u32 flags = 0;
  1209. if ((qc->tf.protocol != ATA_PROT_DMA) &&
  1210. (qc->tf.protocol != ATA_PROT_NCQ))
  1211. return;
  1212. /* Fill in Gen IIE command request block */
  1213. if (!(qc->tf.flags & ATA_TFLAG_WRITE))
  1214. flags |= CRQB_FLAG_READ;
  1215. WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
  1216. flags |= qc->tag << CRQB_TAG_SHIFT;
  1217. flags |= qc->tag << CRQB_HOSTQ_SHIFT;
  1218. flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
  1219. /* get current queue index from software */
  1220. in_index = pp->req_idx;
  1221. crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
  1222. crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
  1223. crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
  1224. crqb->flags = cpu_to_le32(flags);
  1225. tf = &qc->tf;
  1226. crqb->ata_cmd[0] = cpu_to_le32(
  1227. (tf->command << 16) |
  1228. (tf->feature << 24)
  1229. );
  1230. crqb->ata_cmd[1] = cpu_to_le32(
  1231. (tf->lbal << 0) |
  1232. (tf->lbam << 8) |
  1233. (tf->lbah << 16) |
  1234. (tf->device << 24)
  1235. );
  1236. crqb->ata_cmd[2] = cpu_to_le32(
  1237. (tf->hob_lbal << 0) |
  1238. (tf->hob_lbam << 8) |
  1239. (tf->hob_lbah << 16) |
  1240. (tf->hob_feature << 24)
  1241. );
  1242. crqb->ata_cmd[3] = cpu_to_le32(
  1243. (tf->nsect << 0) |
  1244. (tf->hob_nsect << 8)
  1245. );
  1246. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  1247. return;
  1248. mv_fill_sg(qc);
  1249. }
  1250. /**
  1251. * mv_qc_issue - Initiate a command to the host
  1252. * @qc: queued command to start
  1253. *
  1254. * This routine simply redirects to the general purpose routine
  1255. * if command is not DMA. Else, it sanity checks our local
  1256. * caches of the request producer/consumer indices then enables
  1257. * DMA and bumps the request producer index.
  1258. *
  1259. * LOCKING:
  1260. * Inherited from caller.
  1261. */
  1262. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
  1263. {
  1264. struct ata_port *ap = qc->ap;
  1265. void __iomem *port_mmio = mv_ap_base(ap);
  1266. struct mv_port_priv *pp = ap->private_data;
  1267. u32 in_index;
  1268. if ((qc->tf.protocol != ATA_PROT_DMA) &&
  1269. (qc->tf.protocol != ATA_PROT_NCQ)) {
  1270. /*
  1271. * We're about to send a non-EDMA capable command to the
  1272. * port. Turn off EDMA so there won't be problems accessing
  1273. * shadow block, etc registers.
  1274. */
  1275. mv_stop_edma(ap);
  1276. mv_pmp_select(ap, qc->dev->link->pmp);
  1277. return ata_sff_qc_issue(qc);
  1278. }
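
	/*
	 * EDMA path: the CRQB for this command was already built by the
	 * qc_prep hook; ensure the EDMA engine is started, then advance the
	 * software request-producer index and publish it to the hardware
	 * below.
	 */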
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		qc = NULL;
	return qc;
}

static void mv_unexpected_intr(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "";

	/*
	 * We got a device interrupt from something that
	 * was supposed to be using EDMA or polling.
	 */
	ata_ehi_clear_desc(ehi);
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		when = " while EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = " while polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: active queued command, if any
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which also performs a COMRESET.
 * The SERR case requires a clear of pending errors in the SATA
 * SERROR register.  Finally, if the port disabled DMA,
 * update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		qc->err_mask |= ac_err_mask(ata_status);
		ata_qc_complete(qc);
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @main_cause: Main interrupt cause register for the chip.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
	u32 hc_irq_cause = 0;
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		unsigned int shift, hardport, port_cause;
		/*
		 * When we move to the second hc, flag our cached
		 * copies of hc_mmio (and hc_irq_cause) as invalid again.
		 */
		if (port == MV_PORTS_PER_HC)
			hc_mmio = NULL;
		/*
		 * Do nothing if port is not interrupting or is disabled:
		 */
		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
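		/*
		 * 'shift' locates this port's DONE_IRQ/ERR_IRQ bit pair in
		 * the main cause register; 'hardport' is the port's index
		 * within its host controller (0..MV_PORTS_PER_HC-1), used
		 * below with the per-HC DMA_IRQ/DEV_IRQ bits.
		 */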
		port_cause = (main_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;
		/*
		 * Each hc within the host has its own hc_irq_cause register.
		 * We defer reading it until we know we need it, right now:
		 *
		 * FIXME later: we don't really need to read this register
		 * (some logic changes required below if we go that way),
		 * because it doesn't tell us anything new.  But we do need
		 * to write to it, outside the top of this loop,
		 * to reset the interrupt triggers for next time.
		 */
		if (!hc_mmio) {
			hc_mmio = mv_hc_base_from_port(mmio, port);
			hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
			writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Process completed CRPB response(s) before other events.
		 */
		pp = ap->private_data;
		if (hc_irq_cause & (DMA_IRQ << hardport)) {
			if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
				mv_process_crpb_entries(ap, pp);
		}
		/*
		 * Handle chip-reported errors, or continue on to handle PIO.
		 */
		if (unlikely(port_cause & ERR_IRQ)) {
			mv_err_intr(ap, mv_get_active_qc(ap));
		} else if (hc_irq_cause & (DEV_IRQ << hardport)) {
			if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
				struct ata_queued_cmd *qc = mv_get_active_qc(ap);
				if (qc) {
					ata_sff_host_intr(ap, qc);
					continue;
				}
			}
			mv_unexpected_intr(ap);
		}
	}
	return handled;
}

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read-only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	u32 main_cause, main_mask;

	spin_lock(&host->lock);

	main_cause = readl(hpriv->main_cause_reg_addr);
	main_mask  = readl(hpriv->main_mask_reg_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if ((main_cause & main_mask) && (main_cause != 0xffffffffU)) {
		if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, main_cause);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
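	/* mask covers the pre-emphasis (bits 12:11) and amplitude (bits 7:5)
	 * fields, which are restored from hpriv->signal[] below.
	 */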
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}

/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
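		/*
		 * SStatus 0x113/0x123 indicate an established link at
		 * 1.5/3.0 Gb/s; 0x121 indicates a device was detected but
		 * communications were not established, so drop to 1.5 Gb/s
		 * below and retry.
		 */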
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_mask;

	/* FIXME: handle coalescing completion events properly */

	mv_stop_edma(ap);
	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* disable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
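		/* fall through: 7042 shares the Gen-IIE handling below */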
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = mmio + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell
 * host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;
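
	/*
	 * Note: the platform resource maps only the SATAHC0 register block,
	 * while the rest of this driver computes offsets from the start of
	 * the chip's register space; subtracting MV_SATAHC0_REG_BASE above
	 * re-biases hpriv->base so those offset helpers keep working.
	 */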

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 *
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);