/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is not worth the
 *       latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.21"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
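	/*
	 * For reference, with MV_MAX_Q_DEPTH == 32 the sizes above work
	 * out to: MV_CRQB_Q_SZ = 32 * 32 = 1024 bytes (1KB, matching the
	 * CRQB queue alignment), MV_CRPB_Q_SZ = 8 * 32 = 256 bytes
	 * (matching the CRPB alignment), and MV_SG_TBL_SZ = 16 * 256 =
	 * 4096 bytes (one table of 256 16-byte ePRDs).
	 */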
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,

	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	MV_GENIIE_FLAGS		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */
	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),
	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);

static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
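/* A worked example of that splitting: a segment handed to mv_fill_sg()
 * with addr == 0x1fff0 and sg_len == 0x8000 has offset == 0xfff0, so
 * offset + sg_len crosses 0x10000 and the segment is emitted as two
 * ePRDs: (0x1fff0, len 0x10) and (0x20000, len 0x7ff0).  One extra
 * entry per segment in the worst case is why sg_tablesize is halved.
 */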
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
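/*
 * For example, port == 5 gives mv_hc_from_port(5) == 1 and
 * mv_hardport_from_port(5) == 1, so the macro computes
 * shift == (1 * HC_SHIFT) + (1 * 2) == 11 and hardport == 1:
 * port 5's ERR_IRQ and DONE_IRQ bits sit at bits 11 and 12 of the
 * main_irq_cause/main_irq_mask registers.
 */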
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
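/*
 * For example, port 5 lives on HC 1 as hardport 1, so its registers sit
 * at base + 0x20000 + (1 * 0x10000)	(mv_hc_base for HC 1)
 *	    + 0x2000			(skip the arbiter block)
 *	    + (1 * 0x2000)		(hardport offset)
 * == base + 0x34000.
 */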
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
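/*
 * The "(dma >> 16) >> 16" form used above extracts the upper 32 bits of
 * a dma_addr_t; unlike a single ">> 32", it is also well-defined when
 * dma_addr_t is only 32 bits wide (the double shift then simply yields
 * zero instead of invoking undefined behaviour).
 */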
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		if (IS_GEN_IIE(hpriv))
			writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
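/*
 * The polling loop above allows 10000 iterations of 10us each, i.e. the
 * chip gets up to roughly 100ms to confirm that eDMA is off before we
 * give up and report -EIO.
 */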
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
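/*
 * With libata's SCR numbering (SCR_STATUS == 0, SCR_ERROR == 1,
 * SCR_CONTROL == 2), the arithmetic above yields 0x300 for SStatus,
 * 0x304 for SError, and 0x308 for SControl; only SActive lives apart,
 * at SATA_ACTIVE_OFS (0x350).
 */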
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/*
		 * The port is operating in host queuing mode (EDMA).
		 * It can accommodate a new qc if the qc protocol
  997. * is compatible with the current host queue mode.
  998. */
  999. if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
  1000. /*
  1001. * The host queue (EDMA) is in NCQ mode.
  1002. * If the new qc is also an NCQ command,
  1003. * then allow the new qc.
  1004. */
  1005. if (qc->tf.protocol == ATA_PROT_NCQ)
  1006. return 0;
  1007. } else {
  1008. /*
  1009. * The host queue (EDMA) is in non-NCQ, DMA mode.
  1010. * If the new qc is also a non-NCQ, DMA command,
  1011. * then allow the new qc.
  1012. */
  1013. if (qc->tf.protocol == ATA_PROT_DMA)
  1014. return 0;
  1015. }
  1016. }
  1017. return ATA_DEFER_PORT;
  1018. }
  1019. static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
  1020. {
  1021. u32 new_fiscfg, old_fiscfg;
  1022. u32 new_ltmode, old_ltmode;
  1023. u32 new_haltcond, old_haltcond;
  1024. old_fiscfg = readl(port_mmio + FISCFG_OFS);
  1025. old_ltmode = readl(port_mmio + LTMODE_OFS);
  1026. old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
  1027. new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
  1028. new_ltmode = old_ltmode & ~LTMODE_BIT8;
  1029. new_haltcond = old_haltcond | EDMA_ERR_DEV;
  1030. if (want_fbs) {
  1031. new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
  1032. new_ltmode = old_ltmode | LTMODE_BIT8;
  1033. if (want_ncq)
  1034. new_haltcond &= ~EDMA_ERR_DEV;
  1035. else
  1036. new_fiscfg |= FISCFG_WAIT_DEV_ERR;
  1037. }
  1038. if (new_fiscfg != old_fiscfg)
  1039. writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
  1040. if (new_ltmode != old_ltmode)
  1041. writelfl(new_ltmode, port_mmio + LTMODE_OFS);
  1042. if (new_haltcond != old_haltcond)
  1043. writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
  1044. }
  1045. static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
  1046. {
  1047. struct mv_host_priv *hpriv = ap->host->private_data;
  1048. u32 old, new;
  1049. /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
  1050. old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
  1051. if (want_ncq)
  1052. new = old | (1 << 22);
  1053. else
  1054. new = old & ~(1 << 22);
  1055. if (new != old)
  1056. writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
  1057. }
  1058. static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
  1059. {
  1060. u32 cfg;
  1061. struct mv_port_priv *pp = ap->private_data;
  1062. struct mv_host_priv *hpriv = ap->host->private_data;
  1063. void __iomem *port_mmio = mv_ap_base(ap);
  1064. /* set up non-NCQ EDMA configuration */
  1065. cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
  1066. pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;
  1067. if (IS_GEN_I(hpriv))
  1068. cfg |= (1 << 8); /* enab config burst size mask */
  1069. else if (IS_GEN_II(hpriv)) {
  1070. cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
  1071. mv_60x1_errata_sata25(ap, want_ncq);
  1072. } else if (IS_GEN_IIE(hpriv)) {
  1073. int want_fbs = sata_pmp_attached(ap);
  1074. /*
  1075. * Possible future enhancement:
  1076. *
  1077. * The chip can use FBS with non-NCQ, if we allow it,
  1078. * But first we need to have the error handling in place
  1079. * for this mode (datasheet section 7.3.15.4.2.3).
  1080. * So disallow non-NCQ FBS for now.
  1081. */
  1082. want_fbs &= want_ncq;
  1083. mv_config_fbs(port_mmio, want_ncq, want_fbs);
  1084. if (want_fbs) {
  1085. pp->pp_flags |= MV_PP_FLAG_FBS_EN;
  1086. cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
  1087. }
  1088. cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
  1089. cfg |= (1 << 22); /* enab 4-entry host queue cache */
  1090. if (!IS_SOC(hpriv))
  1091. cfg |= (1 << 18); /* enab early completion */
  1092. if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
  1093. cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
  1094. }
  1095. if (want_ncq) {
  1096. cfg |= EDMA_CFG_NCQ;
  1097. pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
  1098. } else
  1099. pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
  1100. writelfl(cfg, port_mmio + EDMA_CFG_OFS);
  1101. }
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
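
/*
 * mv_crqb_pack_cmd - encode one shadow-register write into a CRQB word.
 *
 * Each 16-bit CRQB command word carries the register address, the data
 * byte to write, and a flag marking the final word of the command block.
 */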
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_enable_port_irqs(ap, ERR_IRQ);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
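
/*
 * mv_get_active_qc - return the non-NCQ command in flight, if any.
 *
 * With NCQ enabled there is no single "active" command, so this
 * returns NULL; polled commands are also excluded, since their
 * completions are not serviced from the interrupt path.
 */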
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		qc = NULL;
	return qc;
}
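
/*
 * mv_pmp_error_handler - PMP-aware error handler entry point.
 *
 * If a delayed-EH condition was flagged (FBS+NCQ device error), run
 * NCQ error analysis on each failed PMP link and freeze the port
 * before handing off to the generic sata_pmp_error_handler().
 */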
static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}
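
/*
 * mv_get_err_pmp_map - read the per-PMP device-error bitmap.
 *
 * The upper 16 bits of the SATA test control register hold one bit
 * per PMP link that saw a device error.
 */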
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		when = "disabled";
	} else if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      Most cases require a full reset of the chip's state machine,
 *      which also performs a COMRESET.
 *      Also, if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & SATA_FIS_IRQ_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}
	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}
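
/*
 * mv_process_crpb_entries - complete commands from the response queue.
 *
 * Walks the CRPB ring from our cached consumer index up to the
 * hardware's producer index, completing each response, and then
 * writes the new consumer index back to the hardware.
 */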
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
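
/*
 * mv_port_intr - service DONE and/or ERR interrupt causes for one port.
 *
 * Completed CRPB responses are processed first, then chip-reported
 * errors; if EDMA was not active, the interrupt is forwarded to the
 * SFF (PIO) handler instead.
 */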
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @main_irq_cause: Main interrupt cause register for the chip.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read-only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);
	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
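
/*
 * mv5_scr_offset - map an SCR register number to its 50xx mmio offset.
 *
 * On Gen-I chips, SStatus/SError/SControl live in the PHY block as
 * consecutive 32-bit words; any other register is rejected with an
 * all-ones offset.
 */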
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE_OFS);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE_OFS);

		tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE_OFS);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE_OFS);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);
	writel(m3, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		/* enforce bit restrictions on GenIIe devices */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~0x5DE3FFFC) | (1 << 2);

		writel(m4, port_mmio + PHY_MODE4);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);
	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
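
/*
 * mv_setup_ifcfg - program the SATA interface configuration register.
 *
 * Applies the chip-spec magic bits and, when want_gen2i is set,
 * enables 3.0 Gb/s (gen2i) link speed negotiation.
 */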
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE_OFS);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + PCI_COMMAND_OFS);
		if (reg & PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}
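
/*
 * mv_chip_id - identify the controller variant and apply errata flags.
 *
 * Sets hpriv->ops and the MV_HP_* generation/errata flags from the
 * board index and PCI revision, and selects the PCI vs PCIe interrupt
 * register offsets.  Returns nonzero on an invalid board index.
 */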
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
  2649. /**
  2650. * mv_init_host - Perform some early initialization of the host.
  2651. * @host: ATA host to initialize
  2652. * @board_idx: controller index
  2653. *
  2654. * If possible, do an early global reset of the host. Then do
  2655. * our port init and clear/unmask all/relevant host interrupts.
  2656. *
  2657. * LOCKING:
  2658. * Inherited from caller.
  2659. */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;
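
	/*
	 * The top-level interrupt cause/mask registers live at
	 * different offsets on SoC and PCI hosts; cache the right
	 * pair of addresses up front.
	 */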
	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

		/*
		 * enable only global host interrupts for now.
		 * The per-port interrupts get done later as ports are set up.
		 */
		mv_set_main_irq_mask(host, 0, PCI_ERR);
	}
done:
	return rc;
}
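
/**
 * mv_create_dma_pools - allocate the per-host DMA pools.
 * @hpriv: host private data
 * @dev: device to charge the managed allocations to
 *
 * Creates managed pools for the command request queue (CRQB),
 * the command response queue (CRPB), and the scatter/gather
 * tables used by each port.  Returns 0 on success, or -ENOMEM
 * if any pool cannot be created.
 *
 * LOCKING:
 * Inherited from caller.
 */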
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
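
/**
 * mv_conf_mbus_windows - program the SoC MBUS address decode windows.
 * @hpriv: host private data
 * @dram: MBUS DRAM target information from the platform code
 *
 * Disables all four windows, then opens one window per DRAM chip
 * select so the controller can master transactions to memory.
 * Each WINDOW_CTRL value packs the window size (upper 16 bits),
 * the target attribute (bits 8-15), the target id (bits 4-7),
 * and an enable bit (bit 0).
 *
 * LOCKING:
 * Inherited from caller.
 */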
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
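
	/*
	 * The platform resource maps only the SATAHC0 register block,
	 * while the driver's accessors compute offsets from the start
	 * of the whole chip's register space; bias the base pointer
	 * accordingly after mapping.
	 */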
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	if (!hpriv->base)
		return -ENOMEM;
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/**
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
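/**
 * pci_go_64 - configure the widest usable DMA masks for the device.
 * @pdev: PCI device to configure
 *
 * Tries 64-bit streaming and consistent DMA masks first, falling
 * back to 32-bit masks when the platform or device cannot support
 * them.  Returns 0 on success or a negative errno on failure.
 */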
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the subclass code, so we can report whether the
	 * controller presents itself as a SCSI or a RAID device.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable MSI if requested; fall back to INTx if MSI setup fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif
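
/*
 * Register both the PCI and the platform drivers; if the platform
 * driver fails to register, back out the PCI registration so module
 * load either fully succeeds or fully fails.
 */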
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);