/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.27"

/*
 * module options
 */
static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,		/* offset 0x10: memory space */
	MV_IO_BAR = 2,			/* offset 0x18: IO space */
	MV_MISC_BAR = 3,		/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC = 150,	/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT = 255,	/* completed I/O count */

	MV_PCI_REG_BASE = 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	MV_COAL_REG_BASE = 0x18000,
	MV_IRQ_COAL_CAUSE = (MV_COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ = (1 << 4),	/* all ports irq event */

	MV_IRQ_COAL_IO_THRESHOLD = (MV_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	MV_TRAN_COAL_CAUSE_LO = (MV_COAL_REG_BASE + 0x88),
	MV_TRAN_COAL_CAUSE_HI = (MV_COAL_REG_BASE + 0x8c),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL_OFS = 0x1046c,
	MV_GPIO_PORT_CTL_OFS = 0x104f0,
	MV_RESET_CFG_OFS = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),	/* two SATA Host Controllers */

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
			  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,		/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT = 12,		/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT = 17,		/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,		/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,		/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */
	PCI_COMMAND_OFS = 0xc00,
	PCI_COMMAND_MWRCOM = (1 << 4),	/* PCI Master Write Combining */
	PCI_COMMAND_MRDTRIG = (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE_OFS = 0xd00,
	MV_PCI_MODE_MASK = 0x30,

	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT_OFS = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS = 0x1900,
	PCIE_IRQ_MASK_OFS = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ = (1 << 0),		/* shift by (2 * port #) */
	DONE_IRQ = (1 << 1),		/* shift by (2 * port #) */
	HC0_IRQ_PEND = 0x1ff,		/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,			/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3 = 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR = (1 << 18),
	TRAN_COAL_LO_DONE = (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE = (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS = 0,

	HC_IRQ_CAUSE_OFS = 0x14,
	DMA_IRQ = (1 << 0),		/* shift by port # */
	HC_COAL_IRQ = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD_OFS = 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD_OFS = 0x0010,

	SOC_LED_CTRL_OFS = 0x2c,
	SOC_LED_CTRL_BLINK = (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
					      /*  with dev activity LED */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	SATA_FIS_IRQ_CAUSE_OFS = 0x364,
	SATA_FIS_IRQ_AN = (1 << 9),	/* async notification */

	LTMODE_OFS = 0x30c,		/* requires read-after-write */
	LTMODE_BIT8 = (1 << 8),		/* unknown, but necessary */

	PHY_MODE2_OFS = 0x330,
	PHY_MODE3_OFS = 0x310,

	PHY_MODE4_OFS = 0x314,		/* requires read-after-write */
	PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
	PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
	PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */

	SATA_IFCTL_OFS = 0x344,
	SATA_TESTCTL_OFS = 0x348,
	SATA_IFSTAT_OFS = 0x34c,
	VENDOR_UNIQUE_FIS_OFS = 0x35c,

	FISCFG_OFS = 0x360,
	FISCFG_WAIT_DEV_ERR = (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC = (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE = 0x74,
	MV5_LTMODE_OFS = 0x30,
	MV5_PHY_CTL_OFS = 0x0C,
	SATA_INTERFACE_CFG_OFS = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0x1f,	/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
	EDMA_CFG_EDMA_FBS = (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS = (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_PRD_PAR |
			 EDMA_ERR_DEV_DCON |
			 EDMA_ERR_DEV_CON |
			 EDMA_ERR_SERR |
			 EDMA_ERR_SELF_DIS |
			 EDMA_ERR_CRQB_PAR |
			 EDMA_ERR_CRPB_PAR |
			 EDMA_ERR_INTRL_PAR |
			 EDMA_ERR_IORDY |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_PRD_PAR |
			   EDMA_ERR_DEV_DCON |
			   EDMA_ERR_DEV_CON |
			   EDMA_ERR_OVERRUN_5 |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,		/* EDMA command register */
	EDMA_EN = (1 << 0),		/* enable EDMA */
	EDMA_DS = (1 << 1),		/* disable EDMA; self-negated */
	EDMA_RESET = (1 << 2),		/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS = 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
	EDMA_STATUS_IDLE = (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS = 0x34,
	EDMA_ARB_CFG_OFS = 0x38,

	EDMA_HALTCOND_OFS = 0x60,	/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD_OFS = 0x6C,	/* GenIIe unknown/reserved */

	BMDMA_CMD_OFS = 0x224,		/* bmdma command register */
	BMDMA_STATUS_OFS = 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW_OFS = 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH_OFS = 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),		/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH = (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC = (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN = (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)	(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)	(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
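
/*
 * Illustrative note (not part of the original file): a per-generation ops
 * table is selected at probe time and stored in hpriv->ops, so generic code
 * can dispatch without knowing the chip family, along the lines of:
 *
 *	hpriv->ops->phy_errata(hpriv, mmio, port);
 *	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
 *
 * The concrete tables (mv5xxx_ops, mv6xxx_ops, mv_soc_ops) appear below.
 */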
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
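/*
 * Worked example (illustrative): in the worst case mv_fill_sg() splits a
 * single scatterlist entry into two ePRDs to honour the 64K length limit,
 * so advertising MV_MAX_SG_CT / 2 = 128 entries to the SCSI layer
 * guarantees the split result still fits within the MV_MAX_SG_CT = 256
 * ePRDs of one SG table.
 */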
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
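
/*
 * Illustrative note (not in the original source): PCI bridges may buffer
 * ("post") MMIO writes, so a plain writel() can return on the CPU long
 * before the device sees the value.  The dummy readl() above forces the
 * preceding write out to the chip first, e.g.:
 *
 *	writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 *	// EDMA is guaranteed to have seen the enable bit by this point
 */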
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
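
/*
 * Worked example (illustrative): for port = 5 we get hc = 5 >> 2 = 1 and
 * hardport = 5 & 3 = 1, so shift = 1 * HC_SHIFT + 1 * 2 = 11.  The port's
 * ERR_IRQ/DONE_IRQ pair thus lives at bits 11/12 of the main cause/mask
 * registers:
 *
 *	unsigned int shift, hardport, port = 5;
 *	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
 *	// shift == 11, hardport == 1
 *	// err_bit  = ERR_IRQ  << shift   -> bit 11
 *	// done_bit = DONE_IRQ << shift   -> bit 12
 */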
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *	mv_save_cached_regs - (re-)initialize cached port registers
 *	@ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS);
}

/**
 *	mv_write_cached_reg - write to a cached port register
 *	@addr: hardware address of the register
 *	@old: pointer to cached value of the register
 *	@new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;
		writel(new, addr);
	}
}
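
/*
 * Illustrative usage sketch (hypothetical helper, not from the original
 * file): the cache turns "read-modify-write" into "modify cached copy,
 * write only on change", avoiding the slow MMIO read entirely:
 *
 *	static void mv_set_ltmode_bit8(struct ata_port *ap, int enable)
 *	{
 *		struct mv_port_priv *pp = ap->private_data;
 *		void __iomem *port_mmio = mv_ap_base(ap);
 *		u32 new = pp->cached.ltmode;
 *
 *		if (enable)
 *			new |= LTMODE_BIT8;
 *		else
 *			new &= ~LTMODE_BIT8;
 *		mv_write_cached_reg(port_mmio + LTMODE_OFS,
 *				    &pp->cached.ltmode, new);
 *	}
 */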
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
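
/*
 * Illustrative note (not in the original source): the "(x >> 16) >> 16"
 * idiom above extracts the high 32 bits of the queue's DMA address without
 * tripping over dma_addr_t being only 32 bits wide on many configurations,
 * where a single ">> 32" would be undefined behaviour (and a compiler
 * warning).  The WARN_ON()s check the 1KB/256B alignment that the
 * CRQB/CRPB queues require.
 */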
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + MV_IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + MV_IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
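
/*
 * Worked example (illustrative): mv_set_irq_coalescing(host, 4, 100)
 * computes clks = 100 * COAL_CLOCKS_PER_USEC = 15000, well under the
 * 2^24 - 1 hardware ceiling, so the chip raises one coalesced "done"
 * interrupt after 4 completed I/Os or 15000 internal clocks, whichever
 * comes first.  Passing 0 for either argument disables coalescing and
 * falls back to per-command DONE_IRQs.
 */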
  936. /**
  937. * mv_start_edma - Enable eDMA engine
  938. * @base: port base address
  939. * @pp: port private data
  940. *
  941. * Verify the local cache of the eDMA state is accurate with a
  942. * WARN_ON.
  943. *
  944. * LOCKING:
  945. * Inherited from caller.
  946. */
  947. static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
  948. struct mv_port_priv *pp, u8 protocol)
  949. {
  950. int want_ncq = (protocol == ATA_PROT_NCQ);
  951. if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
  952. int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
  953. if (want_ncq != using_ncq)
  954. mv_stop_edma(ap);
  955. }
  956. if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
  957. struct mv_host_priv *hpriv = ap->host->private_data;
  958. mv_edma_cfg(ap, want_ncq, 1);
  959. mv_set_edma_ptrs(port_mmio, hpriv, pp);
  960. mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
  961. writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
  962. pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
  963. }
  964. }
  965. static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
  966. {
  967. void __iomem *port_mmio = mv_ap_base(ap);
  968. const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
  969. const int per_loop = 5, timeout = (15 * 1000 / per_loop);
  970. int i;
  971. /*
  972. * Wait for the EDMA engine to finish transactions in progress.
  973. * No idea what a good "timeout" value might be, but measurements
  974. * indicate that it often requires hundreds of microseconds
  975. * with two drives in-use. So we use the 15msec value above
  976. * as a rough guess at what even more drives might require.
  977. */
  978. for (i = 0; i < timeout; ++i) {
  979. u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
  980. if ((edma_stat & empty_idle) == empty_idle)
  981. break;
  982. udelay(per_loop);
  983. }
  984. /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
  985. }
/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

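/*
 * Note: mv_stop_edma() clears the cached MV_PP_FLAG_EDMA_EN flag up
 * front, so the rest of the driver already treats the port as non-EDMA
 * while the engine is being drained and disabled below, even if the
 * hardware is slow to stop.
 */
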
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		start_port + num_ports - 1);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

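/*
 * With libata's SCR numbering (SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2),
 * the arithmetic above maps status/error/control onto three consecutive
 * 32-bit registers starting at SATA_STATUS_OFS; only SCR_ACTIVE needs a
 * special case.
 */
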
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;

		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;
	/*
	 * When the port is operating in host queuing mode (EDMA) with
	 * NCQ enabled, allow new NCQ commands.  EDMA also supports
	 * queueing multiple DMA commands, but libata core currently
	 * doesn't permit that.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}

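/*
 * mv_config_fbs() below programs three cached per-port registers
 * (FISCFG, LTMODE, and the EDMA halt conditions) that together enable
 * or disable FIS-based switching.  mv_write_cached_reg() is assumed to
 * skip the MMIO write whenever the new value already matches the
 * cached one.
 */
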
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;
	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |= FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

/**
 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *	@enable_bmdma: non-zero to enable basic DMA, zero to disable it
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new);
}

/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled.  Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 *
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 *
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 *
 * Note that this code assumes that an SOC never has more than one HC onboard.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
}

/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);
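		/*
		 * Split each segment so that no ePRD crosses a 64KB
		 * boundary: the length field is only 16 bits wide and,
		 * by the usual PRD convention (assumed here), a value
		 * of zero in those bits denotes a full 64KB transfer.
		 */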
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}

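/*
 * Each 16-bit CRQB command word carries one shadow-register write:
 * the data byte in the low bits, the register address, a control/select
 * bit (CRQB_CMD_CS), and CRQB_CMD_LAST on the final word of the block.
 */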
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}

/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD_OFS);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH_OFS);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

/**
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
}

/**
 *	mv_bmdma_stop - Stop BMDMA transfer
 *	@qc: queued command to stop DMA on.
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD_OFS);
	cmd &= ~ATA_DMA_START;
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}

/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS_OFS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	return status;
}

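/*
 * Note: libata-sff expects to see ATA_DMA_INTR once a transfer finishes,
 * but this chip has no such bit, so mv_bmdma_status() synthesizes it
 * whenever the engine is no longer active.
 */
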
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}

/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: port on which to send the FIS
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL_OFS);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL_OFS);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
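	/*
	 * Note that "timeout" counts polling iterations of bare register
	 * reads (no udelay), so the bound is 200 back-to-back MMIO reads
	 * rather than a fixed wall-clock delay.
	 */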
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT_OFS);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_printk(ap, KERN_WARNING,
				"%s transmission error, ifstat=%08x\n",
				__func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}

/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.  So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		/* fall through */
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_pio_queue_task(ap, qc, 0);
	return 0;
}

/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
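		/*
		 * req_idx is the software producer index into the CRQB
		 * ring; the mask wraps it around, which relies on
		 * MV_MAX_Q_DEPTH being a power of two.
		 */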
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
			 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
		return 0;
	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/* drop through */
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_sff_qc_issue(qc);
}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			qc = NULL;
		else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
			qc = NULL;
	}
	return qc;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];

				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}

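/*
 * The upper 16 bits of SATA_TESTCTL appear to latch, per PMP link,
 * which links reported a device error; the callers below treat the
 * value as a pmp_map bitmask during the datasheet's FBS/NCQ recovery
 * sequence.
 */
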
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		when = "disabled";
	} else if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *
 *	Most cases require a full reset of the chip's state machine,
 *	which also performs a COMRESET.
 *	Also, if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & SATA_FIS_IRQ_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}
	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by
				 * mv_err_intr().  So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@main_irq_cause: Main interrupt cause register for the chip.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	/* If asserted, clear the "all ports" IRQ coalescing bit */
	if (main_irq_cause & ALL_PORTS_COAL_DONE)
		writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
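		/*
		 * hardport is this port's index within its HC (0..3);
		 * shift locates the port's DONE_IRQ/ERR_IRQ bit pair
		 * within main_irq_cause, as used below.
		 */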
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			if (hc_cause & PORTS_0_3_COAL_DONE)
				ack_irqs = HC_COAL_IRQ;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

  2494. /**
  2495. * mv_interrupt - Main interrupt event handler
  2496. * @irq: unused
  2497. * @dev_instance: private data; in this case the host structure
  2498. *
  2499. * Read the read only register to determine if any host
  2500. * controllers have pending interrupts. If so, call lower level
  2501. * routine to handle. Also check for PCI errors which are only
  2502. * reported here.
  2503. *
  2504. * LOCKING:
  2505. * This routine holds the host lock while processing pending
  2506. * interrupts.
  2507. */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI: block new interrupts while in here */
	if (using_msi)
		mv_write_main_irq_mask(0, hpriv);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
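
/*
 * On Gen-I (50xx) chips the SCR registers live in the per-port PHY
 * block as consecutive 32-bit words, so SCR_STATUS, SCR_ERROR and
 * SCR_CONTROL map directly to sc_reg_in * sizeof(u32).
 */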
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE_OFS);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE_OFS);

		tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE_OFS);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE_OFS);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2_OFS);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2_OFS);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2_OFS);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2_OFS);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2_OFS);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3_OFS errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3_OFS);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4_OFS);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4_OFS);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 * Or ensure we use writelfl() when writing PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3_OFS);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2_OFS);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2_OFS);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2_OFS);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
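
/*
 * SATA_INTERFACE_CFG survives EDMA_RESET, so mv_reset_channel() (below)
 * can use this helper to program the desired link speed before strobing
 * the reset.
 */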
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
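
/*
 * Route subsequent softreset/hardreset signalling to a specific
 * port-multiplier device by writing its number into the low nibble
 * of SATA_IFCTL; the register is only touched when the value changes.
 */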
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
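		/*
		 * Loop until SStatus settles at a stable value:
		 * 0x0 (no device), 0x113 (Gen-1 link up), or
		 * 0x123 (Gen-2 link up); SPD lives in bits 7:4.
		 */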
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE_OFS);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + PCI_COMMAND_OFS);
		if (reg & PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + PCI_COMMAND_OFS);
		writelfl(reg & ~PCI_COMMAND_MWRCOM, mmio + PCI_COMMAND_OFS);
	}
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
				    irq_coalescing_usecs);
done:
	return rc;
}
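
/*
 * The dmam_pool_create() allocations below are managed (devres), so
 * they are released automatically with the device and need no explicit
 * cleanup on the error paths or at detach time.
 */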
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					      MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					      MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;
	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}
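	/*
	 * Program one window per DRAM chip-select: size mask in the
	 * upper 16 bits, the mbus attribute and target id in the middle,
	 * and bit 0 as the window enable.
	 */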
	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell
 * host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
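	/*
	 * The platform resource appears to map the SATAHC0 register block,
	 * while the rest of the driver computes offsets from the chip-wide
	 * SATA base, hence the bias below.
	 */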
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
			   .name = DRV_NAME,
			   .owner = THIS_MODULE,
			  },
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/* move to PCI layer or libata core? */
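/*
 * Prefer a 64-bit DMA mask, but fall back to 32-bit masks if either
 * the streaming or the consistent mask cannot be satisfied.
 */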
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;
	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);