/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION     "2.21"  /* must be exactly four chars */
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;                          /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7);        /* Port multiplier number,
                                                   bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}
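/*
 * Illustrative sketch, not part of the original file: how a caller might
 * serialize a taskfile into the 20-byte Register - Host to Device FIS
 * using ata_tf_to_fis() above.  The taskfile values are invented for the
 * example; the block is compiled out.
 */
#if 0
static void example_build_h2d_fis(void)
{
        struct ata_taskfile tf;
        u8 fis[20];

        memset(&tf, 0, sizeof(tf));
        tf.command = ATA_CMD_READ_EXT;  /* READ DMA EXT */
        tf.device  = ATA_LBA;           /* LBA addressing */
        tf.nsect   = 1;                 /* one sector */

        ata_tf_to_fis(&tf, fis, 0);     /* pmp 0: no port multiplier */
        /* now fis[0] == 0x27, fis[2] == tf.command, fis[12] == tf.nsect */
}
#endif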
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command  = fis[2];  /* status */
        tf->feature  = fis[3];  /* error */

        tf->lbal     = fis[4];
        tf->lbam     = fis[5];
        tf->lbah     = fis[6];
        tf->device   = fis[7];

        tf->hob_lbal = fis[8];
        tf->hob_lbam = fis[9];
        tf->hob_lbah = fis[10];

        tf->nsect     = fis[12];
        tf->hob_nsect = fis[13];
}
static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};
/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}
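/*
 * Sketch of the indexing above (illustration only, not original code):
 * write, lba48 and fua form a 3-bit offset into one of the three 8-entry
 * groups of ata_rw_cmds[], and the chosen protocol picks the group base
 * (0 for PIO multi-sector, 8 for plain PIO, 16 for DMA).
 */
#if 0
/* Hypothetical helper mirroring the lookup in ata_rwcmd_protocol() */
static u8 example_rw_cmd(int dma, int fua, int lba48, int write)
{
        int base = dma ? 16 : 8;        /* 0 would select the PIO multi group */

        /* e.g. a DMA FUA LBA48 write selects ata_rw_cmds[16 + 4 + 2 + 1],
         * which is ATA_CMD_WRITE_FUA_EXT */
        return ata_rw_cmds[base + (fua ? 4 : 0) + (lba48 ? 2 : 0) + !!write];
}
#endif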
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                block = (cyl * dev->heads + head) * dev->sectors + sect;
        }

        return block;
}
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl   = track / dev->heads;
                head  = track % dev->heads;
                sect  = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255 */
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}
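/*
 * Worked example of the LBA-to-CHS conversion above (all numbers invented,
 * compiled out): on a disk with 16 heads and 63 sectors per track, block
 * 4097 fits comfortably within the CHS field limits checked above.
 */
#if 0
static void example_lba_to_chs(void)
{
        u32 block = 4097, heads = 16, sectors = 63;

        u32 track = block / sectors;            /* 65 */
        u32 cyl   = track / heads;              /* 4  */
        u32 head  = track % heads;              /* 1  */
        u32 sect  = block % sectors + 1;        /* 3, CHS sectors are 1-based */
}
#endif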
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
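/*
 * Sketch (not original code, compiled out): the pack/unpack pair is
 * symmetric, so per-class masks that fit their bit widths survive a round
 * trip through the combined xfer_mask.  The mask values are invented.
 */
#if 0
static void example_xfermask_roundtrip(void)
{
        unsigned int xfer_mask, pio, mwdma, udma;

        /* PIO0-4, MWDMA0-2, UDMA0-5 */
        xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
        ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
        /* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
}
#endif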
static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};
/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}
/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}
/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}
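/*
 * Sketch (not original code, compiled out): the highest set bit of an
 * xfer_mask decides both the XFER_* mode and the printed name.  Assumes
 * the usual bit layout established by ata_xfer_tbl[] above.
 */
#if 0
static void example_mask_to_mode(void)
{
        unsigned int mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
        u8 mode;
        const char *name;

        /* highest set bit is the UDMA5 bit */
        mode = ata_xfer_mask2mode(mask);        /* XFER_UDMA_5 */
        name = ata_mode_string(mask);           /* "UDMA/100" */
}
#endif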
static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev)) {
                if (ata_msg_drv(dev->ap))
                        ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
                                        ATA_DNXFER_QUIET);
                dev->class++;
        }
}
/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        iowrite8(0xaa, ioaddr->nsect_addr);
        iowrite8(0x55, ioaddr->lbal_addr);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        nsect = ioread8(ioaddr->nsect_addr);
        lbal = ioread8(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}
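/*
 * Sketch (not original code, compiled out): a minimal probe loop over the
 * two possible devices on a PATA channel, using ata_devchk() above.
 */
#if 0
static void example_bus_probe(struct ata_port *ap)
{
        unsigned int i;

        for (i = 0; i < 2; i++)
                if (ata_devchk(ap, i))
                        printk(KERN_DEBUG "ata: device %u responded\n", i);
}
#endif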
/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}
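/*
 * Sketch (not original code, compiled out): feeding a hand-built taskfile
 * carrying the ATAPI signature (lbam 0x14, lbah 0xeb) through
 * ata_dev_classify() above.
 */
#if 0
static void example_classify(void)
{
        struct ata_taskfile tf;

        memset(&tf, 0, sizeof(tf));
        tf.lbam = 0x14;
        tf.lbah = 0xeb;
        /* 0x3c/0xc3 (or 0/0) would classify as ATA_DEV_ATA instead */
        WARN_ON(ata_dev_classify(&tf) != ATA_DEV_ATAPI);
}
#endif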
/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags: if master then continue and warn later */
        if (err == 0 && device == 0)
                /* diagnostic fail : do nothing _YET_ */
                ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
        else if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}
/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}
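/*
 * Sketch (not original code, compiled out): pulling the model string out
 * of IDENTIFY data.  Word 27 is where the spec puts the 40-byte model
 * number; the buffer is 41 bytes so @len is odd, as ata_id_c_string()
 * requires.
 */
#if 0
static void example_model_string(const u16 *id)
{
        unsigned char model[41];        /* 40 chars + NUL */

        ata_id_c_string(id, model, 27, sizeof(model));
        printk(KERN_DEBUG "ata: model '%s'\n", model);
}
#endif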
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
        sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
        sectors |= (tf->hob_lbal & 0xff) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= (tf->device & 0x0f) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return ++sectors;
}
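/*
 * Worked example (values invented, compiled out): the device reports the
 * last addressable LBA, so the helpers above add one to turn it into a
 * sector count.  A max LBA of 0x3A38602F (976773167) spread across the
 * six LBA registers decodes to 976773168 sectors, roughly a 500 GB disk.
 */
#if 0
static void example_lba48_decode(void)
{
        struct ata_taskfile tf;

        memset(&tf, 0, sizeof(tf));
        tf.hob_lbal = 0x3A;     /* bits 24-31 of 0x3A38602F */
        tf.lbah     = 0x38;     /* bits 16-23 */
        tf.lbam     = 0x60;     /* bits 8-15  */
        tf.lbal     = 0x2F;     /* bits 0-7   */
        /* ata_tf_to_lba48(&tf) == 976773168, i.e. max LBA + 1 */
}
#endif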
/**
 * ata_read_native_max_address_ext - LBA48 native max query
 * @dev: Device to query
 *
 * Perform an LBA48 size query upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */
static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
        unsigned int err;
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= 0x40;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba48(&tf);
}
/**
 * ata_read_native_max_address - LBA28 native max query
 * @dev: Device to query
 *
 * Perform an LBA28 size query upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */
static u64 ata_read_native_max_address(struct ata_device *dev)
{
        unsigned int err;
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_READ_NATIVE_MAX;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= 0x40;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba(&tf);
}
/**
 * ata_set_native_max_address_ext - LBA48 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA48 size set max upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */
static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
        unsigned int err;
        struct ata_taskfile tf;

        new_sectors--;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_SET_MAX_EXT;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= 0x40;

        tf.lbal = (new_sectors >> 0) & 0xff;
        tf.lbam = (new_sectors >> 8) & 0xff;
        tf.lbah = (new_sectors >> 16) & 0xff;

        tf.hob_lbal = (new_sectors >> 24) & 0xff;
        tf.hob_lbam = (new_sectors >> 32) & 0xff;
        tf.hob_lbah = (new_sectors >> 40) & 0xff;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba48(&tf);
}
/**
 * ata_set_native_max_address - LBA28 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA28 size set max upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */
static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
        unsigned int err;
        struct ata_taskfile tf;

        new_sectors--;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_SET_MAX;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;

        tf.lbal = (new_sectors >> 0) & 0xff;
        tf.lbam = (new_sectors >> 8) & 0xff;
        tf.lbah = (new_sectors >> 16) & 0xff;
        tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba(&tf);
}
/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 */
static u64 ata_hpa_resize(struct ata_device *dev)
{
        u64 sectors = dev->n_sectors;
        u64 hpa_sectors;

        if (ata_id_has_lba48(dev->id))
                hpa_sectors = ata_read_native_max_address_ext(dev);
        else
                hpa_sectors = ata_read_native_max_address(dev);

        if (hpa_sectors > sectors) {
                ata_dev_printk(dev, KERN_INFO,
                        "Host Protected Area detected:\n"
                        "\tcurrent size: %lld sectors\n"
                        "\tnative size: %lld sectors\n",
                        (long long)sectors, (long long)hpa_sectors);

                if (ata_ignore_hpa) {
                        if (ata_id_has_lba48(dev->id))
                                hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
                        else
                                hpa_sectors = ata_set_native_max_address(dev,
                                                                hpa_sectors);

                        if (hpa_sectors) {
                                ata_dev_printk(dev, KERN_INFO, "native size "
                                        "increased to %lld sectors\n",
                                        (long long)hpa_sectors);
                                return hpa_sectors;
                        }
                }
        } else if (hpa_sectors < sectors)
                ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
                               "is smaller than sectors (%lld)\n", __FUNCTION__,
                               (long long)hpa_sectors, (long long)sectors);

        return sectors;
}
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

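/*
 * Example (editor's sketch, not part of the original source): the
 * capacity lookup above reduces to picking the right IDENTIFY word
 * range - words 100-103 for LBA48, 60-61 for LBA28, 57-58 for current
 * CHS, words 1/3/6 for default CHS. A caller holding a raw ID page
 * might do:
 *
 *	u16 id[ATA_ID_WORDS];
 *	// ... fill id from IDENTIFY DEVICE ...
 *	u64 sectors = ata_id_n_sectors(id);
 *	u64 bytes = sectors * ATA_SECT_SIZE;	// 512-byte sectors
 */
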
/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */
void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);
	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no action.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}

/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel. Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

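/*
 * Example (editor's sketch, not part of the original source): the
 * packed mask keeps PIO, MWDMA and UDMA bits in disjoint bit ranges,
 * so a caller can split it back apart with the unpack helper:
 *
 *	unsigned int xfer_mask = ata_id_xfermask(dev->id);
 *	unsigned int pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio bit N set => PIO mode N supported, likewise for
 *	// mwdma/udma
 */
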
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task. There is one port_task per port and it's the
 * user's (low level driver's) responsibility to make sure that
 * only one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH. ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}

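/*
 * Example (editor's sketch; my_poll_task is hypothetical): a low level
 * driver can bounce polling work through port_task, retrieving its
 * argument from ap->port_task_data inside the work function:
 *
 *	static void my_poll_task(struct work_struct *work);
 *
 *	// poll again in ~10 ms
 *	ata_port_queue_task(ap, my_poll_task, ap, msecs_to_jiffies(10));
 */
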
/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout. @tf contains
 * command on entry and result on return. Timeout and error
 * conditions are reported via return value. No recovery action
 * is taken after a command times out. It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given. This breaks
	 * ata_tag_internal() test for those drivers. Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here. If we lose, the
		 * following test prevents us from completing the qc
		 * twice. If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n",
					       command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

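/*
 * Example (editor's sketch, not part of the original source): issuing
 * IDENTIFY DEVICE through the simple-buffer wrapper, roughly as
 * ata_dev_read_id() below does:
 *
 *	struct ata_taskfile tf;
 *	u16 id[ATA_ID_WORDS];
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, sizeof(id));
 */
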
/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

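/*
 * Example (editor's sketch, not part of the original source): flushing
 * the drive write cache is one such single-opcode command:
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		// command failed; err_mask holds the AC_ERR_* bits
 */
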
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* For PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

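/*
 * Example (editor's sketch; the timing variable and MY_IORDY_EN bit
 * are hypothetical): a PATA controller driver would typically consult
 * this helper while programming its PIO timing register:
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing |= MY_IORDY_EN;	// hypothetical chip bit
 */
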
/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. A
 * valid PIO mask is always returned; PIO2 is assumed usable without
 * IORDY unless the drive's EIDE timing words say otherwise.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device. ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage. Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers. Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class. Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id. Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_eh_context *ehc = &ap->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			if (ata_id_hpa_enabled(dev->id))
				dev->n_sectors = ata_hpa_resize(dev);

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					       "%s: %s, %s, max %s\n",
					       revbuf, modelbuf, fwrevbuf,
					       ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					       "%Lu sectors, multi %u: %s %s\n",
					       (unsigned long long)dev->n_sectors,
					       dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					       "%s: %s, %s, max %s\n",
					       revbuf, modelbuf, fwrevbuf,
					       ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					       "%Lu sectors, multi %u, CHS %u/%u/%u\n",
					       (unsigned long long)dev->n_sectors,
					       dev->multi_count, dev->cylinders,
					       dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}
	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
				       "Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
				       "fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}

/**
 * ata_cable_40wire - return 40 wire cable type
 * @ap: port
 *
 * Helper method for drivers which want to hardwire 40 wire cable
 * detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 * ata_cable_80wire - return 80 wire cable type
 * @ap: port
 *
 * Helper method for drivers which want to hardwire 80 wire cable
 * detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 * ata_cable_unknown - return unknown PATA cable type
 * @ap: port
 *
 * Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 * ata_cable_sata - return SATA cable type
 * @ap: port
 *
 * Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}

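/*
 * Example (editor's sketch; my_port_ops is hypothetical): a driver for
 * a board with hardwired 40-wire cabling plugs one of these helpers
 * straight into its port operations, and the probe code below then
 * calls it through ap->ops->cable_detect:
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		// ...
 *		.cable_detect	= ata_cable_40wire,
 *		// ...
 *	};
 */
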
/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function. Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}

/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is enabled.
 *
 * LOCKING: host lock, or some other form of
 * serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}

/**
 * sata_print_link_status - Print SATA link status
 * @ap: SATA port to printk link status about
 *
 * This function prints link speed and status of a SATA link.
 *
 * LOCKING:
 * None.
 */
void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}

/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 * ata_dev_pair - return other device on cable
 * @adev: device
 *
 * Obtain the other device on the same cable; if none is
 * present, NULL is returned.
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];

	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

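/*
 * Example (editor's sketch, not part of the original source): devno is
 * 0 or 1, so the pairing is plain index arithmetic. A caller that
 * needs to apply shared cable limits might do:
 *
 *	struct ata_device *slave = ata_dev_pair(&ap->device[0]);
 *	if (slave)
 *		// master and an enabled slave share this cable
 */
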
/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host lock, or some other form of
 * serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}

/**
 * sata_down_spd_limit - adjust SATA spd limit downward
 * @ap: Port to adjust SATA spd limit for
 *
 * Adjust SATA spd limit of @ap downward. Note that this
 * function only adjusts the limit. The change must be applied
 * using sata_set_spd().
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}

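/*
 * Worked example (editor's sketch, not part of the original source):
 * with sata_spd_limit == 0x3 (Gen1 and Gen2 allowed) and the link
 * currently at Gen2 (spd == 2), the arithmetic above proceeds as:
 *
 *	mask = 0x3 & ~(1 << 1);		// drop highest bit  => 0x1
 *	mask &= (1 << (2 - 1)) - 1;	// keep bits below spd => 0x1
 *	// new sata_spd_limit = 0x1 => "1.5 Gbps" only
 */
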
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}

/**
 * sata_set_spd_needed - is SATA spd configuration needed
 * @ap: Port in question
 *
 * Test whether the spd limit in SControl matches
 * @ap->sata_spd_limit. This function is used to determine
 * whether hardreset is necessary to apply SATA spd
 * configuration.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}

/**
 * sata_set_spd - set SATA spd according to spd limit
 * @ap: Port to set SATA spd for
 *
 * Set SATA spd of @ap according to sata_spd_limit.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 if spd doesn't need to be changed, 1 if spd has been
 * changed. Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands. We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO
	 * timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery;
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}

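/*
 * Example (editor's sketch, not part of the original source): T and UT
 * are one bus clock period in picoseconds, since ata_timing_quantize()
 * multiplies the nanosecond table entries by 1000 before dividing.
 * For a 33 MHz (33333 kHz) clock, and assuming the drive reports no
 * slower EIDE override:
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	// ~30000 ps per clock
 *
 *	if (ata_timing_compute(adev, XFER_PIO_4, &t, T, T) == 0)
 *		// t.cycle == 4: the 120 ns PIO4 cycle rounded up to
 *		// whole 30 ns clocks
 */
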
/**
 * ata_down_xfermask_limit - adjust dev xfer masks downward
 * @dev: Device to adjust xfer masks
 * @sel: ATA_DNXFER_* selector
 *
 * Adjust xfer masks of @dev downward. Note that this function
 * does not apply the change. Invoking ata_set_mode() afterwards
 * will apply the limit.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}

static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}

/**
 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 * @r_failed_dev: out parameter for failed device
 *
 * Standard implementation of the function used to tune and set
 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
 * ata_dev_set_mode() fails, pointer to the failing device is
 * returned in @r_failed_dev.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 * ata_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 * @r_failed_dev: out parameter for failed device
 *
 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
 * ata_set_mode() fails, pointer to the failing device is
 * returned in @r_failed_dev.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);
	return ata_do_set_mode(ap, r_failed_dev);
}

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

  2475. /**
  2476. * ata_busy_sleep - sleep until BSY clears, or timeout
  2477. * @ap: port containing status register to be polled
  2478. * @tmout_pat: impatience timeout
  2479. * @tmout: overall timeout
  2480. *
  2481. * Sleep until ATA Status register bit BSY clears,
  2482. * or a timeout occurs.
  2483. *
  2484. * LOCKING:
  2485. * Kernel thread context (may sleep).
  2486. *
  2487. * RETURNS:
  2488. * 0 on success, -errno otherwise.
  2489. */
  2490. int ata_busy_sleep(struct ata_port *ap,
  2491. unsigned long tmout_pat, unsigned long tmout)
  2492. {
  2493. unsigned long timer_start, timeout;
  2494. u8 status;
  2495. status = ata_busy_wait(ap, ATA_BUSY, 300);
  2496. timer_start = jiffies;
  2497. timeout = timer_start + tmout_pat;
  2498. while (status != 0xff && (status & ATA_BUSY) &&
  2499. time_before(jiffies, timeout)) {
  2500. msleep(50);
  2501. status = ata_busy_wait(ap, ATA_BUSY, 3);
  2502. }
  2503. if (status != 0xff && (status & ATA_BUSY))
  2504. ata_port_printk(ap, KERN_WARNING,
  2505. "port is slow to respond, please be patient "
  2506. "(Status 0x%x)\n", status);
  2507. timeout = timer_start + tmout;
  2508. while (status != 0xff && (status & ATA_BUSY) &&
  2509. time_before(jiffies, timeout)) {
  2510. msleep(50);
  2511. status = ata_chk_status(ap);
  2512. }
  2513. if (status == 0xff)
  2514. return -ENODEV;
  2515. if (status & ATA_BUSY) {
  2516. ata_port_printk(ap, KERN_ERR, "port failed to respond "
  2517. "(%lu secs, Status 0x%x)\n",
  2518. tmout / HZ, status);
  2519. return -EBUSY;
  2520. }
  2521. return 0;
  2522. }
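/* Illustrative sketch, compiled out: a typical boot-time caller polls with
 * a short "impatience" timeout before the full one, so the warning fires
 * early while the hard failure comes much later.  ATA_TMOUT_BOOT_QUICK and
 * ATA_TMOUT_BOOT are the <linux/libata.h> timeout macros; whether a given
 * caller uses exactly this pair is an assumption of the example.
 */
#if 0
	rc = ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
#endif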
/**
 * ata_wait_ready - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until ATA Status register bit BSY clears, or timeout
 * occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		if (!ata_port_online(ap) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
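/* Illustrative sketch, compiled out: unlike ata_busy_sleep(), callers pass
 * an absolute deadline in jiffies; ata_bus_reset() below budgets 40 s for
 * the whole softreset this way.
 */
#if 0
	rc = ata_wait_ready(ap, jiffies + 40 * HZ);
#endif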
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}

static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}

/**
 * ata_bus_reset - reset host port and associated ATA channel
 * @ap: port to reset
 *
 * This is typically the first time we actually start issuing
 * commands to the ATA channel.  We wait for BSY to clear, then
 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 * result.  Determine what devices, if any, are on the channel
 * by looking at the device 0/1 error register.  Look at the signature
 * stored in each device's taskfile registers, to determine if
 * the device is ATA or ATAPI.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 * Obtains host lock.
 *
 * SIDE EFFECTS:
 * Sets ATA_FLAG_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
/**
 * sata_phy_debounce - debounce SATA phy status
 * @ap: ATA port to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Make sure SStatus of @ap reaches stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout.  Timeout constrains the
 * beginning of the stable state.  Because DET gets stuck at 1 on
 * some controllers after hot unplugging, this function waits
 * until timeout then returns 0 if DET is stable at 1.
 *
 * @timeout is further limited by @deadline.  The sooner of the
 * two is used.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
		      unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check deadline */
		if (time_after(jiffies, deadline))
			return -EBUSY;
	}
}
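/* Illustrative sketch, compiled out: a debounce timing triple reads as
 * "poll SStatus every 5 ms, require DET stable for 100 ms, give up after
 * 2 s".  libata keeps similar tables (e.g. sata_deb_timing_normal); the
 * array name and values below are assumptions for illustration only.
 */
#if 0
	static const unsigned long example_deb_timing[] = { 5, 100, 2000 };

	rc = sata_phy_debounce(ap, example_deb_timing, jiffies + 5 * HZ);
#endif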
/**
 * sata_phy_resume - resume SATA phy
 * @ap: ATA port to resume SATA phy for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Resume SATA phy of @ap and debounce it.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
		    unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params, deadline);
}

/**
 * ata_std_prereset - prepare for reset
 * @ap: ATA port to be reset
 * @deadline: deadline jiffies for the operation
 *
 * @ap is about to be reset.  Initialize it.  Failure from
 * prereset makes libata abort whole reset sequence and give up
 * that port, so prereset should be best-effort.  It does its
 * best to prepare for reset sequence but if things go wrong, it
 * should just whine, not fail.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_phy_resume(ap, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_port_printk(ap, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}

/**
 * ata_std_softreset - reset host port via ATA SRST
 * @ap: port to reset
 * @classes: resulting classes of attached devices
 * @deadline: deadline jiffies for the operation
 *
 * Reset host port using ATA SRST.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
		      unsigned long deadline)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
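/* Note on the err != 0x81 tests above and in ata_bus_reset(): EXECUTE
 * DEVICE DIAGNOSTIC leaves a diagnostic code in the error register;
 * 0x01 means both devices passed, while 0x81 means device 1 failed or is
 * absent, so the slave is not classified in that case.
 */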
/**
 * sata_port_hardreset - reset port via SATA phy reset
 * @ap: port to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * SATA phy-reset host port using DET bits of SControl register.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
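/* SControl values used above, decoded (field layout per the SATA spec):
 *   bits 3:0  DET - 0x1 = perform interface initialization (COMRESET),
 *                   0x4 = put the phy offline
 *   bits 7:4  SPD - speed limit; preserved by the (scontrol & 0x0f0) mask
 *   bits 11:8 IPM - 0x3 = disallow partial and slumber power states
 * So 0x301 means "no low-power states, issue COMRESET" and 0x304 means
 * "no low-power states, phy offline".
 */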
/**
 * sata_std_hardreset - reset host port via SATA phy reset
 * @ap: port to reset
 * @class: resulting class of attached device
 * @deadline: deadline jiffies for the operation
 *
 * SATA phy-reset host port using DET bits of SControl register,
 * wait for !BSY and classify the attached device.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing, deadline);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}

/**
 * ata_std_postreset - standard postreset callback
 * @ap: the target ata_port
 * @classes: classes of attached devices
 *
 * This function is invoked after a successful reset.  Note that
 * the device might have been reset more than once using
 * different reset methods before postreset is invoked.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
/**
 * ata_dev_same_device - Determine whether new ID matches configured device
 * @dev: device to compare against
 * @new_class: class of the new device
 * @new_id: IDENTIFY page of the new device
 *
 * Compare @new_class and @new_id against @dev and determine
 * whether @dev is the device indicated by @new_class and
 * @new_id.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}

/**
 * ata_dev_reread_id - Re-read IDENTIFY data
 * @dev: target ATA device
 * @readid_flags: read ID flags
 *
 * Re-read IDENTIFY page and make sure @dev is still attached to
 * the port.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
/**
 * ata_dev_revalidate - Revalidate ATA device
 * @dev: device to revalidate
 * @readid_flags: read ID flags
 *
 * Re-read IDENTIFY page, make sure @dev is still attached to the
 * port and reconfigure it according to the new IDENTIFY page.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);
		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840", NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A",	NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA ZIP 250 ATAPI", NULL,		ATA_HORKAGE_NODMA }, /* temporary fix */
	{ "IOMEGA ZIP 250 ATAPI Floppy",
				NULL,		ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};
unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strcmp(ad->model_num, model_num)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strcmp(ad->model_rev, model_rev))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
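/* Illustrative sketch, compiled out: a new quirk entry may pin both the
 * model number and the firmware revision, each matched against the
 * NUL-terminated strings ata_id_c_string() extracts from IDENTIFY data;
 * a NULL revision matches any firmware.  The device below is made up.
 */
#if 0
	{ "EXAMPLE DISK 1234",	"FW1.00",	ATA_HORKAGE_NONCQ },
#endif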
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
}

/**
 * ata_dev_xfermask - Compute supported xfermask of the given device
 * @dev: Device to compute xfermask for
 *
 * Compute supported xfermask of @dev and store it in
 * dev->*_mask.  This function is responsible for applying all
 * known limits including host controller limits, device
 * blacklist, etc...
 *
 * LOCKING:
 * None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
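/* The 0xF8 mask in the cable rule above selects UDMA3 (44 MB/s) and
 * faster: bit 0 of the UDMA field is UDMA0, so 0xF8 covers bits 3-7,
 * i.e. UDMA3..UDMA7 - exactly the modes that need an 80-wire cable.
 * Clearing them leaves UDMA0-2, capping the device at UDMA/33.
 */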
/**
 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 * @dev: Device to which command will be sent
 *
 * Issue SET FEATURES - XFER MODE command to device @dev
 * on port @ap.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 * ata_dev_init_params - Issue INIT DEV PARAMS command
 * @dev: Device to which command will be sent
 * @heads: Number of heads (taskfile parameter)
 * @sectors: Number of sectors (taskfile parameter)
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255.  Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 * ata_sg_clean - Unmap DMA memory associated with command
 * @qc: Command containing DMA memory to be released
 *
 * Unmap all mapped DMA memory associated with this command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}

/**
 * ata_fill_sg - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
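/* Worked example of the 64K-boundary split above: a 10 KiB (0x2800 byte)
 * segment at DMA address 0x1f000 crosses 0x20000, so it becomes two PRDs:
 *   PRD[n]   = { addr 0x1f000, len 0x1000 }  (offset 0xf000, so only
 *                                             0x10000 - 0xf000 bytes fit)
 *   PRD[n+1] = { addr 0x20000, len 0x1800 }  (the remaining 6 KiB)
 */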
/**
 * ata_fill_sg_dumb - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.  Perform the fill
 * so that we avoid writing any 64K-length records, for
 * controllers that don't follow the spec.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says, so split into two 32K entries */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 * @qc: Metadata associated with taskfile to check
 *
 * Allow low-level driver to filter ATA PACKET commands, returning
 * a status indicating whether or not it is OK to use DMA for the
 * supplied PACKET command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS: 0 when ATAPI DMA can be used
 *          nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}
/**
 * ata_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

/**
 * ata_dumb_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 * ata_sg_init_one - Associate command with memory buffer
 * @qc: Command to be associated
 * @buf: Memory buffer
 * @buflen: Length of memory buffer, in bytes.
 *
 * Initialize the data-related elements of queued_cmd @qc
 * to point to a single memory buffer, @buf of byte length @buflen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}
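/* Illustrative sketch, compiled out: attaching one kernel buffer for a
 * single-segment transfer, roughly how an internal command would attach a
 * sense buffer.  The buffer name and length macro here are assumptions of
 * the example, not taken from a specific caller.
 */
#if 0
	ata_sg_init_one(qc, sense_buf, SCSI_SENSE_BUFFERSIZE);
	qc->dma_dir = DMA_FROM_DEVICE;
#endif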
/**
 * ata_sg_init - Associate command with scatter-gather table.
 * @qc: Command to be associated
 * @sg: Scatter-gather table.
 * @n_elem: Number of elements in s/g table.
 *
 * Initialize the data-related elements of queued_cmd @qc
 * to point to a scatter-gather table @sg, containing @n_elem
 * elements.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
}

/**
 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 * @qc: Command with memory buffer to be mapped.
 *
 * DMA-map the memory buffer associated with queued_cmd @qc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, negative on error.
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
/**
 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 * @qc: Command with scatter-gather table to be mapped.
 *
 * DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, negative on error.
 *
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}

/**
 * swap_buf_le16 - swap halves of 16-bit words in place
 * @buf: Buffer to swap
 * @buf_words: Number of 16-bit words in buffer.
 *
 * Swap halves of 16-bit words if needed to convert from
 * little-endian byte order to native cpu byte order, or
 * vice-versa.
 *
 * LOCKING:
 * Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
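/* Illustrative sketch, compiled out: IDENTIFY data arrives from the drive
 * as 256 little-endian words, so big-endian hosts convert it in place
 * before the ata_id_*() helpers parse it; on little-endian builds the
 * call compiles to nothing.
 */
#if 0
	swap_buf_le16(id, ATA_ID_WORDS);
#endif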
/**
 * ata_data_xfer - Transfer data by PIO
 * @adev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @write_data: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}

/**
 * ata_data_xfer_noirq - Transfer data by PIO
 * @adev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @write_data: read/write
 *
 * Transfer data from/to the device data register by PIO.  Do the
 * transfer with interrupts disabled.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command on going
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command on going
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);
}
/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When device has indicated its readiness to accept
 * a CDB, this function is called.  Send the CDB.
 *
 * LOCKING:
 * caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 *
 */
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer.  In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, pad with zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant.  For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
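/* The interrupt-reason bits tested above come from the ATAPI PACKET
 * protocol: bit 0 (CoD) is 1 for a command/status phase and must be 0
 * during a data phase; bit 1 (I/O) is 1 when the device is sending data
 * to the host, so i_write = !(ireason & (1 << 1)) must agree with the
 * direction implied by ATA_TFLAG_WRITE or the transfer is aborted.
 */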
/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (is_atapi_taskfile(&qc->tf) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
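/*
 * Example: for a polled PIO data-in command, ata_hsm_move() typically
 * walks HSM_ST_FIRST -> HSM_ST (once per data block) -> HSM_ST_LAST ->
 * HSM_ST_IDLE, returning 1 while another poll is needed.  A caller
 * loop equivalent to the fast path of ata_pio_task() (sketch only;
 * the real task also snoozes while BSY is set and re-queues itself):
 */
#if 0
	do {
		u8 status = ata_chk_status(ap);
		poll_next = ata_hsm_move(ap, qc, status, 1);
	} while (poll_next);
#endif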
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port with which the command will be associated
 *
 *	LOCKING:
 *	None.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}

static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands. This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally. ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
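/*
 * Example: an NCQ-capable LLD's interrupt handler reads the set of
 * tags still active in hardware and hands it to
 * ata_qc_complete_multiple(); every tag that dropped out of the mask
 * is completed.  Sketch, where my_read_active_tags() stands in for
 * hardware-specific register access:
 */
#if 0
	u32 qc_active = my_read_active_tags(ap);	/* one bit per busy tag */
	int nr_done = ata_qc_complete_multiple(ap, qc_active, NULL);

	if (nr_done < 0)		/* illegal qc_active transition */
		ata_port_freeze(ap);
#endif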
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		return 1;

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO:
		if (ap->flags & ATA_FLAG_PIO_DMA)
			return 1;

		/* fall through */

	default:
		return 0;
	}

	/* never reached */
}
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to the device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command. ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command. Currently,
 *	only DMA interrupts are handled. All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices. Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
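/*
 * Example: LLDs that rely on this default handler simply pass it when
 * requesting their IRQ, typically via ata_host_activate():
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_interrupt,
 *			       IRQF_SHARED, &my_sht);
 *
 * (my_sht being the driver's scsi_host_template.)
 */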
/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@ap: ATA port to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @ap.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_port *ap)
{
	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}

/**
 *	sata_scr_read - read SCR register of the specified port
 *	@ap: ATA port to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @ap into *@val. This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @ap. This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs a flush (read-back) after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
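/*
 * Example: the COMRESET sequence in sata_std_hardreset() uses the
 * flushing variant so the DET=1 write reaches the controller before
 * the spec-mandated delay starts.  Simplified sketch:
 */
#if 0
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
		scontrol = (scontrol & 0x0f0) | 0x301;	/* DET=1 */
		sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
		msleep(1);				/* >= 1ms per AHCI 10.4.2 */
		scontrol = (scontrol & 0x0f0) | 0x300;	/* DET=0, resume phy */
		sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
	}
#endif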
/**
 *	ata_port_online - test whether the given port is online
 *	@ap: ATA port to test
 *
 *	Test whether @ap is online. Note that this function returns 0
 *	if online status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_port_offline - test whether the given port is offline
 *	@ap: ATA port to test
 *
 *	Test whether @ap is offline. Note that this function returns
 *	0 if offline status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}
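/*
 * Example: both helpers key off the DET field (bits 3:0) of SStatus,
 * where 0x3 means "device present and phy communication established".
 * Decoding the register by hand looks like:
 */
#if 0
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
		printk(KERN_DEBUG "DET=%x SPD=%x IPM=%x\n",
		       sstatus & 0xf, (sstatus >> 4) & 0xf,
		       (sstatus >> 8) & 0xf);
#endif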
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress. Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host. Actual operation is performed by EH. This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host. Actual operation is performed by EH. This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
#endif
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized. Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously. Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;
	unsigned int i;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;

	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	ap->cbl = ATA_CBL_NONE;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	return ap;
}
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}

/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources. LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports. The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register(). The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi. If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports. The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
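/*
 * Example: because the last port_info entry is reused, a driver whose
 * channels are identical can pass a single-entry, NULL-terminated
 * array (my_port_info is illustrative):
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *
 * Both ports then share the one port_info.
 */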
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host. Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times. Ports are guaranteed to get started only
 *	once. If host->ops isn't initialized yet, it's set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host. @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD. This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports. This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int irq_line;
		u32 scontrol;
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			if (spd)
				ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		/* report the secondary IRQ for the legacy-mode second channel */
		irq_line = host->irq;
		if (i == 1 && host->irq2)
			irq_line = host->irq2;

		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		/* print per-port info to dmesg */
		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
					"ctl 0x%p bmdma 0x%p irq %d\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->ioaddr.cmd_addr,
					ap->ioaddr.ctl_addr,
					ap->ioaddr.bmdma_addr,
					irq_line);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	return 0;
}
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it. This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	/* Used to print device info at probe */
	host->irq = irq;

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
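/*
 * Example: with the helpers above, a minimal PCI LLD probe reduces to
 * the sketch below (my_port_info/my_sht are illustrative; BAR mapping
 * and error unwinding are omitted):
 */
#if 0
static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* ... map BARs and fill each port's ata_ioports here ... */

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, &my_sht);
}
#endif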
/**
 *	ata_port_detach - Detach ATA port in preparation for device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host. @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached. Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH. All in-flight commands are aborted. EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap); /* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
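/*
 * Example: a legacy-style driver maps its command/control blocks and
 * lets ata_std_ports() derive the remaining taskfile registers.  The
 * 0x1f0/0x3f6 bases are the classic primary channel and, like gdev
 * (the generic device), illustrative here:
 */
#if 0
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = devm_ioport_map(gdev, 0x1f0, 8);
	ioaddr->ctl_addr = devm_ioport_map(gdev, 0x3f6, 1);
	ioaddr->altstatus_addr = ioaddr->ctl_addr;
	ata_std_ports(ioaddr);
#endif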
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred. Detach all ports. Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
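/*
 * Example: callers describe the config-space bit to probe with a
 * struct pci_bits { reg, width, mask, val }.  Testing an enable bit
 * in an 8-bit register (offset 0x41 and the bit are made up here):
 */
#if 0
	static const struct pci_bits enable_bit = {
		.reg	= 0x41,	/* config space offset */
		.width	= 1,	/* in bytes */
		.mask	= 0x80,
		.val	= 0x80,	/* expected value after masking */
	};

	if (!pci_test_config_bits(pdev, &enable_bit))
		return -ENODEV;	/* channel disabled */
#endif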
  5643. #ifdef CONFIG_PM
  5644. void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
  5645. {
  5646. pci_save_state(pdev);
  5647. pci_disable_device(pdev);
  5648. if (mesg.event == PM_EVENT_SUSPEND)
  5649. pci_set_power_state(pdev, PCI_D3hot);
  5650. }
  5651. int ata_pci_device_do_resume(struct pci_dev *pdev)
  5652. {
  5653. int rc;
  5654. pci_set_power_state(pdev, PCI_D0);
  5655. pci_restore_state(pdev);
  5656. rc = pcim_enable_device(pdev);
  5657. if (rc) {
  5658. dev_printk(KERN_ERR, &pdev->dev,
  5659. "failed to enable device after resume (%d)\n", rc);
  5660. return rc;
  5661. }
  5662. pci_set_master(pdev);
  5663. return 0;
  5664. }
  5665. int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
  5666. {
  5667. struct ata_host *host = dev_get_drvdata(&pdev->dev);
  5668. int rc = 0;
  5669. rc = ata_host_suspend(host, mesg);
  5670. if (rc)
  5671. return rc;
  5672. ata_pci_device_do_suspend(pdev, mesg);
  5673. return 0;
  5674. }
  5675. int ata_pci_device_resume(struct pci_dev *pdev)
  5676. {
  5677. struct ata_host *host = dev_get_drvdata(&pdev->dev);
  5678. int rc;
  5679. rc = ata_pci_device_do_resume(pdev);
  5680. if (rc == 0)
  5681. ata_host_resume(host);
  5682. return rc;
  5683. }
  5684. #endif /* CONFIG_PM */
  5685. #endif /* CONFIG_PCI */

static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;

	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

/*
 * ata_ratelimit - rate limit noisy messages
 *
 * Returns 1 if the caller may print now, 0 if it should stay quiet;
 * at most one message is allowed per HZ/5 jiffies.
 */
int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
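
/*
 * Example: guarding a diagnostic message from interrupt context so
 * that an interrupt storm cannot flood the log (illustrative
 * snippet, not from a specific driver):
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */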

/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
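
/*
 * Example (hypothetical register and bit): wait up to one second,
 * polling every 10ms, for a controller busy bit to clear.  Because
 * mask and val are both MY_CTL_BUSY, the loop runs while the bit
 * reads as set and stops once it reads as clear.
 *
 *	status = ata_wait_register(mmio + MY_CTL_REG,
 *				   MY_CTL_BUSY, MY_CTL_BUSY,
 *				   10, 1000);
 *	if (status & MY_CTL_BUSY)
 *		return -EBUSY;
 */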

/*
 * Dummy port_ops - used for ports which are known to be unavailable.
 * A dummy port always reports ATA_DRDY status and fails any queued
 * command with AC_ERR_SYSTEM.
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc)	{ }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
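
/*
 * Example (hypothetical): a driver whose hardware wires up only the
 * first channel can mark the second port dummy when building its
 * port-info array, so libata registers it without touching
 * nonexistent hardware:
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 */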

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);