/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for a command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
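/*
 * Worked example (illustrative sketch, values assumed): for a READ DMA EXT
 * taskfile (command 0x25) addressing LBA 0x12345678, with pmp == 0 and
 * is_cmd == 1, the FIS built above would begin roughly:
 *
 *	fis[0] = 0x27	(Register - Host to Device)
 *	fis[1] = 0x80	(Command bit set, PMP port 0)
 *	fis[2] = 0x25	(tf->command)
 *	fis[4] = 0x78	(tf->lbal,     LBA bits  7:0)
 *	fis[5] = 0x56	(tf->lbam,     LBA bits 15:8)
 *	fis[6] = 0x34	(tf->lbah,     LBA bits 23:16)
 *	fis[8] = 0x12	(tf->hob_lbal, LBA bits 31:24)
 */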
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
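/*
 * Worked example (illustrative, values assumed): with dev->heads == 16 and
 * dev->sectors == 63, a CHS taskfile holding cyl == 2, head == 3 and
 * sect == 4 decodes above to
 *
 *	block = (2 * 16 + 3) * 63 + 4 = 2209
 */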
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
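/*
 * Note on the NCQ branch above: for FPDMA READ/WRITE the tag travels in
 * bits 7:3 of the sector count register, so a queued command with tag 5
 * (an assumed example) ends up with tf->nsect == (5 << 3) == 0x28, while
 * the transfer length goes into the feature/hob_feature pair instead.
 */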
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
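/*
 * Usage sketch (illustrative, masks assumed): packing PIO modes 0-4 (0x1f)
 * with MWDMA modes 0-2 (0x07) and no UDMA looks like
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0);
 *	unsigned int pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * which recovers the three component masks; passing NULL for any
 * destination simply skips it.
 */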
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
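/*
 * Example (illustrative): fls(xfer_mask) - 1 picks the highest set bit,
 * which indexes xfer_mode_str[] directly, so a mask whose top bit is the
 * UDMA/100 entry prints as "UDMA/100" and an empty mask falls back to
 * "<n/a>".  A device advertising several modes is therefore reported by
 * its fastest one only.
 */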
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because disks are too stupid to know that,
	 * if the host rejects a request to go to SLUMBER,
	 * they should retry at PARTIAL; instead they just
	 * give up.  So, for medium_power to work at all,
	 * we need to allow only HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disables DIPM if the device does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions, which effectively
		 * disables DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}
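/*
 * Note on the SControl arithmetic above: per the SATA spec, bits 11:8 of
 * SControl form the IPM field; setting bit 8 disallows transitions to
 * PARTIAL and setting bit 9 disallows transitions to SLUMBER.  Hence
 * 0x3 << 8 blocks both, while the MEDIUM_POWER case clears bit 8 and
 * sets bit 9 to permit PARTIAL only.
 */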
/**
 * ata_dev_enable_pm - enable SATA interface power management
 * @dev: device to enable power management
 * @policy: the link power management policy
 *
 * Enable SATA Interface power management.  This will enable
 * Device Interface Power Management (DIPM) for min_power
 * policy, and then call driver specific callbacks for
 * enabling Host Initiated Power management.
 *
 * Locking: Caller.
 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 * ata_dev_disable_pm - disable SATA interface power management
 * @dev: device to disable power management
 *
 * Disable SATA Interface power management.  This will disable
 * Device Interface Power Management (DIPM) without changing
 * policy, and call driver specific callbacks for disabling Host
 * Initiated Power management.
 *
 * Locking: Caller.
 * Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EHI_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec never mentioned using different signatures for ATA/ATAPI
	 * devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
  865. /**
  866. * ata_id_string - Convert IDENTIFY DEVICE page into string
  867. * @id: IDENTIFY DEVICE results we will examine
  868. * @s: string into which data is output
  869. * @ofs: offset into identify device page
  870. * @len: length of string to return. must be an even number.
  871. *
  872. * The strings in the IDENTIFY DEVICE page are broken up into
  873. * 16-bit chunks. Run through the string, and output each
  874. * 8-bit chunk linearly, regardless of platform.
  875. *
  876. * LOCKING:
  877. * caller.
  878. */
  879. void ata_id_string(const u16 *id, unsigned char *s,
  880. unsigned int ofs, unsigned int len)
  881. {
  882. unsigned int c;
  883. while (len > 0) {
  884. c = id[ofs] >> 8;
  885. *s = c;
  886. s++;
  887. c = id[ofs] & 0xff;
  888. *s = c;
  889. s++;
  890. ofs++;
  891. len -= 2;
  892. }
  893. }
  894. /**
  895. * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
  896. * @id: IDENTIFY DEVICE results we will examine
  897. * @s: string into which data is output
  898. * @ofs: offset into identify device page
  899. * @len: length of string to return. must be an odd number.
  900. *
  901. * This function is identical to ata_id_string except that it
  902. * trims trailing spaces and terminates the resulting string with
  903. * null. @len must be actual maximum length (even number) + 1.
  904. *
  905. * LOCKING:
  906. * caller.
  907. */
  908. void ata_id_c_string(const u16 *id, unsigned char *s,
  909. unsigned int ofs, unsigned int len)
  910. {
  911. unsigned char *p;
  912. WARN_ON(!(len & 1));
  913. ata_id_string(id, s, ofs, len - 1);
  914. p = s + strnlen(s, len - 1);
  915. while (p > s && p[-1] == ' ')
  916. p--;
  917. *p = '\0';
  918. }
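/* Capacity in sectors from IDENTIFY data: LBA48 words 100-103,
 * LBA28 words 60-61, current CHS words 57-58, or default CHS
 * (words 1 x 3 x 6) as a last resort.
 */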
  919. static u64 ata_id_n_sectors(const u16 *id)
  920. {
  921. if (ata_id_has_lba(id)) {
  922. if (ata_id_has_lba48(id))
  923. return ata_id_u64(id, 100);
  924. else
  925. return ata_id_u32(id, 60);
  926. } else {
  927. if (ata_id_current_chs_valid(id))
  928. return ata_id_u32(id, 57);
  929. else
  930. return id[1] * id[3] * id[6];
  931. }
  932. }
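/* Reassemble the 48-bit LBA spread across the taskfile HOB and
 * non-HOB address registers; the post-increment turns the returned
 * max address into a sector count.
 */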
  933. static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
  934. {
  935. u64 sectors = 0;
  936. sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
  937. sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
  938. sectors |= (tf->hob_lbal & 0xff) << 24;
  939. sectors |= (tf->lbah & 0xff) << 16;
  940. sectors |= (tf->lbam & 0xff) << 8;
  941. sectors |= (tf->lbal & 0xff);
  942. return ++sectors;
  943. }
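/* LBA28 variant of the above: the top four address bits live in the
 * device register.  Again returns max address + 1, i.e. a sector count.
 */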
  944. static u64 ata_tf_to_lba(struct ata_taskfile *tf)
  945. {
  946. u64 sectors = 0;
  947. sectors |= (tf->device & 0x0f) << 24;
  948. sectors |= (tf->lbah & 0xff) << 16;
  949. sectors |= (tf->lbam & 0xff) << 8;
  950. sectors |= (tf->lbal & 0xff);
  951. return ++sectors;
  952. }
  953. /**
  954. * ata_read_native_max_address - Read native max address
  955. * @dev: target device
  956. * @max_sectors: out parameter for the result native max address
  957. *
  958. * Perform an LBA48 or LBA28 native size query upon the device in
  959. * question.
  960. *
  961. * RETURNS:
  962. * 0 on success, -EACCES if command is aborted by the drive.
  963. * -EIO on other errors.
  964. */
  965. static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
  966. {
  967. unsigned int err_mask;
  968. struct ata_taskfile tf;
  969. int lba48 = ata_id_has_lba48(dev->id);
  970. ata_tf_init(dev, &tf);
  971. /* always clear all address registers */
  972. tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
  973. if (lba48) {
  974. tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
  975. tf.flags |= ATA_TFLAG_LBA48;
  976. } else
  977. tf.command = ATA_CMD_READ_NATIVE_MAX;
  978. tf.protocol |= ATA_PROT_NODATA;
  979. tf.device |= ATA_LBA;
  980. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  981. if (err_mask) {
  982. ata_dev_printk(dev, KERN_WARNING, "failed to read native "
  983. "max address (err_mask=0x%x)\n", err_mask);
  984. if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
  985. return -EACCES;
  986. return -EIO;
  987. }
  988. if (lba48)
  989. *max_sectors = ata_tf_to_lba48(&tf);
  990. else
  991. *max_sectors = ata_tf_to_lba(&tf);
  992. if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
  993. (*max_sectors)--;
  994. return 0;
  995. }
  996. /**
  997. * ata_set_max_sectors - Set max sectors
  998. * @dev: target device
  999. * @new_sectors: new max sectors value to set for the device
  1000. *
  1001. * Set max sectors of @dev to @new_sectors.
  1002. *
  1003. * RETURNS:
  1004. * 0 on success, -EACCES if command is aborted or denied (due to
  1005. * previous non-volatile SET_MAX) by the drive. -EIO on other
  1006. * errors.
  1007. */
  1008. static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
  1009. {
  1010. unsigned int err_mask;
  1011. struct ata_taskfile tf;
  1012. int lba48 = ata_id_has_lba48(dev->id);
  1013. new_sectors--;
  1014. ata_tf_init(dev, &tf);
  1015. tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
  1016. if (lba48) {
  1017. tf.command = ATA_CMD_SET_MAX_EXT;
  1018. tf.flags |= ATA_TFLAG_LBA48;
  1019. tf.hob_lbal = (new_sectors >> 24) & 0xff;
  1020. tf.hob_lbam = (new_sectors >> 32) & 0xff;
  1021. tf.hob_lbah = (new_sectors >> 40) & 0xff;
  1022. } else {
  1023. tf.command = ATA_CMD_SET_MAX;
  1024. tf.device |= (new_sectors >> 24) & 0xf;
  1025. }
  1026. tf.protocol |= ATA_PROT_NODATA;
  1027. tf.device |= ATA_LBA;
  1028. tf.lbal = (new_sectors >> 0) & 0xff;
  1029. tf.lbam = (new_sectors >> 8) & 0xff;
  1030. tf.lbah = (new_sectors >> 16) & 0xff;
  1031. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  1032. if (err_mask) {
  1033. ata_dev_printk(dev, KERN_WARNING, "failed to set "
  1034. "max address (err_mask=0x%x)\n", err_mask);
  1035. if (err_mask == AC_ERR_DEV &&
  1036. (tf.feature & (ATA_ABORTED | ATA_IDNF)))
  1037. return -EACCES;
  1038. return -EIO;
  1039. }
  1040. return 0;
  1041. }
  1042. /**
  1043. * ata_hpa_resize - Resize a device with an HPA set
  1044. * @dev: Device to resize
  1045. *
  1046. * Read the size of an LBA28 or LBA48 disk with HPA features and resize
  1047. * it if required to the full size of the media. The caller must check
  1048. * the drive has the HPA feature set enabled.
  1049. *
  1050. * RETURNS:
  1051. * 0 on success, -errno on failure.
  1052. */
  1053. static int ata_hpa_resize(struct ata_device *dev)
  1054. {
  1055. struct ata_eh_context *ehc = &dev->link->eh_context;
  1056. int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
  1057. u64 sectors = ata_id_n_sectors(dev->id);
  1058. u64 native_sectors;
  1059. int rc;
  1060. /* do we need to do it? */
  1061. if (dev->class != ATA_DEV_ATA ||
  1062. !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
  1063. (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
  1064. return 0;
  1065. /* read native max address */
  1066. rc = ata_read_native_max_address(dev, &native_sectors);
  1067. if (rc) {
  1068. /* If HPA isn't going to be unlocked, skip HPA
  1069. * resizing from the next try.
  1070. */
  1071. if (!ata_ignore_hpa) {
  1072. ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
  1073. "broken, will skip HPA handling\n");
  1074. dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
  1075. /* we can continue if device aborted the command */
  1076. if (rc == -EACCES)
  1077. rc = 0;
  1078. }
  1079. return rc;
  1080. }
  1081. /* nothing to do? */
  1082. if (native_sectors <= sectors || !ata_ignore_hpa) {
  1083. if (!print_info || native_sectors == sectors)
  1084. return 0;
  1085. if (native_sectors > sectors)
  1086. ata_dev_printk(dev, KERN_INFO,
  1087. "HPA detected: current %llu, native %llu\n",
  1088. (unsigned long long)sectors,
  1089. (unsigned long long)native_sectors);
  1090. else if (native_sectors < sectors)
  1091. ata_dev_printk(dev, KERN_WARNING,
  1092. "native sectors (%llu) is smaller than "
  1093. "sectors (%llu)\n",
  1094. (unsigned long long)native_sectors,
  1095. (unsigned long long)sectors);
  1096. return 0;
  1097. }
  1098. /* let's unlock HPA */
  1099. rc = ata_set_max_sectors(dev, native_sectors);
  1100. if (rc == -EACCES) {
  1101. /* if device aborted the command, skip HPA resizing */
  1102. ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
  1103. "(%llu -> %llu), skipping HPA handling\n",
  1104. (unsigned long long)sectors,
  1105. (unsigned long long)native_sectors);
  1106. dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
  1107. return 0;
  1108. } else if (rc)
  1109. return rc;
  1110. /* re-read IDENTIFY data */
  1111. rc = ata_dev_reread_id(dev, 0);
  1112. if (rc) {
  1113. ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
  1114. "data after HPA resizing\n");
  1115. return rc;
  1116. }
  1117. if (print_info) {
  1118. u64 new_sectors = ata_id_n_sectors(dev->id);
  1119. ata_dev_printk(dev, KERN_INFO,
  1120. "HPA unlocked: %llu -> %llu, native %llu\n",
  1121. (unsigned long long)sectors,
  1122. (unsigned long long)new_sectors,
  1123. (unsigned long long)native_sectors);
  1124. }
  1125. return 0;
  1126. }
  1127. /**
  1128. * ata_id_to_dma_mode - Identify DMA mode from id block
  1129. * @dev: device to identify
  1130. * @unknown: mode to assume if we cannot tell
  1131. *
  1132. * Set up the timing values for the device based upon the identify
  1133. * reported values for the DMA mode. This function is used by drivers
  1134. * which rely upon firmware configured modes, but wish to report the
  1135. * mode correctly when possible.
  1136. *
* In addition we emit messages in the same format as the default
* ata_dev_set_mode handler, in order to provide consistency of
* presentation.
  1140. */
  1141. void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
  1142. {
  1143. unsigned int mask;
  1144. u8 mode;
  1145. /* Pack the DMA modes */
  1146. mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
  1147. if (dev->id[53] & 0x04)
  1148. mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
  1149. /* Select the mode in use */
  1150. mode = ata_xfer_mask2mode(mask);
  1151. if (mode != 0) {
  1152. ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
  1153. ata_mode_string(mask));
  1154. } else {
  1155. /* SWDMA perhaps ? */
  1156. mode = unknown;
  1157. ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
  1158. }
  1159. /* Configure the device reporting */
  1160. dev->xfer_mode = mode;
  1161. dev->xfer_shift = ata_xfer_mode2shift(mode);
  1162. }
  1163. /**
  1164. * ata_noop_dev_select - Select device 0/1 on ATA bus
  1165. * @ap: ATA channel to manipulate
  1166. * @device: ATA device (numbered from zero) to select
  1167. *
* This function performs no actual device selection; it is a no-op.
  1169. *
  1170. * May be used as the dev_select() entry in ata_port_operations.
  1171. *
  1172. * LOCKING:
  1173. * caller.
  1174. */
  1175. void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
  1176. {
  1177. }
  1178. /**
  1179. * ata_std_dev_select - Select device 0/1 on ATA bus
  1180. * @ap: ATA channel to manipulate
  1181. * @device: ATA device (numbered from zero) to select
  1182. *
  1183. * Use the method defined in the ATA specification to
  1184. * make either device 0, or device 1, active on the
  1185. * ATA channel. Works with both PIO and MMIO.
  1186. *
  1187. * May be used as the dev_select() entry in ata_port_operations.
  1188. *
  1189. * LOCKING:
  1190. * caller.
  1191. */
  1192. void ata_std_dev_select(struct ata_port *ap, unsigned int device)
  1193. {
  1194. u8 tmp;
  1195. if (device == 0)
  1196. tmp = ATA_DEVICE_OBS;
  1197. else
  1198. tmp = ATA_DEVICE_OBS | ATA_DEV1;
  1199. iowrite8(tmp, ap->ioaddr.device_addr);
  1200. ata_pause(ap); /* needed; also flushes, for mmio */
  1201. }
  1202. /**
  1203. * ata_dev_select - Select device 0/1 on ATA bus
  1204. * @ap: ATA channel to manipulate
  1205. * @device: ATA device (numbered from zero) to select
  1206. * @wait: non-zero to wait for Status register BSY bit to clear
  1207. * @can_sleep: non-zero if context allows sleeping
  1208. *
  1209. * Use the method defined in the ATA specification to
  1210. * make either device 0, or device 1, active on the
  1211. * ATA channel.
  1212. *
  1213. * This is a high-level version of ata_std_dev_select(),
  1214. * which additionally provides the services of inserting
  1215. * the proper pauses and status polling, where needed.
  1216. *
  1217. * LOCKING:
  1218. * caller.
  1219. */
  1220. void ata_dev_select(struct ata_port *ap, unsigned int device,
  1221. unsigned int wait, unsigned int can_sleep)
  1222. {
  1223. if (ata_msg_probe(ap))
  1224. ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
  1225. "device %u, wait %u\n", device, wait);
  1226. if (wait)
  1227. ata_wait_idle(ap);
  1228. ap->ops->dev_select(ap, device);
  1229. if (wait) {
  1230. if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
  1231. msleep(150);
  1232. ata_wait_idle(ap);
  1233. }
  1234. }
  1235. /**
  1236. * ata_dump_id - IDENTIFY DEVICE info debugging output
  1237. * @id: IDENTIFY DEVICE page to dump
  1238. *
  1239. * Dump selected 16-bit words from the given IDENTIFY DEVICE
  1240. * page.
  1241. *
  1242. * LOCKING:
  1243. * caller.
  1244. */
  1245. static inline void ata_dump_id(const u16 *id)
  1246. {
  1247. DPRINTK("49==0x%04x "
  1248. "53==0x%04x "
  1249. "63==0x%04x "
  1250. "64==0x%04x "
  1251. "75==0x%04x \n",
  1252. id[49],
  1253. id[53],
  1254. id[63],
  1255. id[64],
  1256. id[75]);
  1257. DPRINTK("80==0x%04x "
  1258. "81==0x%04x "
  1259. "82==0x%04x "
  1260. "83==0x%04x "
  1261. "84==0x%04x \n",
  1262. id[80],
  1263. id[81],
  1264. id[82],
  1265. id[83],
  1266. id[84]);
  1267. DPRINTK("88==0x%04x "
  1268. "93==0x%04x\n",
  1269. id[88],
  1270. id[93]);
  1271. }
  1272. /**
  1273. * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
  1274. * @id: IDENTIFY data to compute xfer mask from
  1275. *
  1276. * Compute the xfermask for this device. This is not as trivial
  1277. * as it seems if we must consider early devices correctly.
  1278. *
  1279. * FIXME: pre IDE drive timing (do we care ?).
  1280. *
  1281. * LOCKING:
  1282. * None.
  1283. *
  1284. * RETURNS:
  1285. * Computed xfermask
  1286. */
  1287. static unsigned int ata_id_xfermask(const u16 *id)
  1288. {
  1289. unsigned int pio_mask, mwdma_mask, udma_mask;
  1290. /* Usual case. Word 53 indicates word 64 is valid */
  1291. if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
  1292. pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
  1293. pio_mask <<= 3;
  1294. pio_mask |= 0x7;
  1295. } else {
  1296. /* If word 64 isn't valid then Word 51 high byte holds
  1297. * the PIO timing number for the maximum. Turn it into
  1298. * a mask.
  1299. */
  1300. u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
  1301. if (mode < 5) /* Valid PIO range */
  1302. pio_mask = (2 << mode) - 1;
  1303. else
  1304. pio_mask = 1;
/* But wait... there's more.  Design your standards by
* committee and you too can get a free iordy field to
* process.  However it's the speeds, not the modes, that
* are supported...  Note that drivers using the timing
* API will get this right anyway.
*/
  1311. }
  1312. mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
  1313. if (ata_id_is_cfa(id)) {
  1314. /*
  1315. * Process compact flash extended modes
  1316. */
  1317. int pio = id[163] & 0x7;
  1318. int dma = (id[163] >> 3) & 7;
  1319. if (pio)
  1320. pio_mask |= (1 << 5);
  1321. if (pio > 1)
  1322. pio_mask |= (1 << 6);
  1323. if (dma)
  1324. mwdma_mask |= (1 << 3);
  1325. if (dma > 1)
  1326. mwdma_mask |= (1 << 4);
  1327. }
  1328. udma_mask = 0;
  1329. if (id[ATA_ID_FIELD_VALID] & (1 << 2))
  1330. udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
  1331. return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
  1332. }
  1333. /**
  1334. * ata_port_queue_task - Queue port_task
  1335. * @ap: The ata_port to queue port_task for
  1336. * @fn: workqueue function to be scheduled
  1337. * @data: data for @fn to use
  1338. * @delay: delay time for workqueue function
  1339. *
  1340. * Schedule @fn(@data) for execution after @delay jiffies using
* port_task.  There is one port_task per port and it is the
* user's (i.e. the low level driver's) responsibility to make
* sure that only one task is active at any given time.
  1344. *
  1345. * libata core layer takes care of synchronization between
  1346. * port_task and EH. ata_port_queue_task() may be ignored for EH
  1347. * synchronization.
  1348. *
  1349. * LOCKING:
  1350. * Inherited from caller.
  1351. */
  1352. void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
  1353. unsigned long delay)
  1354. {
  1355. PREPARE_DELAYED_WORK(&ap->port_task, fn);
  1356. ap->port_task_data = data;
  1357. /* may fail if ata_port_flush_task() in progress */
  1358. queue_delayed_work(ata_wq, &ap->port_task, delay);
  1359. }
  1360. /**
  1361. * ata_port_flush_task - Flush port_task
  1362. * @ap: The ata_port to flush port_task for
  1363. *
* After this function completes, port_task is guaranteed not to
  1365. * be running or scheduled.
  1366. *
  1367. * LOCKING:
  1368. * Kernel thread context (may sleep)
  1369. */
  1370. void ata_port_flush_task(struct ata_port *ap)
  1371. {
  1372. DPRINTK("ENTER\n");
  1373. cancel_rearming_delayed_work(&ap->port_task);
  1374. if (ata_msg_ctl(ap))
  1375. ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
  1376. }
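/* qc->complete_fn used by ata_exec_internal_sg(): wake up the waiter
 * sleeping on the on-stack completion stored in qc->private_data.
 */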
  1377. static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  1378. {
  1379. struct completion *waiting = qc->private_data;
  1380. complete(waiting);
  1381. }
  1382. /**
  1383. * ata_exec_internal_sg - execute libata internal command
  1384. * @dev: Device to which the command is sent
  1385. * @tf: Taskfile registers for the command and the result
  1386. * @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
  1388. * @sgl: sg list for the data buffer of the command
  1389. * @n_elem: Number of sg entries
  1390. * @timeout: Timeout in msecs (0 for default)
  1391. *
  1392. * Executes libata internal command with timeout. @tf contains
  1393. * command on entry and result on return. Timeout and error
  1394. * conditions are reported via return value. No recovery action
* is taken after a command times out.  It's the caller's duty to
  1396. * clean up after timeout.
  1397. *
  1398. * LOCKING:
  1399. * None. Should be called with kernel context, might sleep.
  1400. *
  1401. * RETURNS:
  1402. * Zero on success, AC_ERR_* mask on failure
  1403. */
  1404. unsigned ata_exec_internal_sg(struct ata_device *dev,
  1405. struct ata_taskfile *tf, const u8 *cdb,
  1406. int dma_dir, struct scatterlist *sgl,
  1407. unsigned int n_elem, unsigned long timeout)
  1408. {
  1409. struct ata_link *link = dev->link;
  1410. struct ata_port *ap = link->ap;
  1411. u8 command = tf->command;
  1412. struct ata_queued_cmd *qc;
  1413. unsigned int tag, preempted_tag;
  1414. u32 preempted_sactive, preempted_qc_active;
  1415. int preempted_nr_active_links;
  1416. DECLARE_COMPLETION_ONSTACK(wait);
  1417. unsigned long flags;
  1418. unsigned int err_mask;
  1419. int rc;
  1420. spin_lock_irqsave(ap->lock, flags);
  1421. /* no internal command while frozen */
  1422. if (ap->pflags & ATA_PFLAG_FROZEN) {
  1423. spin_unlock_irqrestore(ap->lock, flags);
  1424. return AC_ERR_SYSTEM;
  1425. }
  1426. /* initialize internal qc */
  1427. /* XXX: Tag 0 is used for drivers with legacy EH as some
  1428. * drivers choke if any other tag is given. This breaks
  1429. * ata_tag_internal() test for those drivers. Don't use new
  1430. * EH stuff without converting to it.
  1431. */
  1432. if (ap->ops->error_handler)
  1433. tag = ATA_TAG_INTERNAL;
  1434. else
  1435. tag = 0;
  1436. if (test_and_set_bit(tag, &ap->qc_allocated))
  1437. BUG();
  1438. qc = __ata_qc_from_tag(ap, tag);
  1439. qc->tag = tag;
  1440. qc->scsicmd = NULL;
  1441. qc->ap = ap;
  1442. qc->dev = dev;
  1443. ata_qc_reinit(qc);
  1444. preempted_tag = link->active_tag;
  1445. preempted_sactive = link->sactive;
  1446. preempted_qc_active = ap->qc_active;
  1447. preempted_nr_active_links = ap->nr_active_links;
  1448. link->active_tag = ATA_TAG_POISON;
  1449. link->sactive = 0;
  1450. ap->qc_active = 0;
  1451. ap->nr_active_links = 0;
  1452. /* prepare & issue qc */
  1453. qc->tf = *tf;
  1454. if (cdb)
  1455. memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
  1456. qc->flags |= ATA_QCFLAG_RESULT_TF;
  1457. qc->dma_dir = dma_dir;
  1458. if (dma_dir != DMA_NONE) {
  1459. unsigned int i, buflen = 0;
  1460. struct scatterlist *sg;
  1461. for_each_sg(sgl, sg, n_elem, i)
  1462. buflen += sg->length;
  1463. ata_sg_init(qc, sgl, n_elem);
  1464. qc->nbytes = buflen;
  1465. }
  1466. qc->private_data = &wait;
  1467. qc->complete_fn = ata_qc_complete_internal;
  1468. ata_qc_issue(qc);
  1469. spin_unlock_irqrestore(ap->lock, flags);
  1470. if (!timeout)
  1471. timeout = ata_probe_timeout * 1000 / HZ;
  1472. rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
  1473. ata_port_flush_task(ap);
  1474. if (!rc) {
  1475. spin_lock_irqsave(ap->lock, flags);
  1476. /* We're racing with irq here. If we lose, the
  1477. * following test prevents us from completing the qc
  1478. * twice. If we win, the port is frozen and will be
  1479. * cleaned up by ->post_internal_cmd().
  1480. */
  1481. if (qc->flags & ATA_QCFLAG_ACTIVE) {
  1482. qc->err_mask |= AC_ERR_TIMEOUT;
  1483. if (ap->ops->error_handler)
  1484. ata_port_freeze(ap);
  1485. else
  1486. ata_qc_complete(qc);
  1487. if (ata_msg_warn(ap))
  1488. ata_dev_printk(dev, KERN_WARNING,
  1489. "qc timeout (cmd 0x%x)\n", command);
  1490. }
  1491. spin_unlock_irqrestore(ap->lock, flags);
  1492. }
  1493. /* do post_internal_cmd */
  1494. if (ap->ops->post_internal_cmd)
  1495. ap->ops->post_internal_cmd(qc);
  1496. /* perform minimal error analysis */
  1497. if (qc->flags & ATA_QCFLAG_FAILED) {
  1498. if (qc->result_tf.command & (ATA_ERR | ATA_DF))
  1499. qc->err_mask |= AC_ERR_DEV;
  1500. if (!qc->err_mask)
  1501. qc->err_mask |= AC_ERR_OTHER;
  1502. if (qc->err_mask & ~AC_ERR_OTHER)
  1503. qc->err_mask &= ~AC_ERR_OTHER;
  1504. }
  1505. /* finish up */
  1506. spin_lock_irqsave(ap->lock, flags);
  1507. *tf = qc->result_tf;
  1508. err_mask = qc->err_mask;
  1509. ata_qc_free(qc);
  1510. link->active_tag = preempted_tag;
  1511. link->sactive = preempted_sactive;
  1512. ap->qc_active = preempted_qc_active;
  1513. ap->nr_active_links = preempted_nr_active_links;
  1514. /* XXX - Some LLDDs (sata_mv) disable port on command failure.
  1515. * Until those drivers are fixed, we detect the condition
  1516. * here, fail the command with AC_ERR_SYSTEM and reenable the
  1517. * port.
  1518. *
  1519. * Note that this doesn't change any behavior as internal
  1520. * command failure results in disabling the device in the
  1521. * higher layer for LLDDs without new reset/EH callbacks.
  1522. *
  1523. * Kill the following code as soon as those drivers are fixed.
  1524. */
  1525. if (ap->flags & ATA_FLAG_DISABLED) {
  1526. err_mask |= AC_ERR_SYSTEM;
  1527. ata_port_probe(ap);
  1528. }
  1529. spin_unlock_irqrestore(ap->lock, flags);
  1530. return err_mask;
  1531. }
  1532. /**
  1533. * ata_exec_internal - execute libata internal command
  1534. * @dev: Device to which the command is sent
  1535. * @tf: Taskfile registers for the command and the result
  1536. * @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
  1538. * @buf: Data buffer of the command
  1539. * @buflen: Length of data buffer
  1540. * @timeout: Timeout in msecs (0 for default)
  1541. *
  1542. * Wrapper around ata_exec_internal_sg() which takes simple
  1543. * buffer instead of sg list.
  1544. *
  1545. * LOCKING:
  1546. * None. Should be called with kernel context, might sleep.
  1547. *
  1548. * RETURNS:
  1549. * Zero on success, AC_ERR_* mask on failure
  1550. */
  1551. unsigned ata_exec_internal(struct ata_device *dev,
  1552. struct ata_taskfile *tf, const u8 *cdb,
  1553. int dma_dir, void *buf, unsigned int buflen,
  1554. unsigned long timeout)
  1555. {
  1556. struct scatterlist *psg = NULL, sg;
  1557. unsigned int n_elem = 0;
  1558. if (dma_dir != DMA_NONE) {
  1559. WARN_ON(!buf);
  1560. sg_init_one(&sg, buf, buflen);
  1561. psg = &sg;
  1562. n_elem++;
  1563. }
  1564. return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
  1565. timeout);
  1566. }
  1567. /**
  1568. * ata_do_simple_cmd - execute simple internal command
  1569. * @dev: Device to which the command is sent
  1570. * @cmd: Opcode to execute
  1571. *
  1572. * Execute a 'simple' command, that only consists of the opcode
  1573. * 'cmd' itself, without filling any other registers
  1574. *
  1575. * LOCKING:
  1576. * Kernel thread context (may sleep).
  1577. *
  1578. * RETURNS:
  1579. * Zero on success, AC_ERR_* mask on failure
  1580. */
  1581. unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
  1582. {
  1583. struct ata_taskfile tf;
  1584. ata_tf_init(dev, &tf);
  1585. tf.command = cmd;
  1586. tf.flags |= ATA_TFLAG_DEVICE;
  1587. tf.protocol = ATA_PROT_NODATA;
  1588. return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  1589. }
  1590. /**
  1591. * ata_pio_need_iordy - check if iordy needed
  1592. * @adev: ATA device
  1593. *
  1594. * Check if the current speed of the device requires IORDY. Used
  1595. * by various controllers for chip configuration.
  1596. */
  1597. unsigned int ata_pio_need_iordy(const struct ata_device *adev)
  1598. {
  1599. /* Controller doesn't support IORDY. Probably a pointless check
  1600. as the caller should know this */
  1601. if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
  1602. return 0;
  1603. /* PIO3 and higher it is mandatory */
  1604. if (adev->pio_mode > XFER_PIO_2)
  1605. return 1;
  1606. /* We turn it on when possible */
  1607. if (ata_id_has_iordy(adev->id))
  1608. return 1;
  1609. return 0;
  1610. }
  1611. /**
  1612. * ata_pio_mask_no_iordy - Return the non IORDY mask
  1613. * @adev: ATA device
  1614. *
  1615. * Compute the highest mode possible if we are not using iordy. Return
  1616. * -1 if no iordy mode is available.
  1617. */
  1618. static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
  1619. {
  1620. /* If we have no drive specific rule, then PIO 2 is non IORDY */
  1621. if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
  1622. u16 pio = adev->id[ATA_ID_EIDE_PIO];
  1623. /* Is the speed faster than the drive allows non IORDY ? */
  1624. if (pio) {
  1625. /* This is cycle times not frequency - watch the logic! */
  1626. if (pio > 240) /* PIO2 is 240nS per cycle */
  1627. return 3 << ATA_SHIFT_PIO;
  1628. return 7 << ATA_SHIFT_PIO;
  1629. }
  1630. }
  1631. return 3 << ATA_SHIFT_PIO;
  1632. }
  1633. /**
  1634. * ata_dev_read_id - Read ID data from the specified device
  1635. * @dev: target device
  1636. * @p_class: pointer to class of the target device (may be changed)
  1637. * @flags: ATA_READID_* flags
  1638. * @id: buffer to read IDENTIFY data into
  1639. *
  1640. * Read ID data from the specified device. ATA_CMD_ID_ATA is
  1641. * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
  1642. * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
  1643. * for pre-ATA4 drives.
  1644. *
  1645. * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
  1646. * now we abort if we hit that case.
  1647. *
  1648. * LOCKING:
  1649. * Kernel thread context (may sleep)
  1650. *
  1651. * RETURNS:
  1652. * 0 on success, -errno otherwise.
  1653. */
  1654. int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
  1655. unsigned int flags, u16 *id)
  1656. {
  1657. struct ata_port *ap = dev->link->ap;
  1658. unsigned int class = *p_class;
  1659. struct ata_taskfile tf;
  1660. unsigned int err_mask = 0;
  1661. const char *reason;
  1662. int may_fallback = 1, tried_spinup = 0;
  1663. int rc;
  1664. if (ata_msg_ctl(ap))
  1665. ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
  1666. ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
  1667. retry:
  1668. ata_tf_init(dev, &tf);
  1669. switch (class) {
  1670. case ATA_DEV_ATA:
  1671. tf.command = ATA_CMD_ID_ATA;
  1672. break;
  1673. case ATA_DEV_ATAPI:
  1674. tf.command = ATA_CMD_ID_ATAPI;
  1675. break;
  1676. default:
  1677. rc = -ENODEV;
  1678. reason = "unsupported class";
  1679. goto err_out;
  1680. }
  1681. tf.protocol = ATA_PROT_PIO;
  1682. /* Some devices choke if TF registers contain garbage. Make
  1683. * sure those are properly initialized.
  1684. */
  1685. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  1686. /* Device presence detection is unreliable on some
  1687. * controllers. Always poll IDENTIFY if available.
  1688. */
  1689. tf.flags |= ATA_TFLAG_POLLING;
  1690. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
  1691. id, sizeof(id[0]) * ATA_ID_WORDS, 0);
  1692. if (err_mask) {
  1693. if (err_mask & AC_ERR_NODEV_HINT) {
  1694. DPRINTK("ata%u.%d: NODEV after polling detection\n",
  1695. ap->print_id, dev->devno);
  1696. return -ENOENT;
  1697. }
  1698. /* Device or controller might have reported the wrong
  1699. * device class. Give a shot at the other IDENTIFY if
  1700. * the current one is aborted by the device.
  1701. */
  1702. if (may_fallback &&
  1703. (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
  1704. may_fallback = 0;
  1705. if (class == ATA_DEV_ATA)
  1706. class = ATA_DEV_ATAPI;
  1707. else
  1708. class = ATA_DEV_ATA;
  1709. goto retry;
  1710. }
  1711. rc = -EIO;
  1712. reason = "I/O error";
  1713. goto err_out;
  1714. }
  1715. /* Falling back doesn't make sense if ID data was read
  1716. * successfully at least once.
  1717. */
  1718. may_fallback = 0;
  1719. swap_buf_le16(id, ATA_ID_WORDS);
  1720. /* sanity check */
  1721. rc = -EINVAL;
  1722. reason = "device reports invalid type";
  1723. if (class == ATA_DEV_ATA) {
  1724. if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
  1725. goto err_out;
  1726. } else {
  1727. if (ata_id_is_ata(id))
  1728. goto err_out;
  1729. }
  1730. if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
  1731. tried_spinup = 1;
  1732. /*
  1733. * Drive powered-up in standby mode, and requires a specific
  1734. * SET_FEATURES spin-up subcommand before it will accept
  1735. * anything other than the original IDENTIFY command.
  1736. */
  1737. err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
  1738. if (err_mask && id[2] != 0x738c) {
  1739. rc = -EIO;
  1740. reason = "SPINUP failed";
  1741. goto err_out;
  1742. }
  1743. /*
  1744. * If the drive initially returned incomplete IDENTIFY info,
  1745. * we now must reissue the IDENTIFY command.
  1746. */
  1747. if (id[2] == 0x37c8)
  1748. goto retry;
  1749. }
  1750. if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
  1751. /*
  1752. * The exact sequence expected by certain pre-ATA4 drives is:
  1753. * SRST RESET
  1754. * IDENTIFY (optional in early ATA)
  1755. * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
  1756. * anything else..
  1757. * Some drives were very specific about that exact sequence.
  1758. *
* Note that ATA4 says LBA is mandatory so the second check
* should never trigger.
  1761. */
  1762. if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
  1763. err_mask = ata_dev_init_params(dev, id[3], id[6]);
  1764. if (err_mask) {
  1765. rc = -EIO;
  1766. reason = "INIT_DEV_PARAMS failed";
  1767. goto err_out;
  1768. }
  1769. /* current CHS translation info (id[53-58]) might be
  1770. * changed. reread the identify device info.
  1771. */
  1772. flags &= ~ATA_READID_POSTRESET;
  1773. goto retry;
  1774. }
  1775. }
  1776. *p_class = class;
  1777. return 0;
  1778. err_out:
  1779. if (ata_msg_warn(ap))
  1780. ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
  1781. "(%s, err_mask=0x%x)\n", reason, err_mask);
  1782. return rc;
  1783. }
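/* Returns non-zero when @dev looks like a PATA device behind a SATA
 * bridge (SATA cable reported but IDENTIFY data does not claim native
 * SATA); ata_dev_configure() then applies the bridge limits.
 */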
  1784. static inline u8 ata_dev_knobble(struct ata_device *dev)
  1785. {
  1786. struct ata_port *ap = dev->link->ap;
  1787. return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
  1788. }
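/* Decide whether NCQ will be used for @dev and format a description
 * into @desc: empty when the device lacks NCQ, "NCQ (not used)" when
 * blacklisted, otherwise the usable queue depth (host can_queue
 * capped against the device-reported depth).
 */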
  1789. static void ata_dev_config_ncq(struct ata_device *dev,
  1790. char *desc, size_t desc_sz)
  1791. {
  1792. struct ata_port *ap = dev->link->ap;
  1793. int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
  1794. if (!ata_id_has_ncq(dev->id)) {
  1795. desc[0] = '\0';
  1796. return;
  1797. }
  1798. if (dev->horkage & ATA_HORKAGE_NONCQ) {
  1799. snprintf(desc, desc_sz, "NCQ (not used)");
  1800. return;
  1801. }
  1802. if (ap->flags & ATA_FLAG_NCQ) {
  1803. hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
  1804. dev->flags |= ATA_DFLAG_NCQ;
  1805. }
  1806. if (hdepth >= ddepth)
  1807. snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
  1808. else
  1809. snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
  1810. }
  1811. /**
  1812. * ata_dev_configure - Configure the specified ATA/ATAPI device
  1813. * @dev: Target device to configure
  1814. *
  1815. * Configure @dev according to @dev->id. Generic and low-level
  1816. * driver specific fixups are also applied.
  1817. *
  1818. * LOCKING:
  1819. * Kernel thread context (may sleep)
  1820. *
  1821. * RETURNS:
  1822. * 0 on success, -errno otherwise
  1823. */
  1824. int ata_dev_configure(struct ata_device *dev)
  1825. {
  1826. struct ata_port *ap = dev->link->ap;
  1827. struct ata_eh_context *ehc = &dev->link->eh_context;
  1828. int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
  1829. const u16 *id = dev->id;
  1830. unsigned int xfer_mask;
  1831. char revbuf[7]; /* XYZ-99\0 */
  1832. char fwrevbuf[ATA_ID_FW_REV_LEN+1];
  1833. char modelbuf[ATA_ID_PROD_LEN+1];
  1834. int rc;
  1835. if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
  1836. ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
  1837. __FUNCTION__);
  1838. return 0;
  1839. }
  1840. if (ata_msg_probe(ap))
  1841. ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
  1842. /* set horkage */
  1843. dev->horkage |= ata_dev_blacklisted(dev);
  1844. /* let ACPI work its magic */
  1845. rc = ata_acpi_on_devcfg(dev);
  1846. if (rc)
  1847. return rc;
  1848. /* massage HPA, do it early as it might change IDENTIFY data */
  1849. rc = ata_hpa_resize(dev);
  1850. if (rc)
  1851. return rc;
  1852. /* print device capabilities */
  1853. if (ata_msg_probe(ap))
  1854. ata_dev_printk(dev, KERN_DEBUG,
  1855. "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
  1856. "85:%04x 86:%04x 87:%04x 88:%04x\n",
  1857. __FUNCTION__,
  1858. id[49], id[82], id[83], id[84],
  1859. id[85], id[86], id[87], id[88]);
  1860. /* initialize to-be-configured parameters */
  1861. dev->flags &= ~ATA_DFLAG_CFG_MASK;
  1862. dev->max_sectors = 0;
  1863. dev->cdb_len = 0;
  1864. dev->n_sectors = 0;
  1865. dev->cylinders = 0;
  1866. dev->heads = 0;
  1867. dev->sectors = 0;
  1868. /*
  1869. * common ATA, ATAPI feature tests
  1870. */
  1871. /* find max transfer mode; for printk only */
  1872. xfer_mask = ata_id_xfermask(id);
  1873. if (ata_msg_probe(ap))
  1874. ata_dump_id(id);
  1875. /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
  1876. ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
  1877. sizeof(fwrevbuf));
  1878. ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
  1879. sizeof(modelbuf));
  1880. /* ATA-specific feature tests */
  1881. if (dev->class == ATA_DEV_ATA) {
  1882. if (ata_id_is_cfa(id)) {
  1883. if (id[162] & 1) /* CPRM may make this media unusable */
  1884. ata_dev_printk(dev, KERN_WARNING,
  1885. "supports DRM functions and may "
"not be fully accessible.\n");
  1887. snprintf(revbuf, 7, "CFA");
  1888. } else {
  1889. snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
  1890. /* Warn the user if the device has TPM extensions */
  1891. if (ata_id_has_tpm(id))
  1892. ata_dev_printk(dev, KERN_WARNING,
  1893. "supports DRM functions and may "
"not be fully accessible.\n");
  1895. }
  1896. dev->n_sectors = ata_id_n_sectors(id);
  1897. if (dev->id[59] & 0x100)
  1898. dev->multi_count = dev->id[59] & 0xff;
  1899. if (ata_id_has_lba(id)) {
  1900. const char *lba_desc;
  1901. char ncq_desc[20];
  1902. lba_desc = "LBA";
  1903. dev->flags |= ATA_DFLAG_LBA;
  1904. if (ata_id_has_lba48(id)) {
  1905. dev->flags |= ATA_DFLAG_LBA48;
  1906. lba_desc = "LBA48";
  1907. if (dev->n_sectors >= (1UL << 28) &&
  1908. ata_id_has_flush_ext(id))
  1909. dev->flags |= ATA_DFLAG_FLUSH_EXT;
  1910. }
  1911. /* config NCQ */
  1912. ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
  1913. /* print device info to dmesg */
  1914. if (ata_msg_drv(ap) && print_info) {
  1915. ata_dev_printk(dev, KERN_INFO,
  1916. "%s: %s, %s, max %s\n",
  1917. revbuf, modelbuf, fwrevbuf,
  1918. ata_mode_string(xfer_mask));
  1919. ata_dev_printk(dev, KERN_INFO,
  1920. "%Lu sectors, multi %u: %s %s\n",
  1921. (unsigned long long)dev->n_sectors,
  1922. dev->multi_count, lba_desc, ncq_desc);
  1923. }
  1924. } else {
  1925. /* CHS */
  1926. /* Default translation */
  1927. dev->cylinders = id[1];
  1928. dev->heads = id[3];
  1929. dev->sectors = id[6];
  1930. if (ata_id_current_chs_valid(id)) {
  1931. /* Current CHS translation is valid. */
  1932. dev->cylinders = id[54];
  1933. dev->heads = id[55];
  1934. dev->sectors = id[56];
  1935. }
  1936. /* print device info to dmesg */
  1937. if (ata_msg_drv(ap) && print_info) {
  1938. ata_dev_printk(dev, KERN_INFO,
  1939. "%s: %s, %s, max %s\n",
  1940. revbuf, modelbuf, fwrevbuf,
  1941. ata_mode_string(xfer_mask));
  1942. ata_dev_printk(dev, KERN_INFO,
  1943. "%Lu sectors, multi %u, CHS %u/%u/%u\n",
  1944. (unsigned long long)dev->n_sectors,
  1945. dev->multi_count, dev->cylinders,
  1946. dev->heads, dev->sectors);
  1947. }
  1948. }
  1949. dev->cdb_len = 16;
  1950. }
  1951. /* ATAPI-specific feature tests */
  1952. else if (dev->class == ATA_DEV_ATAPI) {
  1953. const char *cdb_intr_string = "";
  1954. const char *atapi_an_string = "";
  1955. u32 sntf;
  1956. rc = atapi_cdb_len(id);
  1957. if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
  1958. if (ata_msg_warn(ap))
  1959. ata_dev_printk(dev, KERN_WARNING,
  1960. "unsupported CDB len\n");
  1961. rc = -EINVAL;
  1962. goto err_out_nosup;
  1963. }
  1964. dev->cdb_len = (unsigned int) rc;
  1965. /* Enable ATAPI AN if both the host and device have
  1966. * the support. If PMP is attached, SNTF is required
  1967. * to enable ATAPI AN to discern between PHY status
  1968. * changed notifications and ATAPI ANs.
  1969. */
  1970. if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
  1971. (!ap->nr_pmp_links ||
  1972. sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
  1973. unsigned int err_mask;
  1974. /* issue SET feature command to turn this on */
  1975. err_mask = ata_dev_set_feature(dev,
  1976. SETFEATURES_SATA_ENABLE, SATA_AN);
  1977. if (err_mask)
  1978. ata_dev_printk(dev, KERN_ERR,
  1979. "failed to enable ATAPI AN "
  1980. "(err_mask=0x%x)\n", err_mask);
  1981. else {
  1982. dev->flags |= ATA_DFLAG_AN;
  1983. atapi_an_string = ", ATAPI AN";
  1984. }
  1985. }
  1986. if (ata_id_cdb_intr(dev->id)) {
  1987. dev->flags |= ATA_DFLAG_CDB_INTR;
  1988. cdb_intr_string = ", CDB intr";
  1989. }
  1990. /* print device info to dmesg */
  1991. if (ata_msg_drv(ap) && print_info)
  1992. ata_dev_printk(dev, KERN_INFO,
  1993. "ATAPI: %s, %s, max %s%s%s\n",
  1994. modelbuf, fwrevbuf,
  1995. ata_mode_string(xfer_mask),
  1996. cdb_intr_string, atapi_an_string);
  1997. }
  1998. /* determine max_sectors */
  1999. dev->max_sectors = ATA_MAX_SECTORS;
  2000. if (dev->flags & ATA_DFLAG_LBA48)
  2001. dev->max_sectors = ATA_MAX_SECTORS_LBA48;
  2002. if (!(dev->horkage & ATA_HORKAGE_IPM)) {
  2003. if (ata_id_has_hipm(dev->id))
  2004. dev->flags |= ATA_DFLAG_HIPM;
  2005. if (ata_id_has_dipm(dev->id))
  2006. dev->flags |= ATA_DFLAG_DIPM;
  2007. }
  2008. if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
  2009. /* Let the user know. We don't want to disallow opens for
  2010. rescue purposes, or in case the vendor is just a blithering
  2011. idiot */
  2012. if (print_info) {
  2013. ata_dev_printk(dev, KERN_WARNING,
  2014. "Drive reports diagnostics failure. This may indicate a drive\n");
  2015. ata_dev_printk(dev, KERN_WARNING,
  2016. "fault or invalid emulation. Contact drive vendor for information.\n");
  2017. }
  2018. }
  2019. /* limit bridge transfers to udma5, 200 sectors */
  2020. if (ata_dev_knobble(dev)) {
  2021. if (ata_msg_drv(ap) && print_info)
  2022. ata_dev_printk(dev, KERN_INFO,
  2023. "applying bridge limits\n");
  2024. dev->udma_mask &= ATA_UDMA5;
  2025. dev->max_sectors = ATA_MAX_SECTORS;
  2026. }
  2027. if ((dev->class == ATA_DEV_ATAPI) &&
  2028. (atapi_command_packet_set(id) == TYPE_TAPE)) {
  2029. dev->max_sectors = ATA_MAX_SECTORS_TAPE;
  2030. dev->horkage |= ATA_HORKAGE_STUCK_ERR;
  2031. }
  2032. if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
  2033. dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
  2034. dev->max_sectors);
  2035. if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
  2036. dev->horkage |= ATA_HORKAGE_IPM;
  2037. /* reset link pm_policy for this port to no pm */
  2038. ap->pm_policy = MAX_PERFORMANCE;
  2039. }
  2040. if (ap->ops->dev_config)
  2041. ap->ops->dev_config(dev);
  2042. if (ata_msg_probe(ap))
  2043. ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
  2044. __FUNCTION__, ata_chk_status(ap));
  2045. return 0;
  2046. err_out_nosup:
  2047. if (ata_msg_probe(ap))
  2048. ata_dev_printk(dev, KERN_DEBUG,
  2049. "%s: EXIT, err\n", __FUNCTION__);
  2050. return rc;
  2051. }
  2052. /**
  2053. * ata_cable_40wire - return 40 wire cable type
  2054. * @ap: port
  2055. *
  2056. * Helper method for drivers which want to hardwire 40 wire cable
  2057. * detection.
  2058. */
  2059. int ata_cable_40wire(struct ata_port *ap)
  2060. {
  2061. return ATA_CBL_PATA40;
  2062. }
  2063. /**
  2064. * ata_cable_80wire - return 80 wire cable type
  2065. * @ap: port
  2066. *
  2067. * Helper method for drivers which want to hardwire 80 wire cable
  2068. * detection.
  2069. */
  2070. int ata_cable_80wire(struct ata_port *ap)
  2071. {
  2072. return ATA_CBL_PATA80;
  2073. }
  2074. /**
  2075. * ata_cable_unknown - return unknown PATA cable.
  2076. * @ap: port
  2077. *
  2078. * Helper method for drivers which have no PATA cable detection.
  2079. */
  2080. int ata_cable_unknown(struct ata_port *ap)
  2081. {
  2082. return ATA_CBL_PATA_UNK;
  2083. }
  2084. /**
  2085. * ata_cable_sata - return SATA cable type
  2086. * @ap: port
  2087. *
  2088. * Helper method for drivers which have SATA cables
  2089. */
  2090. int ata_cable_sata(struct ata_port *ap)
  2091. {
  2092. return ATA_CBL_SATA;
  2093. }
  2094. /**
  2095. * ata_bus_probe - Reset and probe ATA bus
  2096. * @ap: Bus to probe
  2097. *
  2098. * Master ATA bus probing function. Initiates a hardware-dependent
  2099. * bus reset, then attempts to identify any devices found on
  2100. * the bus.
  2101. *
  2102. * LOCKING:
  2103. * PCI/etc. bus probe sem.
  2104. *
  2105. * RETURNS:
  2106. * Zero on success, negative errno otherwise.
  2107. */
  2108. int ata_bus_probe(struct ata_port *ap)
  2109. {
  2110. unsigned int classes[ATA_MAX_DEVICES];
  2111. int tries[ATA_MAX_DEVICES];
  2112. int rc;
  2113. struct ata_device *dev;
  2114. ata_port_probe(ap);
  2115. ata_link_for_each_dev(dev, &ap->link)
  2116. tries[dev->devno] = ATA_PROBE_MAX_TRIES;
  2117. retry:
  2118. ata_link_for_each_dev(dev, &ap->link) {
  2119. /* If we issue an SRST then an ATA drive (not ATAPI)
  2120. * may change configuration and be in PIO0 timing. If
  2121. * we do a hard reset (or are coming from power on)
  2122. * this is true for ATA or ATAPI. Until we've set a
  2123. * suitable controller mode we should not touch the
  2124. * bus as we may be talking too fast.
  2125. */
  2126. dev->pio_mode = XFER_PIO_0;
  2127. /* If the controller has a pio mode setup function
  2128. * then use it to set the chipset to rights. Don't
  2129. * touch the DMA setup as that will be dealt with when
  2130. * configuring devices.
  2131. */
  2132. if (ap->ops->set_piomode)
  2133. ap->ops->set_piomode(ap, dev);
  2134. }
  2135. /* reset and determine device classes */
  2136. ap->ops->phy_reset(ap);
  2137. ata_link_for_each_dev(dev, &ap->link) {
  2138. if (!(ap->flags & ATA_FLAG_DISABLED) &&
  2139. dev->class != ATA_DEV_UNKNOWN)
  2140. classes[dev->devno] = dev->class;
  2141. else
  2142. classes[dev->devno] = ATA_DEV_NONE;
  2143. dev->class = ATA_DEV_UNKNOWN;
  2144. }
  2145. ata_port_probe(ap);
  2146. /* read IDENTIFY page and configure devices. We have to do the identify
  2147. specific sequence bass-ackwards so that PDIAG- is released by
  2148. the slave device */
  2149. ata_link_for_each_dev(dev, &ap->link) {
  2150. if (tries[dev->devno])
  2151. dev->class = classes[dev->devno];
  2152. if (!ata_dev_enabled(dev))
  2153. continue;
  2154. rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
  2155. dev->id);
  2156. if (rc)
  2157. goto fail;
  2158. }
  2159. /* Now ask for the cable type as PDIAG- should have been released */
  2160. if (ap->ops->cable_detect)
  2161. ap->cbl = ap->ops->cable_detect(ap);
  2162. /* We may have SATA bridge glue hiding here irrespective of the
  2163. reported cable types and sensed types */
  2164. ata_link_for_each_dev(dev, &ap->link) {
  2165. if (!ata_dev_enabled(dev))
  2166. continue;
/* SATA drives indicate we have a bridge. We don't know which
   end of the link the bridge is on, which is a problem. */
  2169. if (ata_id_is_sata(dev->id))
  2170. ap->cbl = ATA_CBL_SATA;
  2171. }
  2172. /* After the identify sequence we can now set up the devices. We do
  2173. this in the normal order so that the user doesn't get confused */
  2174. ata_link_for_each_dev(dev, &ap->link) {
  2175. if (!ata_dev_enabled(dev))
  2176. continue;
  2177. ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
  2178. rc = ata_dev_configure(dev);
  2179. ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
  2180. if (rc)
  2181. goto fail;
  2182. }
  2183. /* configure transfer mode */
  2184. rc = ata_set_mode(&ap->link, &dev);
  2185. if (rc)
  2186. goto fail;
  2187. ata_link_for_each_dev(dev, &ap->link)
  2188. if (ata_dev_enabled(dev))
  2189. return 0;
  2190. /* no device present, disable port */
  2191. ata_port_disable(ap);
  2192. return -ENODEV;
  2193. fail:
  2194. tries[dev->devno]--;
  2195. switch (rc) {
  2196. case -EINVAL:
  2197. /* eeek, something went very wrong, give up */
  2198. tries[dev->devno] = 0;
  2199. break;
  2200. case -ENODEV:
  2201. /* give it just one more chance */
  2202. tries[dev->devno] = min(tries[dev->devno], 1);
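/* fall through - give the -ENODEV retry the same last-chance slow-down as -EIO */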
  2203. case -EIO:
  2204. if (tries[dev->devno] == 1) {
  2205. /* This is the last chance, better to slow
  2206. * down than lose it.
  2207. */
  2208. sata_down_spd_limit(&ap->link);
  2209. ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
  2210. }
  2211. }
  2212. if (!tries[dev->devno])
  2213. ata_dev_disable(dev);
  2214. goto retry;
  2215. }
  2216. /**
  2217. * ata_port_probe - Mark port as enabled
  2218. * @ap: Port for which we indicate enablement
  2219. *
  2220. * Modify @ap data structure such that the system
  2221. * thinks that the entire port is enabled.
  2222. *
  2223. * LOCKING: host lock, or some other form of
  2224. * serialization.
  2225. */
  2226. void ata_port_probe(struct ata_port *ap)
  2227. {
  2228. ap->flags &= ~ATA_FLAG_DISABLED;
  2229. }
  2230. /**
  2231. * sata_print_link_status - Print SATA link status
  2232. * @link: SATA link to printk link status about
  2233. *
  2234. * This function prints link speed and status of a SATA link.
  2235. *
  2236. * LOCKING:
  2237. * None.
  2238. */
  2239. void sata_print_link_status(struct ata_link *link)
  2240. {
  2241. u32 sstatus, scontrol, tmp;
  2242. if (sata_scr_read(link, SCR_STATUS, &sstatus))
  2243. return;
  2244. sata_scr_read(link, SCR_CONTROL, &scontrol);
  2245. if (ata_link_online(link)) {
  2246. tmp = (sstatus >> 4) & 0xf;
  2247. ata_link_printk(link, KERN_INFO,
  2248. "SATA link up %s (SStatus %X SControl %X)\n",
  2249. sata_spd_string(tmp), sstatus, scontrol);
  2250. } else {
  2251. ata_link_printk(link, KERN_INFO,
  2252. "SATA link down (SStatus %X SControl %X)\n",
  2253. sstatus, scontrol);
  2254. }
  2255. }
  2256. /**
  2257. * ata_dev_pair - return other device on cable
  2258. * @adev: device
  2259. *
  2260. * Obtain the other device on the same cable, or if none is
  2261. * present NULL is returned
  2262. */
  2263. struct ata_device *ata_dev_pair(struct ata_device *adev)
  2264. {
  2265. struct ata_link *link = adev->link;
  2266. struct ata_device *pair = &link->device[1 - adev->devno];
  2267. if (!ata_dev_enabled(pair))
  2268. return NULL;
  2269. return pair;
  2270. }
  2271. /**
  2272. * ata_port_disable - Disable port.
  2273. * @ap: Port to be disabled.
  2274. *
  2275. * Modify @ap data structure such that the system
  2276. * thinks that the entire port is disabled, and should
  2277. * never attempt to probe or communicate with devices
  2278. * on this port.
  2279. *
  2280. * LOCKING: host lock, or some other form of
  2281. * serialization.
  2282. */
  2283. void ata_port_disable(struct ata_port *ap)
  2284. {
  2285. ap->link.device[0].class = ATA_DEV_NONE;
  2286. ap->link.device[1].class = ATA_DEV_NONE;
  2287. ap->flags |= ATA_FLAG_DISABLED;
  2288. }
  2289. /**
  2290. * sata_down_spd_limit - adjust SATA spd limit downward
  2291. * @link: Link to adjust SATA spd limit for
  2292. *
  2293. * Adjust SATA spd limit of @link downward. Note that this
  2294. * function only adjusts the limit. The change must be applied
  2295. * using sata_set_spd().
  2296. *
  2297. * LOCKING:
  2298. * Inherited from caller.
  2299. *
  2300. * RETURNS:
  2301. * 0 on success, negative errno on failure
  2302. */
  2303. int sata_down_spd_limit(struct ata_link *link)
  2304. {
  2305. u32 sstatus, spd, mask;
  2306. int rc, highbit;
  2307. if (!sata_scr_valid(link))
  2308. return -EOPNOTSUPP;
  2309. /* If SCR can be read, use it to determine the current SPD.
  2310. * If not, use cached value in link->sata_spd.
  2311. */
  2312. rc = sata_scr_read(link, SCR_STATUS, &sstatus);
  2313. if (rc == 0)
  2314. spd = (sstatus >> 4) & 0xf;
  2315. else
  2316. spd = link->sata_spd;
  2317. mask = link->sata_spd_limit;
  2318. if (mask <= 1)
  2319. return -EINVAL;
  2320. /* unconditionally mask off the highest bit */
  2321. highbit = fls(mask) - 1;
  2322. mask &= ~(1 << highbit);
  2323. /* Mask off all speeds higher than or equal to the current
  2324. * one. Force 1.5Gbps if current SPD is not available.
  2325. */
  2326. if (spd > 1)
  2327. mask &= (1 << (spd - 1)) - 1;
  2328. else
  2329. mask &= 1;
  2330. /* were we already at the bottom? */
  2331. if (!mask)
  2332. return -EINVAL;
  2333. link->sata_spd_limit = mask;
  2334. ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
  2335. sata_spd_string(fls(mask)));
  2336. return 0;
  2337. }
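/* Compute the SPD target from link->sata_spd_limit, patch it into
 * @scontrol and return non-zero if it differs from the current value,
 * i.e. if a hardreset is needed to apply the new speed limit.
 */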
  2338. static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
  2339. {
  2340. struct ata_link *host_link = &link->ap->link;
  2341. u32 limit, target, spd;
  2342. limit = link->sata_spd_limit;
  2343. /* Don't configure downstream link faster than upstream link.
  2344. * It doesn't speed up anything and some PMPs choke on such
  2345. * configuration.
  2346. */
  2347. if (!ata_is_host_link(link) && host_link->sata_spd)
  2348. limit &= (1 << host_link->sata_spd) - 1;
  2349. if (limit == UINT_MAX)
  2350. target = 0;
  2351. else
  2352. target = fls(limit);
  2353. spd = (*scontrol >> 4) & 0xf;
  2354. *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
  2355. return spd != target;
  2356. }
  2357. /**
  2358. * sata_set_spd_needed - is SATA spd configuration needed
  2359. * @link: Link in question
  2360. *
  2361. * Test whether the spd limit in SControl matches
  2362. * @link->sata_spd_limit. This function is used to determine
  2363. * whether hardreset is necessary to apply SATA spd
  2364. * configuration.
  2365. *
  2366. * LOCKING:
  2367. * Inherited from caller.
  2368. *
  2369. * RETURNS:
  2370. * 1 if SATA spd configuration is needed, 0 otherwise.
  2371. */
  2372. int sata_set_spd_needed(struct ata_link *link)
  2373. {
  2374. u32 scontrol;
  2375. if (sata_scr_read(link, SCR_CONTROL, &scontrol))
  2376. return 1;
  2377. return __sata_set_spd_needed(link, &scontrol);
  2378. }
  2379. /**
  2380. * sata_set_spd - set SATA spd according to spd limit
  2381. * @link: Link to set SATA spd for
  2382. *
  2383. * Set SATA spd of @link according to sata_spd_limit.
  2384. *
  2385. * LOCKING:
  2386. * Inherited from caller.
  2387. *
  2388. * RETURNS:
  2389. * 0 if spd doesn't need to be changed, 1 if spd has been
  2390. * changed. Negative errno if SCR registers are inaccessible.
  2391. */
  2392. int sata_set_spd(struct ata_link *link)
  2393. {
  2394. u32 scontrol;
  2395. int rc;
  2396. if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
  2397. return rc;
  2398. if (!__sata_set_spd_needed(link, &scontrol))
  2399. return 0;
  2400. if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
  2401. return rc;
  2402. return 1;
  2403. }
  2404. /*
  2405. * This mode timing computation functionality is ported over from
  2406. * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
  2407. */
  2408. /*
  2409. * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
  2410. * These were taken from ATA/ATAPI-6 standard, rev 0a, except
  2411. * for UDMA6, which is currently supported only by Maxtor drives.
  2412. *
  2413. * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
  2414. */
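/* Each entry gives, in order: mode, setup, act8b, rec8b, cyc8b, active,
 * recover, cycle and udma, all in nanoseconds; 0 means the value does
 * not apply to that transfer mode.
 */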
  2415. static const struct ata_timing ata_timing[] = {
  2416. { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
  2417. { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
  2418. { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
  2419. { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
  2420. { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
  2421. { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
  2422. { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
  2423. { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
  2424. { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
  2425. /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
  2426. { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
  2427. { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
  2428. { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
  2429. { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
  2430. { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
  2431. { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
  2432. { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
  2433. { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
  2434. { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
  2435. { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
  2436. { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
  2437. { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
  2438. { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
  2439. /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
  2440. { 0xFF }
  2441. };
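/* ENOUGH() rounds a duration up to a whole number of clock periods of
 * length 'unit'; EZ() does the same but keeps an unspecified value of
 * 0 as 0.
 */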
  2442. #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
  2443. #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
  2444. static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
  2445. {
  2446. q->setup = EZ(t->setup * 1000, T);
  2447. q->act8b = EZ(t->act8b * 1000, T);
  2448. q->rec8b = EZ(t->rec8b * 1000, T);
  2449. q->cyc8b = EZ(t->cyc8b * 1000, T);
  2450. q->active = EZ(t->active * 1000, T);
  2451. q->recover = EZ(t->recover * 1000, T);
  2452. q->cycle = EZ(t->cycle * 1000, T);
  2453. q->udma = EZ(t->udma * 1000, UT);
  2454. }
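/* Merge timings @a and @b into @m, taking the larger (slower) value of
 * each field selected by the ATA_TIMING_* bits in @what.
 */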
  2455. void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
  2456. struct ata_timing *m, unsigned int what)
  2457. {
  2458. if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
  2459. if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
  2460. if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
  2461. if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
  2462. if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
  2463. if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
  2464. if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
  2465. if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
  2466. }
  2467. static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
  2468. {
  2469. const struct ata_timing *t;
  2470. for (t = ata_timing; t->mode != speed; t++)
  2471. if (t->mode == 0xFF)
  2472. return NULL;
  2473. return t;
  2474. }
  2475. int ata_timing_compute(struct ata_device *adev, unsigned short speed,
  2476. struct ata_timing *t, int T, int UT)
  2477. {
  2478. const struct ata_timing *s;
  2479. struct ata_timing p;
  2480. /*
  2481. * Find the mode.
  2482. */
  2483. if (!(s = ata_timing_find_mode(speed)))
  2484. return -EINVAL;
  2485. memcpy(t, s, sizeof(*s));
  2486. /*
  2487. * If the drive is an EIDE drive, it can tell us it needs extended
  2488. * PIO/MW_DMA cycle timing.
  2489. */
  2490. if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
  2491. memset(&p, 0, sizeof(p));
  2492. if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
  2493. if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
  2494. else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
  2495. } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
  2496. p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
  2497. }
  2498. ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
  2499. }
  2500. /*
  2501. * Convert the timing to bus clock counts.
  2502. */
  2503. ata_timing_quantize(t, t, T, UT);
/*
 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 * S.M.A.R.T. and some other commands. We have to ensure that the
 * DMA cycle timing is slower than or equal to the fastest PIO timing.
 */
  2509. if (speed > XFER_PIO_6) {
  2510. ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
  2511. ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
  2512. }
  2513. /*
  2514. * Lengthen active & recovery time so that cycle time is correct.
  2515. */
  2516. if (t->act8b + t->rec8b < t->cyc8b) {
  2517. t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
  2518. t->rec8b = t->cyc8b - t->act8b;
  2519. }
  2520. if (t->active + t->recover < t->cycle) {
  2521. t->active += (t->cycle - (t->active + t->recover)) / 2;
  2522. t->recover = t->cycle - t->active;
  2523. }
/* In a few cases quantisation may produce enough errors to
   leave t->cycle too low for the sum of active and recovery;
   if so, we must correct this. */
  2527. if (t->active + t->recover > t->cycle)
  2528. t->cycle = t->active + t->recover;
  2529. return 0;
  2530. }
  2531. /**
  2532. * ata_down_xfermask_limit - adjust dev xfer masks downward
  2533. * @dev: Device to adjust xfer masks
  2534. * @sel: ATA_DNXFER_* selector
  2535. *
  2536. * Adjust xfer masks of @dev downward. Note that this function
  2537. * does not apply the change. Invoking ata_set_mode() afterwards
  2538. * will apply the limit.
  2539. *
  2540. * LOCKING:
  2541. * Inherited from caller.
  2542. *
  2543. * RETURNS:
  2544. * 0 on success, negative errno on failure
  2545. */
  2546. int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
  2547. {
  2548. char buf[32];
  2549. unsigned int orig_mask, xfer_mask;
  2550. unsigned int pio_mask, mwdma_mask, udma_mask;
  2551. int quiet, highbit;
  2552. quiet = !!(sel & ATA_DNXFER_QUIET);
  2553. sel &= ~ATA_DNXFER_QUIET;
  2554. xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
  2555. dev->mwdma_mask,
  2556. dev->udma_mask);
  2557. ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
  2558. switch (sel) {
  2559. case ATA_DNXFER_PIO:
  2560. highbit = fls(pio_mask) - 1;
  2561. pio_mask &= ~(1 << highbit);
  2562. break;
  2563. case ATA_DNXFER_DMA:
  2564. if (udma_mask) {
  2565. highbit = fls(udma_mask) - 1;
  2566. udma_mask &= ~(1 << highbit);
  2567. if (!udma_mask)
  2568. return -ENOENT;
  2569. } else if (mwdma_mask) {
  2570. highbit = fls(mwdma_mask) - 1;
  2571. mwdma_mask &= ~(1 << highbit);
  2572. if (!mwdma_mask)
  2573. return -ENOENT;
  2574. }
  2575. break;
  2576. case ATA_DNXFER_40C:
  2577. udma_mask &= ATA_UDMA_MASK_40C;
  2578. break;
  2579. case ATA_DNXFER_FORCE_PIO0:
  2580. pio_mask &= 1;
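/* fall through - also clear the DMA masks */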
  2581. case ATA_DNXFER_FORCE_PIO:
  2582. mwdma_mask = 0;
  2583. udma_mask = 0;
  2584. break;
  2585. default:
  2586. BUG();
  2587. }
  2588. xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
  2589. if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
  2590. return -ENOENT;
  2591. if (!quiet) {
  2592. if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
  2593. snprintf(buf, sizeof(buf), "%s:%s",
  2594. ata_mode_string(xfer_mask),
  2595. ata_mode_string(xfer_mask & ATA_MASK_PIO));
  2596. else
  2597. snprintf(buf, sizeof(buf), "%s",
  2598. ata_mode_string(xfer_mask));
  2599. ata_dev_printk(dev, KERN_WARNING,
  2600. "limiting speed to %s\n", buf);
  2601. }
  2602. ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
  2603. &dev->udma_mask);
  2604. return 0;
  2605. }
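/* Program the transfer mode currently selected in @dev: issue SET
 * FEATURES - XFER MODE, tolerate the known classes of devices that
 * reject it (old CFA in PIO, PIO0-2 devices without IORDY, early MWDMA
 * drives already in MWDMA0) and revalidate the device afterwards.
 */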
  2606. static int ata_dev_set_mode(struct ata_device *dev)
  2607. {
  2608. struct ata_eh_context *ehc = &dev->link->eh_context;
  2609. unsigned int err_mask;
  2610. int rc;
  2611. dev->flags &= ~ATA_DFLAG_PIO;
  2612. if (dev->xfer_shift == ATA_SHIFT_PIO)
  2613. dev->flags |= ATA_DFLAG_PIO;
  2614. err_mask = ata_dev_set_xfermode(dev);
  2615. /* Old CFA may refuse this command, which is just fine */
  2616. if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
  2617. err_mask &= ~AC_ERR_DEV;
  2618. /* Some very old devices and some bad newer ones fail any kind of
  2619. SET_XFERMODE request but support PIO0-2 timings and no IORDY */
  2620. if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
  2621. dev->pio_mode <= XFER_PIO_2)
  2622. err_mask &= ~AC_ERR_DEV;
  2623. /* Early MWDMA devices do DMA but don't allow DMA mode setting.
  2624. Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
  2625. if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
  2626. dev->dma_mode == XFER_MW_DMA_0 &&
  2627. (dev->id[63] >> 8) & 1)
  2628. err_mask &= ~AC_ERR_DEV;
  2629. if (err_mask) {
  2630. ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
  2631. "(err_mask=0x%x)\n", err_mask);
  2632. return -EIO;
  2633. }
  2634. ehc->i.flags |= ATA_EHI_POST_SETMODE;
  2635. rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
  2636. ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
  2637. if (rc)
  2638. return rc;
  2639. DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
  2640. dev->xfer_shift, (int)dev->xfer_mode);
  2641. ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
  2642. ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
  2643. return 0;
  2644. }
  2645. /**
  2646. * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
  2647. * @link: link on which timings will be programmed
 * @r_failed_dev: out parameter for failed device
  2649. *
  2650. * Standard implementation of the function used to tune and set
  2651. * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
  2652. * ata_dev_set_mode() fails, pointer to the failing device is
  2653. * returned in @r_failed_dev.
  2654. *
  2655. * LOCKING:
  2656. * PCI/etc. bus probe sem.
  2657. *
  2658. * RETURNS:
  2659. * 0 on success, negative errno otherwise
  2660. */
  2661. int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
  2662. {
  2663. struct ata_port *ap = link->ap;
  2664. struct ata_device *dev;
  2665. int rc = 0, used_dma = 0, found = 0;
  2666. /* step 1: calculate xfer_mask */
  2667. ata_link_for_each_dev(dev, link) {
  2668. unsigned int pio_mask, dma_mask;
  2669. unsigned int mode_mask;
  2670. if (!ata_dev_enabled(dev))
  2671. continue;
  2672. mode_mask = ATA_DMA_MASK_ATA;
  2673. if (dev->class == ATA_DEV_ATAPI)
  2674. mode_mask = ATA_DMA_MASK_ATAPI;
  2675. else if (ata_id_is_cfa(dev->id))
  2676. mode_mask = ATA_DMA_MASK_CFA;
  2677. ata_dev_xfermask(dev);
  2678. pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
  2679. dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
  2680. if (libata_dma_mask & mode_mask)
  2681. dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
  2682. else
  2683. dma_mask = 0;
  2684. dev->pio_mode = ata_xfer_mask2mode(pio_mask);
  2685. dev->dma_mode = ata_xfer_mask2mode(dma_mask);
  2686. found = 1;
  2687. if (dev->dma_mode)
  2688. used_dma = 1;
  2689. }
  2690. if (!found)
  2691. goto out;
  2692. /* step 2: always set host PIO timings */
  2693. ata_link_for_each_dev(dev, link) {
  2694. if (!ata_dev_enabled(dev))
  2695. continue;
  2696. if (!dev->pio_mode) {
  2697. ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
  2698. rc = -EINVAL;
  2699. goto out;
  2700. }
  2701. dev->xfer_mode = dev->pio_mode;
  2702. dev->xfer_shift = ATA_SHIFT_PIO;
  2703. if (ap->ops->set_piomode)
  2704. ap->ops->set_piomode(ap, dev);
  2705. }
  2706. /* step 3: set host DMA timings */
  2707. ata_link_for_each_dev(dev, link) {
  2708. if (!ata_dev_enabled(dev) || !dev->dma_mode)
  2709. continue;
  2710. dev->xfer_mode = dev->dma_mode;
  2711. dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
  2712. if (ap->ops->set_dmamode)
  2713. ap->ops->set_dmamode(ap, dev);
  2714. }
  2715. /* step 4: update devices' xfer mode */
  2716. ata_link_for_each_dev(dev, link) {
  2717. /* don't update suspended devices' xfer mode */
  2718. if (!ata_dev_enabled(dev))
  2719. continue;
  2720. rc = ata_dev_set_mode(dev);
  2721. if (rc)
  2722. goto out;
  2723. }
  2724. /* Record simplex status. If we selected DMA then the other
  2725. * host channels are not permitted to do so.
  2726. */
  2727. if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
  2728. ap->host->simplex_claimed = ap;
  2729. out:
  2730. if (rc)
  2731. *r_failed_dev = dev;
  2732. return rc;
  2733. }
  2734. /**
  2735. * ata_set_mode - Program timings and issue SET FEATURES - XFER
  2736. * @link: link on which timings will be programmed
 * @r_failed_dev: out parameter for failed device
  2738. *
  2739. * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
  2740. * ata_set_mode() fails, pointer to the failing device is
  2741. * returned in @r_failed_dev.
  2742. *
  2743. * LOCKING:
  2744. * PCI/etc. bus probe sem.
  2745. *
  2746. * RETURNS:
  2747. * 0 on success, negative errno otherwise
  2748. */
  2749. int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
  2750. {
  2751. struct ata_port *ap = link->ap;
  2752. /* has private set_mode? */
  2753. if (ap->ops->set_mode)
  2754. return ap->ops->set_mode(link, r_failed_dev);
  2755. return ata_do_set_mode(link, r_failed_dev);
  2756. }
  2757. /**
  2758. * ata_tf_to_host - issue ATA taskfile to host controller
  2759. * @ap: port to which command is being issued
  2760. * @tf: ATA taskfile register set
  2761. *
  2762. * Issues ATA taskfile register set to ATA host controller,
  2763. * with proper synchronization with interrupt handler and
  2764. * other threads.
  2765. *
  2766. * LOCKING:
  2767. * spin_lock_irqsave(host lock)
  2768. */
  2769. static inline void ata_tf_to_host(struct ata_port *ap,
  2770. const struct ata_taskfile *tf)
  2771. {
  2772. ap->ops->tf_load(ap, tf);
  2773. ap->ops->exec_command(ap, tf);
  2774. }
  2775. /**
  2776. * ata_busy_sleep - sleep until BSY clears, or timeout
  2777. * @ap: port containing status register to be polled
  2778. * @tmout_pat: impatience timeout
  2779. * @tmout: overall timeout
  2780. *
  2781. * Sleep until ATA Status register bit BSY clears,
  2782. * or a timeout occurs.
  2783. *
  2784. * LOCKING:
  2785. * Kernel thread context (may sleep).
  2786. *
  2787. * RETURNS:
  2788. * 0 on success, -errno otherwise.
  2789. */
  2790. int ata_busy_sleep(struct ata_port *ap,
  2791. unsigned long tmout_pat, unsigned long tmout)
  2792. {
  2793. unsigned long timer_start, timeout;
  2794. u8 status;
  2795. status = ata_busy_wait(ap, ATA_BUSY, 300);
  2796. timer_start = jiffies;
  2797. timeout = timer_start + tmout_pat;
  2798. while (status != 0xff && (status & ATA_BUSY) &&
  2799. time_before(jiffies, timeout)) {
  2800. msleep(50);
  2801. status = ata_busy_wait(ap, ATA_BUSY, 3);
  2802. }
  2803. if (status != 0xff && (status & ATA_BUSY))
  2804. ata_port_printk(ap, KERN_WARNING,
  2805. "port is slow to respond, please be patient "
  2806. "(Status 0x%x)\n", status);
  2807. timeout = timer_start + tmout;
  2808. while (status != 0xff && (status & ATA_BUSY) &&
  2809. time_before(jiffies, timeout)) {
  2810. msleep(50);
  2811. status = ata_chk_status(ap);
  2812. }
  2813. if (status == 0xff)
  2814. return -ENODEV;
  2815. if (status & ATA_BUSY) {
  2816. ata_port_printk(ap, KERN_ERR, "port failed to respond "
  2817. "(%lu secs, Status 0x%x)\n",
  2818. tmout / HZ, status);
  2819. return -EBUSY;
  2820. }
  2821. return 0;
  2822. }
  2823. /**
  2824. * ata_wait_after_reset - wait before checking status after reset
  2825. * @ap: port containing status register to be polled
  2826. * @deadline: deadline jiffies for the operation
  2827. *
  2828. * After reset, we need to pause a while before reading status.
 * Also, certain combinations of controller and device report 0xff
 * for some duration (e.g. until the SATA PHY is up and running),
 * which is interpreted as an empty port in the ATA world. This
 * function also waits for such devices to get out of the 0xff
 * status.
  2834. *
  2835. * LOCKING:
  2836. * Kernel thread context (may sleep).
  2837. */
  2838. void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
  2839. {
  2840. unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
  2841. if (time_before(until, deadline))
  2842. deadline = until;
  2843. /* Spec mandates ">= 2ms" before checking status. We wait
  2844. * 150ms, because that was the magic delay used for ATAPI
  2845. * devices in Hale Landis's ATADRVR, for the period of time
  2846. * between when the ATA command register is written, and then
  2847. * status is checked. Because waiting for "a while" before
  2848. * checking status is fine, post SRST, we perform this magic
  2849. * delay here as well.
  2850. *
  2851. * Old drivers/ide uses the 2mS rule and then waits for ready.
  2852. */
  2853. msleep(150);
  2854. /* Wait for 0xff to clear. Some SATA devices take a long time
  2855. * to clear 0xff after reset. For example, HHD424020F7SV00
 * iVDR needs >= 800ms while Quantum GoVault needs even more
  2857. * than that.
  2858. *
  2859. * Note that some PATA controllers (pata_ali) explode if
  2860. * status register is read more than once when there's no
  2861. * device attached.
  2862. */
  2863. if (ap->flags & ATA_FLAG_SATA) {
  2864. while (1) {
  2865. u8 status = ata_chk_status(ap);
  2866. if (status != 0xff || time_after(jiffies, deadline))
  2867. return;
  2868. msleep(50);
  2869. }
  2870. }
  2871. }
  2872. /**
  2873. * ata_wait_ready - sleep until BSY clears, or timeout
  2874. * @ap: port containing status register to be polled
  2875. * @deadline: deadline jiffies for the operation
  2876. *
  2877. * Sleep until ATA Status register bit BSY clears, or timeout
  2878. * occurs.
  2879. *
  2880. * LOCKING:
  2881. * Kernel thread context (may sleep).
  2882. *
  2883. * RETURNS:
  2884. * 0 on success, -errno otherwise.
  2885. */
  2886. int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
  2887. {
  2888. unsigned long start = jiffies;
  2889. int warned = 0;
  2890. while (1) {
  2891. u8 status = ata_chk_status(ap);
  2892. unsigned long now = jiffies;
  2893. if (!(status & ATA_BUSY))
  2894. return 0;
  2895. if (!ata_link_online(&ap->link) && status == 0xff)
  2896. return -ENODEV;
  2897. if (time_after(now, deadline))
  2898. return -EBUSY;
  2899. if (!warned && time_after(now, start + 5 * HZ) &&
  2900. (deadline - now > 3 * HZ)) {
  2901. ata_port_printk(ap, KERN_WARNING,
  2902. "port is slow to respond, please be patient "
  2903. "(Status 0x%x)\n", status);
  2904. warned = 1;
  2905. }
  2906. msleep(50);
  2907. }
  2908. }
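/* After a bus reset, wait for the devices in @devmask to come out of
 * BSY. Device 1 is first given a short window to present its post-reset
 * signature in the nsect/lbal registers before we wait on it.
 */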
  2909. static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
  2910. unsigned long deadline)
  2911. {
  2912. struct ata_ioports *ioaddr = &ap->ioaddr;
  2913. unsigned int dev0 = devmask & (1 << 0);
  2914. unsigned int dev1 = devmask & (1 << 1);
  2915. int rc, ret = 0;
  2916. /* if device 0 was found in ata_devchk, wait for its
  2917. * BSY bit to clear
  2918. */
  2919. if (dev0) {
  2920. rc = ata_wait_ready(ap, deadline);
  2921. if (rc) {
  2922. if (rc != -ENODEV)
  2923. return rc;
  2924. ret = rc;
  2925. }
  2926. }
  2927. /* if device 1 was found in ata_devchk, wait for register
  2928. * access briefly, then wait for BSY to clear.
  2929. */
  2930. if (dev1) {
  2931. int i;
  2932. ap->ops->dev_select(ap, 1);
  2933. /* Wait for register access. Some ATAPI devices fail
  2934. * to set nsect/lbal after reset, so don't waste too
  2935. * much time on it. We're gonna wait for !BSY anyway.
  2936. */
  2937. for (i = 0; i < 2; i++) {
  2938. u8 nsect, lbal;
  2939. nsect = ioread8(ioaddr->nsect_addr);
  2940. lbal = ioread8(ioaddr->lbal_addr);
  2941. if ((nsect == 1) && (lbal == 1))
  2942. break;
  2943. msleep(50); /* give drive a breather */
  2944. }
  2945. rc = ata_wait_ready(ap, deadline);
  2946. if (rc) {
  2947. if (rc != -ENODEV)
  2948. return rc;
  2949. ret = rc;
  2950. }
  2951. }
  2952. /* is all this really necessary? */
  2953. ap->ops->dev_select(ap, 0);
  2954. if (dev1)
  2955. ap->ops->dev_select(ap, 1);
  2956. if (dev0)
  2957. ap->ops->dev_select(ap, 0);
  2958. return ret;
  2959. }
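/* Pulse SRST in the device control register to reset the bus, wait for
 * the devices to recover, and hand off to ata_bus_post_reset() to wait
 * for the devices in @devmask to become ready.
 */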
  2960. static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
  2961. unsigned long deadline)
  2962. {
  2963. struct ata_ioports *ioaddr = &ap->ioaddr;
  2964. DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
  2965. /* software reset. causes dev0 to be selected */
  2966. iowrite8(ap->ctl, ioaddr->ctl_addr);
  2967. udelay(20); /* FIXME: flush */
  2968. iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
  2969. udelay(20); /* FIXME: flush */
  2970. iowrite8(ap->ctl, ioaddr->ctl_addr);
  2971. /* wait a while before checking status */
  2972. ata_wait_after_reset(ap, deadline);
  2973. /* Before we perform post reset processing we want to see if
  2974. * the bus shows 0xFF because the odd clown forgets the D7
  2975. * pulldown resistor.
  2976. */
  2977. if (ata_chk_status(ap) == 0xFF)
  2978. return -ENODEV;
  2979. return ata_bus_post_reset(ap, devmask, deadline);
  2980. }
  2981. /**
  2982. * ata_bus_reset - reset host port and associated ATA channel
  2983. * @ap: port to reset
  2984. *
  2985. * This is typically the first time we actually start issuing
  2986. * commands to the ATA channel. We wait for BSY to clear, then
  2987. * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
  2988. * result. Determine what devices, if any, are on the channel
  2989. * by looking at the device 0/1 error register. Look at the signature
  2990. * stored in each device's taskfile registers, to determine if
  2991. * the device is ATA or ATAPI.
  2992. *
  2993. * LOCKING:
  2994. * PCI/etc. bus probe sem.
  2995. * Obtains host lock.
  2996. *
  2997. * SIDE EFFECTS:
  2998. * Sets ATA_FLAG_DISABLED if bus reset fails.
  2999. */
  3000. void ata_bus_reset(struct ata_port *ap)
  3001. {
  3002. struct ata_device *device = ap->link.device;
  3003. struct ata_ioports *ioaddr = &ap->ioaddr;
  3004. unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
  3005. u8 err;
  3006. unsigned int dev0, dev1 = 0, devmask = 0;
  3007. int rc;
  3008. DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
  3009. /* determine if device 0/1 are present */
  3010. if (ap->flags & ATA_FLAG_SATA_RESET)
  3011. dev0 = 1;
  3012. else {
  3013. dev0 = ata_devchk(ap, 0);
  3014. if (slave_possible)
  3015. dev1 = ata_devchk(ap, 1);
  3016. }
  3017. if (dev0)
  3018. devmask |= (1 << 0);
  3019. if (dev1)
  3020. devmask |= (1 << 1);
  3021. /* select device 0 again */
  3022. ap->ops->dev_select(ap, 0);
  3023. /* issue bus reset */
  3024. if (ap->flags & ATA_FLAG_SRST) {
  3025. rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
  3026. if (rc && rc != -ENODEV)
  3027. goto err_out;
  3028. }
  3029. /*
  3030. * determine by signature whether we have ATA or ATAPI devices
  3031. */
  3032. device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
  3033. if ((slave_possible) && (err != 0x81))
  3034. device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
  3035. /* is double-select really necessary? */
  3036. if (device[1].class != ATA_DEV_NONE)
  3037. ap->ops->dev_select(ap, 1);
  3038. if (device[0].class != ATA_DEV_NONE)
  3039. ap->ops->dev_select(ap, 0);
  3040. /* if no devices were detected, disable this port */
  3041. if ((device[0].class == ATA_DEV_NONE) &&
  3042. (device[1].class == ATA_DEV_NONE))
  3043. goto err_out;
  3044. if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
  3045. /* set up device control for ATA_FLAG_SATA_RESET */
  3046. iowrite8(ap->ctl, ioaddr->ctl_addr);
  3047. }
  3048. DPRINTK("EXIT\n");
  3049. return;
  3050. err_out:
  3051. ata_port_printk(ap, KERN_ERR, "disabling port\n");
  3052. ata_port_disable(ap);
  3053. DPRINTK("EXIT\n");
  3054. }
  3055. /**
  3056. * sata_link_debounce - debounce SATA phy status
  3057. * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
  3059. * @deadline: deadline jiffies for the operation
  3060. *
 * Make sure SStatus of @link reaches a stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout. The timeout constrains the
 * beginning of the stable state. Because DET gets stuck at 1 on
 * some controllers after hot unplugging, this function waits
 * until the timeout expires and then returns 0 if DET is stable at 1.
  3067. *
  3068. * @timeout is further limited by @deadline. The sooner of the
  3069. * two is used.
  3070. *
  3071. * LOCKING:
  3072. * Kernel thread context (may sleep)
  3073. *
  3074. * RETURNS:
  3075. * 0 on success, -errno on failure.
  3076. */
  3077. int sata_link_debounce(struct ata_link *link, const unsigned long *params,
  3078. unsigned long deadline)
  3079. {
  3080. unsigned long interval_msec = params[0];
  3081. unsigned long duration = msecs_to_jiffies(params[1]);
  3082. unsigned long last_jiffies, t;
  3083. u32 last, cur;
  3084. int rc;
  3085. t = jiffies + msecs_to_jiffies(params[2]);
  3086. if (time_before(t, deadline))
  3087. deadline = t;
  3088. if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
  3089. return rc;
  3090. cur &= 0xf;
  3091. last = cur;
  3092. last_jiffies = jiffies;
  3093. while (1) {
  3094. msleep(interval_msec);
  3095. if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
  3096. return rc;
  3097. cur &= 0xf;
  3098. /* DET stable? */
  3099. if (cur == last) {
  3100. if (cur == 1 && time_before(jiffies, deadline))
  3101. continue;
  3102. if (time_after(jiffies, last_jiffies + duration))
  3103. return 0;
  3104. continue;
  3105. }
  3106. /* unstable, start over */
  3107. last = cur;
  3108. last_jiffies = jiffies;
  3109. /* Check deadline. If debouncing failed, return
  3110. * -EPIPE to tell upper layer to lower link speed.
  3111. */
  3112. if (time_after(jiffies, deadline))
  3113. return -EPIPE;
  3114. }
  3115. }
  3116. /**
  3117. * sata_link_resume - resume SATA link
  3118. * @link: ATA link to resume SATA
 * @params: timing parameters { interval, duration, timeout } in msec
  3120. * @deadline: deadline jiffies for the operation
  3121. *
  3122. * Resume SATA phy @link and debounce it.
  3123. *
  3124. * LOCKING:
  3125. * Kernel thread context (may sleep)
  3126. *
  3127. * RETURNS:
  3128. * 0 on success, -errno on failure.
  3129. */
  3130. int sata_link_resume(struct ata_link *link, const unsigned long *params,
  3131. unsigned long deadline)
  3132. {
  3133. u32 scontrol;
  3134. int rc;
  3135. if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
  3136. return rc;
  3137. scontrol = (scontrol & 0x0f0) | 0x300;
  3138. if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
  3139. return rc;
  3140. /* Some PHYs react badly if SStatus is pounded immediately
  3141. * after resuming. Delay 200ms before debouncing.
  3142. */
  3143. msleep(200);
  3144. return sata_link_debounce(link, params, deadline);
  3145. }
  3146. /**
  3147. * ata_std_prereset - prepare for reset
  3148. * @link: ATA link to be reset
  3149. * @deadline: deadline jiffies for the operation
  3150. *
  3151. * @link is about to be reset. Initialize it. Failure from
 * prereset makes libata abort the whole reset sequence and give up
 * that port, so prereset should be best-effort. It does its
 * best to prepare for the reset sequence but if things go wrong, it
  3155. * should just whine, not fail.
  3156. *
  3157. * LOCKING:
  3158. * Kernel thread context (may sleep)
  3159. *
  3160. * RETURNS:
  3161. * 0 on success, -errno otherwise.
  3162. */
  3163. int ata_std_prereset(struct ata_link *link, unsigned long deadline)
  3164. {
  3165. struct ata_port *ap = link->ap;
  3166. struct ata_eh_context *ehc = &link->eh_context;
  3167. const unsigned long *timing = sata_ehc_deb_timing(ehc);
  3168. int rc;
  3169. /* handle link resume */
  3170. if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
  3171. (link->flags & ATA_LFLAG_HRST_TO_RESUME))
  3172. ehc->i.action |= ATA_EH_HARDRESET;
  3173. /* Some PMPs don't work with only SRST, force hardreset if PMP
  3174. * is supported.
  3175. */
  3176. if (ap->flags & ATA_FLAG_PMP)
  3177. ehc->i.action |= ATA_EH_HARDRESET;
  3178. /* if we're about to do hardreset, nothing more to do */
  3179. if (ehc->i.action & ATA_EH_HARDRESET)
  3180. return 0;
  3181. /* if SATA, resume link */
  3182. if (ap->flags & ATA_FLAG_SATA) {
  3183. rc = sata_link_resume(link, timing, deadline);
  3184. /* whine about phy resume failure but proceed */
  3185. if (rc && rc != -EOPNOTSUPP)
  3186. ata_link_printk(link, KERN_WARNING, "failed to resume "
  3187. "link for reset (errno=%d)\n", rc);
  3188. }
  3189. /* Wait for !BSY if the controller can wait for the first D2H
  3190. * Reg FIS and we don't know that no device is attached.
  3191. */
  3192. if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
  3193. rc = ata_wait_ready(ap, deadline);
  3194. if (rc && rc != -ENODEV) {
  3195. ata_link_printk(link, KERN_WARNING, "device not ready "
  3196. "(errno=%d), forcing hardreset\n", rc);
  3197. ehc->i.action |= ATA_EH_HARDRESET;
  3198. }
  3199. }
  3200. return 0;
  3201. }
  3202. /**
  3203. * ata_std_softreset - reset host port via ATA SRST
  3204. * @link: ATA link to reset
  3205. * @classes: resulting classes of attached devices
  3206. * @deadline: deadline jiffies for the operation
  3207. *
  3208. * Reset host port using ATA SRST.
  3209. *
  3210. * LOCKING:
  3211. * Kernel thread context (may sleep)
  3212. *
  3213. * RETURNS:
  3214. * 0 on success, -errno otherwise.
  3215. */
  3216. int ata_std_softreset(struct ata_link *link, unsigned int *classes,
  3217. unsigned long deadline)
  3218. {
  3219. struct ata_port *ap = link->ap;
  3220. unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
  3221. unsigned int devmask = 0;
  3222. int rc;
  3223. u8 err;
  3224. DPRINTK("ENTER\n");
  3225. if (ata_link_offline(link)) {
  3226. classes[0] = ATA_DEV_NONE;
  3227. goto out;
  3228. }
  3229. /* determine if device 0/1 are present */
  3230. if (ata_devchk(ap, 0))
  3231. devmask |= (1 << 0);
  3232. if (slave_possible && ata_devchk(ap, 1))
  3233. devmask |= (1 << 1);
  3234. /* select device 0 again */
  3235. ap->ops->dev_select(ap, 0);
  3236. /* issue bus reset */
  3237. DPRINTK("about to softreset, devmask=%x\n", devmask);
  3238. rc = ata_bus_softreset(ap, devmask, deadline);
  3239. /* if link is occupied, -ENODEV too is an error */
  3240. if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
  3241. ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
  3242. return rc;
  3243. }
  3244. /* determine by signature whether we have ATA or ATAPI devices */
  3245. classes[0] = ata_dev_try_classify(&link->device[0],
  3246. devmask & (1 << 0), &err);
  3247. if (slave_possible && err != 0x81)
  3248. classes[1] = ata_dev_try_classify(&link->device[1],
  3249. devmask & (1 << 1), &err);
  3250. out:
  3251. DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
  3252. return 0;
  3253. }
  3254. /**
  3255. * sata_link_hardreset - reset link via SATA phy reset
  3256. * @link: link to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
  3258. * @deadline: deadline jiffies for the operation
  3259. *
  3260. * SATA phy-reset @link using DET bits of SControl register.
  3261. *
  3262. * LOCKING:
  3263. * Kernel thread context (may sleep)
  3264. *
  3265. * RETURNS:
  3266. * 0 on success, -errno otherwise.
  3267. */
  3268. int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
  3269. unsigned long deadline)
  3270. {
  3271. u32 scontrol;
  3272. int rc;
  3273. DPRINTK("ENTER\n");
  3274. if (sata_set_spd_needed(link)) {
  3275. /* SATA spec says nothing about how to reconfigure
  3276. * spd. To be on the safe side, turn off phy during
  3277. * reconfiguration. This works for at least ICH7 AHCI
  3278. * and Sil3124.
  3279. */
  3280. if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
  3281. goto out;
  3282. scontrol = (scontrol & 0x0f0) | 0x304;
  3283. if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
  3284. goto out;
  3285. sata_set_spd(link);
  3286. }
  3287. /* issue phy wake/reset */
  3288. if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
  3289. goto out;
  3290. scontrol = (scontrol & 0x0f0) | 0x301;
  3291. if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
  3292. goto out;
  3293. /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
  3294. * 10.4.2 says at least 1 ms.
  3295. */
  3296. msleep(1);
  3297. /* bring link back */
  3298. rc = sata_link_resume(link, timing, deadline);
  3299. out:
  3300. DPRINTK("EXIT, rc=%d\n", rc);
  3301. return rc;
  3302. }
  3303. /**
  3304. * sata_std_hardreset - reset host port via SATA phy reset
  3305. * @link: link to reset
  3306. * @class: resulting class of attached device
  3307. * @deadline: deadline jiffies for the operation
  3308. *
  3309. * SATA phy-reset host port using DET bits of SControl register,
  3310. * wait for !BSY and classify the attached device.
  3311. *
  3312. * LOCKING:
  3313. * Kernel thread context (may sleep)
  3314. *
  3315. * RETURNS:
  3316. * 0 on success, -errno otherwise.
  3317. */
  3318. int sata_std_hardreset(struct ata_link *link, unsigned int *class,
  3319. unsigned long deadline)
  3320. {
  3321. struct ata_port *ap = link->ap;
  3322. const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
  3323. int rc;
  3324. DPRINTK("ENTER\n");
  3325. /* do hardreset */
  3326. rc = sata_link_hardreset(link, timing, deadline);
  3327. if (rc) {
  3328. ata_link_printk(link, KERN_ERR,
  3329. "COMRESET failed (errno=%d)\n", rc);
  3330. return rc;
  3331. }
  3332. /* TODO: phy layer with polling, timeouts, etc. */
  3333. if (ata_link_offline(link)) {
  3334. *class = ATA_DEV_NONE;
  3335. DPRINTK("EXIT, link offline\n");
  3336. return 0;
  3337. }
  3338. /* wait a while before checking status */
  3339. ata_wait_after_reset(ap, deadline);
  3340. /* If PMP is supported, we have to do follow-up SRST. Note
  3341. * that some PMPs don't send D2H Reg FIS after hardreset at
  3342. * all if the first port is empty. Wait for it just for a
  3343. * second and request follow-up SRST.
  3344. */
  3345. if (ap->flags & ATA_FLAG_PMP) {
  3346. ata_wait_ready(ap, jiffies + HZ);
  3347. return -EAGAIN;
  3348. }
  3349. rc = ata_wait_ready(ap, deadline);
  3350. /* link occupied, -ENODEV too is an error */
  3351. if (rc) {
  3352. ata_link_printk(link, KERN_ERR,
  3353. "COMRESET failed (errno=%d)\n", rc);
  3354. return rc;
  3355. }
  3356. ap->ops->dev_select(ap, 0); /* probably unnecessary */
  3357. *class = ata_dev_try_classify(link->device, 1, NULL);
  3358. DPRINTK("EXIT, class=%u\n", *class);
  3359. return 0;
  3360. }
  3361. /**
  3362. * ata_std_postreset - standard postreset callback
  3363. * @link: the target ata_link
  3364. * @classes: classes of attached devices
  3365. *
  3366. * This function is invoked after a successful reset. Note that
  3367. * the device might have been reset more than once using
  3368. * different reset methods before postreset is invoked.
  3369. *
  3370. * LOCKING:
  3371. * Kernel thread context (may sleep)
  3372. */
  3373. void ata_std_postreset(struct ata_link *link, unsigned int *classes)
  3374. {
  3375. struct ata_port *ap = link->ap;
  3376. u32 serror;
  3377. DPRINTK("ENTER\n");
  3378. /* print link status */
  3379. sata_print_link_status(link);
  3380. /* clear SError */
  3381. if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
  3382. sata_scr_write(link, SCR_ERROR, serror);
  3383. link->eh_info.serror = 0;
  3384. /* is double-select really necessary? */
  3385. if (classes[0] != ATA_DEV_NONE)
  3386. ap->ops->dev_select(ap, 1);
  3387. if (classes[1] != ATA_DEV_NONE)
  3388. ap->ops->dev_select(ap, 0);
  3389. /* bail out if no device is present */
  3390. if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
  3391. DPRINTK("EXIT, no device\n");
  3392. return;
  3393. }
  3394. /* set up device control */
  3395. if (ap->ioaddr.ctl_addr)
  3396. iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
  3397. DPRINTK("EXIT\n");
  3398. }
  3399. /**
  3400. * ata_dev_same_device - Determine whether new ID matches configured device
  3401. * @dev: device to compare against
  3402. * @new_class: class of the new device
  3403. * @new_id: IDENTIFY page of the new device
  3404. *
  3405. * Compare @new_class and @new_id against @dev and determine
  3406. * whether @dev is the device indicated by @new_class and
  3407. * @new_id.
  3408. *
  3409. * LOCKING:
  3410. * None.
  3411. *
  3412. * RETURNS:
  3413. * 1 if @dev matches @new_class and @new_id, 0 otherwise.
  3414. */
  3415. static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
  3416. const u16 *new_id)
  3417. {
  3418. const u16 *old_id = dev->id;
  3419. unsigned char model[2][ATA_ID_PROD_LEN + 1];
  3420. unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
  3421. if (dev->class != new_class) {
  3422. ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
  3423. dev->class, new_class);
  3424. return 0;
  3425. }
  3426. ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
  3427. ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
  3428. ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
  3429. ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
  3430. if (strcmp(model[0], model[1])) {
  3431. ata_dev_printk(dev, KERN_INFO, "model number mismatch "
  3432. "'%s' != '%s'\n", model[0], model[1]);
  3433. return 0;
  3434. }
  3435. if (strcmp(serial[0], serial[1])) {
  3436. ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
  3437. "'%s' != '%s'\n", serial[0], serial[1]);
  3438. return 0;
  3439. }
  3440. return 1;
  3441. }
  3442. /**
  3443. * ata_dev_reread_id - Re-read IDENTIFY data
  3444. * @dev: target ATA device
  3445. * @readid_flags: read ID flags
  3446. *
  3447. * Re-read IDENTIFY page and make sure @dev is still attached to
  3448. * the port.
  3449. *
  3450. * LOCKING:
  3451. * Kernel thread context (may sleep)
  3452. *
  3453. * RETURNS:
  3454. * 0 on success, negative errno otherwise
  3455. */
  3456. int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
  3457. {
  3458. unsigned int class = dev->class;
  3459. u16 *id = (void *)dev->link->ap->sector_buf;
  3460. int rc;
  3461. /* read ID data */
  3462. rc = ata_dev_read_id(dev, &class, readid_flags, id);
  3463. if (rc)
  3464. return rc;
  3465. /* is the device still there? */
  3466. if (!ata_dev_same_device(dev, class, id))
  3467. return -ENODEV;
  3468. memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
  3469. return 0;
  3470. }
  3471. /**
  3472. * ata_dev_revalidate - Revalidate ATA device
  3473. * @dev: device to revalidate
  3474. * @new_class: new class code
  3475. * @readid_flags: read ID flags
  3476. *
  3477. * Re-read IDENTIFY page, make sure @dev is still attached to the
  3478. * port and reconfigure it according to the new IDENTIFY page.
  3479. *
  3480. * LOCKING:
  3481. * Kernel thread context (may sleep)
  3482. *
  3483. * RETURNS:
  3484. * 0 on success, negative errno otherwise
  3485. */
  3486. int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
  3487. unsigned int readid_flags)
  3488. {
  3489. u64 n_sectors = dev->n_sectors;
  3490. int rc;
  3491. if (!ata_dev_enabled(dev))
  3492. return -ENODEV;
  3493. /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
  3494. if (ata_class_enabled(new_class) &&
  3495. new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
  3496. ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
  3497. dev->class, new_class);
  3498. rc = -ENODEV;
  3499. goto fail;
  3500. }
  3501. /* re-read ID */
  3502. rc = ata_dev_reread_id(dev, readid_flags);
  3503. if (rc)
  3504. goto fail;
  3505. /* configure device according to the new ID */
  3506. rc = ata_dev_configure(dev);
  3507. if (rc)
  3508. goto fail;
  3509. /* verify n_sectors hasn't changed */
  3510. if (dev->class == ATA_DEV_ATA && n_sectors &&
  3511. dev->n_sectors != n_sectors) {
  3512. ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
  3513. "%llu != %llu\n",
  3514. (unsigned long long)n_sectors,
  3515. (unsigned long long)dev->n_sectors);
  3516. /* restore original n_sectors */
  3517. dev->n_sectors = n_sectors;
  3518. rc = -ENODEV;
  3519. goto fail;
  3520. }
  3521. return 0;
  3522. fail:
  3523. ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
  3524. return rc;
  3525. }
  3526. struct ata_blacklist_entry {
  3527. const char *model_num;
  3528. const char *model_rev;
  3529. unsigned long horkage;
  3530. };
  3531. static const struct ata_blacklist_entry ata_device_blacklist [] = {
  3532. /* Devices with DMA related problems under Linux */
  3533. { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
  3534. { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
  3535. { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
  3536. { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
  3537. { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
  3538. { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
  3539. { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
  3540. { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
  3541. { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
  3542. { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
  3543. { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
  3544. { "CRD-84", NULL, ATA_HORKAGE_NODMA },
  3545. { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
  3546. { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
  3547. { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
  3548. { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
  3549. { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
  3550. { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
  3551. { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
  3552. { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
  3553. { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
  3554. { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
  3555. { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
  3556. { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
  3557. { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
  3558. { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
  3559. { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
  3560. { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
  3561. { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
  3562. { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
  3563. /* Odd clown on sil3726/4726 PMPs */
  3564. { "Config Disk", NULL, ATA_HORKAGE_NODMA |
  3565. ATA_HORKAGE_SKIP_PM },
  3566. /* Weird ATAPI devices */
  3567. { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
  3568. /* Devices we expect to fail diagnostics */
  3569. /* Devices where NCQ should be avoided */
  3570. /* NCQ is slow */
  3571. { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
  3572. { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
  3573. /* http://thread.gmane.org/gmane.linux.ide/14907 */
  3574. { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
  3575. /* NCQ is broken */
  3576. { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
  3577. { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
  3578. { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
  3579. { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
  3580. { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
  3581. { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
  3582. /* Blacklist entries taken from Silicon Image 3124/3132
  3583. Windows driver .inf file - also several Linux problem reports */
  3584. { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
  3585. { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
  3586. { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
  3587. /* devices which puke on READ_NATIVE_MAX */
  3588. { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
  3589. { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
  3590. { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
  3591. { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
  3592. /* Devices which report 1 sector over size HPA */
  3593. { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
  3594. { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
  3595. /* Devices which get the IVB wrong */
  3596. { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
  3597. { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
  3598. { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
  3599. { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
  3600. { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
  3601. /* End Marker */
  3602. { }
  3603. };
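/* Compare @name against pattern @patt; a trailing @wildchar in @patt
 * matches any remaining suffix. Returns 0 on match, non-zero otherwise.
 */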
  3604. static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
  3605. {
  3606. const char *p;
  3607. int len;
  3608. /*
  3609. * check for trailing wildcard: *\0
  3610. */
  3611. p = strchr(patt, wildchar);
  3612. if (p && ((*(p + 1)) == 0))
  3613. len = p - patt;
  3614. else {
  3615. len = strlen(name);
  3616. if (!len) {
  3617. if (!*patt)
  3618. return 0;
  3619. return -1;
  3620. }
  3621. }
  3622. return strncmp(patt, name, len);
  3623. }
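/* Look up @dev's model number and firmware revision in
 * ata_device_blacklist[] and return the horkage flags of the first
 * matching entry, or 0 if the device is not blacklisted.
 */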
  3624. static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
  3625. {
  3626. unsigned char model_num[ATA_ID_PROD_LEN + 1];
  3627. unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
  3628. const struct ata_blacklist_entry *ad = ata_device_blacklist;
  3629. ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
  3630. ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
  3631. while (ad->model_num) {
  3632. if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
  3633. if (ad->model_rev == NULL)
  3634. return ad->horkage;
  3635. if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
  3636. return ad->horkage;
  3637. }
  3638. ad++;
  3639. }
  3640. return 0;
  3641. }
  3642. static int ata_dma_blacklisted(const struct ata_device *dev)
  3643. {
  3644. /* We don't support polling DMA.
 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
  3646. * if the LLDD handles only interrupts in the HSM_ST_LAST state.
  3647. */
  3648. if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
  3649. (dev->flags & ATA_DFLAG_CDB_INTR))
  3650. return 1;
  3651. return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
  3652. }
  3653. /**
  3654. * ata_is_40wire - check drive side detection
  3655. * @dev: device
  3656. *
  3657. * Perform drive side detection decoding, allowing for device vendors
  3658. * who can't follow the documentation.
  3659. */
  3660. static int ata_is_40wire(struct ata_device *dev)
  3661. {
  3662. if (dev->horkage & ATA_HORKAGE_IVB)
  3663. return ata_drive_40wire_relaxed(dev->id);
  3664. return ata_drive_40wire(dev->id);
  3665. }
  3666. /**
  3667. * ata_dev_xfermask - Compute supported xfermask of the given device
  3668. * @dev: Device to compute xfermask for
  3669. *
  3670. * Compute supported xfermask of @dev and store it in
  3671. * dev->*_mask. This function is responsible for applying all
  3672. * known limits including host controller limits, device
  3673. * blacklist, etc...
  3674. *
  3675. * LOCKING:
  3676. * None.
  3677. */
  3678. static void ata_dev_xfermask(struct ata_device *dev)
  3679. {
  3680. struct ata_link *link = dev->link;
  3681. struct ata_port *ap = link->ap;
  3682. struct ata_host *host = ap->host;
  3683. unsigned long xfer_mask;
  3684. /* controller modes available */
  3685. xfer_mask = ata_pack_xfermask(ap->pio_mask,
  3686. ap->mwdma_mask, ap->udma_mask);
  3687. /* drive modes available */
  3688. xfer_mask &= ata_pack_xfermask(dev->pio_mask,
  3689. dev->mwdma_mask, dev->udma_mask);
  3690. xfer_mask &= ata_id_xfermask(dev->id);
  3691. /*
  3692. * CFA Advanced TrueIDE timings are not allowed on a shared
  3693. * cable
  3694. */
  3695. if (ata_dev_pair(dev)) {
  3696. /* No PIO5 or PIO6 */
  3697. xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
  3698. /* No MWDMA3 or MWDMA 4 */
  3699. xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
  3700. }
  3701. if (ata_dma_blacklisted(dev)) {
  3702. xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
  3703. ata_dev_printk(dev, KERN_WARNING,
  3704. "device is on DMA blacklist, disabling DMA\n");
  3705. }
  3706. if ((host->flags & ATA_HOST_SIMPLEX) &&
  3707. host->simplex_claimed && host->simplex_claimed != ap) {
  3708. xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
  3709. ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
  3710. "other device, disabling DMA\n");
  3711. }
  3712. if (ap->flags & ATA_FLAG_NO_IORDY)
  3713. xfer_mask &= ata_pio_mask_no_iordy(dev);
  3714. if (ap->ops->mode_filter)
  3715. xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
  3716. /* Apply cable rule here. Don't apply it early because when
  3717. * we handle hot plug the cable type can itself change.
  3718. * Check this last so that we know if the transfer rate was
  3719. * solely limited by the cable.
  3720. * Unknown or 80 wire cables reported host side are checked
  3721. * drive side as well. Cases where we know a 40wire cable
  3722. * is used safely for 80 are not checked here.
  3723. */
  3724. if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
  3725. /* UDMA/44 or higher would be available */
  3726. if ((ap->cbl == ATA_CBL_PATA40) ||
  3727. (ata_is_40wire(dev) &&
  3728. (ap->cbl == ATA_CBL_PATA_UNK ||
  3729. ap->cbl == ATA_CBL_PATA80))) {
  3730. ata_dev_printk(dev, KERN_WARNING,
  3731. "limited to UDMA/33 due to 40-wire cable\n");
  3732. xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
  3733. }
  3734. ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
  3735. &dev->mwdma_mask, &dev->udma_mask);
  3736. }
  3737. /**
  3738. * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
  3739. * @dev: Device to which command will be sent
  3740. *
 * Issue SET FEATURES - XFER MODE command to device @dev
 * on its port.
  3743. *
  3744. * LOCKING:
  3745. * PCI/etc. bus probe sem.
  3746. *
  3747. * RETURNS:
  3748. * 0 on success, AC_ERR_* mask otherwise.
  3749. */
  3750. static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
  3751. {
  3752. struct ata_taskfile tf;
  3753. unsigned int err_mask;
  3754. /* set up set-features taskfile */
  3755. DPRINTK("set features - xfer mode\n");
  3756. /* Some controllers and ATAPI devices show flaky interrupt
  3757. * behavior after setting xfer mode. Use polling instead.
  3758. */
  3759. ata_tf_init(dev, &tf);
  3760. tf.command = ATA_CMD_SET_FEATURES;
  3761. tf.feature = SETFEATURES_XFER;
  3762. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
  3763. tf.protocol = ATA_PROT_NODATA;
  3764. /* If we are using IORDY we must send the mode setting command */
  3765. if (ata_pio_need_iordy(dev))
  3766. tf.nsect = dev->xfer_mode;
  3767. /* If the device has IORDY and the controller does not - turn it off */
  3768. else if (ata_id_has_iordy(dev->id))
  3769. tf.nsect = 0x01;
  3770. else /* In the ancient relic department - skip all of this */
  3771. return 0;
  3772. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  3773. DPRINTK("EXIT, err_mask=%x\n", err_mask);
  3774. return err_mask;
  3775. }
  3776. /**
  3777. * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
  3778. * @dev: Device to which command will be sent
  3779. * @enable: Whether to enable or disable the feature
 * @feature: The sector count value that selects the feature to set
 *
 * Issue SET FEATURES - SATA FEATURES command to device @dev,
 * passing @feature in the sector count register.
  3784. *
  3785. * LOCKING:
  3786. * PCI/etc. bus probe sem.
  3787. *
  3788. * RETURNS:
  3789. * 0 on success, AC_ERR_* mask otherwise.
  3790. */
  3791. static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
  3792. u8 feature)
  3793. {
  3794. struct ata_taskfile tf;
  3795. unsigned int err_mask;
  3796. /* set up set-features taskfile */
  3797. DPRINTK("set features - SATA features\n");
  3798. ata_tf_init(dev, &tf);
  3799. tf.command = ATA_CMD_SET_FEATURES;
  3800. tf.feature = enable;
  3801. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  3802. tf.protocol = ATA_PROT_NODATA;
  3803. tf.nsect = feature;
  3804. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  3805. DPRINTK("EXIT, err_mask=%x\n", err_mask);
  3806. return err_mask;
  3807. }
  3808. /**
  3809. * ata_dev_init_params - Issue INIT DEV PARAMS command
  3810. * @dev: Device to which command will be sent
  3811. * @heads: Number of heads (taskfile parameter)
  3812. * @sectors: Number of sectors (taskfile parameter)
  3813. *
  3814. * LOCKING:
  3815. * Kernel thread context (may sleep)
  3816. *
  3817. * RETURNS:
  3818. * 0 on success, AC_ERR_* mask otherwise.
  3819. */
  3820. static unsigned int ata_dev_init_params(struct ata_device *dev,
  3821. u16 heads, u16 sectors)
  3822. {
  3823. struct ata_taskfile tf;
  3824. unsigned int err_mask;
  3825. /* Number of sectors per track 1-255. Number of heads 1-16 */
  3826. if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
  3827. return AC_ERR_INVALID;
  3828. /* set up init dev params taskfile */
  3829. DPRINTK("init dev params \n");
  3830. ata_tf_init(dev, &tf);
  3831. tf.command = ATA_CMD_INIT_DEV_PARAMS;
  3832. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  3833. tf.protocol = ATA_PROT_NODATA;
  3834. tf.nsect = sectors;
  3835. tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
  3836. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
/* A clean abort indicates an original or just out-of-spec drive,
   and we should continue as we issue the setup based on the
   drive-reported working geometry. */
  3840. if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
  3841. err_mask = 0;
  3842. DPRINTK("EXIT, err_mask=%x\n", err_mask);
  3843. return err_mask;
  3844. }
  3845. /**
  3846. * ata_sg_clean - Unmap DMA memory associated with command
  3847. * @qc: Command containing DMA memory to be released
  3848. *
  3849. * Unmap all mapped DMA memory associated with this command.
  3850. *
  3851. * LOCKING:
  3852. * spin_lock_irqsave(host lock)
  3853. */
  3854. void ata_sg_clean(struct ata_queued_cmd *qc)
  3855. {
  3856. struct ata_port *ap = qc->ap;
  3857. struct scatterlist *sg = qc->__sg;
  3858. int dir = qc->dma_dir;
  3859. void *pad_buf = NULL;
  3860. WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
  3861. WARN_ON(sg == NULL);
  3862. if (qc->flags & ATA_QCFLAG_SINGLE)
  3863. WARN_ON(qc->n_elem > 1);
  3864. VPRINTK("unmapping %u sg elements\n", qc->n_elem);
  3865. /* if we padded the buffer out to 32-bit bound, and data
  3866. * xfer direction is from-device, we must copy from the
  3867. * pad buffer back into the supplied buffer
  3868. */
  3869. if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
  3870. pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
  3871. if (qc->flags & ATA_QCFLAG_SG) {
  3872. if (qc->n_elem)
  3873. dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
  3874. /* restore last sg */
  3875. sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
  3876. if (pad_buf) {
  3877. struct scatterlist *psg = &qc->pad_sgent;
  3878. void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
  3879. memcpy(addr + psg->offset, pad_buf, qc->pad_len);
  3880. kunmap_atomic(addr, KM_IRQ0);
  3881. }
  3882. } else {
  3883. if (qc->n_elem)
  3884. dma_unmap_single(ap->dev,
  3885. sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
  3886. dir);
  3887. /* restore sg */
  3888. sg->length += qc->pad_len;
  3889. if (pad_buf)
  3890. memcpy(qc->buf_virt + sg->length - qc->pad_len,
  3891. pad_buf, qc->pad_len);
  3892. }
  3893. qc->flags &= ~ATA_QCFLAG_DMAMAP;
  3894. qc->__sg = NULL;
  3895. }
  3896. /**
  3897. * ata_fill_sg - Fill PCI IDE PRD table
  3898. * @qc: Metadata associated with taskfile to be transferred
  3899. *
  3900. * Fill PCI IDE PRD (scatter-gather) table with segments
  3901. * associated with the current disk command.
  3902. *
  3903. * LOCKING:
  3904. * spin_lock_irqsave(host lock)
  3905. *
  3906. */
  3907. static void ata_fill_sg(struct ata_queued_cmd *qc)
  3908. {
  3909. struct ata_port *ap = qc->ap;
  3910. struct scatterlist *sg;
  3911. unsigned int idx;
  3912. WARN_ON(qc->__sg == NULL);
  3913. WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
  3914. idx = 0;
  3915. ata_for_each_sg(sg, qc) {
  3916. u32 addr, offset;
  3917. u32 sg_len, len;
  3918. /* determine if physical DMA addr spans 64K boundary.
  3919. * Note h/w doesn't support 64-bit, so we unconditionally
  3920. * truncate dma_addr_t to u32.
  3921. */
  3922. addr = (u32) sg_dma_address(sg);
  3923. sg_len = sg_dma_len(sg);
  3924. while (sg_len) {
  3925. offset = addr & 0xffff;
  3926. len = sg_len;
  3927. if ((offset + sg_len) > 0x10000)
  3928. len = 0x10000 - offset;
  3929. ap->prd[idx].addr = cpu_to_le32(addr);
  3930. ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
  3931. VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
  3932. idx++;
  3933. sg_len -= len;
  3934. addr += len;
  3935. }
  3936. }
  3937. if (idx)
  3938. ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
  3939. }
  3940. /**
  3941. * ata_fill_sg_dumb - Fill PCI IDE PRD table
  3942. * @qc: Metadata associated with taskfile to be transferred
  3943. *
  3944. * Fill PCI IDE PRD (scatter-gather) table with segments
3945. * associated with the current disk command. Perform the fill
3946. * so that we avoid writing any zero-length (i.e. 64K) records for
3947. * controllers that don't follow the spec.
  3948. *
  3949. * LOCKING:
  3950. * spin_lock_irqsave(host lock)
  3951. *
  3952. */
  3953. static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
  3954. {
  3955. struct ata_port *ap = qc->ap;
  3956. struct scatterlist *sg;
  3957. unsigned int idx;
  3958. WARN_ON(qc->__sg == NULL);
  3959. WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
  3960. idx = 0;
  3961. ata_for_each_sg(sg, qc) {
  3962. u32 addr, offset;
  3963. u32 sg_len, len, blen;
  3964. /* determine if physical DMA addr spans 64K boundary.
  3965. * Note h/w doesn't support 64-bit, so we unconditionally
  3966. * truncate dma_addr_t to u32.
  3967. */
  3968. addr = (u32) sg_dma_address(sg);
  3969. sg_len = sg_dma_len(sg);
  3970. while (sg_len) {
  3971. offset = addr & 0xffff;
  3972. len = sg_len;
  3973. if ((offset + sg_len) > 0x10000)
  3974. len = 0x10000 - offset;
  3975. blen = len & 0xffff;
  3976. ap->prd[idx].addr = cpu_to_le32(addr);
  3977. if (blen == 0) {
  3978. /* Some PATA chipsets like the CS5530 can't
  3979. cope with 0x0000 meaning 64K as the spec says */
  3980. ap->prd[idx].flags_len = cpu_to_le32(0x8000);
  3981. blen = 0x8000;
  3982. ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
  3983. }
  3984. ap->prd[idx].flags_len = cpu_to_le32(blen);
  3985. VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
  3986. idx++;
  3987. sg_len -= len;
  3988. addr += len;
  3989. }
  3990. }
  3991. if (idx)
  3992. ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
  3993. }
  3994. /**
  3995. * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
  3996. * @qc: Metadata associated with taskfile to check
  3997. *
  3998. * Allow low-level driver to filter ATA PACKET commands, returning
  3999. * a status indicating whether or not it is OK to use DMA for the
  4000. * supplied PACKET command.
  4001. *
  4002. * LOCKING:
  4003. * spin_lock_irqsave(host lock)
  4004. *
  4005. * RETURNS: 0 when ATAPI DMA can be used
  4006. * nonzero otherwise
  4007. */
  4008. int ata_check_atapi_dma(struct ata_queued_cmd *qc)
  4009. {
  4010. struct ata_port *ap = qc->ap;
  4011. /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
  4012. * few ATAPI devices choke on such DMA requests.
  4013. */
  4014. if (unlikely(qc->nbytes & 15))
  4015. return 1;
  4016. if (ap->ops->check_atapi_dma)
  4017. return ap->ops->check_atapi_dma(qc);
  4018. return 0;
  4019. }
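/*
 * Illustrative sketch (not in the original source; the names my_check_atapi_dma
 * and my_port_ops are hypothetical).  A low-level driver that cannot DMA any
 * PACKET command would veto it with a trivial hook and wire it up in its
 * ata_port_operations; returning nonzero tells libata to fall back to PIO:
 *
 *	static int my_check_atapi_dma(struct ata_queued_cmd *qc)
 *	{
 *		return 1;	// never use DMA for PACKET commands
 *	}
 *
 *	// excerpt from the driver's ops table:
 *	//	.check_atapi_dma = my_check_atapi_dma,
 */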
  4020. /**
  4021. * atapi_qc_may_overflow - Check whether data transfer may overflow
  4022. * @qc: ATA command in question
  4023. *
  4024. * ATAPI commands which transfer variable length data to host
4025. * might overflow due to application error or hardware bug. This
  4026. * function checks whether overflow should be drained and ignored
  4027. * for @qc.
  4028. *
  4029. * LOCKING:
  4030. * None.
  4031. *
  4032. * RETURNS:
  4033. * 1 if @qc may overflow; otherwise, 0.
  4034. */
  4035. static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
  4036. {
  4037. if (qc->tf.protocol != ATA_PROT_ATAPI &&
  4038. qc->tf.protocol != ATA_PROT_ATAPI_DMA)
  4039. return 0;
  4040. if (qc->tf.flags & ATA_TFLAG_WRITE)
  4041. return 0;
  4042. switch (qc->cdb[0]) {
  4043. case READ_10:
  4044. case READ_12:
  4045. case WRITE_10:
  4046. case WRITE_12:
  4047. case GPCMD_READ_CD:
  4048. case GPCMD_READ_CD_MSF:
  4049. return 0;
  4050. }
  4051. return 1;
  4052. }
  4053. /**
  4054. * ata_std_qc_defer - Check whether a qc needs to be deferred
  4055. * @qc: ATA command in question
  4056. *
  4057. * Non-NCQ commands cannot run with any other command, NCQ or
  4058. * not. As upper layer only knows the queue depth, we are
  4059. * responsible for maintaining exclusion. This function checks
  4060. * whether a new command @qc can be issued.
  4061. *
  4062. * LOCKING:
  4063. * spin_lock_irqsave(host lock)
  4064. *
  4065. * RETURNS:
  4066. * ATA_DEFER_* if deferring is needed, 0 otherwise.
  4067. */
  4068. int ata_std_qc_defer(struct ata_queued_cmd *qc)
  4069. {
  4070. struct ata_link *link = qc->dev->link;
  4071. if (qc->tf.protocol == ATA_PROT_NCQ) {
  4072. if (!ata_tag_valid(link->active_tag))
  4073. return 0;
  4074. } else {
  4075. if (!ata_tag_valid(link->active_tag) && !link->sactive)
  4076. return 0;
  4077. }
  4078. return ATA_DEFER_LINK;
  4079. }
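/*
 * Illustrative usage (not in the original source): NCQ-capable drivers
 * normally just point their qc_defer hook at this helper, e.g.
 *
 *	.qc_defer = ata_std_qc_defer,
 *
 * in their struct ata_port_operations, so that an NCQ command is deferred
 * while a non-NCQ command owns the link, and vice versa.
 */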
  4080. /**
  4081. * ata_qc_prep - Prepare taskfile for submission
  4082. * @qc: Metadata associated with taskfile to be prepared
  4083. *
  4084. * Prepare ATA taskfile for submission.
  4085. *
  4086. * LOCKING:
  4087. * spin_lock_irqsave(host lock)
  4088. */
  4089. void ata_qc_prep(struct ata_queued_cmd *qc)
  4090. {
  4091. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  4092. return;
  4093. ata_fill_sg(qc);
  4094. }
  4095. /**
  4096. * ata_dumb_qc_prep - Prepare taskfile for submission
  4097. * @qc: Metadata associated with taskfile to be prepared
  4098. *
  4099. * Prepare ATA taskfile for submission.
  4100. *
  4101. * LOCKING:
  4102. * spin_lock_irqsave(host lock)
  4103. */
  4104. void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
  4105. {
  4106. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  4107. return;
  4108. ata_fill_sg_dumb(qc);
  4109. }
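/*
 * Illustrative usage (not in the original source): controllers that cannot
 * handle zero-length (64K) PRD entries point their prep hook at the "dumb"
 * variant instead of ata_qc_prep, e.g.
 *
 *	.qc_prep = ata_dumb_qc_prep,
 *
 * in their struct ata_port_operations; everything else stays the same.
 */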
  4110. void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
  4111. /**
  4112. * ata_sg_init_one - Associate command with memory buffer
  4113. * @qc: Command to be associated
  4114. * @buf: Memory buffer
  4115. * @buflen: Length of memory buffer, in bytes.
  4116. *
  4117. * Initialize the data-related elements of queued_cmd @qc
  4118. * to point to a single memory buffer, @buf of byte length @buflen.
  4119. *
  4120. * LOCKING:
  4121. * spin_lock_irqsave(host lock)
  4122. */
  4123. void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
  4124. {
  4125. qc->flags |= ATA_QCFLAG_SINGLE;
  4126. qc->__sg = &qc->sgent;
  4127. qc->n_elem = 1;
  4128. qc->orig_n_elem = 1;
  4129. qc->buf_virt = buf;
  4130. qc->nbytes = buflen;
  4131. qc->cursg = qc->__sg;
  4132. sg_init_one(&qc->sgent, buf, buflen);
  4133. }
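/*
 * Illustrative sketch (not in the original source; buf and the 512-byte
 * length are hypothetical).  A command that transfers a single kernel buffer
 * is associated with it roughly like this:
 *
 *	ata_sg_init_one(qc, buf, 512);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *
 * The actual DMA mapping happens later, in ata_sg_setup_one(), when the
 * command is issued through ata_qc_issue().
 */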
  4134. /**
  4135. * ata_sg_init - Associate command with scatter-gather table.
  4136. * @qc: Command to be associated
  4137. * @sg: Scatter-gather table.
  4138. * @n_elem: Number of elements in s/g table.
  4139. *
  4140. * Initialize the data-related elements of queued_cmd @qc
  4141. * to point to a scatter-gather table @sg, containing @n_elem
  4142. * elements.
  4143. *
  4144. * LOCKING:
  4145. * spin_lock_irqsave(host lock)
  4146. */
  4147. void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
  4148. unsigned int n_elem)
  4149. {
  4150. qc->flags |= ATA_QCFLAG_SG;
  4151. qc->__sg = sg;
  4152. qc->n_elem = n_elem;
  4153. qc->orig_n_elem = n_elem;
  4154. qc->cursg = qc->__sg;
  4155. }
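/*
 * Illustrative sketch (not in the original source; sgl, n_elem and total_len
 * are hypothetical).  A command built from an existing scatterlist is
 * associated with it roughly like this:
 *
 *	ata_sg_init(qc, sgl, n_elem);
 *	qc->dma_dir = DMA_TO_DEVICE;
 *	qc->nbytes = total_len;
 *
 * As with the single-buffer case, the DMA mapping itself is deferred to
 * ata_sg_setup() at issue time.
 */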
  4156. /**
  4157. * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
  4158. * @qc: Command with memory buffer to be mapped.
  4159. *
  4160. * DMA-map the memory buffer associated with queued_cmd @qc.
  4161. *
  4162. * LOCKING:
  4163. * spin_lock_irqsave(host lock)
  4164. *
  4165. * RETURNS:
  4166. * Zero on success, negative on error.
  4167. */
  4168. static int ata_sg_setup_one(struct ata_queued_cmd *qc)
  4169. {
  4170. struct ata_port *ap = qc->ap;
  4171. int dir = qc->dma_dir;
  4172. struct scatterlist *sg = qc->__sg;
  4173. dma_addr_t dma_address;
  4174. int trim_sg = 0;
  4175. /* we must lengthen transfers to end on a 32-bit boundary */
  4176. qc->pad_len = sg->length & 3;
  4177. if (qc->pad_len) {
  4178. void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
  4179. struct scatterlist *psg = &qc->pad_sgent;
  4180. WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
  4181. memset(pad_buf, 0, ATA_DMA_PAD_SZ);
  4182. if (qc->tf.flags & ATA_TFLAG_WRITE)
  4183. memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
  4184. qc->pad_len);
  4185. sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
  4186. sg_dma_len(psg) = ATA_DMA_PAD_SZ;
  4187. /* trim sg */
  4188. sg->length -= qc->pad_len;
  4189. if (sg->length == 0)
  4190. trim_sg = 1;
  4191. DPRINTK("padding done, sg->length=%u pad_len=%u\n",
  4192. sg->length, qc->pad_len);
  4193. }
  4194. if (trim_sg) {
  4195. qc->n_elem--;
  4196. goto skip_map;
  4197. }
  4198. dma_address = dma_map_single(ap->dev, qc->buf_virt,
  4199. sg->length, dir);
  4200. if (dma_mapping_error(dma_address)) {
  4201. /* restore sg */
  4202. sg->length += qc->pad_len;
  4203. return -1;
  4204. }
  4205. sg_dma_address(sg) = dma_address;
  4206. sg_dma_len(sg) = sg->length;
  4207. skip_map:
  4208. DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
  4209. qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
  4210. return 0;
  4211. }
  4212. /**
  4213. * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
  4214. * @qc: Command with scatter-gather table to be mapped.
  4215. *
  4216. * DMA-map the scatter-gather table associated with queued_cmd @qc.
  4217. *
  4218. * LOCKING:
  4219. * spin_lock_irqsave(host lock)
  4220. *
  4221. * RETURNS:
  4222. * Zero on success, negative on error.
  4223. *
  4224. */
  4225. static int ata_sg_setup(struct ata_queued_cmd *qc)
  4226. {
  4227. struct ata_port *ap = qc->ap;
  4228. struct scatterlist *sg = qc->__sg;
  4229. struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
  4230. int n_elem, pre_n_elem, dir, trim_sg = 0;
  4231. VPRINTK("ENTER, ata%u\n", ap->print_id);
  4232. WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
  4233. /* we must lengthen transfers to end on a 32-bit boundary */
  4234. qc->pad_len = lsg->length & 3;
  4235. if (qc->pad_len) {
  4236. void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
  4237. struct scatterlist *psg = &qc->pad_sgent;
  4238. unsigned int offset;
  4239. WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
  4240. memset(pad_buf, 0, ATA_DMA_PAD_SZ);
  4241. /*
  4242. * psg->page/offset are used to copy to-be-written
  4243. * data in this function or read data in ata_sg_clean.
  4244. */
  4245. offset = lsg->offset + lsg->length - qc->pad_len;
  4246. sg_init_table(psg, 1);
  4247. sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
  4248. qc->pad_len, offset_in_page(offset));
  4249. if (qc->tf.flags & ATA_TFLAG_WRITE) {
  4250. void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
  4251. memcpy(pad_buf, addr + psg->offset, qc->pad_len);
  4252. kunmap_atomic(addr, KM_IRQ0);
  4253. }
  4254. sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
  4255. sg_dma_len(psg) = ATA_DMA_PAD_SZ;
  4256. /* trim last sg */
  4257. lsg->length -= qc->pad_len;
  4258. if (lsg->length == 0)
  4259. trim_sg = 1;
  4260. DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
  4261. qc->n_elem - 1, lsg->length, qc->pad_len);
  4262. }
  4263. pre_n_elem = qc->n_elem;
  4264. if (trim_sg && pre_n_elem)
  4265. pre_n_elem--;
  4266. if (!pre_n_elem) {
  4267. n_elem = 0;
  4268. goto skip_map;
  4269. }
  4270. dir = qc->dma_dir;
  4271. n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
  4272. if (n_elem < 1) {
  4273. /* restore last sg */
  4274. lsg->length += qc->pad_len;
  4275. return -1;
  4276. }
  4277. DPRINTK("%d sg elements mapped\n", n_elem);
  4278. skip_map:
  4279. qc->n_elem = n_elem;
  4280. return 0;
  4281. }
  4282. /**
  4283. * swap_buf_le16 - swap halves of 16-bit words in place
  4284. * @buf: Buffer to swap
  4285. * @buf_words: Number of 16-bit words in buffer.
  4286. *
  4287. * Swap halves of 16-bit words if needed to convert from
  4288. * little-endian byte order to native cpu byte order, or
  4289. * vice-versa.
  4290. *
  4291. * LOCKING:
  4292. * Inherited from caller.
  4293. */
  4294. void swap_buf_le16(u16 *buf, unsigned int buf_words)
  4295. {
  4296. #ifdef __BIG_ENDIAN
  4297. unsigned int i;
  4298. for (i = 0; i < buf_words; i++)
  4299. buf[i] = le16_to_cpu(buf[i]);
  4300. #endif /* __BIG_ENDIAN */
  4301. }
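/*
 * Illustrative usage (not in the original source): IDENTIFY DEVICE data is
 * transferred as little-endian 16-bit words, so after reading it into a u16
 * buffer a caller converts it to CPU byte order in place:
 *
 *	u16 id[ATA_ID_WORDS];
 *	(... read 512 bytes of IDENTIFY data into id[] ...)
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian machines this compiles down to a no-op.
 */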
  4302. /**
  4303. * ata_data_xfer - Transfer data by PIO
  4304. * @adev: device to target
  4305. * @buf: data buffer
  4306. * @buflen: buffer length
  4307. * @write_data: read/write
  4308. *
  4309. * Transfer data from/to the device data register by PIO.
  4310. *
  4311. * LOCKING:
  4312. * Inherited from caller.
  4313. */
  4314. void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
  4315. unsigned int buflen, int write_data)
  4316. {
  4317. struct ata_port *ap = adev->link->ap;
  4318. unsigned int words = buflen >> 1;
  4319. /* Transfer multiple of 2 bytes */
  4320. if (write_data)
  4321. iowrite16_rep(ap->ioaddr.data_addr, buf, words);
  4322. else
  4323. ioread16_rep(ap->ioaddr.data_addr, buf, words);
  4324. /* Transfer trailing 1 byte, if any. */
  4325. if (unlikely(buflen & 0x01)) {
  4326. u16 align_buf[1] = { 0 };
  4327. unsigned char *trailing_buf = buf + buflen - 1;
  4328. if (write_data) {
  4329. memcpy(align_buf, trailing_buf, 1);
  4330. iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
  4331. } else {
  4332. align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
  4333. memcpy(trailing_buf, align_buf, 1);
  4334. }
  4335. }
  4336. }
  4337. /**
  4338. * ata_data_xfer_noirq - Transfer data by PIO
  4339. * @adev: device to target
  4340. * @buf: data buffer
  4341. * @buflen: buffer length
  4342. * @write_data: read/write
  4343. *
  4344. * Transfer data from/to the device data register by PIO. Do the
  4345. * transfer with interrupts disabled.
  4346. *
  4347. * LOCKING:
  4348. * Inherited from caller.
  4349. */
  4350. void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
  4351. unsigned int buflen, int write_data)
  4352. {
  4353. unsigned long flags;
  4354. local_irq_save(flags);
  4355. ata_data_xfer(adev, buf, buflen, write_data);
  4356. local_irq_restore(flags);
  4357. }
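/*
 * Illustrative usage (not in the original source): drivers select one of the
 * two PIO transfer helpers above through the data_xfer hook in their
 * ata_port_operations, e.g.
 *
 *	.data_xfer = ata_data_xfer,
 *
 * or, for hardware that cannot tolerate interrupts during the transfer,
 *
 *	.data_xfer = ata_data_xfer_noirq,
 */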
  4358. /**
  4359. * ata_pio_sector - Transfer a sector of data.
  4360. * @qc: Command on going
  4361. *
  4362. * Transfer qc->sect_size bytes of data from/to the ATA device.
  4363. *
  4364. * LOCKING:
  4365. * Inherited from caller.
  4366. */
  4367. static void ata_pio_sector(struct ata_queued_cmd *qc)
  4368. {
  4369. int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
  4370. struct ata_port *ap = qc->ap;
  4371. struct page *page;
  4372. unsigned int offset;
  4373. unsigned char *buf;
  4374. if (qc->curbytes == qc->nbytes - qc->sect_size)
  4375. ap->hsm_task_state = HSM_ST_LAST;
  4376. page = sg_page(qc->cursg);
  4377. offset = qc->cursg->offset + qc->cursg_ofs;
  4378. /* get the current page and offset */
  4379. page = nth_page(page, (offset >> PAGE_SHIFT));
  4380. offset %= PAGE_SIZE;
  4381. DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
  4382. if (PageHighMem(page)) {
  4383. unsigned long flags;
  4384. /* FIXME: use a bounce buffer */
  4385. local_irq_save(flags);
  4386. buf = kmap_atomic(page, KM_IRQ0);
  4387. /* do the actual data transfer */
  4388. ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
  4389. kunmap_atomic(buf, KM_IRQ0);
  4390. local_irq_restore(flags);
  4391. } else {
  4392. buf = page_address(page);
  4393. ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
  4394. }
  4395. qc->curbytes += qc->sect_size;
  4396. qc->cursg_ofs += qc->sect_size;
  4397. if (qc->cursg_ofs == qc->cursg->length) {
  4398. qc->cursg = sg_next(qc->cursg);
  4399. qc->cursg_ofs = 0;
  4400. }
  4401. }
  4402. /**
  4403. * ata_pio_sectors - Transfer one or many sectors.
  4404. * @qc: Command on going
  4405. *
  4406. * Transfer one or many sectors of data from/to the
  4407. * ATA device for the DRQ request.
  4408. *
  4409. * LOCKING:
  4410. * Inherited from caller.
  4411. */
  4412. static void ata_pio_sectors(struct ata_queued_cmd *qc)
  4413. {
  4414. if (is_multi_taskfile(&qc->tf)) {
  4415. /* READ/WRITE MULTIPLE */
  4416. unsigned int nsect;
  4417. WARN_ON(qc->dev->multi_count == 0);
  4418. nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
  4419. qc->dev->multi_count);
  4420. while (nsect--)
  4421. ata_pio_sector(qc);
  4422. } else
  4423. ata_pio_sector(qc);
  4424. ata_altstatus(qc->ap); /* flush */
  4425. }
  4426. /**
  4427. * atapi_send_cdb - Write CDB bytes to hardware
  4428. * @ap: Port to which ATAPI device is attached.
  4429. * @qc: Taskfile currently active
  4430. *
  4431. * When device has indicated its readiness to accept
  4432. * a CDB, this function is called. Send the CDB.
  4433. *
  4434. * LOCKING:
  4435. * caller.
  4436. */
  4437. static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
  4438. {
  4439. /* send SCSI cdb */
  4440. DPRINTK("send cdb\n");
  4441. WARN_ON(qc->dev->cdb_len < 12);
  4442. ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
  4443. ata_altstatus(ap); /* flush */
  4444. switch (qc->tf.protocol) {
  4445. case ATA_PROT_ATAPI:
  4446. ap->hsm_task_state = HSM_ST;
  4447. break;
  4448. case ATA_PROT_ATAPI_NODATA:
  4449. ap->hsm_task_state = HSM_ST_LAST;
  4450. break;
  4451. case ATA_PROT_ATAPI_DMA:
  4452. ap->hsm_task_state = HSM_ST_LAST;
  4453. /* initiate bmdma */
  4454. ap->ops->bmdma_start(qc);
  4455. break;
  4456. }
  4457. }
  4458. /**
  4459. * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
  4460. * @qc: Command on going
  4461. * @bytes: number of bytes
  4462. *
4463. * Transfer data from/to the ATAPI device.
  4464. *
  4465. * LOCKING:
  4466. * Inherited from caller.
  4467. *
  4468. */
  4469. static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
  4470. {
  4471. int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
  4472. struct ata_port *ap = qc->ap;
  4473. struct ata_eh_info *ehi = &qc->dev->link->eh_info;
  4474. struct scatterlist *sg;
  4475. struct page *page;
  4476. unsigned char *buf;
  4477. unsigned int offset, count;
  4478. next_sg:
  4479. sg = qc->cursg;
  4480. if (unlikely(!sg)) {
  4481. /*
  4482. * The end of qc->sg is reached and the device expects
  4483. * more data to transfer. In order not to overrun qc->sg
4484. * and to fulfill the length specified in the byte count register,
4485. * - for read case, discard trailing data from the device
4486. * - for write case, pad the device with zero data
  4487. */
  4488. u16 pad_buf[1] = { 0 };
  4489. unsigned int i;
  4490. if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
  4491. ata_ehi_push_desc(ehi, "too much trailing data "
  4492. "buf=%u cur=%u bytes=%u",
  4493. qc->nbytes, qc->curbytes, bytes);
  4494. return -1;
  4495. }
4496. /* overflow is expected for misc ATAPI commands */
  4497. if (bytes && !atapi_qc_may_overflow(qc))
  4498. ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
  4499. "trailing data (cdb=%02x nbytes=%u)\n",
  4500. bytes, qc->cdb[0], qc->nbytes);
  4501. for (i = 0; i < (bytes + 1) / 2; i++)
  4502. ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
  4503. qc->curbytes += bytes;
  4504. return 0;
  4505. }
  4506. page = sg_page(sg);
  4507. offset = sg->offset + qc->cursg_ofs;
  4508. /* get the current page and offset */
  4509. page = nth_page(page, (offset >> PAGE_SHIFT));
  4510. offset %= PAGE_SIZE;
  4511. /* don't overrun current sg */
  4512. count = min(sg->length - qc->cursg_ofs, bytes);
  4513. /* don't cross page boundaries */
  4514. count = min(count, (unsigned int)PAGE_SIZE - offset);
  4515. DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
  4516. if (PageHighMem(page)) {
  4517. unsigned long flags;
  4518. /* FIXME: use bounce buffer */
  4519. local_irq_save(flags);
  4520. buf = kmap_atomic(page, KM_IRQ0);
  4521. /* do the actual data transfer */
  4522. ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
  4523. kunmap_atomic(buf, KM_IRQ0);
  4524. local_irq_restore(flags);
  4525. } else {
  4526. buf = page_address(page);
  4527. ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
  4528. }
  4529. bytes -= count;
  4530. if ((count & 1) && bytes)
  4531. bytes--;
  4532. qc->curbytes += count;
  4533. qc->cursg_ofs += count;
  4534. if (qc->cursg_ofs == sg->length) {
  4535. qc->cursg = sg_next(qc->cursg);
  4536. qc->cursg_ofs = 0;
  4537. }
  4538. if (bytes)
  4539. goto next_sg;
  4540. return 0;
  4541. }
  4542. /**
  4543. * atapi_pio_bytes - Transfer data from/to the ATAPI device.
  4544. * @qc: Command on going
  4545. *
4546. * Transfer data from/to the ATAPI device.
  4547. *
  4548. * LOCKING:
  4549. * Inherited from caller.
  4550. */
  4551. static void atapi_pio_bytes(struct ata_queued_cmd *qc)
  4552. {
  4553. struct ata_port *ap = qc->ap;
  4554. struct ata_device *dev = qc->dev;
  4555. unsigned int ireason, bc_lo, bc_hi, bytes;
  4556. int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
  4557. /* Abuse qc->result_tf for temp storage of intermediate TF
  4558. * here to save some kernel stack usage.
  4559. * For normal completion, qc->result_tf is not relevant. For
  4560. * error, qc->result_tf is later overwritten by ata_qc_complete().
  4561. * So, the correctness of qc->result_tf is not affected.
  4562. */
  4563. ap->ops->tf_read(ap, &qc->result_tf);
  4564. ireason = qc->result_tf.nsect;
  4565. bc_lo = qc->result_tf.lbam;
  4566. bc_hi = qc->result_tf.lbah;
  4567. bytes = (bc_hi << 8) | bc_lo;
  4568. /* shall be cleared to zero, indicating xfer of data */
  4569. if (ireason & (1 << 0))
  4570. goto err_out;
  4571. /* make sure transfer direction matches expected */
  4572. i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
  4573. if (do_write != i_write)
  4574. goto err_out;
  4575. VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
  4576. if (__atapi_pio_bytes(qc, bytes))
  4577. goto err_out;
  4578. ata_altstatus(ap); /* flush */
  4579. return;
  4580. err_out:
  4581. ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
  4582. qc->err_mask |= AC_ERR_HSM;
  4583. ap->hsm_task_state = HSM_ST_ERR;
  4584. }
  4585. /**
  4586. * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
  4587. * @ap: the target ata_port
  4588. * @qc: qc on going
  4589. *
  4590. * RETURNS:
  4591. * 1 if ok in workqueue, 0 otherwise.
  4592. */
  4593. static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
  4594. {
  4595. if (qc->tf.flags & ATA_TFLAG_POLLING)
  4596. return 1;
  4597. if (ap->hsm_task_state == HSM_ST_FIRST) {
  4598. if (qc->tf.protocol == ATA_PROT_PIO &&
  4599. (qc->tf.flags & ATA_TFLAG_WRITE))
  4600. return 1;
  4601. if (is_atapi_taskfile(&qc->tf) &&
  4602. !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
  4603. return 1;
  4604. }
  4605. return 0;
  4606. }
  4607. /**
  4608. * ata_hsm_qc_complete - finish a qc running on standard HSM
  4609. * @qc: Command to complete
  4610. * @in_wq: 1 if called from workqueue, 0 otherwise
  4611. *
  4612. * Finish @qc which is running on standard HSM.
  4613. *
  4614. * LOCKING:
  4615. * If @in_wq is zero, spin_lock_irqsave(host lock).
  4616. * Otherwise, none on entry and grabs host lock.
  4617. */
  4618. static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
  4619. {
  4620. struct ata_port *ap = qc->ap;
  4621. unsigned long flags;
  4622. if (ap->ops->error_handler) {
  4623. if (in_wq) {
  4624. spin_lock_irqsave(ap->lock, flags);
  4625. /* EH might have kicked in while host lock is
  4626. * released.
  4627. */
  4628. qc = ata_qc_from_tag(ap, qc->tag);
  4629. if (qc) {
  4630. if (likely(!(qc->err_mask & AC_ERR_HSM))) {
  4631. ap->ops->irq_on(ap);
  4632. ata_qc_complete(qc);
  4633. } else
  4634. ata_port_freeze(ap);
  4635. }
  4636. spin_unlock_irqrestore(ap->lock, flags);
  4637. } else {
  4638. if (likely(!(qc->err_mask & AC_ERR_HSM)))
  4639. ata_qc_complete(qc);
  4640. else
  4641. ata_port_freeze(ap);
  4642. }
  4643. } else {
  4644. if (in_wq) {
  4645. spin_lock_irqsave(ap->lock, flags);
  4646. ap->ops->irq_on(ap);
  4647. ata_qc_complete(qc);
  4648. spin_unlock_irqrestore(ap->lock, flags);
  4649. } else
  4650. ata_qc_complete(qc);
  4651. }
  4652. }
  4653. /**
  4654. * ata_hsm_move - move the HSM to the next state.
  4655. * @ap: the target ata_port
  4656. * @qc: qc on going
  4657. * @status: current device status
  4658. * @in_wq: 1 if called from workqueue, 0 otherwise
  4659. *
  4660. * RETURNS:
  4661. * 1 when poll next status needed, 0 otherwise.
  4662. */
  4663. int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
  4664. u8 status, int in_wq)
  4665. {
  4666. unsigned long flags = 0;
  4667. int poll_next;
  4668. WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
  4669. /* Make sure ata_qc_issue_prot() does not throw things
  4670. * like DMA polling into the workqueue. Notice that
  4671. * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
  4672. */
  4673. WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
  4674. fsm_start:
  4675. DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
  4676. ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
  4677. switch (ap->hsm_task_state) {
  4678. case HSM_ST_FIRST:
  4679. /* Send first data block or PACKET CDB */
  4680. /* If polling, we will stay in the work queue after
  4681. * sending the data. Otherwise, interrupt handler
  4682. * takes over after sending the data.
  4683. */
  4684. poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
  4685. /* check device status */
  4686. if (unlikely((status & ATA_DRQ) == 0)) {
  4687. /* handle BSY=0, DRQ=0 as error */
  4688. if (likely(status & (ATA_ERR | ATA_DF)))
  4689. /* device stops HSM for abort/error */
  4690. qc->err_mask |= AC_ERR_DEV;
  4691. else
  4692. /* HSM violation. Let EH handle this */
  4693. qc->err_mask |= AC_ERR_HSM;
  4694. ap->hsm_task_state = HSM_ST_ERR;
  4695. goto fsm_start;
  4696. }
  4697. /* Device should not ask for data transfer (DRQ=1)
  4698. * when it finds something wrong.
  4699. * We ignore DRQ here and stop the HSM by
  4700. * changing hsm_task_state to HSM_ST_ERR and
  4701. * let the EH abort the command or reset the device.
  4702. */
  4703. if (unlikely(status & (ATA_ERR | ATA_DF))) {
  4704. /* Some ATAPI tape drives forget to clear the ERR bit
  4705. * when doing the next command (mostly request sense).
  4706. * We ignore ERR here to workaround and proceed sending
  4707. * the CDB.
  4708. */
  4709. if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
  4710. ata_port_printk(ap, KERN_WARNING,
  4711. "DRQ=1 with device error, "
  4712. "dev_stat 0x%X\n", status);
  4713. qc->err_mask |= AC_ERR_HSM;
  4714. ap->hsm_task_state = HSM_ST_ERR;
  4715. goto fsm_start;
  4716. }
  4717. }
  4718. /* Send the CDB (atapi) or the first data block (ata pio out).
  4719. * During the state transition, interrupt handler shouldn't
  4720. * be invoked before the data transfer is complete and
  4721. * hsm_task_state is changed. Hence, the following locking.
  4722. */
  4723. if (in_wq)
  4724. spin_lock_irqsave(ap->lock, flags);
  4725. if (qc->tf.protocol == ATA_PROT_PIO) {
  4726. /* PIO data out protocol.
  4727. * send first data block.
  4728. */
  4729. /* ata_pio_sectors() might change the state
  4730. * to HSM_ST_LAST. so, the state is changed here
  4731. * before ata_pio_sectors().
  4732. */
  4733. ap->hsm_task_state = HSM_ST;
  4734. ata_pio_sectors(qc);
  4735. } else
  4736. /* send CDB */
  4737. atapi_send_cdb(ap, qc);
  4738. if (in_wq)
  4739. spin_unlock_irqrestore(ap->lock, flags);
  4740. /* if polling, ata_pio_task() handles the rest.
  4741. * otherwise, interrupt handler takes over from here.
  4742. */
  4743. break;
  4744. case HSM_ST:
  4745. /* complete command or read/write the data register */
  4746. if (qc->tf.protocol == ATA_PROT_ATAPI) {
  4747. /* ATAPI PIO protocol */
  4748. if ((status & ATA_DRQ) == 0) {
  4749. /* No more data to transfer or device error.
  4750. * Device error will be tagged in HSM_ST_LAST.
  4751. */
  4752. ap->hsm_task_state = HSM_ST_LAST;
  4753. goto fsm_start;
  4754. }
  4755. /* Device should not ask for data transfer (DRQ=1)
  4756. * when it finds something wrong.
  4757. * We ignore DRQ here and stop the HSM by
  4758. * changing hsm_task_state to HSM_ST_ERR and
  4759. * let the EH abort the command or reset the device.
  4760. */
  4761. if (unlikely(status & (ATA_ERR | ATA_DF))) {
  4762. ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
  4763. "device error, dev_stat 0x%X\n",
  4764. status);
  4765. qc->err_mask |= AC_ERR_HSM;
  4766. ap->hsm_task_state = HSM_ST_ERR;
  4767. goto fsm_start;
  4768. }
  4769. atapi_pio_bytes(qc);
  4770. if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
  4771. /* bad ireason reported by device */
  4772. goto fsm_start;
  4773. } else {
  4774. /* ATA PIO protocol */
  4775. if (unlikely((status & ATA_DRQ) == 0)) {
  4776. /* handle BSY=0, DRQ=0 as error */
  4777. if (likely(status & (ATA_ERR | ATA_DF)))
  4778. /* device stops HSM for abort/error */
  4779. qc->err_mask |= AC_ERR_DEV;
  4780. else
  4781. /* HSM violation. Let EH handle this.
  4782. * Phantom devices also trigger this
  4783. * condition. Mark hint.
  4784. */
  4785. qc->err_mask |= AC_ERR_HSM |
  4786. AC_ERR_NODEV_HINT;
  4787. ap->hsm_task_state = HSM_ST_ERR;
  4788. goto fsm_start;
  4789. }
  4790. /* For PIO reads, some devices may ask for
4791. * data transfer (DRQ=1) along with ERR=1.
  4792. * We respect DRQ here and transfer one
  4793. * block of junk data before changing the
  4794. * hsm_task_state to HSM_ST_ERR.
  4795. *
  4796. * For PIO writes, ERR=1 DRQ=1 doesn't make
  4797. * sense since the data block has been
  4798. * transferred to the device.
  4799. */
  4800. if (unlikely(status & (ATA_ERR | ATA_DF))) {
4801. /* data might be corrupted */
  4802. qc->err_mask |= AC_ERR_DEV;
  4803. if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
  4804. ata_pio_sectors(qc);
  4805. status = ata_wait_idle(ap);
  4806. }
  4807. if (status & (ATA_BUSY | ATA_DRQ))
  4808. qc->err_mask |= AC_ERR_HSM;
  4809. /* ata_pio_sectors() might change the
  4810. * state to HSM_ST_LAST. so, the state
  4811. * is changed after ata_pio_sectors().
  4812. */
  4813. ap->hsm_task_state = HSM_ST_ERR;
  4814. goto fsm_start;
  4815. }
  4816. ata_pio_sectors(qc);
  4817. if (ap->hsm_task_state == HSM_ST_LAST &&
  4818. (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
  4819. /* all data read */
  4820. status = ata_wait_idle(ap);
  4821. goto fsm_start;
  4822. }
  4823. }
  4824. poll_next = 1;
  4825. break;
  4826. case HSM_ST_LAST:
  4827. if (unlikely(!ata_ok(status))) {
  4828. qc->err_mask |= __ac_err_mask(status);
  4829. ap->hsm_task_state = HSM_ST_ERR;
  4830. goto fsm_start;
  4831. }
  4832. /* no more data to transfer */
  4833. DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
  4834. ap->print_id, qc->dev->devno, status);
  4835. WARN_ON(qc->err_mask);
  4836. ap->hsm_task_state = HSM_ST_IDLE;
  4837. /* complete taskfile transaction */
  4838. ata_hsm_qc_complete(qc, in_wq);
  4839. poll_next = 0;
  4840. break;
  4841. case HSM_ST_ERR:
  4842. /* make sure qc->err_mask is available to
  4843. * know what's wrong and recover
  4844. */
  4845. WARN_ON(qc->err_mask == 0);
  4846. ap->hsm_task_state = HSM_ST_IDLE;
  4847. /* complete taskfile transaction */
  4848. ata_hsm_qc_complete(qc, in_wq);
  4849. poll_next = 0;
  4850. break;
  4851. default:
  4852. poll_next = 0;
  4853. BUG();
  4854. }
  4855. return poll_next;
  4856. }
  4857. static void ata_pio_task(struct work_struct *work)
  4858. {
  4859. struct ata_port *ap =
  4860. container_of(work, struct ata_port, port_task.work);
  4861. struct ata_queued_cmd *qc = ap->port_task_data;
  4862. u8 status;
  4863. int poll_next;
  4864. fsm_start:
  4865. WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
  4866. /*
  4867. * This is purely heuristic. This is a fast path.
  4868. * Sometimes when we enter, BSY will be cleared in
  4869. * a chk-status or two. If not, the drive is probably seeking
  4870. * or something. Snooze for a couple msecs, then
  4871. * chk-status again. If still busy, queue delayed work.
  4872. */
  4873. status = ata_busy_wait(ap, ATA_BUSY, 5);
  4874. if (status & ATA_BUSY) {
  4875. msleep(2);
  4876. status = ata_busy_wait(ap, ATA_BUSY, 10);
  4877. if (status & ATA_BUSY) {
  4878. ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
  4879. return;
  4880. }
  4881. }
  4882. /* move the HSM */
  4883. poll_next = ata_hsm_move(ap, qc, status, 1);
  4884. /* another command or interrupt handler
  4885. * may be running at this point.
  4886. */
  4887. if (poll_next)
  4888. goto fsm_start;
  4889. }
  4890. /**
  4891. * ata_qc_new - Request an available ATA command, for queueing
4892. * @ap: Port from which an available command structure is requested
4893. * (the last tag is reserved for the internal command)
  4894. *
  4895. * LOCKING:
  4896. * None.
  4897. */
  4898. static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
  4899. {
  4900. struct ata_queued_cmd *qc = NULL;
  4901. unsigned int i;
  4902. /* no command while frozen */
  4903. if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
  4904. return NULL;
  4905. /* the last tag is reserved for internal command. */
  4906. for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
  4907. if (!test_and_set_bit(i, &ap->qc_allocated)) {
  4908. qc = __ata_qc_from_tag(ap, i);
  4909. break;
  4910. }
  4911. if (qc)
  4912. qc->tag = i;
  4913. return qc;
  4914. }
  4915. /**
  4916. * ata_qc_new_init - Request an available ATA command, and initialize it
  4917. * @dev: Device from whom we request an available command structure
  4918. *
  4919. * LOCKING:
  4920. * None.
  4921. */
  4922. struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
  4923. {
  4924. struct ata_port *ap = dev->link->ap;
  4925. struct ata_queued_cmd *qc;
  4926. qc = ata_qc_new(ap);
  4927. if (qc) {
  4928. qc->scsicmd = NULL;
  4929. qc->ap = ap;
  4930. qc->dev = dev;
  4931. ata_qc_reinit(qc);
  4932. }
  4933. return qc;
  4934. }
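/*
 * Illustrative sketch (not in the original source): the usual allocation
 * pattern in the submission paths is
 *
 *	qc = ata_qc_new_init(dev);
 *	if (qc == NULL)
 *		return error;	// out of tags or port frozen
 *	(... fill qc->tf, qc->flags, set up the s/g table ...)
 *	ata_qc_issue(qc);
 *
 * Only ATA_MAX_QUEUE - 1 tags are available to normal commands; the last tag
 * is reserved for the internal command.
 */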
  4935. /**
  4936. * ata_qc_free - free unused ata_queued_cmd
  4937. * @qc: Command to complete
  4938. *
  4939. * Designed to free unused ata_queued_cmd object
  4940. * in case something prevents using it.
  4941. *
  4942. * LOCKING:
  4943. * spin_lock_irqsave(host lock)
  4944. */
  4945. void ata_qc_free(struct ata_queued_cmd *qc)
  4946. {
  4947. struct ata_port *ap = qc->ap;
  4948. unsigned int tag;
  4949. WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
  4950. qc->flags = 0;
  4951. tag = qc->tag;
  4952. if (likely(ata_tag_valid(tag))) {
  4953. qc->tag = ATA_TAG_POISON;
  4954. clear_bit(tag, &ap->qc_allocated);
  4955. }
  4956. }
  4957. void __ata_qc_complete(struct ata_queued_cmd *qc)
  4958. {
  4959. struct ata_port *ap = qc->ap;
  4960. struct ata_link *link = qc->dev->link;
  4961. WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
  4962. WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
  4963. if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
  4964. ata_sg_clean(qc);
  4965. /* command should be marked inactive atomically with qc completion */
  4966. if (qc->tf.protocol == ATA_PROT_NCQ) {
  4967. link->sactive &= ~(1 << qc->tag);
  4968. if (!link->sactive)
  4969. ap->nr_active_links--;
  4970. } else {
  4971. link->active_tag = ATA_TAG_POISON;
  4972. ap->nr_active_links--;
  4973. }
  4974. /* clear exclusive status */
  4975. if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
  4976. ap->excl_link == link))
  4977. ap->excl_link = NULL;
  4978. /* atapi: mark qc as inactive to prevent the interrupt handler
  4979. * from completing the command twice later, before the error handler
  4980. * is called. (when rc != 0 and atapi request sense is needed)
  4981. */
  4982. qc->flags &= ~ATA_QCFLAG_ACTIVE;
  4983. ap->qc_active &= ~(1 << qc->tag);
  4984. /* call completion callback */
  4985. qc->complete_fn(qc);
  4986. }
  4987. static void fill_result_tf(struct ata_queued_cmd *qc)
  4988. {
  4989. struct ata_port *ap = qc->ap;
  4990. qc->result_tf.flags = qc->tf.flags;
  4991. ap->ops->tf_read(ap, &qc->result_tf);
  4992. }
  4993. /**
  4994. * ata_qc_complete - Complete an active ATA command
  4995. * @qc: Command to complete
  4997. *
  4998. * Indicate to the mid and upper layers that an ATA
  4999. * command has completed, with either an ok or not-ok status.
  5000. *
  5001. * LOCKING:
  5002. * spin_lock_irqsave(host lock)
  5003. */
  5004. void ata_qc_complete(struct ata_queued_cmd *qc)
  5005. {
  5006. struct ata_port *ap = qc->ap;
  5007. /* XXX: New EH and old EH use different mechanisms to
  5008. * synchronize EH with regular execution path.
  5009. *
  5010. * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
  5011. * Normal execution path is responsible for not accessing a
  5012. * failed qc. libata core enforces the rule by returning NULL
  5013. * from ata_qc_from_tag() for failed qcs.
  5014. *
  5015. * Old EH depends on ata_qc_complete() nullifying completion
  5016. * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
  5017. * not synchronize with interrupt handler. Only PIO task is
  5018. * taken care of.
  5019. */
  5020. if (ap->ops->error_handler) {
  5021. struct ata_device *dev = qc->dev;
  5022. struct ata_eh_info *ehi = &dev->link->eh_info;
  5023. WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
  5024. if (unlikely(qc->err_mask))
  5025. qc->flags |= ATA_QCFLAG_FAILED;
  5026. if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
  5027. if (!ata_tag_internal(qc->tag)) {
  5028. /* always fill result TF for failed qc */
  5029. fill_result_tf(qc);
  5030. ata_qc_schedule_eh(qc);
  5031. return;
  5032. }
  5033. }
  5034. /* read result TF if requested */
  5035. if (qc->flags & ATA_QCFLAG_RESULT_TF)
  5036. fill_result_tf(qc);
  5037. /* Some commands need post-processing after successful
  5038. * completion.
  5039. */
  5040. switch (qc->tf.command) {
  5041. case ATA_CMD_SET_FEATURES:
  5042. if (qc->tf.feature != SETFEATURES_WC_ON &&
  5043. qc->tf.feature != SETFEATURES_WC_OFF)
  5044. break;
  5045. /* fall through */
  5046. case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
  5047. case ATA_CMD_SET_MULTI: /* multi_count changed */
  5048. /* revalidate device */
  5049. ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
  5050. ata_port_schedule_eh(ap);
  5051. break;
  5052. case ATA_CMD_SLEEP:
  5053. dev->flags |= ATA_DFLAG_SLEEPING;
  5054. break;
  5055. }
  5056. __ata_qc_complete(qc);
  5057. } else {
  5058. if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
  5059. return;
  5060. /* read result TF if failed or requested */
  5061. if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
  5062. fill_result_tf(qc);
  5063. __ata_qc_complete(qc);
  5064. }
  5065. }
  5066. /**
  5067. * ata_qc_complete_multiple - Complete multiple qcs successfully
  5068. * @ap: port in question
  5069. * @qc_active: new qc_active mask
  5070. * @finish_qc: LLDD callback invoked before completing a qc
  5071. *
5072. * Complete in-flight commands. This function is meant to be
  5073. * called from low-level driver's interrupt routine to complete
5074. * requests normally. ap->qc_active and @qc_active are compared
  5075. * and commands are completed accordingly.
  5076. *
  5077. * LOCKING:
  5078. * spin_lock_irqsave(host lock)
  5079. *
  5080. * RETURNS:
  5081. * Number of completed commands on success, -errno otherwise.
  5082. */
  5083. int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
  5084. void (*finish_qc)(struct ata_queued_cmd *))
  5085. {
  5086. int nr_done = 0;
  5087. u32 done_mask;
  5088. int i;
  5089. done_mask = ap->qc_active ^ qc_active;
  5090. if (unlikely(done_mask & qc_active)) {
  5091. ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
  5092. "(%08x->%08x)\n", ap->qc_active, qc_active);
  5093. return -EINVAL;
  5094. }
  5095. for (i = 0; i < ATA_MAX_QUEUE; i++) {
  5096. struct ata_queued_cmd *qc;
  5097. if (!(done_mask & (1 << i)))
  5098. continue;
  5099. if ((qc = ata_qc_from_tag(ap, i))) {
  5100. if (finish_qc)
  5101. finish_qc(qc);
  5102. ata_qc_complete(qc);
  5103. nr_done++;
  5104. }
  5105. }
  5106. return nr_done;
  5107. }
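/*
 * Illustrative sketch (not in the original source; port_mmio and the
 * PORT_CMD_ISSUE / PORT_SCR_ACT register names are hypothetical).  An
 * NCQ-capable LLD's interrupt handler typically reads the set of tags that
 * are still active on the controller and hands the delta to this helper:
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *	u32 qc_active;
 *	int rc;
 *
 *	qc_active = readl(port_mmio + PORT_SCR_ACT) |
 *		    readl(port_mmio + PORT_CMD_ISSUE);
 *	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
 *	if (rc < 0) {
 *		ehi->err_mask |= AC_ERR_HSM;
 *		ata_port_freeze(ap);
 *	}
 */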
  5108. static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
  5109. {
  5110. struct ata_port *ap = qc->ap;
  5111. switch (qc->tf.protocol) {
  5112. case ATA_PROT_NCQ:
  5113. case ATA_PROT_DMA:
  5114. case ATA_PROT_ATAPI_DMA:
  5115. return 1;
  5116. case ATA_PROT_ATAPI:
  5117. case ATA_PROT_PIO:
  5118. if (ap->flags & ATA_FLAG_PIO_DMA)
  5119. return 1;
  5120. /* fall through */
  5121. default:
  5122. return 0;
  5123. }
  5124. /* never reached */
  5125. }
  5126. /**
  5127. * ata_qc_issue - issue taskfile to device
  5128. * @qc: command to issue to device
  5129. *
5130. * Prepare an ATA command for submission to the device.
  5131. * This includes mapping the data into a DMA-able
  5132. * area, filling in the S/G table, and finally
  5133. * writing the taskfile to hardware, starting the command.
  5134. *
  5135. * LOCKING:
  5136. * spin_lock_irqsave(host lock)
  5137. */
  5138. void ata_qc_issue(struct ata_queued_cmd *qc)
  5139. {
  5140. struct ata_port *ap = qc->ap;
  5141. struct ata_link *link = qc->dev->link;
  5142. /* Make sure only one non-NCQ command is outstanding. The
  5143. * check is skipped for old EH because it reuses active qc to
  5144. * request ATAPI sense.
  5145. */
  5146. WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
  5147. if (qc->tf.protocol == ATA_PROT_NCQ) {
  5148. WARN_ON(link->sactive & (1 << qc->tag));
  5149. if (!link->sactive)
  5150. ap->nr_active_links++;
  5151. link->sactive |= 1 << qc->tag;
  5152. } else {
  5153. WARN_ON(link->sactive);
  5154. ap->nr_active_links++;
  5155. link->active_tag = qc->tag;
  5156. }
  5157. qc->flags |= ATA_QCFLAG_ACTIVE;
  5158. ap->qc_active |= 1 << qc->tag;
  5159. if (ata_should_dma_map(qc)) {
  5160. if (qc->flags & ATA_QCFLAG_SG) {
  5161. if (ata_sg_setup(qc))
  5162. goto sg_err;
  5163. } else if (qc->flags & ATA_QCFLAG_SINGLE) {
  5164. if (ata_sg_setup_one(qc))
  5165. goto sg_err;
  5166. }
  5167. } else {
  5168. qc->flags &= ~ATA_QCFLAG_DMAMAP;
  5169. }
  5170. /* if device is sleeping, schedule softreset and abort the link */
  5171. if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
  5172. link->eh_info.action |= ATA_EH_SOFTRESET;
  5173. ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
  5174. ata_link_abort(link);
  5175. return;
  5176. }
  5177. ap->ops->qc_prep(qc);
  5178. qc->err_mask |= ap->ops->qc_issue(qc);
  5179. if (unlikely(qc->err_mask))
  5180. goto err;
  5181. return;
  5182. sg_err:
  5183. qc->flags &= ~ATA_QCFLAG_DMAMAP;
  5184. qc->err_mask |= AC_ERR_SYSTEM;
  5185. err:
  5186. ata_qc_complete(qc);
  5187. }
  5188. /**
  5189. * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
  5190. * @qc: command to issue to device
  5191. *
  5192. * Using various libata functions and hooks, this function
  5193. * starts an ATA command. ATA commands are grouped into
  5194. * classes called "protocols", and issuing each type of protocol
  5195. * is slightly different.
  5196. *
  5197. * May be used as the qc_issue() entry in ata_port_operations.
  5198. *
  5199. * LOCKING:
  5200. * spin_lock_irqsave(host lock)
  5201. *
  5202. * RETURNS:
  5203. * Zero on success, AC_ERR_* mask on failure
  5204. */
  5205. unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
  5206. {
  5207. struct ata_port *ap = qc->ap;
  5208. /* Use polling pio if the LLD doesn't handle
  5209. * interrupt driven pio and atapi CDB interrupt.
  5210. */
  5211. if (ap->flags & ATA_FLAG_PIO_POLLING) {
  5212. switch (qc->tf.protocol) {
  5213. case ATA_PROT_PIO:
  5214. case ATA_PROT_NODATA:
  5215. case ATA_PROT_ATAPI:
  5216. case ATA_PROT_ATAPI_NODATA:
  5217. qc->tf.flags |= ATA_TFLAG_POLLING;
  5218. break;
  5219. case ATA_PROT_ATAPI_DMA:
  5220. if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
  5221. /* see ata_dma_blacklisted() */
  5222. BUG();
  5223. break;
  5224. default:
  5225. break;
  5226. }
  5227. }
  5228. /* select the device */
  5229. ata_dev_select(ap, qc->dev->devno, 1, 0);
  5230. /* start the command */
  5231. switch (qc->tf.protocol) {
  5232. case ATA_PROT_NODATA:
  5233. if (qc->tf.flags & ATA_TFLAG_POLLING)
  5234. ata_qc_set_polling(qc);
  5235. ata_tf_to_host(ap, &qc->tf);
  5236. ap->hsm_task_state = HSM_ST_LAST;
  5237. if (qc->tf.flags & ATA_TFLAG_POLLING)
  5238. ata_port_queue_task(ap, ata_pio_task, qc, 0);
  5239. break;
  5240. case ATA_PROT_DMA:
  5241. WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
  5242. ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
  5243. ap->ops->bmdma_setup(qc); /* set up bmdma */
  5244. ap->ops->bmdma_start(qc); /* initiate bmdma */
  5245. ap->hsm_task_state = HSM_ST_LAST;
  5246. break;
  5247. case ATA_PROT_PIO:
  5248. if (qc->tf.flags & ATA_TFLAG_POLLING)
  5249. ata_qc_set_polling(qc);
  5250. ata_tf_to_host(ap, &qc->tf);
  5251. if (qc->tf.flags & ATA_TFLAG_WRITE) {
  5252. /* PIO data out protocol */
  5253. ap->hsm_task_state = HSM_ST_FIRST;
  5254. ata_port_queue_task(ap, ata_pio_task, qc, 0);
  5255. /* always send first data block using
  5256. * the ata_pio_task() codepath.
  5257. */
  5258. } else {
  5259. /* PIO data in protocol */
  5260. ap->hsm_task_state = HSM_ST;
  5261. if (qc->tf.flags & ATA_TFLAG_POLLING)
  5262. ata_port_queue_task(ap, ata_pio_task, qc, 0);
  5263. /* if polling, ata_pio_task() handles the rest.
  5264. * otherwise, interrupt handler takes over from here.
  5265. */
  5266. }
  5267. break;
  5268. case ATA_PROT_ATAPI:
  5269. case ATA_PROT_ATAPI_NODATA:
  5270. if (qc->tf.flags & ATA_TFLAG_POLLING)
  5271. ata_qc_set_polling(qc);
  5272. ata_tf_to_host(ap, &qc->tf);
  5273. ap->hsm_task_state = HSM_ST_FIRST;
  5274. /* send cdb by polling if no cdb interrupt */
  5275. if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
  5276. (qc->tf.flags & ATA_TFLAG_POLLING))
  5277. ata_port_queue_task(ap, ata_pio_task, qc, 0);
  5278. break;
  5279. case ATA_PROT_ATAPI_DMA:
  5280. WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
  5281. ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
  5282. ap->ops->bmdma_setup(qc); /* set up bmdma */
  5283. ap->hsm_task_state = HSM_ST_FIRST;
  5284. /* send cdb by polling if no cdb interrupt */
  5285. if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
  5286. ata_port_queue_task(ap, ata_pio_task, qc, 0);
  5287. break;
  5288. default:
  5289. WARN_ON(1);
  5290. return AC_ERR_SYSTEM;
  5291. }
  5292. return 0;
  5293. }
  5294. /**
  5295. * ata_host_intr - Handle host interrupt for given (port, task)
  5296. * @ap: Port on which interrupt arrived (possibly...)
  5297. * @qc: Taskfile currently active in engine
  5298. *
  5299. * Handle host interrupt for given queued command. Currently,
  5300. * only DMA interrupts are handled. All other commands are
  5301. * handled via polling with interrupts disabled (nIEN bit).
  5302. *
  5303. * LOCKING:
  5304. * spin_lock_irqsave(host lock)
  5305. *
  5306. * RETURNS:
  5307. * One if interrupt was handled, zero if not (shared irq).
  5308. */
  5309. inline unsigned int ata_host_intr(struct ata_port *ap,
  5310. struct ata_queued_cmd *qc)
  5311. {
  5312. struct ata_eh_info *ehi = &ap->link.eh_info;
  5313. u8 status, host_stat = 0;
  5314. VPRINTK("ata%u: protocol %d task_state %d\n",
  5315. ap->print_id, qc->tf.protocol, ap->hsm_task_state);
  5316. /* Check whether we are expecting interrupt in this state */
  5317. switch (ap->hsm_task_state) {
  5318. case HSM_ST_FIRST:
  5319. /* Some pre-ATAPI-4 devices assert INTRQ
  5320. * at this state when ready to receive CDB.
  5321. */
5322. /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
  5323. * The flag was turned on only for atapi devices.
  5324. * No need to check is_atapi_taskfile(&qc->tf) again.
  5325. */
  5326. if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
  5327. goto idle_irq;
  5328. break;
  5329. case HSM_ST_LAST:
  5330. if (qc->tf.protocol == ATA_PROT_DMA ||
  5331. qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
  5332. /* check status of DMA engine */
  5333. host_stat = ap->ops->bmdma_status(ap);
  5334. VPRINTK("ata%u: host_stat 0x%X\n",
  5335. ap->print_id, host_stat);
  5336. /* if it's not our irq... */
  5337. if (!(host_stat & ATA_DMA_INTR))
  5338. goto idle_irq;
  5339. /* before we do anything else, clear DMA-Start bit */
  5340. ap->ops->bmdma_stop(qc);
  5341. if (unlikely(host_stat & ATA_DMA_ERR)) {
5342. /* error when transferring data to/from memory */
  5343. qc->err_mask |= AC_ERR_HOST_BUS;
  5344. ap->hsm_task_state = HSM_ST_ERR;
  5345. }
  5346. }
  5347. break;
  5348. case HSM_ST:
  5349. break;
  5350. default:
  5351. goto idle_irq;
  5352. }
  5353. /* check altstatus */
  5354. status = ata_altstatus(ap);
  5355. if (status & ATA_BUSY)
  5356. goto idle_irq;
  5357. /* check main status, clearing INTRQ */
  5358. status = ata_chk_status(ap);
  5359. if (unlikely(status & ATA_BUSY))
  5360. goto idle_irq;
  5361. /* ack bmdma irq events */
  5362. ap->ops->irq_clear(ap);
  5363. ata_hsm_move(ap, qc, status, 0);
  5364. if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
  5365. qc->tf.protocol == ATA_PROT_ATAPI_DMA))
  5366. ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
  5367. return 1; /* irq handled */
  5368. idle_irq:
  5369. ap->stats.idle_irq++;
  5370. #ifdef ATA_IRQ_TRAP
  5371. if ((ap->stats.idle_irq % 1000) == 0) {
  5372. ata_chk_status(ap);
  5373. ap->ops->irq_clear(ap);
  5374. ata_port_printk(ap, KERN_WARNING, "irq trap\n");
  5375. return 1;
  5376. }
  5377. #endif
  5378. return 0; /* irq not handled */
  5379. }
  5380. /**
  5381. * ata_interrupt - Default ATA host interrupt handler
  5382. * @irq: irq line (unused)
  5383. * @dev_instance: pointer to our ata_host information structure
  5384. *
  5385. * Default interrupt handler for PCI IDE devices. Calls
  5386. * ata_host_intr() for each port that is not disabled.
  5387. *
  5388. * LOCKING:
  5389. * Obtains host lock during operation.
  5390. *
  5391. * RETURNS:
  5392. * IRQ_NONE or IRQ_HANDLED.
  5393. */
  5394. irqreturn_t ata_interrupt(int irq, void *dev_instance)
  5395. {
  5396. struct ata_host *host = dev_instance;
  5397. unsigned int i;
  5398. unsigned int handled = 0;
  5399. unsigned long flags;
  5400. /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
  5401. spin_lock_irqsave(&host->lock, flags);
  5402. for (i = 0; i < host->n_ports; i++) {
  5403. struct ata_port *ap;
  5404. ap = host->ports[i];
  5405. if (ap &&
  5406. !(ap->flags & ATA_FLAG_DISABLED)) {
  5407. struct ata_queued_cmd *qc;
  5408. qc = ata_qc_from_tag(ap, ap->link.active_tag);
  5409. if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
  5410. (qc->flags & ATA_QCFLAG_ACTIVE))
  5411. handled |= ata_host_intr(ap, qc);
  5412. }
  5413. }
  5414. spin_unlock_irqrestore(&host->lock, flags);
  5415. return IRQ_RETVAL(handled);
  5416. }
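/*
 * Illustrative usage (not in the original source; my_sht is a hypothetical
 * scsi_host_template).  Drivers that rely on this default handler register
 * it when activating the host:
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_interrupt,
 *			       IRQF_SHARED, &my_sht);
 */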
  5417. /**
  5418. * sata_scr_valid - test whether SCRs are accessible
  5419. * @link: ATA link to test SCR accessibility for
  5420. *
  5421. * Test whether SCRs are accessible for @link.
  5422. *
  5423. * LOCKING:
  5424. * None.
  5425. *
  5426. * RETURNS:
  5427. * 1 if SCRs are accessible, 0 otherwise.
  5428. */
  5429. int sata_scr_valid(struct ata_link *link)
  5430. {
  5431. struct ata_port *ap = link->ap;
  5432. return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
  5433. }
  5434. /**
  5435. * sata_scr_read - read SCR register of the specified port
  5436. * @link: ATA link to read SCR for
  5437. * @reg: SCR to read
  5438. * @val: Place to store read value
  5439. *
  5440. * Read SCR register @reg of @link into *@val. This function is
  5441. * guaranteed to succeed if @link is ap->link, the cable type of
  5442. * the port is SATA and the port implements ->scr_read.
  5443. *
  5444. * LOCKING:
  5445. * None if @link is ap->link. Kernel thread context otherwise.
  5446. *
  5447. * RETURNS:
  5448. * 0 on success, negative errno on failure.
  5449. */
  5450. int sata_scr_read(struct ata_link *link, int reg, u32 *val)
  5451. {
  5452. if (ata_is_host_link(link)) {
  5453. struct ata_port *ap = link->ap;
  5454. if (sata_scr_valid(link))
  5455. return ap->ops->scr_read(ap, reg, val);
  5456. return -EOPNOTSUPP;
  5457. }
  5458. return sata_pmp_scr_read(link, reg, val);
  5459. }
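/*
 * Illustrative usage (not in the original source): reading the current link
 * speed out of SStatus, e.g.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		ata_link_printk(link, KERN_INFO, "SStatus 0x%x (SPD %u)\n",
 *				sstatus, (sstatus >> 4) & 0xf);
 */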
  5460. /**
  5461. * sata_scr_write - write SCR register of the specified port
  5462. * @link: ATA link to write SCR for
  5463. * @reg: SCR to write
  5464. * @val: value to write
  5465. *
  5466. * Write @val to SCR register @reg of @link. This function is
  5467. * guaranteed to succeed if @link is ap->link, the cable type of
5468. * the port is SATA and the port implements ->scr_write.
  5469. *
  5470. * LOCKING:
  5471. * None if @link is ap->link. Kernel thread context otherwise.
  5472. *
  5473. * RETURNS:
  5474. * 0 on success, negative errno on failure.
  5475. */
  5476. int sata_scr_write(struct ata_link *link, int reg, u32 val)
  5477. {
  5478. if (ata_is_host_link(link)) {
  5479. struct ata_port *ap = link->ap;
  5480. if (sata_scr_valid(link))
  5481. return ap->ops->scr_write(ap, reg, val);
  5482. return -EOPNOTSUPP;
  5483. }
  5484. return sata_pmp_scr_write(link, reg, val);
  5485. }
  5486. /**
  5487. * sata_scr_write_flush - write SCR register of the specified port and flush
  5488. * @link: ATA link to write SCR for
  5489. * @reg: SCR to write
  5490. * @val: value to write
  5491. *
  5492. * This function is identical to sata_scr_write() except that this
  5493. * function performs flush after writing to the register.
  5494. *
  5495. * LOCKING:
  5496. * None if @link is ap->link. Kernel thread context otherwise.
  5497. *
  5498. * RETURNS:
  5499. * 0 on success, negative errno on failure.
  5500. */
  5501. int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
  5502. {
  5503. if (ata_is_host_link(link)) {
  5504. struct ata_port *ap = link->ap;
  5505. int rc;
  5506. if (sata_scr_valid(link)) {
  5507. rc = ap->ops->scr_write(ap, reg, val);
  5508. if (rc == 0)
  5509. rc = ap->ops->scr_read(ap, reg, &val);
  5510. return rc;
  5511. }
  5512. return -EOPNOTSUPP;
  5513. }
  5514. return sata_pmp_scr_write(link, reg, val);
  5515. }

/**
 *	ata_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  Note that this function returns
 *	0 if online status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_link_online(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  Note that this function
 *	returns 0 if offline status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}

int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
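
/*
 * Illustrative sketch only: the in-function comment above suggests retrying
 * the flush so writeback can continue past a bad sector.  A hypothetical,
 * unapplied fix could bound the retries roughly like this (AC_ERR_INVALID
 * is assumed here to stand for the "aborted as unknown" case):
 *
 *	int tries = 5;
 *
 *	do {
 *		err_mask = ata_do_simple_cmd(dev, cmd);
 *	} while (err_mask && !(err_mask & AC_ERR_INVALID) && --tries);
 */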

#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
#endif
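
/*
 * Illustrative sketch only (hypothetical LLD code): a non-PCI driver would
 * typically wire the PM helpers above into its bus suspend callback,
 * roughly:
 *
 *	static int my_platform_suspend(struct platform_device *pdev,
 *				       pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		return ata_host_suspend(host, mesg);
 *	}
 *
 * with the matching resume hook re-initializing the controller and then
 * calling ata_host_resume(host).
 */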

/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}

/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
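
/*
 * Worked example for the mask arithmetic above: if the SControl SPD field
 * reads 2 ("limit to Gen2"), then (1 << 2) - 1 = 0x3, so hw_sata_spd_limit
 * keeps only bit 0 (1.5 Gbps) and bit 1 (3.0 Gbps).  An SPD field of 0
 * means "no restriction" and leaves the limit untouched.
 */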

/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}

static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}

/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  An LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
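
/*
 * Illustrative sketch only (hypothetical LLD code): a driver that only
 * learns its real port count while probing can over-allocate and then
 * shrink n_ports before registration, as described above.  MY_MAX_PORTS
 * and ports_found are made-up names:
 *
 *	host = ata_host_alloc(&pdev->dev, MY_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *	...probe the hardware and set up host->ports[i]...
 *	host->n_ports = ports_found;
 *
 * ata_host_register() later frees the ports beyond n_ports.
 */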

/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
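
/*
 * Illustrative sketch only: the common pattern is a NULL-terminated @ppi
 * array, so one port_info can cover every port of a controller.  Assuming
 * a hypothetical "my_port_info":
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *
 * With the trailing NULL, the last real entry is reused for all remaining
 * ports, exactly as documented above.
 */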

static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}

/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}

/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}

/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
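
/*
 * Illustrative sketch only (hypothetical LLD code): a minimal PCI driver's
 * probe routine typically ends with ata_host_activate().  my_port_info,
 * my_interrupt and my_sht are made-up names:
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	...map BARs and fill host->ports[i]->ioaddr...
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */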

/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Disable all existing devices.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
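
/*
 * Illustrative sketch only: an LLD normally points cmd_addr at its mapped
 * command block and lets ata_std_ports() derive the rest; the registers
 * this helper does not touch are filled in by hand.  "mmio" and the
 * MY_*_OFFSET names are hypothetical:
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = mmio + MY_CMD_OFFSET;
 *	ioaddr->ctl_addr = mmio + MY_CTL_OFFSET;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr;
 *	ata_std_ports(ioaddr);
 */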

#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
  6301. int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
  6302. {
  6303. unsigned long tmp = 0;
  6304. switch (bits->width) {
  6305. case 1: {
  6306. u8 tmp8 = 0;
  6307. pci_read_config_byte(pdev, bits->reg, &tmp8);
  6308. tmp = tmp8;
  6309. break;
  6310. }
  6311. case 2: {
  6312. u16 tmp16 = 0;
  6313. pci_read_config_word(pdev, bits->reg, &tmp16);
  6314. tmp = tmp16;
  6315. break;
  6316. }
  6317. case 4: {
  6318. u32 tmp32 = 0;
  6319. pci_read_config_dword(pdev, bits->reg, &tmp32);
  6320. tmp = tmp32;
  6321. break;
  6322. }
  6323. default:
  6324. return -EINVAL;
  6325. }
  6326. tmp &= bits->mask;
  6327. return (tmp == bits->val) ? 1 : 0;
  6328. }
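
/*
 * Illustrative sketch only: PATA drivers commonly use this helper in their
 * prereset hook to test a "port enabled" bit in PCI config space.  The
 * register, bit and name below are hypothetical:
 *
 *	static const struct pci_bits my_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };	reg, width, mask, val
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;			port is wired off
 */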

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */

static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
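
/*
 * Illustrative usage: callers gate noisy per-command messages on the helper
 * above, which allows at most one pass every HZ/5 jiffies (roughly five
 * messages per second):
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"something suspicious happened\n");
 */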

/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
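
/*
 * Illustrative sketch only: a typical caller waits for a bit to drop, so it
 * passes the bit both as @mask and @val and the loop runs while the
 * register still matches.  MY_PORT_CMD and MY_CMD_BUSY are hypothetical;
 * this polls every 10 ms for up to 500 ms:
 *
 *	tmp = ata_wait_register(port_mmio + MY_PORT_CMD,
 *				MY_CMD_BUSY, MY_CMD_BUSY, 10, 500);
 *	if (tmp & MY_CMD_BUSY)
 *		return -EBUSY;			timed out, bit never cleared
 */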

/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);