/* aic7xxx_osm.c */
  1. /*
  2. * Adaptec AIC7xxx device driver for Linux.
  3. *
  4. * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $
  5. *
  6. * Copyright (c) 1994 John Aycock
  7. * The University of Calgary Department of Computer Science.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2, or (at your option)
  12. * any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; see the file COPYING. If not, write to
  21. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  22. *
  23. * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
  24. * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
  25. * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
  26. * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
  27. * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
  28. * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
  29. * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
  30. * ANSI SCSI-2 specification (draft 10c), ...
  31. *
  32. * --------------------------------------------------------------------------
  33. *
  34. * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
  35. *
  36. * Substantially modified to include support for wide and twin bus
  37. * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
  38. * SCB paging, and other rework of the code.
  39. *
  40. * --------------------------------------------------------------------------
  41. * Copyright (c) 1994-2000 Justin T. Gibbs.
  42. * Copyright (c) 2000-2001 Adaptec Inc.
  43. * All rights reserved.
  44. *
  45. * Redistribution and use in source and binary forms, with or without
  46. * modification, are permitted provided that the following conditions
  47. * are met:
  48. * 1. Redistributions of source code must retain the above copyright
  49. * notice, this list of conditions, and the following disclaimer,
  50. * without modification.
  51. * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  52. * substantially similar to the "NO WARRANTY" disclaimer below
  53. * ("Disclaimer") and any redistribution must be conditioned upon
  54. * including a substantially similar Disclaimer requirement for further
  55. * binary redistribution.
  56. * 3. Neither the names of the above-listed copyright holders nor the names
  57. * of any contributors may be used to endorse or promote products derived
  58. * from this software without specific prior written permission.
  59. *
  60. * Alternatively, this software may be distributed under the terms of the
  61. * GNU General Public License ("GPL") version 2 as published by the Free
  62. * Software Foundation.
  63. *
  64. * NO WARRANTY
  65. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  66. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  67. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
  68. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  69. * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  70. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  71. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  72. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  73. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  74. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  75. * POSSIBILITY OF SUCH DAMAGES.
  76. *
  77. *---------------------------------------------------------------------------
  78. *
  79. * Thanks also go to (in alphabetical order) the following:
  80. *
  81. * Rory Bolt - Sequencer bug fixes
  82. * Jay Estabrook - Initial DEC Alpha support
  83. * Doug Ledford - Much needed abort/reset bug fixes
  84. * Kai Makisara - DMAing of SCBs
  85. *
  86. * A Boot time option was also added for not resetting the scsi bus.
  87. *
  88. * Form: aic7xxx=extended
  89. * aic7xxx=no_reset
  90. * aic7xxx=verbose
  91. *
  92. * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
  93. *
  94. * Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp
  95. */
  96. /*
  97. * Further driver modifications made by Doug Ledford <dledford@redhat.com>
  98. *
  99. * Copyright (c) 1997-1999 Doug Ledford
  100. *
  101. * These changes are released under the same licensing terms as the FreeBSD
  102. * driver written by Justin Gibbs. Please see his Copyright notice above
  103. * for the exact terms and conditions covering my changes as well as the
  104. * warranty statement.
  105. *
  106. * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
  107. * but are not limited to:
  108. *
  109. * 1: Import of the latest FreeBSD sequencer code for this driver
  110. * 2: Modification of kernel code to accommodate different sequencer semantics
  111. * 3: Extensive changes throughout kernel portion of driver to improve
  112. * abort/reset processing and error hanndling
  113. * 4: Other work contributed by various people on the Internet
  114. * 5: Changes to printk information and verbosity selection code
  115. * 6: General reliability related changes, especially in IRQ management
  116. * 7: Modifications to the default probe/attach order for supported cards
  117. * 8: SMP friendliness has been improved
  118. *
  119. */
  120. #include "aic7xxx_osm.h"
  121. #include "aic7xxx_inline.h"
  122. #include <scsi/scsicam.h>
  123. #include <scsi/scsi_transport.h>
  124. #include <scsi/scsi_transport_spi.h>
/*
 * SPI transport template shared by all hosts driven by this module.
 * NOTE(review): the assignment (spi_attach_transport or similar) is not
 * visible in this chunk — presumably done at module init; verify.
 */
static struct scsi_transport_template *ahc_linux_transport_template = NULL;
  126. /*
  127. * Include aiclib.c as part of our
  128. * "module dependencies are hard" work around.
  129. */
  130. #include "aiclib.c"
  131. #include <linux/init.h> /* __setup */
  132. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  133. #include "sd.h" /* For geometry detection */
  134. #endif
  135. #include <linux/mm.h> /* For fetching system memory size */
  136. #include <linux/blkdev.h> /* For block_size() */
  137. #include <linux/delay.h> /* For ssleep/msleep */
/*
 * Lock protecting manipulation of the ahc softc list.
 * NOTE(review): spin_lock_init() for this lock is not visible in this
 * chunk — confirm it is initialized before first use at module init.
 */
spinlock_t ahc_list_spinlock;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * For dynamic sglist size calculation on pre-2.5 kernels.
 * NOTE(review): the code that computes this value is outside this chunk.
 */
u_int ahc_linux_nseg;
#endif
/*
 * Delay after the initial SCSI bus reset.  Despite the historical wording
 * of this comment ("seconds"), the value is in milliseconds: the config
 * option is CONFIG_AIC7XXX_RESET_DELAY_MS and the fallback is 5000 (5 s).
 * Note, we honor this only for the initial bus reset.
 * The scsi error recovery code performs its own bus settle
 * delay handling for error recovery actions.
 */
#ifdef CONFIG_AIC7XXX_RESET_DELAY_MS
#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS
#else
#define AIC7XXX_RESET_DELAY 5000
#endif

/*
 * Control collection of SCSI transfer statistics for the /proc filesystem.
 *
 * NOTE: Do NOT enable this when running on kernels version 1.2.x and below.
 * NOTE: This does affect performance since it has to maintain statistics.
 */
#ifdef CONFIG_AIC7XXX_PROC_STATS
#define AIC7XXX_PROC_STATS
#endif
  166. /*
  167. * To change the default number of tagged transactions allowed per-device,
  168. * add a line to the lilo.conf file like:
  169. * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
  170. * which will result in the first four devices on the first two
  171. * controllers being set to a tagged queue depth of 32.
  172. *
  173. * The tag_commands is an array of 16 to allow for wide and twin adapters.
  174. * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
  175. * for channel 1.
  176. */
/*
 * Per-adapter table of tagged-queueing depths, one entry per target ID.
 * Sized 16 to allow for wide and twin adapters: twin adapters use
 * indexes 0-7 for channel 0 and indexes 8-15 for channel 1.
 */
typedef struct {
	uint8_t tag_commands[16];	/* Allow for wide/twin adapters. */
} adapter_tag_info_t;
  180. /*
  181. * Modify this as you see fit for your system.
  182. *
  183. * 0 tagged queuing disabled
  184. * 1 <= n <= 253 n == max tags ever dispatched.
  185. *
  186. * The driver will throttle the number of commands dispatched to a
  187. * device if it returns queue full. For devices with a fixed maximum
  188. * queue depth, the driver will eventually determine this depth and
  189. * lock it in (a console message is printed to indicate that a lock
  190. * has occurred). On some devices, queue full is returned for a temporary
  191. * resource shortage. These devices will return queue full at varying
  192. * depths. The driver will throttle back when the queue fulls occur and
  193. * attempt to slowly increase the depth over time as the device recovers
  194. * from the resource shortage.
  195. *
  196. * In this example, the first line will disable tagged queueing for all
  197. * the devices on the first probed aic7xxx adapter.
  198. *
  199. * The second line enables tagged queueing with 4 commands/LUN for IDs
  200. * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
  201. * driver to attempt to use up to 64 tags for ID 1.
  202. *
  203. * The third line is the same as the first line.
  204. *
  205. * The fourth line disables tagged queueing for devices 0 and 3. It
  206. * enables tagged queueing for the other IDs, with 16 commands/LUN
  207. * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
  208. * IDs 2, 5-7, and 9-15.
  209. */
  210. /*
  211. * NOTE: The below structure is for reference only, the actual structure
  212. * to modify in order to change things is just below this comment block.
  213. adapter_tag_info_t aic7xxx_tag_info[] =
  214. {
  215. {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
  216. {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
  217. {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
  218. {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
  219. };
  220. */
/*
 * Default per-device tag depth: the value from the user's kernel
 * configuration if set, otherwise the driver-wide maximum queue depth.
 */
#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
#else
#define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE
#endif

/* Initializer giving every one of the 16 target slots the default depth. */
#define AIC7XXX_CONFIGED_TAG_COMMANDS {					\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE		\
}

/*
 * By default, use the number of commands specified by
 * the user's kernel configuration.  One row per probed controller
 * (16 rows, so up to 16 controllers get per-device overrides).
 */
static adapter_tag_info_t aic7xxx_tag_info[] =
{
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS}
};
/*
 * DV option:
 *
 * positive value = DV Enabled
 * zero = DV Disabled
 * negative value = DV Default for adapter type/seeprom
 */
#ifdef CONFIG_AIC7XXX_DV_SETTING
#define AIC7XXX_CONFIGED_DV CONFIG_AIC7XXX_DV_SETTING
#else
#define AIC7XXX_CONFIGED_DV -1
#endif

/* One Domain Validation setting per supported controller instance. */
static int8_t aic7xxx_dv_settings[] =
{
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV,
	AIC7XXX_CONFIGED_DV
};
/*
 * There should be a specific return value for this in scsi.h, but
 * it seems that most drivers ignore it.  Map data underruns onto the
 * generic midlayer error code.
 */
#define DID_UNDERFLOW   DID_ERROR
  295. void
  296. ahc_print_path(struct ahc_softc *ahc, struct scb *scb)
  297. {
  298. printk("(scsi%d:%c:%d:%d): ",
  299. ahc->platform_data->host->host_no,
  300. scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X',
  301. scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1,
  302. scb != NULL ? SCB_GET_LUN(scb) : -1);
  303. }
/*
 * XXX - these options apply unilaterally to _all_ 274x/284x/294x
 * cards in the system. This should be fixed. Exceptions to this
 * rule are noted in the comments.
 */

/*
 * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This
 * has no effect on any later resets that might occur due to things like
 * SCSI bus timeouts.
 */
static uint32_t aic7xxx_no_reset;

/*
 * Certain PCI motherboards will scan PCI devices from highest to lowest,
 * others scan from lowest to highest, and they tend to do all kinds of
 * strange things when they come into contact with PCI bridge chips. The
 * net result of all this is that the PCI card that is actually used to boot
 * the machine is very hard to detect. Most motherboards go from lowest
 * PCI slot number to highest, and the first SCSI controller found is the
 * one you boot from. The only exceptions to this are when a controller
 * has its BIOS disabled. So, we by default sort all of our SCSI controllers
 * from lowest PCI slot number to highest PCI slot number. We also force
 * all controllers with their BIOS disabled to the end of the list. This
 * works on *almost* all computers. Where it doesn't work, we have this
 * option. Setting this option to non-0 will reverse the order of the sort
 * to highest first, then lowest, but will still leave cards with their BIOS
 * disabled at the very end. That should fix everyone up unless there are
 * really strange circumstances.
 */
static uint32_t aic7xxx_reverse_scan;

/*
 * Should we force EXTENDED translation on a controller.
 * 0 == Use whatever is in the SEEPROM or default to off
 * 1 == Use whatever is in the SEEPROM or default to on
 */
static uint32_t aic7xxx_extended;

/*
 * PCI bus parity checking of the Adaptec controllers. This is somewhat
 * dubious at best. To my knowledge, this option has never actually
 * solved a PCI parity problem, but on certain machines with broken PCI
 * chipset configurations where stray PCI transactions with bad parity are
 * the norm rather than the exception, the error messages can be overwhelming.
 * It's included in the driver for completeness.
 * 0 = Shut off PCI parity check
 * non-0 = reverse polarity pci parity checking
 */
static uint32_t aic7xxx_pci_parity = ~0;

/*
 * Certain newer motherboards have put new PCI based devices into the
 * IO spaces that used to typically be occupied by VLB or EISA cards.
 * This overlap can cause these newer motherboards to lock up when scanned
 * for older EISA and VLB devices. Setting this option to non-0 will
 * cause the driver to skip scanning for any VLB or EISA controllers and
 * only support the PCI controllers. NOTE: this means that if the kernel
 * is compiled with PCI support disabled, then setting this to non-0
 * would result in never finding any devices :)
 */
#ifndef CONFIG_AIC7XXX_PROBE_EISA_VL
uint32_t aic7xxx_probe_eisa_vl;
#else
uint32_t aic7xxx_probe_eisa_vl = ~0;
#endif

/*
 * There are lots of broken chipsets in the world. Some of them will
 * violate the PCI spec when we issue byte sized memory writes to our
 * controller. I/O mapped register access, if allowed by the given
 * platform, will work in almost all cases.
 */
uint32_t aic7xxx_allow_memio = ~0;

/*
 * aic7xxx_detect() has been run, so register all device arrivals
 * immediately with the system rather than deferring to the sorted
 * attachment performed by aic7xxx_detect().
 */
int aic7xxx_detect_complete;

/*
 * So that we can set how long each device is given as a selection timeout.
 * The table of values goes like this:
 * 0 - 256ms
 * 1 - 128ms
 * 2 - 64ms
 * 3 - 32ms
 * We default to 256ms because some older devices need a longer time
 * to respond to initial selection.
 */
static uint32_t aic7xxx_seltime;

/*
 * Certain devices do not perform any aging on commands. Should the
 * device be saturated by commands in one portion of the disk, it is
 * possible for transactions on far away sectors to never be serviced.
 * To handle these devices, we can periodically send an ordered tag to
 * force all outstanding transactions to be serviced prior to a new
 * transaction.
 */
uint32_t aic7xxx_periodic_otag;
  401. static char *aic7xxx = NULL;
  402. MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
  403. MODULE_DESCRIPTION("Adaptec Aic77XX/78XX SCSI Host Bus Adapter driver");
  404. MODULE_LICENSE("Dual BSD/GPL");
  405. MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
  406. module_param(aic7xxx, charp, 0444);
  407. MODULE_PARM_DESC(aic7xxx,
  408. "period delimited, options string.\n"
  409. " verbose Enable verbose/diagnostic logging\n"
  410. " allow_memio Allow device registers to be memory mapped\n"
  411. " debug Bitmask of debug values to enable\n"
  412. " no_probe Toggle EISA/VLB controller probing\n"
  413. " probe_eisa_vl Toggle EISA/VLB controller probing\n"
  414. " no_reset Supress initial bus resets\n"
  415. " extended Enable extended geometry on all controllers\n"
  416. " periodic_otag Send an ordered tagged transaction\n"
  417. " periodically to prevent tag starvation.\n"
  418. " This may be required by some older disk\n"
  419. " drives or RAID arrays.\n"
  420. " reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
  421. " tag_info:<tag_str> Set per-target tag depth\n"
  422. " global_tag_depth:<int> Global tag depth for every target\n"
  423. " on every bus\n"
  424. " dv:<dv_settings> Set per-controller Domain Validation Setting.\n"
  425. " seltime:<int> Selection Timeout\n"
  426. " (0/256ms,1/128ms,2/64ms,3/32ms)\n"
  427. "\n"
  428. " Sample /etc/modprobe.conf line:\n"
  429. " Toggle EISA/VLB probing\n"
  430. " Set tag depth on Controller 1/Target 1 to 10 tags\n"
  431. " Shorten the selection timeout to 128ms\n"
  432. "\n"
  433. " options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
  434. );
/* Command completion, recovery, and simq freeze/release helpers. */
static void ahc_linux_handle_scsi_status(struct ahc_softc *,
					 struct ahc_linux_device *,
					 struct scb *);
static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
					 Scsi_Cmnd *cmd);
static void ahc_linux_filter_inquiry(struct ahc_softc*, struct ahc_devinfo*);
static void ahc_linux_sem_timeout(u_long arg);
static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
static void ahc_linux_release_simq(u_long arg);
static void ahc_linux_dev_timed_unfreeze(u_long arg);
static int ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
static void ahc_linux_size_nseg(void);
static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);

/* Domain Validation (DV) state machine entry points. */
static void ahc_linux_start_dv(struct ahc_softc *ahc);
static void ahc_linux_dv_timeout(struct scsi_cmnd *cmd);
static int ahc_linux_dv_thread(void *data);
static void ahc_linux_kill_dv_thread(struct ahc_softc *ahc);
static void ahc_linux_dv_target(struct ahc_softc *ahc, u_int target);
static void ahc_linux_dv_transition(struct ahc_softc *ahc,
				    struct scsi_cmnd *cmd,
				    struct ahc_devinfo *devinfo,
				    struct ahc_linux_target *targ);
static void ahc_linux_dv_fill_cmd(struct ahc_softc *ahc,
				  struct scsi_cmnd *cmd,
				  struct ahc_devinfo *devinfo);
static void ahc_linux_dv_inq(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo,
			     struct ahc_linux_target *targ,
			     u_int request_length);
static void ahc_linux_dv_tur(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo);
static void ahc_linux_dv_rebd(struct ahc_softc *ahc,
			      struct scsi_cmnd *cmd,
			      struct ahc_devinfo *devinfo,
			      struct ahc_linux_target *targ);
static void ahc_linux_dv_web(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo,
			     struct ahc_linux_target *targ);
static void ahc_linux_dv_reb(struct ahc_softc *ahc,
			     struct scsi_cmnd *cmd,
			     struct ahc_devinfo *devinfo,
			     struct ahc_linux_target *targ);
static void ahc_linux_dv_su(struct ahc_softc *ahc,
			    struct scsi_cmnd *cmd,
			    struct ahc_devinfo *devinfo,
			    struct ahc_linux_target *targ);
static int ahc_linux_fallback(struct ahc_softc *ahc,
			      struct ahc_devinfo *devinfo);
static void ahc_linux_dv_complete(Scsi_Cmnd *cmd);
static void ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ);

/* Per-device tag depth and target/device allocation management. */
static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
				     struct ahc_devinfo *devinfo);
static u_int ahc_linux_user_dv_setting(struct ahc_softc *ahc);
static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
					 struct ahc_linux_device *dev);
static struct ahc_linux_target* ahc_linux_alloc_target(struct ahc_softc*,
						       u_int, u_int);
static void ahc_linux_free_target(struct ahc_softc*,
				  struct ahc_linux_target*);
static struct ahc_linux_device* ahc_linux_alloc_device(struct ahc_softc*,
						       struct ahc_linux_target*,
						       u_int);
static void ahc_linux_free_device(struct ahc_softc*,
				  struct ahc_linux_device*);
static void ahc_linux_run_device_queue(struct ahc_softc*,
				       struct ahc_linux_device*);

/* Module option parsing and misc glue. */
static void ahc_linux_setup_tag_info_global(char *p);
static aic_option_callback_t ahc_linux_setup_tag_info;
static aic_option_callback_t ahc_linux_setup_dv;
static int aic7xxx_setup(char *s);
static int ahc_linux_next_unit(void);
static void ahc_runq_tasklet(unsigned long data);
static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc);

/********************************* Inlines ************************************/
static __inline void ahc_schedule_runq(struct ahc_softc *ahc);
static __inline struct ahc_linux_device*
		     ahc_linux_get_device(struct ahc_softc *ahc, u_int channel,
					  u_int target, u_int lun, int alloc);
static __inline void ahc_schedule_completeq(struct ahc_softc *ahc);
static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc,
						  struct ahc_linux_device *dev);
static __inline struct ahc_linux_device *
		     ahc_linux_next_device_to_run(struct ahc_softc *ahc);
static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc);
static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
				      struct ahc_dma_seg *sg,
				      dma_addr_t addr, bus_size_t len);
  527. static __inline void
  528. ahc_schedule_completeq(struct ahc_softc *ahc)
  529. {
  530. if ((ahc->platform_data->flags & AHC_RUN_CMPLT_Q_TIMER) == 0) {
  531. ahc->platform_data->flags |= AHC_RUN_CMPLT_Q_TIMER;
  532. ahc->platform_data->completeq_timer.expires = jiffies;
  533. add_timer(&ahc->platform_data->completeq_timer);
  534. }
  535. }
/*
 * Must be called with our lock held.
 *
 * Defers running the per-device queues to tasklet (softirq) context.
 */
static __inline void
ahc_schedule_runq(struct ahc_softc *ahc)
{
	tasklet_schedule(&ahc->platform_data->runq_tasklet);
}
  544. static __inline struct ahc_linux_device*
  545. ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
  546. u_int lun, int alloc)
  547. {
  548. struct ahc_linux_target *targ;
  549. struct ahc_linux_device *dev;
  550. u_int target_offset;
  551. target_offset = target;
  552. if (channel != 0)
  553. target_offset += 8;
  554. targ = ahc->platform_data->targets[target_offset];
  555. if (targ == NULL) {
  556. if (alloc != 0) {
  557. targ = ahc_linux_alloc_target(ahc, channel, target);
  558. if (targ == NULL)
  559. return (NULL);
  560. } else
  561. return (NULL);
  562. }
  563. dev = targ->devices[lun];
  564. if (dev == NULL && alloc != 0)
  565. dev = ahc_linux_alloc_device(ahc, targ, lun);
  566. return (dev);
  567. }
/* Cap on error completions returned to the midlayer per invocation. */
#define AHC_LINUX_MAX_RETURNED_ERRORS 4

/*
 * Drain the controller's completion queue, handing each finished
 * command back to the midlayer via cmd->scsi_done().  To bound stack
 * growth from midlayer retry recursion, at most
 * AHC_LINUX_MAX_RETURNED_ERRORS error completions are delivered per
 * call; any remainder is deferred to the completeq timer.
 *
 * Returns the first unprocessed entry (non-NULL when we stopped early),
 * or NULL once the queue was fully drained.
 */
static struct ahc_cmd *
ahc_linux_run_complete_queue(struct ahc_softc *ahc)
{
	struct ahc_cmd *acmd;
	u_long done_flags;
	int with_errors;

	with_errors = 0;
	ahc_done_lock(ahc, &done_flags);
	while ((acmd = TAILQ_FIRST(&ahc->platform_data->completeq)) != NULL) {
		Scsi_Cmnd *cmd;

		if (with_errors > AHC_LINUX_MAX_RETURNED_ERRORS) {
			/*
			 * Linux uses stack recursion to requeue
			 * commands that need to be retried. Avoid
			 * blowing out the stack by "spoon feeding"
			 * commands that completed with error back
			 * the operating system in case they are going
			 * to be retried. "ick"
			 */
			ahc_schedule_completeq(ahc);
			break;
		}
		TAILQ_REMOVE(&ahc->platform_data->completeq,
			     acmd, acmd_links.tqe);
		cmd = &acmd_scsi_cmd(acmd);
		cmd->host_scribble = NULL;
		/* Count both transport errors and non-GOOD SCSI status. */
		if (ahc_cmd_get_transaction_status(cmd) != DID_OK
		 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
			with_errors++;
		cmd->scsi_done(cmd);
	}
	ahc_done_unlock(ahc, &done_flags);
	return (acmd);
}
  603. static __inline void
  604. ahc_linux_check_device_queue(struct ahc_softc *ahc,
  605. struct ahc_linux_device *dev)
  606. {
  607. if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0
  608. && dev->active == 0) {
  609. dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY;
  610. dev->qfrozen--;
  611. }
  612. if (TAILQ_FIRST(&dev->busyq) == NULL
  613. || dev->openings == 0 || dev->qfrozen != 0)
  614. return;
  615. ahc_linux_run_device_queue(ahc, dev);
  616. }
  617. static __inline struct ahc_linux_device *
  618. ahc_linux_next_device_to_run(struct ahc_softc *ahc)
  619. {
  620. if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
  621. || (ahc->platform_data->qfrozen != 0
  622. && AHC_DV_SIMQ_FROZEN(ahc) == 0))
  623. return (NULL);
  624. return (TAILQ_FIRST(&ahc->platform_data->device_runq));
  625. }
  626. static __inline void
  627. ahc_linux_run_device_queues(struct ahc_softc *ahc)
  628. {
  629. struct ahc_linux_device *dev;
  630. while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
  631. TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
  632. dev->flags &= ~AHC_DEV_ON_RUN_LIST;
  633. ahc_linux_check_device_queue(ahc, dev);
  634. }
  635. }
  636. static __inline void
  637. ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
  638. {
  639. Scsi_Cmnd *cmd;
  640. cmd = scb->io_ctx;
  641. ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
  642. if (cmd->use_sg != 0) {
  643. struct scatterlist *sg;
  644. sg = (struct scatterlist *)cmd->request_buffer;
  645. pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
  646. cmd->sc_data_direction);
  647. } else if (cmd->request_bufflen != 0) {
  648. pci_unmap_single(ahc->dev_softc,
  649. scb->platform_data->buf_busaddr,
  650. cmd->request_bufflen,
  651. cmd->sc_data_direction);
  652. }
  653. }
/*
 * Fill in one hardware S/G element for the region [addr, addr+len).
 * Returns the number of hardware segments consumed (always 1 here).
 *
 * For controllers using 39-bit addressing, address bits 32-38 are
 * folded into the high byte of the length word (the hardware's S/G
 * format packs them there); otherwise only the low 32 address bits
 * are used.
 */
static __inline int
ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
		  struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
{
	int consumed;

	if ((scb->sg_count + 1) > AHC_NSEG)
		panic("Too few segs for dma mapping. "
		      "Increase AHC_NSEG\n");

	consumed = 1;
	sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
	scb->platform_data->xfer_len += len;
	if (sizeof(dma_addr_t) > 4
	 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0)
		/* Merge address bits above 32 into the length word. */
		len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK;
	sg->len = ahc_htole32(len);
	return (consumed);
}
/************************ Host template entry points *************************/
static int ahc_linux_detect(Scsi_Host_Template *);
static int ahc_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
static const char *ahc_linux_info(struct Scsi_Host *);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
/* 2.5+ midlayer device lifetime hooks. */
static int ahc_linux_slave_alloc(Scsi_Device *);
static int ahc_linux_slave_configure(Scsi_Device *);
static void ahc_linux_slave_destroy(Scsi_Device *);
#if defined(__i386__)
static int ahc_linux_biosparam(struct scsi_device*,
			       struct block_device*,
			       sector_t, int[]);
#endif
#else
/* Pre-2.5 equivalents: explicit release and bulk queue-depth setup. */
static int ahc_linux_release(struct Scsi_Host *);
static void ahc_linux_select_queue_depth(struct Scsi_Host *host,
					 Scsi_Device *scsi_devs);
#if defined(__i386__)
static int ahc_linux_biosparam(Disk *, kdev_t, int[]);
#endif
#endif
/* Error-recovery entry points (common to both kernel generations). */
static int ahc_linux_bus_reset(Scsi_Cmnd *);
static int ahc_linux_dev_reset(Scsi_Cmnd *);
static int ahc_linux_abort(Scsi_Cmnd *);
/*
 * Calculate a safe value for AHC_NSEG (as expressed through ahc_linux_nseg).
 *
 * In pre-2.5.X...
 * The midlayer allocates an S/G array dynamically when a command is issued
 * using SCSI malloc. This array, which is in an OS dependent format that
 * must later be copied to our private S/G list, is sized to house just the
 * number of segments needed for the current transfer. Since the code that
 * sizes the SCSI malloc pool does not take into consideration fragmentation
 * of the pool, executing transactions numbering just a fraction of our
 * concurrent transaction limit with list lengths approaching AHC_NSEG will
 * quickly deplete the SCSI malloc pool of usable space. Unfortunately, the
 * mid-layer does not properly handle these scsi malloc failures for the S/G
 * array and the result can be a lockup of the I/O subsystem. We try to size
 * our S/G list so that it satisfies our drivers allocation requirements in
 * addition to avoiding fragmentation of the SCSI malloc pool.
 */
static void
ahc_linux_size_nseg(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	u_int cur_size;
	u_int best_size;

	/*
	 * The SCSI allocator rounds to the nearest 512 bytes
	 * and cannot allocate across a page boundary. Our algorithm
	 * is to start at 1K of scsi malloc space per-command and
	 * loop through all factors of the PAGE_SIZE and pick the best.
	 */
	best_size = 0;
	for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
		u_int nseg;

		nseg = cur_size / sizeof(struct scatterlist);
		if (nseg < AHC_LINUX_MIN_NSEG)
			continue;

		if (best_size == 0) {
			/* First candidate that meets the minimum. */
			best_size = cur_size;
			ahc_linux_nseg = nseg;
		} else {
			u_int best_rem;
			u_int cur_rem;

			/*
			 * Compare the traits of the current "best_size"
			 * with the current size to determine if the
			 * current size is a better size.  Less remainder
			 * means less wasted allocator space.
			 */
			best_rem = best_size % sizeof(struct scatterlist);
			cur_rem = cur_size % sizeof(struct scatterlist);
			if (cur_rem < best_rem) {
				best_size = cur_size;
				ahc_linux_nseg = nseg;
			}
		}
	}
#endif
}
/*
 * Try to detect an Adaptec 7XXX controller.
 *
 * Probes PCI (and optionally EISA/VLB) buses, registers every
 * controller found with the SCSI midlayer, and returns the number of
 * hosts attached.
 */
static int
ahc_linux_detect(Scsi_Host_Template *template)
{
	struct ahc_softc *ahc;
	int found = 0;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/*
	 * It is a bug that the upper layer takes
	 * this lock just prior to calling us.
	 */
	spin_unlock_irq(&io_request_lock);
#endif

	/*
	 * Sanity checking of Linux SCSI data structures so
	 * that some of our hacks^H^H^H^H^Hassumptions aren't
	 * violated.
	 */
	if (offsetof(struct ahc_cmd_internal, end)
	  > offsetof(struct scsi_cmnd, host_scribble)) {
		printf("ahc_linux_detect: SCSI data structures changed.\n");
		printf("ahc_linux_detect: Unable to attach\n");
		return (0);
	}
	ahc_linux_size_nseg();
	/*
	 * If we've been passed any parameters, process them now.
	 */
	if (aic7xxx)
		aic7xxx_setup(aic7xxx);

	template->proc_name = "aic7xxx";

	/*
	 * Initialize our softc list lock prior to
	 * probing for any adapters.
	 */
	ahc_list_lockinit();

	found = ahc_linux_pci_init();
	/* NOTE(review): eisa_init appears to return 0 on success — confirm. */
	if (!ahc_linux_eisa_init())
		found++;

	/*
	 * Register with the SCSI layer all
	 * controllers we've found.
	 */
	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
		if (ahc_linux_register_host(ahc, template) == 0)
			found++;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/* Re-take the lock the midlayer expects to still hold. */
	spin_lock_irq(&io_request_lock);
#endif
	aic7xxx_detect_complete++;
	return (found);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * Free the passed in Scsi_Host memory structures prior to unloading the
 * module.
 *
 * Interrupts are disabled on the controller before freeing so no ISR
 * can run against a half-torn-down softc.  Always returns 0.
 */
int
ahc_linux_release(struct Scsi_Host * host)
{
	struct ahc_softc *ahc;
	u_long l;

	ahc_list_lock(&l);
	if (host != NULL) {

		/*
		 * We should be able to just perform
		 * the free directly, but check our
		 * list for extra sanity.
		 */
		ahc = ahc_find_softc(*(struct ahc_softc **)host->hostdata);
		if (ahc != NULL) {
			u_long s;

			ahc_lock(ahc, &s);
			ahc_intr_enable(ahc, FALSE);
			ahc_unlock(ahc, &s);
			ahc_free(ahc);
		}
	}
	ahc_list_unlock(&l);
	return (0);
}
#endif
  836. /*
  837. * Return a string describing the driver.
  838. */
  839. static const char *
  840. ahc_linux_info(struct Scsi_Host *host)
  841. {
  842. static char buffer[512];
  843. char ahc_info[256];
  844. char *bp;
  845. struct ahc_softc *ahc;
  846. bp = &buffer[0];
  847. ahc = *(struct ahc_softc **)host->hostdata;
  848. memset(bp, 0, sizeof(buffer));
  849. strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev ");
  850. strcat(bp, AIC7XXX_DRIVER_VERSION);
  851. strcat(bp, "\n");
  852. strcat(bp, " <");
  853. strcat(bp, ahc->description);
  854. strcat(bp, ">\n");
  855. strcat(bp, " ");
  856. ahc_controller_info(ahc, ahc_info);
  857. strcat(bp, ahc_info);
  858. strcat(bp, "\n");
  859. return (bp);
  860. }
/*
 * Queue an SCB to the controller.
 *
 * Midlayer queuecommand entry point.  Commands arriving while the simq
 * is frozen (except DV-generated ones) are completed back to the
 * midlayer with CAM_REQUEUE_REQ so it retries them later.  Otherwise
 * the command is appended to the per-device busy queue and the device
 * is placed on the run list.  Always returns 0 (command accepted or
 * completed with an error status).
 */
static int
ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
{
	struct ahc_softc *ahc;
	struct ahc_linux_device *dev;
	u_long flags;

	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;

	/*
	 * Save the callback on completion function.
	 */
	cmd->scsi_done = scsi_done;

	ahc_midlayer_entrypoint_lock(ahc, &flags);

	/*
	 * Close the race of a command that was in the process of
	 * being queued to us just as our simq was frozen. Let
	 * DV commands through so long as we are only frozen to
	 * perform DV.
	 */
	if (ahc->platform_data->qfrozen != 0
	 && AHC_DV_CMD(cmd) == 0) {

		ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
		ahc_linux_queue_cmd_complete(ahc, cmd);
		ahc_schedule_completeq(ahc);
		ahc_midlayer_entrypoint_unlock(ahc, &flags);
		return (0);
	}
	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
				   cmd->device->lun, /*alloc*/TRUE);
	if (dev == NULL) {
		/* No memory for a device structure; ask for a retry. */
		ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
		ahc_linux_queue_cmd_complete(ahc, cmd);
		ahc_schedule_completeq(ahc);
		ahc_midlayer_entrypoint_unlock(ahc, &flags);
		printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n",
		       ahc_name(ahc));
		return (0);
	}
	cmd->result = CAM_REQ_INPROG << 16;
	TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe);
	if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
		dev->flags |= AHC_DEV_ON_RUN_LIST;
		ahc_linux_run_device_queues(ahc);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
	return (0);
}
  911. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  912. static int
  913. ahc_linux_slave_alloc(Scsi_Device *device)
  914. {
  915. struct ahc_softc *ahc;
  916. ahc = *((struct ahc_softc **)device->host->hostdata);
  917. if (bootverbose)
  918. printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id);
  919. return (0);
  920. }
/*
 * Midlayer hook called after a device has been attached.  Pins our
 * device structure (clears AHC_DEV_UNCONFIGURED) and applies the
 * negotiated queue depth.
 *
 * NOTE(review): AHC_DEV_SLAVE_CONFIGURED is tested in slave_destroy but
 * is not set here — confirm it is set elsewhere (e.g. in
 * ahc_linux_device_queue_depth) or devices will never be freed there.
 */
static int
ahc_linux_slave_configure(Scsi_Device *device)
{
	struct ahc_softc *ahc;
	struct ahc_linux_device *dev;
	u_long flags;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id);
	ahc_midlayer_entrypoint_lock(ahc, &flags);
	/*
	 * Since Linux has attached to the device, configure
	 * it so we don't free and allocate the device
	 * structure on every command.
	 */
	dev = ahc_linux_get_device(ahc, device->channel,
				   device->id, device->lun,
				   /*alloc*/TRUE);
	if (dev != NULL) {
		dev->flags &= ~AHC_DEV_UNCONFIGURED;
		dev->scsi_device = device;
		ahc_linux_device_queue_depth(ahc, dev);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
	return (0);
}
/*
 * Midlayer hook called when a device is being removed.  Marks our
 * device structure unconfigured and frees it immediately only when it
 * is completely idle; otherwise the refcounting/timer paths free it
 * later.
 */
static void
ahc_linux_slave_destroy(Scsi_Device *device)
{
	struct ahc_softc *ahc;
	struct ahc_linux_device *dev;
	u_long flags;

	ahc = *((struct ahc_softc **)device->host->hostdata);
	if (bootverbose)
		printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id);
	ahc_midlayer_entrypoint_lock(ahc, &flags);
	dev = ahc_linux_get_device(ahc, device->channel,
				   device->id, device->lun,
				   /*alloc*/FALSE);
	/*
	 * Filter out "silly" deletions of real devices by only
	 * deleting devices that have had slave_configure()
	 * called on them. All other devices that have not
	 * been configured will automatically be deleted by
	 * the refcounting process.
	 */
	if (dev != NULL
	 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) {
		dev->flags |= AHC_DEV_UNCONFIGURED;
		/* Only free a device with no queued or active work. */
		if (TAILQ_EMPTY(&dev->busyq)
		 && dev->active == 0
		 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
			ahc_linux_free_device(ahc, dev);
	}
	ahc_midlayer_entrypoint_unlock(ahc, &flags);
}
#else
/*
 * Sets the queue depth for each SCSI device hanging
 * off the input host adapter.
 *
 * Pre-2.5 bulk equivalent of slave_configure: walks the midlayer's
 * device list once after scanning and applies our negotiated depth to
 * every device that belongs to this host.
 */
static void
ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs)
{
	Scsi_Device *device;
	Scsi_Device *ldev;
	struct ahc_softc *ahc;
	u_long flags;

	ahc = *((struct ahc_softc **)host->hostdata);
	ahc_lock(ahc, &flags);
	for (device = scsi_devs; device != NULL; device = device->next) {

		/*
		 * Watch out for duplicate devices. This works around
		 * some quirks in how the SCSI scanning code does its
		 * device management.  (O(n^2) over the device list,
		 * but only run once at scan time.)
		 */
		for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
			if (ldev->host == device->host
			 && ldev->channel == device->channel
			 && ldev->id == device->id
			 && ldev->lun == device->lun)
				break;
		}
		/* Skip duplicate. */
		if (ldev != device)
			continue;

		if (device->host == host) {
			struct ahc_linux_device *dev;

			/*
			 * Since Linux has attached to the device, configure
			 * it so we don't free and allocate the device
			 * structure on every command.
			 */
			dev = ahc_linux_get_device(ahc, device->channel,
						   device->id, device->lun,
						   /*alloc*/TRUE);
			if (dev != NULL) {
				dev->flags &= ~AHC_DEV_UNCONFIGURED;
				dev->scsi_device = device;
				ahc_linux_device_queue_depth(ahc, dev);
				device->queue_depth = dev->openings
						    + dev->active;
				if ((dev->flags & (AHC_DEV_Q_BASIC
						 | AHC_DEV_Q_TAGGED)) == 0) {
					/*
					 * We allow the OS to queue 2 untagged
					 * transactions to us at any time even
					 * though we can only execute them
					 * serially on the controller/device.
					 * This should remove some latency.
					 */
					device->queue_depth = 2;
				}
			}
		}
	}
	ahc_unlock(ahc, &flags);
}
#endif
#if defined(__i386__)
/*
 * Return the disk geometry for the given SCSI device.
 *
 * First tries to read the geometry recorded in the on-disk partition
 * table; failing that, falls back to the standard 64/32 heuristic, or
 * 255/63 when extended translation is enabled and the disk is large.
 * geom[] is filled as {heads, sectors, cylinders}.
 */
static int
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		    sector_t capacity, int geom[])
{
	uint8_t *bh;
#else
ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
{
	struct scsi_device *sdev = disk->device;
	u_long capacity = disk->capacity;
	struct buffer_head *bh;
#endif
	int heads;
	int sectors;
	int cylinders;
	int ret;
	int extended;
	struct ahc_softc *ahc;
	u_int channel;

	ahc = *((struct ahc_softc **)sdev->host->hostdata);
	channel = sdev->channel;

	/* Read the partition-table sector, API varying by kernel version. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	bh = scsi_bios_ptable(bdev);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
#else
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
#endif
	if (bh) {
		ret = scsi_partsize(bh, capacity,
				    &geom[2], &geom[0], &geom[1]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		kfree(bh);
#else
		brelse(bh);
#endif
		if (ret != -1)
			return (ret);
	}
	/* No usable partition table: fall back to computed geometry. */
	heads = 64;
	sectors = 32;
	cylinders = aic_sector_div(capacity, heads, sectors);

	if (aic7xxx_extended != 0)
		extended = 1;
	else if (channel == 0)
		extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0;
	else
		extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0;
	if (extended && cylinders >= 1024) {
		heads = 255;
		sectors = 63;
		cylinders = aic_sector_div(capacity, heads, sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return (0);
}
#endif
  1104. /*
  1105. * Abort the current SCSI command(s).
  1106. */
  1107. static int
  1108. ahc_linux_abort(Scsi_Cmnd *cmd)
  1109. {
  1110. int error;
  1111. error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
  1112. if (error != 0)
  1113. printf("aic7xxx_abort returns 0x%x\n", error);
  1114. return (error);
  1115. }
  1116. /*
  1117. * Attempt to send a target reset message to the device that timed out.
  1118. */
  1119. static int
  1120. ahc_linux_dev_reset(Scsi_Cmnd *cmd)
  1121. {
  1122. int error;
  1123. error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
  1124. if (error != 0)
  1125. printf("aic7xxx_dev_reset returns 0x%x\n", error);
  1126. return (error);
  1127. }
  1128. /*
  1129. * Reset the SCSI bus.
  1130. */
  1131. static int
  1132. ahc_linux_bus_reset(Scsi_Cmnd *cmd)
  1133. {
  1134. struct ahc_softc *ahc;
  1135. u_long s;
  1136. int found;
  1137. ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
  1138. ahc_midlayer_entrypoint_lock(ahc, &s);
  1139. found = ahc_reset_channel(ahc, cmd->device->channel + 'A',
  1140. /*initiate reset*/TRUE);
  1141. ahc_linux_run_complete_queue(ahc);
  1142. ahc_midlayer_entrypoint_unlock(ahc, &s);
  1143. if (bootverbose)
  1144. printf("%s: SCSI bus reset delivered. "
  1145. "%d SCBs aborted.\n", ahc_name(ahc), found);
  1146. return SUCCESS;
  1147. }
/*
 * SCSI midlayer host template: entry points and queueing capabilities
 * for every aic7xxx host this driver registers.
 */
Scsi_Host_Template aic7xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= "aic7xxx",
	.proc_info		= ahc_linux_proc_info,
	.info			= ahc_linux_info,
	.queuecommand		= ahc_linux_queue,
	/* Error-recovery entry points. */
	.eh_abort_handler	= ahc_linux_abort,
	.eh_device_reset_handler = ahc_linux_dev_reset,
	.eh_bus_reset_handler	= ahc_linux_bus_reset,
#if defined(__i386__)
	.bios_param		= ahc_linux_biosparam,
#endif
	.can_queue		= AHC_MAX_QUEUE,
	.this_id		= -1,
	/* Allow 2 untagged commands per device (executed serially). */
	.cmd_per_lun		= 2,
	.use_clustering		= ENABLE_CLUSTERING,
	.slave_alloc		= ahc_linux_slave_alloc,
	.slave_configure	= ahc_linux_slave_configure,
	.slave_destroy		= ahc_linux_slave_destroy,
};
  1168. /**************************** Tasklet Handler *********************************/
  1169. /*
  1170. * In 2.4.X and above, this routine is called from a tasklet,
  1171. * so we must re-acquire our lock prior to executing this code.
  1172. * In all prior kernels, ahc_schedule_runq() calls this routine
  1173. * directly and ahc_schedule_runq() is called with our lock held.
  1174. */
static void
ahc_runq_tasklet(unsigned long data)
{
	struct ahc_softc* ahc;
	struct ahc_linux_device *dev;
	u_long flags;

	ahc = (struct ahc_softc *)data;
	ahc_lock(ahc, &flags);
	/* Drain the run queue one device at a time. */
	while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {

		TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
		dev->flags &= ~AHC_DEV_ON_RUN_LIST;
		ahc_linux_check_device_queue(ahc, dev);
		/* Yield to our interrupt handler */
		ahc_unlock(ahc, &flags);
		ahc_lock(ahc, &flags);
	}
	ahc_unlock(ahc, &flags);
}
  1193. /******************************** Macros **************************************/
/*
 * Compose the SCSIID value for a command: the target ID in the TID
 * field, our own initiator ID for the addressed channel, and the
 * channel-select bit when the command targets twin channel B.
 */
#define BUILD_SCSIID(ahc, cmd) \
	((((cmd)->device->id << TID_SHIFT) & TID) \
	| (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
	| (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
  1198. /******************************** Bus DMA *************************************/
  1199. int
  1200. ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
  1201. bus_size_t alignment, bus_size_t boundary,
  1202. dma_addr_t lowaddr, dma_addr_t highaddr,
  1203. bus_dma_filter_t *filter, void *filterarg,
  1204. bus_size_t maxsize, int nsegments,
  1205. bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
  1206. {
  1207. bus_dma_tag_t dmat;
  1208. dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
  1209. if (dmat == NULL)
  1210. return (ENOMEM);
  1211. /*
  1212. * Linux is very simplistic about DMA memory. For now don't
  1213. * maintain all specification information. Once Linux supplies
  1214. * better facilities for doing these operations, or the
  1215. * needs of this particular driver change, we might need to do
  1216. * more here.
  1217. */
  1218. dmat->alignment = alignment;
  1219. dmat->boundary = boundary;
  1220. dmat->maxsize = maxsize;
  1221. *ret_tag = dmat;
  1222. return (0);
  1223. }
void
ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
{
	/* A tag owns no resources beyond its own allocation. */
	free(dmat, M_DEVBUF);
}
  1229. int
  1230. ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
  1231. int flags, bus_dmamap_t *mapp)
  1232. {
  1233. bus_dmamap_t map;
  1234. map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
  1235. if (map == NULL)
  1236. return (ENOMEM);
  1237. /*
  1238. * Although we can dma data above 4GB, our
  1239. * "consistent" memory is below 4GB for
  1240. * space efficiency reasons (only need a 4byte
  1241. * address). For this reason, we have to reset
  1242. * our dma mask when doing allocations.
  1243. */
  1244. if (ahc->dev_softc != NULL)
  1245. if (pci_set_dma_mask(ahc->dev_softc, 0xFFFFFFFF)) {
  1246. printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
  1247. kfree(map);
  1248. return (ENODEV);
  1249. }
  1250. *vaddr = pci_alloc_consistent(ahc->dev_softc,
  1251. dmat->maxsize, &map->bus_addr);
  1252. if (ahc->dev_softc != NULL)
  1253. if (pci_set_dma_mask(ahc->dev_softc,
  1254. ahc->platform_data->hw_dma_mask)) {
  1255. printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
  1256. kfree(map);
  1257. return (ENODEV);
  1258. }
  1259. if (*vaddr == NULL)
  1260. return (ENOMEM);
  1261. *mapp = map;
  1262. return(0);
  1263. }
void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
	/* Release consistent memory obtained from ahc_dmamem_alloc(). */
	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
			    vaddr, map->bus_addr);
}
  1271. int
  1272. ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
  1273. void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
  1274. void *cb_arg, int flags)
  1275. {
  1276. /*
  1277. * Assume for now that this will only be used during
  1278. * initialization and not for per-transaction buffer mapping.
  1279. */
  1280. bus_dma_segment_t stack_sg;
  1281. stack_sg.ds_addr = map->bus_addr;
  1282. stack_sg.ds_len = dmat->maxsize;
  1283. cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
  1284. return (0);
  1285. }
void
ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/*
	 * The map could be NULL in our < 2.3.X implementation.
	 * Now it's 2.6.5, but just in case...
	 */
	BUG_ON(map == NULL);
	free(map, M_DEVBUF);
}
int
ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/*
	 * Nothing to do: ahc_dmamap_load establishes no per-transaction
	 * state, so there is nothing to tear down here.
	 */
	return (0);
}
  1302. /********************* Platform Dependent Functions ***************************/
  1303. /*
  1304. * Compare "left hand" softc with "right hand" softc, returning:
  1305. * < 0 - lahc has a lower priority than rahc
  1306. * 0 - Softcs are equal
  1307. * > 0 - lahc has a higher priority than rahc
  1308. */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	int value;
	int rvalue;
	int lvalue;

	/*
	 * Under Linux, cards are ordered as follows:
	 *	1) VLB/EISA BIOS enabled devices sorted by BIOS address.
	 *	2) PCI devices with BIOS enabled sorted by bus/slot/func.
	 *	3) All remaining VLB/EISA devices sorted by ioport.
	 *	4) All remaining PCI devices sorted by bus/slot/func.
	 */
	value = (lahc->flags & AHC_BIOS_ENABLED)
	      - (rahc->flags & AHC_BIOS_ENABLED);
	if (value != 0)
		/* Controllers with BIOS enabled have a *higher* priority */
		return (value);

	/*
	 * Same BIOS setting, now sort based on bus type.
	 * EISA and VL controllers sort together.  EISA/VL
	 * have higher priority than PCI.
	 */
	rvalue = (rahc->chip & AHC_BUS_MASK);
	if (rvalue == AHC_VL)
		rvalue = AHC_EISA;
	lvalue = (lahc->chip & AHC_BUS_MASK);
	if (lvalue == AHC_VL)
		lvalue = AHC_EISA;
	value = rvalue - lvalue;
	if (value != 0)
		return (value);

	/* Still equal.  Sort by BIOS address, ioport, or bus/slot/func. */
	switch (rvalue) {
#ifdef CONFIG_PCI
	case AHC_PCI:
	{
		char primary_channel;

		/* reverse_scan flips the direction of both comparisons. */
		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_bus(lahc->dev_softc)
			      - ahc_get_pci_bus(rahc->dev_softc);
		else
			value = ahc_get_pci_bus(rahc->dev_softc)
			      - ahc_get_pci_bus(lahc->dev_softc);
		if (value != 0)
			break;
		if (aic7xxx_reverse_scan != 0)
			value = ahc_get_pci_slot(lahc->dev_softc)
			      - ahc_get_pci_slot(rahc->dev_softc);
		else
			value = ahc_get_pci_slot(rahc->dev_softc)
			      - ahc_get_pci_slot(lahc->dev_softc);
		if (value != 0)
			break;
		/*
		 * On multi-function devices, the user can choose
		 * to have function 1 probed before function 0.
		 * Give whichever channel is the primary channel
		 * the highest priority.
		 */
		primary_channel = (lahc->flags & AHC_PRIMARY_CHANNEL) + 'A';
		value = -1;
		if (lahc->channel == primary_channel)
			value = 1;
		break;
	}
#endif
	case AHC_EISA:
		if ((rahc->flags & AHC_BIOS_ENABLED) != 0) {
			value = rahc->platform_data->bios_address
			      - lahc->platform_data->bios_address;
		} else {
			value = rahc->bsh.ioport
			      - lahc->bsh.ioport;
		}
		break;
	default:
		panic("ahc_softc_sort: invalid bus type");
	}
	return (value);
}
  1390. static void
  1391. ahc_linux_setup_tag_info_global(char *p)
  1392. {
  1393. int tags, i, j;
  1394. tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
  1395. printf("Setting Global Tags= %d\n", tags);
  1396. for (i = 0; i < NUM_ELEMENTS(aic7xxx_tag_info); i++) {
  1397. for (j = 0; j < AHC_NUM_TARGETS; j++) {
  1398. aic7xxx_tag_info[i].tag_commands[j] = tags;
  1399. }
  1400. }
  1401. }
  1402. static void
  1403. ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
  1404. {
  1405. if ((instance >= 0) && (targ >= 0)
  1406. && (instance < NUM_ELEMENTS(aic7xxx_tag_info))
  1407. && (targ < AHC_NUM_TARGETS)) {
  1408. aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
  1409. if (bootverbose)
  1410. printf("tag_info[%d:%d] = %d\n", instance, targ, value);
  1411. }
  1412. }
  1413. static void
  1414. ahc_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
  1415. {
  1416. if ((instance >= 0)
  1417. && (instance < NUM_ELEMENTS(aic7xxx_dv_settings))) {
  1418. aic7xxx_dv_settings[instance] = value;
  1419. if (bootverbose)
  1420. printf("dv[%d] = %d\n", instance, value);
  1421. }
  1422. }
  1423. /*
  1424. * Handle Linux boot parameters. This routine allows for assigning a value
  1425. * to a parameter with a ':' between the parameter and the value.
  1426. * ie. aic7xxx=stpwlev:1,extended
  1427. */
static int
aic7xxx_setup(char *s)
{
	int	i, n;
	char   *p;
	char   *end;

	/*
	 * Option table.  Entries with a NULL flag ("tag_info",
	 * "global_tag_depth", "dv") are handled by name below and
	 * must never reach the generic flag-assignment cases.
	 */
	static struct {
		const char *name;
		uint32_t *flag;
	} options[] = {
		{ "extended", &aic7xxx_extended },
		{ "no_reset", &aic7xxx_no_reset },
		{ "verbose", &aic7xxx_verbose },
		{ "allow_memio", &aic7xxx_allow_memio},
#ifdef AHC_DEBUG
		{ "debug", &ahc_debug },
#endif
		{ "reverse_scan", &aic7xxx_reverse_scan },
		{ "no_probe", &aic7xxx_probe_eisa_vl },
		{ "probe_eisa_vl", &aic7xxx_probe_eisa_vl },
		{ "periodic_otag", &aic7xxx_periodic_otag },
		{ "pci_parity", &aic7xxx_pci_parity },
		{ "seltime", &aic7xxx_seltime },
		{ "tag_info", NULL },
		{ "global_tag_depth", NULL },
		{ "dv", NULL }
	};

	end = strchr(s, '\0');

	/*
	 * XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
	 * will never be 0 in this case.
	 */
	n = 0;

	/* Options are separated by ',' or '.'; match each by prefix. */
	while ((p = strsep(&s, ",.")) != NULL) {
		if (*p == '\0')
			continue;
		for (i = 0; i < NUM_ELEMENTS(options); i++) {
			n = strlen(options[i].name);
			if (strncmp(options[i].name, p, n) == 0)
				break;
		}
		if (i == NUM_ELEMENTS(options))
			continue;

		if (strncmp(p, "global_tag_depth", n) == 0) {
			ahc_linux_setup_tag_info_global(p + n);
		} else if (strncmp(p, "tag_info", n) == 0) {
			/* Brace-list form; the parser advances s itself. */
			s = aic_parse_brace_option("tag_info", p + n, end,
			    2, ahc_linux_setup_tag_info, 0);
		} else if (strncmp(p, "dv", n) == 0) {
			s = aic_parse_brace_option("dv", p + n, end, 1,
			    ahc_linux_setup_dv, 0);
		} else if (p[n] == ':') {
			/* "name:value" assigns an explicit value. */
			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
		} else if (strncmp(p, "verbose", n) == 0) {
			*(options[i].flag) = 1;
		} else {
			/* Bare option name toggles all bits of the flag. */
			*(options[i].flag) ^= 0xFFFFFFFF;
		}
	}
	return 1;
}
/* Register the "aic7xxx=" kernel command line option parser. */
__setup("aic7xxx=", aic7xxx_setup);

/* Driver-wide verbosity flag, settable via the "verbose" boot option. */
uint32_t aic7xxx_verbose;
  1491. int
  1492. ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
  1493. {
  1494. char buf[80];
  1495. struct Scsi_Host *host;
  1496. char *new_name;
  1497. u_long s;
  1498. u_int targ_offset;
  1499. template->name = ahc->description;
  1500. host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
  1501. if (host == NULL)
  1502. return (ENOMEM);
  1503. *((struct ahc_softc **)host->hostdata) = ahc;
  1504. ahc_lock(ahc, &s);
  1505. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  1506. scsi_assign_lock(host, &ahc->platform_data->spin_lock);
  1507. #elif AHC_SCSI_HAS_HOST_LOCK != 0
  1508. host->lock = &ahc->platform_data->spin_lock;
  1509. #endif
  1510. ahc->platform_data->host = host;
  1511. host->can_queue = AHC_MAX_QUEUE;
  1512. host->cmd_per_lun = 2;
  1513. /* XXX No way to communicate the ID for multiple channels */
  1514. host->this_id = ahc->our_id;
  1515. host->irq = ahc->platform_data->irq;
  1516. host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
  1517. host->max_lun = AHC_NUM_LUNS;
  1518. host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0;
  1519. host->sg_tablesize = AHC_NSEG;
  1520. ahc_set_unit(ahc, ahc_linux_next_unit());
  1521. sprintf(buf, "scsi%d", host->host_no);
  1522. new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
  1523. if (new_name != NULL) {
  1524. strcpy(new_name, buf);
  1525. ahc_set_name(ahc, new_name);
  1526. }
  1527. host->unique_id = ahc->unit;
  1528. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  1529. scsi_set_pci_device(host, ahc->dev_softc);
  1530. #endif
  1531. ahc_linux_initialize_scsi_bus(ahc);
  1532. ahc_unlock(ahc, &s);
  1533. ahc->platform_data->dv_pid = kernel_thread(ahc_linux_dv_thread, ahc, 0);
  1534. ahc_lock(ahc, &s);
  1535. if (ahc->platform_data->dv_pid < 0) {
  1536. printf("%s: Failed to create DV thread, error= %d\n",
  1537. ahc_name(ahc), ahc->platform_data->dv_pid);
  1538. return (-ahc->platform_data->dv_pid);
  1539. }
  1540. /*
  1541. * Initially allocate *all* of our linux target objects
  1542. * so that the DV thread will scan them all in parallel
  1543. * just after driver initialization. Any device that
  1544. * does not exist will have its target object destroyed
  1545. * by the selection timeout handler. In the case of a
  1546. * device that appears after the initial DV scan, async
  1547. * negotiation will occur for the first command, and DV
  1548. * will comence should that first command be successful.
  1549. */
  1550. for (targ_offset = 0;
  1551. targ_offset < host->max_id * (host->max_channel + 1);
  1552. targ_offset++) {
  1553. u_int channel;
  1554. u_int target;
  1555. channel = 0;
  1556. target = targ_offset;
  1557. if (target > 7
  1558. && (ahc->features & AHC_TWIN) != 0) {
  1559. channel = 1;
  1560. target &= 0x7;
  1561. }
  1562. /*
  1563. * Skip our own ID. Some Compaq/HP storage devices
  1564. * have enclosure management devices that respond to
  1565. * single bit selection (i.e. selecting ourselves).
  1566. * It is expected that either an external application
  1567. * or a modified kernel will be used to probe this
  1568. * ID if it is appropriate. To accommodate these
  1569. * installations, ahc_linux_alloc_target() will allocate
  1570. * for our ID if asked to do so.
  1571. */
  1572. if ((channel == 0 && target == ahc->our_id)
  1573. || (channel == 1 && target == ahc->our_id_b))
  1574. continue;
  1575. ahc_linux_alloc_target(ahc, channel, target);
  1576. }
  1577. ahc_intr_enable(ahc, TRUE);
  1578. ahc_linux_start_dv(ahc);
  1579. ahc_unlock(ahc, &s);
  1580. host->transportt = ahc_linux_transport_template;
  1581. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  1582. scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
  1583. scsi_scan_host(host);
  1584. #endif
  1585. return (0);
  1586. }
  1587. uint64_t
  1588. ahc_linux_get_memsize(void)
  1589. {
  1590. struct sysinfo si;
  1591. si_meminfo(&si);
  1592. return ((uint64_t)si.totalram << PAGE_SHIFT);
  1593. }
  1594. /*
  1595. * Find the smallest available unit number to use
  1596. * for a new device. We don't just use a static
  1597. * count to handle the "repeated hot-(un)plug"
  1598. * scenario.
  1599. */
  1600. static int
  1601. ahc_linux_next_unit(void)
  1602. {
  1603. struct ahc_softc *ahc;
  1604. int unit;
  1605. unit = 0;
  1606. retry:
  1607. TAILQ_FOREACH(ahc, &ahc_tailq, links) {
  1608. if (ahc->unit == unit) {
  1609. unit++;
  1610. goto retry;
  1611. }
  1612. }
  1613. return (unit);
  1614. }
  1615. /*
  1616. * Place the SCSI bus into a known state by either resetting it,
  1617. * or forcing transfer negotiations on the next command to any
  1618. * target.
  1619. */
void
ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
{
	int i;
	int numtarg;

	i = 0;
	numtarg = 0;

	/* The no_reset boot option suppresses all initial bus resets. */
	if (aic7xxx_no_reset != 0)
		ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);

	if ((ahc->flags & AHC_RESET_BUS_A) != 0)
		ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
	else
		/* No reset: renegotiate with every channel A target. */
		numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;

	if ((ahc->features & AHC_TWIN) != 0) {
		if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
			ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
		} else {
			/* Indexes 8-15 represent channel B targets below. */
			if (numtarg == 0)
				i = 8;
			numtarg += 8;
		}
	}

	/*
	 * Force negotiation to async for all targets that
	 * will not see an initial bus reset.
	 */
	for (; i < numtarg; i++) {
		struct ahc_devinfo devinfo;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		u_int our_id;
		u_int target_id;
		char channel;

		channel = 'A';
		our_id = ahc->our_id;
		target_id = i;
		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
			channel = 'B';
			our_id = ahc->our_id_b;
			target_id = i % 8;
		}
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
					    target_id, &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
				    CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
		ahc_update_neg_request(ahc, &devinfo, tstate,
				       tinfo, AHC_NEG_ALWAYS);
	}
	/* Give the bus some time to recover */
	if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
		/* Hold off commands until the reset delay timer expires. */
		ahc_linux_freeze_simq(ahc);
		init_timer(&ahc->platform_data->reset_timer);
		ahc->platform_data->reset_timer.data = (u_long)ahc;
		ahc->platform_data->reset_timer.expires =
		    jiffies + (AIC7XXX_RESET_DELAY * HZ)/1000;
		ahc->platform_data->reset_timer.function =
		    ahc_linux_release_simq;
		add_timer(&ahc->platform_data->reset_timer);
	}
}
/*
 * Allocate and initialize the Linux-specific per-controller state:
 * queues, locks, timers, semaphores, and the run-queue tasklet.
 * Returns 0 on success or ENOMEM.
 */
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	ahc->platform_data =
	    malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
	TAILQ_INIT(&ahc->platform_data->completeq);
	TAILQ_INIT(&ahc->platform_data->device_runq);
	ahc->platform_data->irq = AHC_LINUX_NOIRQ;
	ahc->platform_data->hw_dma_mask = 0xFFFFFFFF;
	ahc_lockinit(ahc);
	ahc_done_lockinit(ahc);
	init_timer(&ahc->platform_data->completeq_timer);
	ahc->platform_data->completeq_timer.data = (u_long)ahc;
	ahc->platform_data->completeq_timer.function =
	    (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
	/* Semaphores start locked; up() is used to signal them later. */
	init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
	init_MUTEX_LOCKED(&ahc->platform_data->dv_sem);
	init_MUTEX_LOCKED(&ahc->platform_data->dv_cmd_sem);
	tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
		     (unsigned long)ahc);
	/* Selection timeout from the boot-time seltime option. */
	ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
	ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
	if (aic7xxx_pci_parity == 0)
		ahc->flags |= AHC_DISABLE_PCI_PERR;
	return (0);
}
/*
 * Tear down all Linux-specific controller state: deferred work, the
 * DV thread, the midlayer host, target/device objects, and any IRQ or
 * register-window resources.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_linux_target *targ;
	struct ahc_linux_device *dev;
	int i, j;

	if (ahc->platform_data != NULL) {
		/* Quiesce deferred-work sources before freeing anything. */
		del_timer_sync(&ahc->platform_data->completeq_timer);
		ahc_linux_kill_dv_thread(ahc);
		tasklet_kill(&ahc->platform_data->runq_tasklet);
		if (ahc->platform_data->host != NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			scsi_remove_host(ahc->platform_data->host);
#endif
			scsi_host_put(ahc->platform_data->host);
		}

		/* destroy all of the device and target objects */
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			targ = ahc->platform_data->targets[i];
			if (targ != NULL) {
				/* Keep target around through the loop. */
				targ->refcount++;
				for (j = 0; j < AHC_NUM_LUNS; j++) {
					if (targ->devices[j] == NULL)
						continue;
					dev = targ->devices[j];
					ahc_linux_free_device(ahc, dev);
				}
				/*
				 * Forcibly free the target now that
				 * all devices are gone.
				 */
				ahc_linux_free_target(ahc, targ);
			}
		}

		/* Release IRQ and I/O / memory-mapped register resources. */
		if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
			free_irq(ahc->platform_data->irq, ahc);
		if (ahc->tag == BUS_SPACE_PIO
		 && ahc->bsh.ioport != 0)
			release_region(ahc->bsh.ioport, 256);
		if (ahc->tag == BUS_SPACE_MEMIO
		 && ahc->bsh.maddr != NULL) {
			iounmap(ahc->bsh.maddr);
			release_mem_region(ahc->platform_data->mem_busaddr,
					   0x1000);
		}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		/*
		 * In 2.4 we detach from the scsi midlayer before the PCI
		 * layer invokes our remove callback.  No per-instance
		 * detach is provided, so we must reach inside the PCI
		 * subsystem's internals and detach our driver manually.
		 */
		if (ahc->dev_softc != NULL)
			ahc->dev_softc->driver = NULL;
#endif
		free(ahc->platform_data, M_DEVBUF);
	}
}
void
ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	/*
	 * Requeue every command outstanding to the single device
	 * addressed by this SCB.
	 */
	ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				SCB_GET_CHANNEL(ahc, scb),
				SCB_GET_LUN(scb), SCB_LIST_NULL,
				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
}
/*
 * Apply a new queueing algorithm (none/basic/tagged) to the device
 * named by devinfo: recompute openings/maxtags and, on 2.5+ kernels,
 * tell the SCSI midlayer the new queue depth.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		      ahc_queue_alg alg)
{
	struct ahc_linux_device *dev;
	int was_queuing;
	int now_queuing;

	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
				   devinfo->target,
				   devinfo->lun, /*alloc*/FALSE);
	if (dev == NULL)
		return;
	was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
	switch (alg) {
	default:
	case AHC_QUEUE_NONE:
		now_queuing = 0;
		break;
	case AHC_QUEUE_BASIC:
		now_queuing = AHC_DEV_Q_BASIC;
		break;
	case AHC_QUEUE_TAGGED:
		now_queuing = AHC_DEV_Q_TAGGED;
		break;
	}
	/*
	 * When switching queueing styles on an active device, freeze it
	 * until the outstanding commands drain.
	 */
	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
	 && (was_queuing != now_queuing)
	 && (dev->active != 0)) {
		dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen++;
	}

	dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
	if (now_queuing) {
		u_int usertags;

		usertags = ahc_linux_user_tagdepth(ahc, devinfo);
		if (!was_queuing) {
			/*
			 * Start out aggressively and allow our
			 * dynamic queue depth algorithm to take
			 * care of the rest.
			 */
			dev->maxtags = usertags;
			dev->openings = dev->maxtags - dev->active;
		}
		if (dev->maxtags == 0) {
			/*
			 * Queueing is disabled by the user.
			 */
			dev->openings = 1;
		} else if (alg == AHC_QUEUE_TAGGED) {
			dev->flags |= AHC_DEV_Q_TAGGED;
			if (aic7xxx_periodic_otag != 0)
				dev->flags |= AHC_DEV_PERIODIC_OTAG;
		} else
			dev->flags |= AHC_DEV_Q_BASIC;
	} else {
		/* We can only have one opening. */
		dev->maxtags = 0;
		dev->openings = 1 - dev->active;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	if (dev->scsi_device != NULL) {
		switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
		case AHC_DEV_Q_BASIC:
			scsi_adjust_queue_depth(dev->scsi_device,
						MSG_SIMPLE_TASK,
						dev->openings + dev->active);
			break;
		case AHC_DEV_Q_TAGGED:
			scsi_adjust_queue_depth(dev->scsi_device,
						MSG_ORDERED_TASK,
						dev->openings + dev->active);
			break;
		default:
			/*
			 * We allow the OS to queue 2 untagged transactions to
			 * us at any time even though we can only execute them
			 * serially on the controller/device.  This should
			 * remove some latency.
			 */
			scsi_adjust_queue_depth(dev->scsi_device,
						/*NON-TAGGED*/0,
						/*queue depth*/2);
			break;
		}
	}
#endif
}
  1864. int
  1865. ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
  1866. int lun, u_int tag, role_t role, uint32_t status)
  1867. {
  1868. int chan;
  1869. int maxchan;
  1870. int targ;
  1871. int maxtarg;
  1872. int clun;
  1873. int maxlun;
  1874. int count;
  1875. if (tag != SCB_LIST_NULL)
  1876. return (0);
  1877. chan = 0;
  1878. if (channel != ALL_CHANNELS) {
  1879. chan = channel - 'A';
  1880. maxchan = chan + 1;
  1881. } else {
  1882. maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
  1883. }
  1884. targ = 0;
  1885. if (target != CAM_TARGET_WILDCARD) {
  1886. targ = target;
  1887. maxtarg = targ + 1;
  1888. } else {
  1889. maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
  1890. }
  1891. clun = 0;
  1892. if (lun != CAM_LUN_WILDCARD) {
  1893. clun = lun;
  1894. maxlun = clun + 1;
  1895. } else {
  1896. maxlun = AHC_NUM_LUNS;
  1897. }
  1898. count = 0;
  1899. for (; chan < maxchan; chan++) {
  1900. for (; targ < maxtarg; targ++) {
  1901. for (; clun < maxlun; clun++) {
  1902. struct ahc_linux_device *dev;
  1903. struct ahc_busyq *busyq;
  1904. struct ahc_cmd *acmd;
  1905. dev = ahc_linux_get_device(ahc, chan,
  1906. targ, clun,
  1907. /*alloc*/FALSE);
  1908. if (dev == NULL)
  1909. continue;
  1910. busyq = &dev->busyq;
  1911. while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
  1912. Scsi_Cmnd *cmd;
  1913. cmd = &acmd_scsi_cmd(acmd);
  1914. TAILQ_REMOVE(busyq, acmd,
  1915. acmd_links.tqe);
  1916. count++;
  1917. cmd->result = status << 16;
  1918. ahc_linux_queue_cmd_complete(ahc, cmd);
  1919. }
  1920. }
  1921. }
  1922. }
  1923. return (count);
  1924. }
/*
 * Timer callback: flush the command completion queue under our lock
 * and clear the pending-timer state.
 */
static void
ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
{
	u_long flags;

	ahc_lock(ahc, &flags);
	del_timer(&ahc->platform_data->completeq_timer);
	ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER;
	ahc_linux_run_complete_queue(ahc);
	ahc_unlock(ahc, &flags);
}
/*
 * Kick off a domain validation pass if one is not already active.
 */
static void
ahc_linux_start_dv(struct ahc_softc *ahc)
{
	/*
	 * Freeze the simq and signal ahc_linux_queue to not let any
	 * more commands through.
	 */
	if ((ahc->platform_data->flags & AHC_DV_ACTIVE) == 0) {
#ifdef AHC_DEBUG
		if (ahc_debug & AHC_SHOW_DV)
			printf("%s: Waking DV thread\n", ahc_name(ahc));
#endif

		ahc->platform_data->flags |= AHC_DV_ACTIVE;
		ahc_linux_freeze_simq(ahc);

		/* Wake up the DV kthread */
		up(&ahc->platform_data->dv_sem);
	}
}
/*
 * Ask the DV thread to shut down and wait for it to acknowledge.
 * Safe to call more than once; dv_pid == 0 marks the thread dead.
 */
static void
ahc_linux_kill_dv_thread(struct ahc_softc *ahc)
{
	u_long s;

	ahc_lock(ahc, &s);
	if (ahc->platform_data->dv_pid != 0) {
		ahc->platform_data->flags |= AHC_DV_SHUTDOWN;
		ahc_unlock(ahc, &s);
		up(&ahc->platform_data->dv_sem);

		/*
		 * Use the eh_sem as an indicator that the
		 * dv thread is exiting.  Note that the dv
		 * thread must still return after performing
		 * the up on our semaphore before it has
		 * completely exited this module.  Unfortunately,
		 * there seems to be no easy way to wait for the
		 * exit of a thread for which you are not the
		 * parent (dv threads are parented by init).
		 * Cross your fingers...
		 */
		down(&ahc->platform_data->eh_sem);

		/*
		 * Mark the dv thread as already dead.  This
		 * avoids attempting to kill it a second time.
		 * This is necessary because we must kill the
		 * DV thread before calling ahc_free() in the
		 * module shutdown case to avoid bogus locking
		 * in the SCSI mid-layer, but ahc_free() is
		 * called without killing the DV thread in the
		 * instance detach case, so ahc_platform_free()
		 * calls us again to verify that the DV thread
		 * is dead.
		 */
		ahc->platform_data->dv_pid = 0;
	} else {
		ahc_unlock(ahc, &s);
	}
}
/*
 * Main loop of the per-controller Domain Validation kernel thread.
 * Sleeps on dv_sem until woken (either to run DV or to shut down),
 * drains outstanding commands, waits for the SIMQ to be frozen solely
 * on DV's behalf, validates every target, and then releases the SIMQ.
 * Signals eh_sem on exit so ahc_linux_kill_dv_thread() can synchronize.
 */
static int
ahc_linux_dv_thread(void *data)
{
	struct ahc_softc *ahc;
	int target;
	u_long s;

	ahc = (struct ahc_softc *)data;

#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_DV)
		printf("Launching DV Thread\n");
#endif

	/*
	 * Complete thread creation: detach from the parent and name
	 * ourselves after the controller unit.
	 */
	lock_kernel();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/*
	 * Don't care about any signals.
	 */
	siginitsetinv(&current->blocked, 0);

	daemonize();
	sprintf(current->comm, "ahc_dv_%d", ahc->unit);
#else
	daemonize("ahc_dv_%d", ahc->unit);
	current->flags |= PF_FREEZE;
#endif
	unlock_kernel();

	while (1) {
		/*
		 * Use down_interruptible() rather than down() to
		 * avoid inclusion in the load average.
		 */
		down_interruptible(&ahc->platform_data->dv_sem);

		/* Check to see if we've been signaled to exit */
		ahc_lock(ahc, &s);
		if ((ahc->platform_data->flags & AHC_DV_SHUTDOWN) != 0) {
			ahc_unlock(ahc, &s);
			break;
		}
		ahc_unlock(ahc, &s);

#ifdef AHC_DEBUG
		if (ahc_debug & AHC_SHOW_DV)
			printf("%s: Beginning Domain Validation\n",
			       ahc_name(ahc));
#endif

		/*
		 * Wait for any pending commands to drain before proceeding.
		 * The completion path posts dv_sem when the queue empties.
		 */
		ahc_lock(ahc, &s);
		while (LIST_FIRST(&ahc->pending_scbs) != NULL) {
			ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_EMPTY;
			ahc_unlock(ahc, &s);
			down_interruptible(&ahc->platform_data->dv_sem);
			ahc_lock(ahc, &s);
		}

		/*
		 * Wait for the SIMQ to be released so that DV is the
		 * only reason the queue is frozen.
		 */
		while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
			ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
			ahc_unlock(ahc, &s);
			down_interruptible(&ahc->platform_data->dv_sem);
			ahc_lock(ahc, &s);
		}
		ahc_unlock(ahc, &s);

		/* Validate each possible target in turn. */
		for (target = 0; target < AHC_NUM_TARGETS; target++)
			ahc_linux_dv_target(ahc, target);

		ahc_lock(ahc, &s);
		ahc->platform_data->flags &= ~AHC_DV_ACTIVE;
		ahc_unlock(ahc, &s);

		/*
		 * Release the SIMQ so that normal commands are
		 * allowed to continue on the bus.
		 */
		ahc_linux_release_simq((u_long)ahc);
	}
	/* Let ahc_linux_kill_dv_thread() know we are on the way out. */
	up(&ahc->platform_data->eh_sem);
	return (0);
}
/* Short (standard) and full INQUIRY allocation lengths used during DV. */
#define AHC_LINUX_DV_INQ_SHORT_LEN	36
#define AHC_LINUX_DV_INQ_LEN		256
/* Default per-command DV timeout (quarter of a second). */
#define AHC_LINUX_DV_TIMEOUT		(HZ / 4)

/* Record the call site's line number with each state change for debugging. */
#define AHC_SET_DV_STATE(ahc, targ, newstate) \
	ahc_set_dv_state(ahc, targ, newstate, __LINE__)

/*
 * Move a target's DV state machine to "newstate".  Re-entering the
 * same state counts as a retry; entering a new state resets the
 * retry counter.
 */
static __inline void
ahc_set_dv_state(struct ahc_softc *ahc, struct ahc_linux_target *targ,
		 ahc_dv_state newstate, u_int line)
{
	ahc_dv_state oldstate;

	oldstate = targ->dv_state;
#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_DV)
		printf("%s:%d: Going from state %d to state %d\n",
		       ahc_name(ahc), line, oldstate, newstate);
#endif

	if (oldstate == newstate)
		targ->dv_state_retry++;
	else
		targ->dv_state_retry = 0;
	targ->dv_state = newstate;
}
/*
 * Run the full Domain Validation state machine against a single target.
 * Builds synthetic scsi_cmnd/scsi_device objects, issues one DV command
 * per loop iteration, waits for its completion (dv_cmd_sem), and lets
 * ahc_linux_dv_transition() pick the next state until AHC_DV_STATE_EXIT.
 */
static void
ahc_linux_dv_target(struct ahc_softc *ahc, u_int target_offset)
{
	struct ahc_devinfo devinfo;
	struct ahc_linux_target *targ;
	struct scsi_cmnd *cmd;
	struct scsi_device *scsi_dev;
	struct scsi_sense_data *sense;
	uint8_t *buffer;
	u_long s;
	u_int timeout;
	int echo_size;

	sense = NULL;
	buffer = NULL;
	echo_size = 0;
	ahc_lock(ahc, &s);
	targ = ahc->platform_data->targets[target_offset];
	/* Nothing to do if the target is absent or DV is not requested. */
	if (targ == NULL || (targ->flags & AHC_DV_REQUIRED) == 0) {
		ahc_unlock(ahc, &s);
		return;
	}
	ahc_compile_devinfo(&devinfo,
			    targ->channel == 0 ? ahc->our_id : ahc->our_id_b,
			    targ->target, /*lun*/0, targ->channel + 'A',
			    ROLE_INITIATOR);
#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_DV) {
		ahc_print_devinfo(ahc, &devinfo);
		printf("Performing DV\n");
	}
#endif

	ahc_unlock(ahc, &s);

	/* Fabricate a command and device to drive through our queue path. */
	cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
	scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
	scsi_dev->host = ahc->platform_data->host;
	scsi_dev->id = devinfo.target;
	scsi_dev->lun = devinfo.lun;
	scsi_dev->channel = devinfo.channel - 'A';
	ahc->platform_data->dv_scsi_dev = scsi_dev;

	AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_SHORT_ASYNC);

	while (targ->dv_state != AHC_DV_STATE_EXIT) {
		timeout = AHC_LINUX_DV_TIMEOUT;
		switch (targ->dv_state) {
		case AHC_DV_STATE_INQ_SHORT_ASYNC:
		case AHC_DV_STATE_INQ_ASYNC:
		case AHC_DV_STATE_INQ_ASYNC_VERIFY:
			/*
			 * Set things to async narrow to reduce the
			 * chance that the INQ will fail.
			 */
			ahc_lock(ahc, &s);
			ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
					 AHC_TRANS_GOAL, /*paused*/FALSE);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_GOAL, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			timeout = 10 * HZ;
			targ->flags &= ~AHC_INQ_VALID;
			/* FALLTHROUGH */
		case AHC_DV_STATE_INQ_VERIFY:
		{
			u_int inq_len;

			/*
			 * The first INQ uses the fixed 36-byte length;
			 * later passes use the length the device reported.
			 */
			if (targ->dv_state == AHC_DV_STATE_INQ_SHORT_ASYNC)
				inq_len = AHC_LINUX_DV_INQ_SHORT_LEN;
			else
				inq_len = targ->inq_data->additional_length + 5;
			ahc_linux_dv_inq(ahc, cmd, &devinfo, targ, inq_len);
			break;
		}
		case AHC_DV_STATE_TUR:
		case AHC_DV_STATE_BUSY:
			timeout = 5 * HZ;
			ahc_linux_dv_tur(ahc, cmd, &devinfo);
			break;
		case AHC_DV_STATE_REBD:
			ahc_linux_dv_rebd(ahc, cmd, &devinfo, targ);
			break;
		case AHC_DV_STATE_WEB:
			ahc_linux_dv_web(ahc, cmd, &devinfo, targ);
			break;
		case AHC_DV_STATE_REB:
			ahc_linux_dv_reb(ahc, cmd, &devinfo, targ);
			break;
		case AHC_DV_STATE_SU:
			ahc_linux_dv_su(ahc, cmd, &devinfo, targ);
			/* Spin-up can legitimately take a long time. */
			timeout = 50 * HZ;
			break;
		default:
			ahc_print_devinfo(ahc, &devinfo);
			printf("Unknown DV state %d\n", targ->dv_state);
			goto out;
		}

		/* Queue the command and wait for it to complete */
		/* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
		init_timer(&cmd->eh_timeout);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			/*
			 * All of the printfs during negotiation
			 * really slow down the negotiation.
			 * Add a bit of time just to be safe.
			 */
			timeout += HZ;
#endif
		scsi_add_timer(cmd, timeout, ahc_linux_dv_timeout);
		/*
		 * In 2.5.X, it is assumed that all calls from the
		 * "midlayer" (which we are emulating) will have the
		 * ahc host lock held.  For other kernels, the
		 * io_request_lock must be held.
		 */
#if AHC_SCSI_HAS_HOST_LOCK != 0
		ahc_lock(ahc, &s);
#else
		spin_lock_irqsave(&io_request_lock, s);
#endif
		ahc_linux_queue(cmd, ahc_linux_dv_complete);
#if AHC_SCSI_HAS_HOST_LOCK != 0
		ahc_unlock(ahc, &s);
#else
		spin_unlock_irqrestore(&io_request_lock, s);
#endif
		/* Completion handler posts dv_cmd_sem when the cmd is done. */
		down_interruptible(&ahc->platform_data->dv_cmd_sem);
		/*
		 * Wait for the SIMQ to be released so that DV is the
		 * only reason the queue is frozen.
		 */
		ahc_lock(ahc, &s);
		while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
			ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
			ahc_unlock(ahc, &s);
			down_interruptible(&ahc->platform_data->dv_sem);
			ahc_lock(ahc, &s);
		}
		ahc_unlock(ahc, &s);

		ahc_linux_dv_transition(ahc, cmd, &devinfo, targ);
	}

out:
	if ((targ->flags & AHC_INQ_VALID) != 0
	 && ahc_linux_get_device(ahc, devinfo.channel - 'A',
				 devinfo.target, devinfo.lun,
				 /*alloc*/FALSE) == NULL) {
		/*
		 * The DV state machine failed to configure this device.
		 * This is normal if DV is disabled.  Since we have inquiry
		 * data, filter it and use the "optimistic" negotiation
		 * parameters found in the inquiry string.
		 */
		ahc_linux_filter_inquiry(ahc, &devinfo);
		if ((targ->flags & (AHC_BASIC_DV|AHC_ENHANCED_DV)) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("DV failed to configure device.  "
			       "Please file a bug report against "
			       "this driver.\n");
		}
	}

	/* Tear down the synthetic command/device and any DV buffers. */
	if (cmd != NULL)
		free(cmd, M_DEVBUF);

	if (ahc->platform_data->dv_scsi_dev != NULL) {
		free(ahc->platform_data->dv_scsi_dev, M_DEVBUF);
		ahc->platform_data->dv_scsi_dev = NULL;
	}

	ahc_lock(ahc, &s);
	if (targ->dv_buffer != NULL) {
		free(targ->dv_buffer, M_DEVBUF);
		targ->dv_buffer = NULL;
	}
	if (targ->dv_buffer1 != NULL) {
		free(targ->dv_buffer1, M_DEVBUF);
		targ->dv_buffer1 = NULL;
	}
	targ->flags &= ~AHC_DV_REQUIRED;
	if (targ->refcount == 0)
		ahc_linux_free_target(ahc, targ);
	ahc_unlock(ahc, &s);
}
  2269. static void
  2270. ahc_linux_dv_transition(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2271. struct ahc_devinfo *devinfo,
  2272. struct ahc_linux_target *targ)
  2273. {
  2274. u_int32_t status;
  2275. status = aic_error_action(cmd, targ->inq_data,
  2276. ahc_cmd_get_transaction_status(cmd),
  2277. ahc_cmd_get_scsi_status(cmd));
  2278. #ifdef AHC_DEBUG
  2279. if (ahc_debug & AHC_SHOW_DV) {
  2280. ahc_print_devinfo(ahc, devinfo);
  2281. printf("Entering ahc_linux_dv_transition, state= %d, "
  2282. "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
  2283. status, cmd->result);
  2284. }
  2285. #endif
  2286. switch (targ->dv_state) {
  2287. case AHC_DV_STATE_INQ_SHORT_ASYNC:
  2288. case AHC_DV_STATE_INQ_ASYNC:
  2289. switch (status & SS_MASK) {
  2290. case SS_NOP:
  2291. {
  2292. AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
  2293. break;
  2294. }
  2295. case SS_INQ_REFRESH:
  2296. AHC_SET_DV_STATE(ahc, targ,
  2297. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2298. break;
  2299. case SS_TUR:
  2300. case SS_RETRY:
  2301. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2302. if (ahc_cmd_get_transaction_status(cmd)
  2303. == CAM_REQUEUE_REQ)
  2304. targ->dv_state_retry--;
  2305. if ((status & SS_ERRMASK) == EBUSY)
  2306. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
  2307. if (targ->dv_state_retry < 10)
  2308. break;
  2309. /* FALLTHROUGH */
  2310. default:
  2311. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2312. #ifdef AHC_DEBUG
  2313. if (ahc_debug & AHC_SHOW_DV) {
  2314. ahc_print_devinfo(ahc, devinfo);
  2315. printf("Failed DV inquiry, skipping\n");
  2316. }
  2317. #endif
  2318. break;
  2319. }
  2320. break;
  2321. case AHC_DV_STATE_INQ_ASYNC_VERIFY:
  2322. switch (status & SS_MASK) {
  2323. case SS_NOP:
  2324. {
  2325. u_int xportflags;
  2326. u_int spi3data;
  2327. if (memcmp(targ->inq_data, targ->dv_buffer,
  2328. AHC_LINUX_DV_INQ_LEN) != 0) {
  2329. /*
  2330. * Inquiry data must have changed.
  2331. * Try from the top again.
  2332. */
  2333. AHC_SET_DV_STATE(ahc, targ,
  2334. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2335. break;
  2336. }
  2337. AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
  2338. targ->flags |= AHC_INQ_VALID;
  2339. if (ahc_linux_user_dv_setting(ahc) == 0)
  2340. break;
  2341. xportflags = targ->inq_data->flags;
  2342. if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
  2343. break;
  2344. spi3data = targ->inq_data->spi3data;
  2345. switch (spi3data & SID_SPI_CLOCK_DT_ST) {
  2346. default:
  2347. case SID_SPI_CLOCK_ST:
  2348. /* Assume only basic DV is supported. */
  2349. targ->flags |= AHC_BASIC_DV;
  2350. break;
  2351. case SID_SPI_CLOCK_DT:
  2352. case SID_SPI_CLOCK_DT_ST:
  2353. targ->flags |= AHC_ENHANCED_DV;
  2354. break;
  2355. }
  2356. break;
  2357. }
  2358. case SS_INQ_REFRESH:
  2359. AHC_SET_DV_STATE(ahc, targ,
  2360. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2361. break;
  2362. case SS_TUR:
  2363. case SS_RETRY:
  2364. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2365. if (ahc_cmd_get_transaction_status(cmd)
  2366. == CAM_REQUEUE_REQ)
  2367. targ->dv_state_retry--;
  2368. if ((status & SS_ERRMASK) == EBUSY)
  2369. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
  2370. if (targ->dv_state_retry < 10)
  2371. break;
  2372. /* FALLTHROUGH */
  2373. default:
  2374. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2375. #ifdef AHC_DEBUG
  2376. if (ahc_debug & AHC_SHOW_DV) {
  2377. ahc_print_devinfo(ahc, devinfo);
  2378. printf("Failed DV inquiry, skipping\n");
  2379. }
  2380. #endif
  2381. break;
  2382. }
  2383. break;
  2384. case AHC_DV_STATE_INQ_VERIFY:
  2385. switch (status & SS_MASK) {
  2386. case SS_NOP:
  2387. {
  2388. if (memcmp(targ->inq_data, targ->dv_buffer,
  2389. AHC_LINUX_DV_INQ_LEN) == 0) {
  2390. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2391. break;
  2392. }
  2393. #ifdef AHC_DEBUG
  2394. if (ahc_debug & AHC_SHOW_DV) {
  2395. int i;
  2396. ahc_print_devinfo(ahc, devinfo);
  2397. printf("Inquiry buffer mismatch:");
  2398. for (i = 0; i < AHC_LINUX_DV_INQ_LEN; i++) {
  2399. if ((i & 0xF) == 0)
  2400. printf("\n ");
  2401. printf("0x%x:0x0%x ",
  2402. ((uint8_t *)targ->inq_data)[i],
  2403. targ->dv_buffer[i]);
  2404. }
  2405. printf("\n");
  2406. }
  2407. #endif
  2408. if (ahc_linux_fallback(ahc, devinfo) != 0) {
  2409. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2410. break;
  2411. }
  2412. /*
  2413. * Do not count "falling back"
  2414. * against our retries.
  2415. */
  2416. targ->dv_state_retry = 0;
  2417. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2418. break;
  2419. }
  2420. case SS_INQ_REFRESH:
  2421. AHC_SET_DV_STATE(ahc, targ,
  2422. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2423. break;
  2424. case SS_TUR:
  2425. case SS_RETRY:
  2426. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2427. if (ahc_cmd_get_transaction_status(cmd)
  2428. == CAM_REQUEUE_REQ) {
  2429. targ->dv_state_retry--;
  2430. } else if ((status & SSQ_FALLBACK) != 0) {
  2431. if (ahc_linux_fallback(ahc, devinfo) != 0) {
  2432. AHC_SET_DV_STATE(ahc, targ,
  2433. AHC_DV_STATE_EXIT);
  2434. break;
  2435. }
  2436. /*
  2437. * Do not count "falling back"
  2438. * against our retries.
  2439. */
  2440. targ->dv_state_retry = 0;
  2441. } else if ((status & SS_ERRMASK) == EBUSY)
  2442. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
  2443. if (targ->dv_state_retry < 10)
  2444. break;
  2445. /* FALLTHROUGH */
  2446. default:
  2447. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2448. #ifdef AHC_DEBUG
  2449. if (ahc_debug & AHC_SHOW_DV) {
  2450. ahc_print_devinfo(ahc, devinfo);
  2451. printf("Failed DV inquiry, skipping\n");
  2452. }
  2453. #endif
  2454. break;
  2455. }
  2456. break;
  2457. case AHC_DV_STATE_TUR:
  2458. switch (status & SS_MASK) {
  2459. case SS_NOP:
  2460. if ((targ->flags & AHC_BASIC_DV) != 0) {
  2461. ahc_linux_filter_inquiry(ahc, devinfo);
  2462. AHC_SET_DV_STATE(ahc, targ,
  2463. AHC_DV_STATE_INQ_VERIFY);
  2464. } else if ((targ->flags & AHC_ENHANCED_DV) != 0) {
  2465. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REBD);
  2466. } else {
  2467. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2468. }
  2469. break;
  2470. case SS_RETRY:
  2471. case SS_TUR:
  2472. if ((status & SS_ERRMASK) == EBUSY) {
  2473. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
  2474. break;
  2475. }
  2476. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2477. if (ahc_cmd_get_transaction_status(cmd)
  2478. == CAM_REQUEUE_REQ) {
  2479. targ->dv_state_retry--;
  2480. } else if ((status & SSQ_FALLBACK) != 0) {
  2481. if (ahc_linux_fallback(ahc, devinfo) != 0) {
  2482. AHC_SET_DV_STATE(ahc, targ,
  2483. AHC_DV_STATE_EXIT);
  2484. break;
  2485. }
  2486. /*
  2487. * Do not count "falling back"
  2488. * against our retries.
  2489. */
  2490. targ->dv_state_retry = 0;
  2491. }
  2492. if (targ->dv_state_retry >= 10) {
  2493. #ifdef AHC_DEBUG
  2494. if (ahc_debug & AHC_SHOW_DV) {
  2495. ahc_print_devinfo(ahc, devinfo);
  2496. printf("DV TUR reties exhausted\n");
  2497. }
  2498. #endif
  2499. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2500. break;
  2501. }
  2502. if (status & SSQ_DELAY)
  2503. ssleep(1);
  2504. break;
  2505. case SS_START:
  2506. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_SU);
  2507. break;
  2508. case SS_INQ_REFRESH:
  2509. AHC_SET_DV_STATE(ahc, targ,
  2510. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2511. break;
  2512. default:
  2513. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2514. break;
  2515. }
  2516. break;
  2517. case AHC_DV_STATE_REBD:
  2518. switch (status & SS_MASK) {
  2519. case SS_NOP:
  2520. {
  2521. uint32_t echo_size;
  2522. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
  2523. echo_size = scsi_3btoul(&targ->dv_buffer[1]);
  2524. echo_size &= 0x1FFF;
  2525. #ifdef AHC_DEBUG
  2526. if (ahc_debug & AHC_SHOW_DV) {
  2527. ahc_print_devinfo(ahc, devinfo);
  2528. printf("Echo buffer size= %d\n", echo_size);
  2529. }
  2530. #endif
  2531. if (echo_size == 0) {
  2532. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2533. break;
  2534. }
  2535. /* Generate the buffer pattern */
  2536. targ->dv_echo_size = echo_size;
  2537. ahc_linux_generate_dv_pattern(targ);
  2538. /*
  2539. * Setup initial negotiation values.
  2540. */
  2541. ahc_linux_filter_inquiry(ahc, devinfo);
  2542. break;
  2543. }
  2544. case SS_INQ_REFRESH:
  2545. AHC_SET_DV_STATE(ahc, targ,
  2546. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2547. break;
  2548. case SS_RETRY:
  2549. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2550. if (ahc_cmd_get_transaction_status(cmd)
  2551. == CAM_REQUEUE_REQ)
  2552. targ->dv_state_retry--;
  2553. if (targ->dv_state_retry <= 10)
  2554. break;
  2555. #ifdef AHC_DEBUG
  2556. if (ahc_debug & AHC_SHOW_DV) {
  2557. ahc_print_devinfo(ahc, devinfo);
  2558. printf("DV REBD reties exhausted\n");
  2559. }
  2560. #endif
  2561. /* FALLTHROUGH */
  2562. case SS_FATAL:
  2563. default:
  2564. /*
  2565. * Setup initial negotiation values
  2566. * and try level 1 DV.
  2567. */
  2568. ahc_linux_filter_inquiry(ahc, devinfo);
  2569. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_VERIFY);
  2570. targ->dv_echo_size = 0;
  2571. break;
  2572. }
  2573. break;
  2574. case AHC_DV_STATE_WEB:
  2575. switch (status & SS_MASK) {
  2576. case SS_NOP:
  2577. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REB);
  2578. break;
  2579. case SS_INQ_REFRESH:
  2580. AHC_SET_DV_STATE(ahc, targ,
  2581. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2582. break;
  2583. case SS_RETRY:
  2584. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2585. if (ahc_cmd_get_transaction_status(cmd)
  2586. == CAM_REQUEUE_REQ) {
  2587. targ->dv_state_retry--;
  2588. } else if ((status & SSQ_FALLBACK) != 0) {
  2589. if (ahc_linux_fallback(ahc, devinfo) != 0) {
  2590. AHC_SET_DV_STATE(ahc, targ,
  2591. AHC_DV_STATE_EXIT);
  2592. break;
  2593. }
  2594. /*
  2595. * Do not count "falling back"
  2596. * against our retries.
  2597. */
  2598. targ->dv_state_retry = 0;
  2599. }
  2600. if (targ->dv_state_retry <= 10)
  2601. break;
  2602. /* FALLTHROUGH */
  2603. #ifdef AHC_DEBUG
  2604. if (ahc_debug & AHC_SHOW_DV) {
  2605. ahc_print_devinfo(ahc, devinfo);
  2606. printf("DV WEB reties exhausted\n");
  2607. }
  2608. #endif
  2609. default:
  2610. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2611. break;
  2612. }
  2613. break;
  2614. case AHC_DV_STATE_REB:
  2615. switch (status & SS_MASK) {
  2616. case SS_NOP:
  2617. if (memcmp(targ->dv_buffer, targ->dv_buffer1,
  2618. targ->dv_echo_size) != 0) {
  2619. if (ahc_linux_fallback(ahc, devinfo) != 0)
  2620. AHC_SET_DV_STATE(ahc, targ,
  2621. AHC_DV_STATE_EXIT);
  2622. else
  2623. AHC_SET_DV_STATE(ahc, targ,
  2624. AHC_DV_STATE_WEB);
  2625. break;
  2626. }
  2627. if (targ->dv_buffer != NULL) {
  2628. free(targ->dv_buffer, M_DEVBUF);
  2629. targ->dv_buffer = NULL;
  2630. }
  2631. if (targ->dv_buffer1 != NULL) {
  2632. free(targ->dv_buffer1, M_DEVBUF);
  2633. targ->dv_buffer1 = NULL;
  2634. }
  2635. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2636. break;
  2637. case SS_INQ_REFRESH:
  2638. AHC_SET_DV_STATE(ahc, targ,
  2639. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2640. break;
  2641. case SS_RETRY:
  2642. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2643. if (ahc_cmd_get_transaction_status(cmd)
  2644. == CAM_REQUEUE_REQ) {
  2645. targ->dv_state_retry--;
  2646. } else if ((status & SSQ_FALLBACK) != 0) {
  2647. if (ahc_linux_fallback(ahc, devinfo) != 0) {
  2648. AHC_SET_DV_STATE(ahc, targ,
  2649. AHC_DV_STATE_EXIT);
  2650. break;
  2651. }
  2652. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
  2653. }
  2654. if (targ->dv_state_retry <= 10) {
  2655. if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
  2656. msleep(ahc->our_id*1000/10);
  2657. break;
  2658. }
  2659. #ifdef AHC_DEBUG
  2660. if (ahc_debug & AHC_SHOW_DV) {
  2661. ahc_print_devinfo(ahc, devinfo);
  2662. printf("DV REB reties exhausted\n");
  2663. }
  2664. #endif
  2665. /* FALLTHROUGH */
  2666. default:
  2667. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2668. break;
  2669. }
  2670. break;
  2671. case AHC_DV_STATE_SU:
  2672. switch (status & SS_MASK) {
  2673. case SS_NOP:
  2674. case SS_INQ_REFRESH:
  2675. AHC_SET_DV_STATE(ahc, targ,
  2676. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2677. break;
  2678. default:
  2679. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2680. break;
  2681. }
  2682. break;
  2683. case AHC_DV_STATE_BUSY:
  2684. switch (status & SS_MASK) {
  2685. case SS_NOP:
  2686. case SS_INQ_REFRESH:
  2687. AHC_SET_DV_STATE(ahc, targ,
  2688. AHC_DV_STATE_INQ_SHORT_ASYNC);
  2689. break;
  2690. case SS_TUR:
  2691. case SS_RETRY:
  2692. AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
  2693. if (ahc_cmd_get_transaction_status(cmd)
  2694. == CAM_REQUEUE_REQ) {
  2695. targ->dv_state_retry--;
  2696. } else if (targ->dv_state_retry < 60) {
  2697. if ((status & SSQ_DELAY) != 0)
  2698. ssleep(1);
  2699. } else {
  2700. #ifdef AHC_DEBUG
  2701. if (ahc_debug & AHC_SHOW_DV) {
  2702. ahc_print_devinfo(ahc, devinfo);
  2703. printf("DV BUSY reties exhausted\n");
  2704. }
  2705. #endif
  2706. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2707. }
  2708. break;
  2709. default:
  2710. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2711. break;
  2712. }
  2713. break;
  2714. default:
  2715. printf("%s: Invalid DV completion state %d\n", ahc_name(ahc),
  2716. targ->dv_state);
  2717. AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
  2718. break;
  2719. }
  2720. }
  2721. static void
  2722. ahc_linux_dv_fill_cmd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2723. struct ahc_devinfo *devinfo)
  2724. {
  2725. memset(cmd, 0, sizeof(struct scsi_cmnd));
  2726. cmd->device = ahc->platform_data->dv_scsi_dev;
  2727. cmd->scsi_done = ahc_linux_dv_complete;
  2728. }
  2729. /*
  2730. * Synthesize an inquiry command. On the return trip, it'll be
  2731. * sniffed and the device transfer settings set for us.
  2732. */
  2733. static void
  2734. ahc_linux_dv_inq(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2735. struct ahc_devinfo *devinfo, struct ahc_linux_target *targ,
  2736. u_int request_length)
  2737. {
  2738. #ifdef AHC_DEBUG
  2739. if (ahc_debug & AHC_SHOW_DV) {
  2740. ahc_print_devinfo(ahc, devinfo);
  2741. printf("Sending INQ\n");
  2742. }
  2743. #endif
  2744. if (targ->inq_data == NULL)
  2745. targ->inq_data = malloc(AHC_LINUX_DV_INQ_LEN,
  2746. M_DEVBUF, M_WAITOK);
  2747. if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC) {
  2748. if (targ->dv_buffer != NULL)
  2749. free(targ->dv_buffer, M_DEVBUF);
  2750. targ->dv_buffer = malloc(AHC_LINUX_DV_INQ_LEN,
  2751. M_DEVBUF, M_WAITOK);
  2752. }
  2753. ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
  2754. cmd->sc_data_direction = DMA_FROM_DEVICE;
  2755. cmd->cmd_len = 6;
  2756. cmd->cmnd[0] = INQUIRY;
  2757. cmd->cmnd[4] = request_length;
  2758. cmd->request_bufflen = request_length;
  2759. if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC)
  2760. cmd->request_buffer = targ->dv_buffer;
  2761. else
  2762. cmd->request_buffer = targ->inq_data;
  2763. memset(cmd->request_buffer, 0, AHC_LINUX_DV_INQ_LEN);
  2764. }
  2765. static void
  2766. ahc_linux_dv_tur(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2767. struct ahc_devinfo *devinfo)
  2768. {
  2769. #ifdef AHC_DEBUG
  2770. if (ahc_debug & AHC_SHOW_DV) {
  2771. ahc_print_devinfo(ahc, devinfo);
  2772. printf("Sending TUR\n");
  2773. }
  2774. #endif
  2775. /* Do a TUR to clear out any non-fatal transitional state */
  2776. ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
  2777. cmd->sc_data_direction = DMA_NONE;
  2778. cmd->cmd_len = 6;
  2779. cmd->cmnd[0] = TEST_UNIT_READY;
  2780. }
  2781. #define AHC_REBD_LEN 4
  2782. static void
  2783. ahc_linux_dv_rebd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2784. struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
  2785. {
  2786. #ifdef AHC_DEBUG
  2787. if (ahc_debug & AHC_SHOW_DV) {
  2788. ahc_print_devinfo(ahc, devinfo);
  2789. printf("Sending REBD\n");
  2790. }
  2791. #endif
  2792. if (targ->dv_buffer != NULL)
  2793. free(targ->dv_buffer, M_DEVBUF);
  2794. targ->dv_buffer = malloc(AHC_REBD_LEN, M_DEVBUF, M_WAITOK);
  2795. ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
  2796. cmd->sc_data_direction = DMA_FROM_DEVICE;
  2797. cmd->cmd_len = 10;
  2798. cmd->cmnd[0] = READ_BUFFER;
  2799. cmd->cmnd[1] = 0x0b;
  2800. scsi_ulto3b(AHC_REBD_LEN, &cmd->cmnd[6]);
  2801. cmd->request_bufflen = AHC_REBD_LEN;
  2802. cmd->underflow = cmd->request_bufflen;
  2803. cmd->request_buffer = targ->dv_buffer;
  2804. }
  2805. static void
  2806. ahc_linux_dv_web(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2807. struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
  2808. {
  2809. #ifdef AHC_DEBUG
  2810. if (ahc_debug & AHC_SHOW_DV) {
  2811. ahc_print_devinfo(ahc, devinfo);
  2812. printf("Sending WEB\n");
  2813. }
  2814. #endif
  2815. ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
  2816. cmd->sc_data_direction = DMA_TO_DEVICE;
  2817. cmd->cmd_len = 10;
  2818. cmd->cmnd[0] = WRITE_BUFFER;
  2819. cmd->cmnd[1] = 0x0a;
  2820. scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
  2821. cmd->request_bufflen = targ->dv_echo_size;
  2822. cmd->underflow = cmd->request_bufflen;
  2823. cmd->request_buffer = targ->dv_buffer;
  2824. }
  2825. static void
  2826. ahc_linux_dv_reb(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2827. struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
  2828. {
  2829. #ifdef AHC_DEBUG
  2830. if (ahc_debug & AHC_SHOW_DV) {
  2831. ahc_print_devinfo(ahc, devinfo);
  2832. printf("Sending REB\n");
  2833. }
  2834. #endif
  2835. ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
  2836. cmd->sc_data_direction = DMA_FROM_DEVICE;
  2837. cmd->cmd_len = 10;
  2838. cmd->cmnd[0] = READ_BUFFER;
  2839. cmd->cmnd[1] = 0x0a;
  2840. scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
  2841. cmd->request_bufflen = targ->dv_echo_size;
  2842. cmd->underflow = cmd->request_bufflen;
  2843. cmd->request_buffer = targ->dv_buffer1;
  2844. }
  2845. static void
  2846. ahc_linux_dv_su(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2847. struct ahc_devinfo *devinfo,
  2848. struct ahc_linux_target *targ)
  2849. {
  2850. u_int le;
  2851. le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
  2852. #ifdef AHC_DEBUG
  2853. if (ahc_debug & AHC_SHOW_DV) {
  2854. ahc_print_devinfo(ahc, devinfo);
  2855. printf("Sending SU\n");
  2856. }
  2857. #endif
  2858. ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
  2859. cmd->sc_data_direction = DMA_NONE;
  2860. cmd->cmd_len = 6;
  2861. cmd->cmnd[0] = START_STOP_UNIT;
  2862. cmd->cmnd[4] = le | SSS_START;
  2863. }
  2864. static int
  2865. ahc_linux_fallback(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  2866. {
  2867. struct ahc_linux_target *targ;
  2868. struct ahc_initiator_tinfo *tinfo;
  2869. struct ahc_transinfo *goal;
  2870. struct ahc_tmode_tstate *tstate;
  2871. struct ahc_syncrate *syncrate;
  2872. u_long s;
  2873. u_int width;
  2874. u_int period;
  2875. u_int offset;
  2876. u_int ppr_options;
  2877. u_int cur_speed;
  2878. u_int wide_speed;
  2879. u_int narrow_speed;
  2880. u_int fallback_speed;
  2881. #ifdef AHC_DEBUG
  2882. if (ahc_debug & AHC_SHOW_DV) {
  2883. ahc_print_devinfo(ahc, devinfo);
  2884. printf("Trying to fallback\n");
  2885. }
  2886. #endif
  2887. ahc_lock(ahc, &s);
  2888. targ = ahc->platform_data->targets[devinfo->target_offset];
  2889. tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
  2890. devinfo->our_scsiid,
  2891. devinfo->target, &tstate);
  2892. goal = &tinfo->goal;
  2893. width = goal->width;
  2894. period = goal->period;
  2895. offset = goal->offset;
  2896. ppr_options = goal->ppr_options;
  2897. if (offset == 0)
  2898. period = AHC_ASYNC_XFER_PERIOD;
  2899. if (targ->dv_next_narrow_period == 0)
  2900. targ->dv_next_narrow_period = MAX(period, AHC_SYNCRATE_ULTRA2);
  2901. if (targ->dv_next_wide_period == 0)
  2902. targ->dv_next_wide_period = period;
  2903. if (targ->dv_max_width == 0)
  2904. targ->dv_max_width = width;
  2905. if (targ->dv_max_ppr_options == 0)
  2906. targ->dv_max_ppr_options = ppr_options;
  2907. if (targ->dv_last_ppr_options == 0)
  2908. targ->dv_last_ppr_options = ppr_options;
  2909. cur_speed = aic_calc_speed(width, period, offset, AHC_SYNCRATE_MIN);
  2910. wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
  2911. targ->dv_next_wide_period,
  2912. MAX_OFFSET,
  2913. AHC_SYNCRATE_MIN);
  2914. narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
  2915. targ->dv_next_narrow_period,
  2916. MAX_OFFSET,
  2917. AHC_SYNCRATE_MIN);
  2918. fallback_speed = aic_calc_speed(width, period+1, offset,
  2919. AHC_SYNCRATE_MIN);
  2920. #ifdef AHC_DEBUG
  2921. if (ahc_debug & AHC_SHOW_DV) {
  2922. printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
  2923. "fallback_speed= %d\n", cur_speed, wide_speed,
  2924. narrow_speed, fallback_speed);
  2925. }
  2926. #endif
  2927. if (cur_speed > 160000) {
  2928. /*
  2929. * Paced/DT/IU_REQ only transfer speeds. All we
  2930. * can do is fallback in terms of syncrate.
  2931. */
  2932. period++;
  2933. } else if (cur_speed > 80000) {
  2934. if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
  2935. /*
  2936. * Try without IU_REQ as it may be confusing
  2937. * an expander.
  2938. */
  2939. ppr_options &= ~MSG_EXT_PPR_IU_REQ;
  2940. } else {
  2941. /*
  2942. * Paced/DT only transfer speeds. All we
  2943. * can do is fallback in terms of syncrate.
  2944. */
  2945. period++;
  2946. ppr_options = targ->dv_max_ppr_options;
  2947. }
  2948. } else if (cur_speed > 3300) {
  2949. /*
  2950. * In this range we the following
  2951. * options ordered from highest to
  2952. * lowest desireability:
  2953. *
  2954. * o Wide/DT
  2955. * o Wide/non-DT
  2956. * o Narrow at a potentally higher sync rate.
  2957. *
  2958. * All modes are tested with and without IU_REQ
  2959. * set since using IUs may confuse an expander.
  2960. */
  2961. if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
  2962. ppr_options &= ~MSG_EXT_PPR_IU_REQ;
  2963. } else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
  2964. /*
  2965. * Try going non-DT.
  2966. */
  2967. ppr_options = targ->dv_max_ppr_options;
  2968. ppr_options &= ~MSG_EXT_PPR_DT_REQ;
  2969. } else if (targ->dv_last_ppr_options != 0) {
  2970. /*
  2971. * Try without QAS or any other PPR options.
  2972. * We may need a non-PPR message to work with
  2973. * an expander. We look at the "last PPR options"
  2974. * so we will perform this fallback even if the
  2975. * target responded to our PPR negotiation with
  2976. * no option bits set.
  2977. */
  2978. ppr_options = 0;
  2979. } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
  2980. /*
  2981. * If the next narrow speed is greater than
  2982. * the next wide speed, fallback to narrow.
  2983. * Otherwise fallback to the next DT/Wide setting.
  2984. * The narrow async speed will always be smaller
  2985. * than the wide async speed, so handle this case
  2986. * specifically.
  2987. */
  2988. ppr_options = targ->dv_max_ppr_options;
  2989. if (narrow_speed > fallback_speed
  2990. || period >= AHC_ASYNC_XFER_PERIOD) {
  2991. targ->dv_next_wide_period = period+1;
  2992. width = MSG_EXT_WDTR_BUS_8_BIT;
  2993. period = targ->dv_next_narrow_period;
  2994. } else {
  2995. period++;
  2996. }
  2997. } else if ((ahc->features & AHC_WIDE) != 0
  2998. && targ->dv_max_width != 0
  2999. && wide_speed >= fallback_speed
  3000. && (targ->dv_next_wide_period <= AHC_ASYNC_XFER_PERIOD
  3001. || period >= AHC_ASYNC_XFER_PERIOD)) {
  3002. /*
  3003. * We are narrow. Try falling back
  3004. * to the next wide speed with
  3005. * all supported ppr options set.
  3006. */
  3007. targ->dv_next_narrow_period = period+1;
  3008. width = MSG_EXT_WDTR_BUS_16_BIT;
  3009. period = targ->dv_next_wide_period;
  3010. ppr_options = targ->dv_max_ppr_options;
  3011. } else {
  3012. /* Only narrow fallback is allowed. */
  3013. period++;
  3014. ppr_options = targ->dv_max_ppr_options;
  3015. }
  3016. } else {
  3017. ahc_unlock(ahc, &s);
  3018. return (-1);
  3019. }
  3020. offset = MAX_OFFSET;
  3021. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
  3022. AHC_SYNCRATE_DT);
  3023. ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, FALSE);
  3024. if (period == 0) {
  3025. period = 0;
  3026. offset = 0;
  3027. ppr_options = 0;
  3028. if (width == MSG_EXT_WDTR_BUS_8_BIT)
  3029. targ->dv_next_narrow_period = AHC_ASYNC_XFER_PERIOD;
  3030. else
  3031. targ->dv_next_wide_period = AHC_ASYNC_XFER_PERIOD;
  3032. }
  3033. ahc_set_syncrate(ahc, devinfo, syncrate, period, offset,
  3034. ppr_options, AHC_TRANS_GOAL, FALSE);
  3035. targ->dv_last_ppr_options = ppr_options;
  3036. ahc_unlock(ahc, &s);
  3037. return (0);
  3038. }
/*
 * Timeout handler for Domain Validation (DV) commands.  Marks the stuck
 * SCB with a failure status, resets the bus, and schedules a short bus
 * settle delay before the SIMQ is released again.
 */
static void
ahc_linux_dv_timeout(struct scsi_cmnd *cmd)
{
	struct ahc_softc *ahc;
	struct scb *scb;
	u_long flags;

	ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
	ahc_lock(ahc, &flags);

#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_DV) {
		printf("%s: Timeout while doing DV command %x.\n",
		       ahc_name(ahc), cmd->cmnd[0]);
		ahc_dump_card_state(ahc);
	}
#endif

	/*
	 * Guard against "done race". No action is
	 * required if we just completed.
	 */
	if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
		ahc_unlock(ahc, &flags);
		return;
	}

	/*
	 * Command has not completed. Mark this
	 * SCB as having failing status prior to
	 * resetting the bus, so we get the correct
	 * error code.
	 */
	if ((scb->flags & SCB_SENSE) != 0)
		ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
	else
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
	ahc_reset_channel(ahc, cmd->device->channel + 'A', /*initiate*/TRUE);

	/*
	 * Add a minimal bus settle delay for devices that are slow to
	 * respond after bus resets.
	 */
	ahc_linux_freeze_simq(ahc);
	/* One-shot timer: release the SIMQ again after HZ/2 ticks. */
	init_timer(&ahc->platform_data->reset_timer);
	ahc->platform_data->reset_timer.data = (u_long)ahc;
	ahc->platform_data->reset_timer.expires = jiffies + HZ / 2;
	ahc->platform_data->reset_timer.function =
	    (ahc_linux_callback_t *)ahc_linux_release_simq;
	add_timer(&ahc->platform_data->reset_timer);
	if (ahc_linux_next_device_to_run(ahc) != NULL)
		ahc_schedule_runq(ahc);
	ahc_linux_run_complete_queue(ahc);
	ahc_unlock(ahc, &flags);
}
  3089. static void
  3090. ahc_linux_dv_complete(struct scsi_cmnd *cmd)
  3091. {
  3092. struct ahc_softc *ahc;
  3093. ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
  3094. /* Delete the DV timer before it goes off! */
  3095. scsi_delete_timer(cmd);
  3096. #ifdef AHC_DEBUG
  3097. if (ahc_debug & AHC_SHOW_DV)
  3098. printf("%s:%d:%d: Command completed, status= 0x%x\n",
  3099. ahc_name(ahc), cmd->device->channel,
  3100. cmd->device->id, cmd->result);
  3101. #endif
  3102. /* Wake up the state machine */
  3103. up(&ahc->platform_data->dv_cmd_sem);
  3104. }
  3105. static void
  3106. ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ)
  3107. {
  3108. uint16_t b;
  3109. u_int i;
  3110. u_int j;
  3111. if (targ->dv_buffer != NULL)
  3112. free(targ->dv_buffer, M_DEVBUF);
  3113. targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
  3114. if (targ->dv_buffer1 != NULL)
  3115. free(targ->dv_buffer1, M_DEVBUF);
  3116. targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
  3117. i = 0;
  3118. b = 0x0001;
  3119. for (j = 0 ; i < targ->dv_echo_size; j++) {
  3120. if (j < 32) {
  3121. /*
  3122. * 32bytes of sequential numbers.
  3123. */
  3124. targ->dv_buffer[i++] = j & 0xff;
  3125. } else if (j < 48) {
  3126. /*
  3127. * 32bytes of repeating 0x0000, 0xffff.
  3128. */
  3129. targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
  3130. } else if (j < 64) {
  3131. /*
  3132. * 32bytes of repeating 0x5555, 0xaaaa.
  3133. */
  3134. targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
  3135. } else {
  3136. /*
  3137. * Remaining buffer is filled with a repeating
  3138. * patter of:
  3139. *
  3140. * 0xffff
  3141. * ~0x0001 << shifted once in each loop.
  3142. */
  3143. if (j & 0x02) {
  3144. if (j & 0x01) {
  3145. targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
  3146. b <<= 1;
  3147. if (b == 0x0000)
  3148. b = 0x0001;
  3149. } else {
  3150. targ->dv_buffer[i++] = (~b & 0xff);
  3151. }
  3152. } else {
  3153. targ->dv_buffer[i++] = 0xff;
  3154. }
  3155. }
  3156. }
  3157. }
  3158. static u_int
  3159. ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  3160. {
  3161. static int warned_user;
  3162. u_int tags;
  3163. tags = 0;
  3164. if ((ahc->user_discenable & devinfo->target_mask) != 0) {
  3165. if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
  3166. if (warned_user == 0) {
  3167. printf(KERN_WARNING
  3168. "aic7xxx: WARNING: Insufficient tag_info instances\n"
  3169. "aic7xxx: for installed controllers. Using defaults\n"
  3170. "aic7xxx: Please update the aic7xxx_tag_info array in\n"
  3171. "aic7xxx: the aic7xxx_osm..c source file.\n");
  3172. warned_user++;
  3173. }
  3174. tags = AHC_MAX_QUEUE;
  3175. } else {
  3176. adapter_tag_info_t *tag_info;
  3177. tag_info = &aic7xxx_tag_info[ahc->unit];
  3178. tags = tag_info->tag_commands[devinfo->target_offset];
  3179. if (tags > AHC_MAX_QUEUE)
  3180. tags = AHC_MAX_QUEUE;
  3181. }
  3182. }
  3183. return (tags);
  3184. }
  3185. static u_int
  3186. ahc_linux_user_dv_setting(struct ahc_softc *ahc)
  3187. {
  3188. static int warned_user;
  3189. int dv;
  3190. if (ahc->unit >= NUM_ELEMENTS(aic7xxx_dv_settings)) {
  3191. if (warned_user == 0) {
  3192. printf(KERN_WARNING
  3193. "aic7xxx: WARNING: Insufficient dv settings instances\n"
  3194. "aic7xxx: for installed controllers. Using defaults\n"
  3195. "aic7xxx: Please update the aic7xxx_dv_settings array\n"
  3196. "aic7xxx: in the aic7xxx_osm.c source file.\n");
  3197. warned_user++;
  3198. }
  3199. dv = -1;
  3200. } else {
  3201. dv = aic7xxx_dv_settings[ahc->unit];
  3202. }
  3203. if (dv < 0) {
  3204. u_long s;
  3205. /*
  3206. * Apply the default.
  3207. */
  3208. /*
  3209. * XXX - Enable DV on non-U160 controllers once it
  3210. * has been tested there.
  3211. */
  3212. ahc_lock(ahc, &s);
  3213. dv = (ahc->features & AHC_DT);
  3214. if (ahc->seep_config != 0
  3215. && ahc->seep_config->signature >= CFSIGNATURE2)
  3216. dv = (ahc->seep_config->adapter_control & CFENABLEDV);
  3217. ahc_unlock(ahc, &s);
  3218. }
  3219. return (dv);
  3220. }
  3221. /*
  3222. * Determines the queue depth for a given device.
  3223. */
  3224. static void
  3225. ahc_linux_device_queue_depth(struct ahc_softc *ahc,
  3226. struct ahc_linux_device *dev)
  3227. {
  3228. struct ahc_devinfo devinfo;
  3229. u_int tags;
  3230. ahc_compile_devinfo(&devinfo,
  3231. dev->target->channel == 0
  3232. ? ahc->our_id : ahc->our_id_b,
  3233. dev->target->target, dev->lun,
  3234. dev->target->channel == 0 ? 'A' : 'B',
  3235. ROLE_INITIATOR);
  3236. tags = ahc_linux_user_tagdepth(ahc, &devinfo);
  3237. if (tags != 0
  3238. && dev->scsi_device != NULL
  3239. && dev->scsi_device->tagged_supported != 0) {
  3240. ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
  3241. ahc_print_devinfo(ahc, &devinfo);
  3242. printf("Tagged Queuing enabled. Depth %d\n", tags);
  3243. } else {
  3244. ahc_set_tags(ahc, &devinfo, AHC_QUEUE_NONE);
  3245. }
  3246. }
/*
 * Drain this device's queue of waiting commands into the controller,
 * building one SCB per command, until commands, openings, or SCBs run
 * out.  Requeues the device on the controller run list when it must
 * pause (controller queue frozen or SCB shortage).
 */
static void
ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
{
	struct ahc_cmd *acmd;
	struct scsi_cmnd *cmd;
	struct scb *scb;
	struct hardware_scb *hscb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	uint16_t mask;

	if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
		panic("running device on run list");

	while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
	    && dev->openings > 0 && dev->qfrozen == 0) {

		/*
		 * Schedule us to run later. The only reason we are not
		 * running is because the whole controller Q is frozen.
		 */
		if (ahc->platform_data->qfrozen != 0
		 && AHC_DV_SIMQ_FROZEN(ahc) == 0) {
			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHC_DEV_ON_RUN_LIST;
			return;
		}

		/*
		 * Get an scb to use.
		 */
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			/* Out of SCBs: park the device and note shortage. */
			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHC_DEV_ON_RUN_LIST;
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			return;
		}

		TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
		cmd = &acmd_scsi_cmd(acmd);
		scb->io_ctx = cmd;
		scb->platform_data->dev = dev;
		hscb = scb->hscb;
		cmd->host_scribble = (char *)scb;

		/*
		 * Fill out basics of the HSCB.
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, cmd);
		hscb->lun = cmd->device->lun;
		mask = SCB_GET_TARGET_MASK(ahc, scb);
		tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
					    SCB_GET_OUR_ID(scb),
					    SCB_GET_TARGET(ahc, scb), &tstate);
		hscb->scsirate = tinfo->scsirate;
		hscb->scsioffset = tinfo->curr.offset;
		if ((tstate->ultraenb & mask) != 0)
			hscb->control |= ULTRAENB;
		if ((ahc->user_discenable & mask) != 0)
			hscb->control |= DISCENB;
		/* Suppress console chatter for Domain Validation commands. */
		if (AHC_DV_CMD(cmd) != 0)
			scb->flags |= SCB_SILENT;
		if ((tstate->auto_negotiate & mask) != 0) {
			scb->flags |= SCB_AUTO_NEGOTIATE;
			scb->hscb->control |= MK_MESSAGE;
		}

		if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			int msg_bytes;
			uint8_t tag_msgs[2];

			/* Let the mid-layer pick the tag message if it can. */
			msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
			if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
				hscb->control |= tag_msgs[0];
				if (tag_msgs[0] == MSG_ORDERED_TASK)
					dev->commands_since_idle_or_otag = 0;
			} else
#endif
			/*
			 * Periodically send an ordered tag so commands
			 * cannot be starved indefinitely by reordering.
			 */
			if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
			 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
				hscb->control |= MSG_ORDERED_TASK;
				dev->commands_since_idle_or_otag = 0;
			} else {
				hscb->control |= MSG_SIMPLE_TASK;
			}
		}

		/* CDBs longer than 12 bytes live in the 32-byte area. */
		hscb->cdb_len = cmd->cmd_len;
		if (hscb->cdb_len <= 12) {
			memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
		} else {
			memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
			scb->flags |= SCB_CDB32_PTR;
		}

		scb->platform_data->xfer_len = 0;
		ahc_set_residual(scb, 0);
		ahc_set_sense_residual(scb, 0);
		scb->sg_count = 0;
		if (cmd->use_sg != 0) {
			struct ahc_dma_seg *sg;
			struct scatterlist *cur_seg;
			struct scatterlist *end_seg;
			int nseg;

			cur_seg = (struct scatterlist *)cmd->request_buffer;
			nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
					  cmd->sc_data_direction);
			end_seg = cur_seg + nseg;
			/* Copy the segments into the SG list. */
			sg = scb->sg_list;
			/*
			 * The sg_count may be larger than nseg if
			 * a transfer crosses a 32bit page.
			 */
			while (cur_seg < end_seg) {
				dma_addr_t addr;
				bus_size_t len;
				int consumed;

				addr = sg_dma_address(cur_seg);
				len = sg_dma_len(cur_seg);
				consumed = ahc_linux_map_seg(ahc, scb,
							     sg, addr, len);
				sg += consumed;
				scb->sg_count += consumed;
				cur_seg++;
			}
			/* Flag the final segment for the sequencer. */
			sg--;
			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

			/*
			 * Reset the sg list pointer.
			 */
			scb->hscb->sgptr =
			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

			/*
			 * Copy the first SG into the "current"
			 * data pointer area.
			 */
			scb->hscb->dataptr = scb->sg_list->addr;
			scb->hscb->datacnt = scb->sg_list->len;
		} else if (cmd->request_bufflen != 0) {
			/* Single contiguous buffer transfer. */
			struct ahc_dma_seg *sg;
			dma_addr_t addr;

			sg = scb->sg_list;
			addr = pci_map_single(ahc->dev_softc,
					      cmd->request_buffer,
					      cmd->request_bufflen,
					      cmd->sc_data_direction);
			scb->platform_data->buf_busaddr = addr;
			scb->sg_count = ahc_linux_map_seg(ahc, scb,
							  sg, addr,
							  cmd->request_bufflen);
			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

			/*
			 * Reset the sg list pointer.
			 */
			scb->hscb->sgptr =
			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

			/*
			 * Copy the first SG into the "current"
			 * data pointer area.
			 */
			scb->hscb->dataptr = sg->addr;
			scb->hscb->datacnt = sg->len;
		} else {
			/* No data phase for this command. */
			scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
			scb->hscb->dataptr = 0;
			scb->hscb->datacnt = 0;
			scb->sg_count = 0;
		}

		ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE);
		LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
		dev->openings--;
		dev->active++;
		dev->commands_issued++;
		if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
			dev->commands_since_idle_or_otag++;

		/*
		 * We only allow one untagged transaction
		 * per target in the initiator role unless
		 * we are storing a full busy target *lun*
		 * table in SCB space.
		 */
		if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
		 && (ahc->features & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;
			int target_offset;

			target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
			untagged_q = &(ahc->untagged_queues[target_offset]);
			TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
			/*
			 * Only the SCB at the head of the untagged queue
			 * may be issued now; the rest wait their turn.
			 */
			if (TAILQ_FIRST(untagged_q) != scb)
				continue;
		}
		scb->flags |= SCB_ACTIVE;
		ahc_queue_scb(ahc, scb);
	}
}
  3438. /*
  3439. * SCSI controller interrupt handler.
  3440. */
  3441. irqreturn_t
  3442. ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
  3443. {
  3444. struct ahc_softc *ahc;
  3445. u_long flags;
  3446. int ours;
  3447. ahc = (struct ahc_softc *) dev_id;
  3448. ahc_lock(ahc, &flags);
  3449. ours = ahc_intr(ahc);
  3450. if (ahc_linux_next_device_to_run(ahc) != NULL)
  3451. ahc_schedule_runq(ahc);
  3452. ahc_linux_run_complete_queue(ahc);
  3453. ahc_unlock(ahc, &flags);
  3454. return IRQ_RETVAL(ours);
  3455. }
  3456. void
  3457. ahc_platform_flushwork(struct ahc_softc *ahc)
  3458. {
  3459. while (ahc_linux_run_complete_queue(ahc) != NULL)
  3460. ;
  3461. }
  3462. static struct ahc_linux_target*
  3463. ahc_linux_alloc_target(struct ahc_softc *ahc, u_int channel, u_int target)
  3464. {
  3465. struct ahc_linux_target *targ;
  3466. u_int target_offset;
  3467. target_offset = target;
  3468. if (channel != 0)
  3469. target_offset += 8;
  3470. targ = malloc(sizeof(*targ), M_DEVBUG, M_NOWAIT);
  3471. if (targ == NULL)
  3472. return (NULL);
  3473. memset(targ, 0, sizeof(*targ));
  3474. targ->channel = channel;
  3475. targ->target = target;
  3476. targ->ahc = ahc;
  3477. targ->flags = AHC_DV_REQUIRED;
  3478. ahc->platform_data->targets[target_offset] = targ;
  3479. return (targ);
  3480. }
/*
 * Tear down a target's software state.  First forces renegotiation to
 * async/narrow so any future command to this ID starts from a safe
 * transfer agreement, then releases all per-target buffers.
 */
static void
ahc_linux_free_target(struct ahc_softc *ahc, struct ahc_linux_target *targ)
{
	struct ahc_devinfo devinfo;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int our_id;
	u_int target_offset;
	char channel;

	/*
	 * Force a negotiation to async/narrow on any
	 * future command to this device unless a bus
	 * reset occurs between now and that command.
	 */
	channel = 'A' + targ->channel;
	our_id = ahc->our_id;
	target_offset = targ->target;
	if (targ->channel != 0) {
		/* Channel B targets live in slots 8-15 of the array. */
		target_offset += 8;
		our_id = ahc->our_id_b;
	}
	tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
				    targ->target, &tstate);
	ahc_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
			    channel, ROLE_INITIATOR);
	ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
			 AHC_TRANS_GOAL, /*paused*/FALSE);
	ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_GOAL, /*paused*/FALSE);
	ahc_update_neg_request(ahc, &devinfo, tstate, tinfo, AHC_NEG_ALWAYS);

	/* Unhook the target before freeing its storage. */
	ahc->platform_data->targets[target_offset] = NULL;
	if (targ->inq_data != NULL)
		free(targ->inq_data, M_DEVBUF);
	if (targ->dv_buffer != NULL)
		free(targ->dv_buffer, M_DEVBUF);
	if (targ->dv_buffer1 != NULL)
		free(targ->dv_buffer1, M_DEVBUF);
	free(targ, M_DEVBUF);
}
  3520. static struct ahc_linux_device*
  3521. ahc_linux_alloc_device(struct ahc_softc *ahc,
  3522. struct ahc_linux_target *targ, u_int lun)
  3523. {
  3524. struct ahc_linux_device *dev;
  3525. dev = malloc(sizeof(*dev), M_DEVBUG, M_NOWAIT);
  3526. if (dev == NULL)
  3527. return (NULL);
  3528. memset(dev, 0, sizeof(*dev));
  3529. init_timer(&dev->timer);
  3530. TAILQ_INIT(&dev->busyq);
  3531. dev->flags = AHC_DEV_UNCONFIGURED;
  3532. dev->lun = lun;
  3533. dev->target = targ;
  3534. /*
  3535. * We start out life using untagged
  3536. * transactions of which we allow one.
  3537. */
  3538. dev->openings = 1;
  3539. /*
  3540. * Set maxtags to 0. This will be changed if we
  3541. * later determine that we are dealing with
  3542. * a tagged queuing capable device.
  3543. */
  3544. dev->maxtags = 0;
  3545. targ->refcount++;
  3546. targ->devices[lun] = dev;
  3547. return (dev);
  3548. }
  3549. static void
  3550. __ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
  3551. {
  3552. struct ahc_linux_target *targ;
  3553. targ = dev->target;
  3554. targ->devices[dev->lun] = NULL;
  3555. free(dev, M_DEVBUF);
  3556. targ->refcount--;
  3557. if (targ->refcount == 0
  3558. && (targ->flags & AHC_DV_REQUIRED) == 0)
  3559. ahc_linux_free_target(ahc, targ);
  3560. }
/*
 * Free a device, first synchronously cancelling its per-device timer so
 * the timer callback cannot run against freed memory.
 */
static void
ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
{
	del_timer_sync(&dev->timer);
	__ahc_linux_free_device(ahc, dev);
}
/*
 * Deliver asynchronous event notifications — transfer negotiation
 * results, sent bus-device-resets, and bus resets — to the console
 * and the Linux SCSI mid-layer.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel,
	       u_int target, u_int lun, ac_code code, void *arg)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char buf[80];
		struct ahc_linux_target *targ;
		struct info_str info;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		int target_offset;

		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahc_fetch_transinfo(ahc, channel,
					    channel == 'A' ? ahc->our_id
							   : ahc->our_id_b,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		target_offset = target;
		if (channel == 'B')
			target_offset += 8;
		targ = ahc->platform_data->targets[target_offset];
		if (targ == NULL)
			break;
		if (tinfo->curr.period == targ->last_tinfo.period
		 && tinfo->curr.width == targ->last_tinfo.width
		 && tinfo->curr.offset == targ->last_tinfo.offset
		 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
			if (bootverbose == 0)
				break;

		/* Remember what we report for the duplicate check above. */
		targ->last_tinfo.period = tinfo->curr.period;
		targ->last_tinfo.width = tinfo->curr.width;
		targ->last_tinfo.offset = tinfo->curr.offset;
		targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;

		printf("(%s:%c:", ahc_name(ahc), channel);
		if (target == CAM_TARGET_WILDCARD)
			printf("*): ");
		else
			printf("%d): ", target);
		ahc_format_transinfo(&info, &tinfo->curr);
		/* NUL-terminate whether or not the message was truncated. */
		if (info.pos < info.length)
			*info.buffer = '\0';
		else
			buf[info.length - 1] = '\0';
		printf("%s", buf);
		break;
	}
	case AC_SENT_BDR:
	{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahc->platform_data->host,
					 channel - 'A', target);
#else
		Scsi_Device *scsi_dev;

		/*
		 * Find the SCSI device associated with this
		 * request and indicate that a UA is expected.
		 */
		for (scsi_dev = ahc->platform_data->host->host_queue;
		     scsi_dev != NULL; scsi_dev = scsi_dev->next) {
			if (channel - 'A' == scsi_dev->channel
			 && target == scsi_dev->id
			 && (lun == CAM_LUN_WILDCARD
			  || lun == scsi_dev->lun)) {
				scsi_dev->was_reset = 1;
				scsi_dev->expecting_cc_ua = 1;
			}
		}
#endif
		break;
	}
	case AC_BUS_RESET:
		if (ahc->platform_data->host != NULL) {
			scsi_report_bus_reset(ahc->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahc_send_async: Unexpected async event");
	}
}
/*
 * Calls the higher level scsi done function and frees the scb.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	Scsi_Cmnd *cmd;
	struct ahc_linux_device *dev;

	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		/* Kick the next waiting untagged command for this target. */
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* A completed SCB must still be marked active; otherwise stop. */
	if ((scb->flags & SCB_ACTIVE) == 0) {
		printf("SCB %d done'd twice\n", scb->hscb->tag);
		ahc_dump_card_state(ahc);
		panic("Stopping for safety");
	}
	cmd = scb->io_ctx;
	dev = scb->platform_data->dev;
	dev->active--;
	dev->openings++;
	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
		cmd->result &= ~(CAM_DEV_QFRZN << 16);
		dev->qfrozen--;
	}
	ahc_linux_unmap_scb(ahc, scb);

	/*
	 * Guard against stale sense data.
	 * The Linux mid-layer assumes that sense
	 * was retrieved anytime the first byte of
	 * the sense buffer looks "sane".
	 */
	cmd->sense_buffer[0] = 0;
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		uint32_t amount_xferred;

		amount_xferred =
		    ahc_get_transfer_length(scb) - ahc_get_residual(scb);
		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MISC) != 0) {
				ahc_print_path(ahc, scb);
				printf("Set CAM_UNCOR_PARITY\n");
			}
#endif
			ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
#ifdef AHC_REPORT_UNDERFLOWS
		/*
		 * This code is disabled by default as some
		 * clients of the SCSI system do not properly
		 * initialize the underflow parameter. This
		 * results in spurious termination of commands
		 * that complete as expected (e.g. underflow is
		 * allowed as command can return variable amounts
		 * of data.
		 */
		} else if (amount_xferred < scb->io_ctx->underflow) {
			u_int i;

			ahc_print_path(ahc, scb);
			printf("CDB:");
			for (i = 0; i < scb->io_ctx->cmd_len; i++)
				printf(" 0x%x", scb->io_ctx->cmnd[i]);
			printf("\n");
			ahc_print_path(ahc, scb);
			printf("Saw underflow (%ld of %ld bytes). "
			       "Treated as error\n",
			       ahc_get_residual(scb),
			       ahc_get_transfer_length(scb));
			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
#endif
		} else {
			ahc_set_transaction_status(scb, CAM_REQ_CMP);
		}
	} else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
		ahc_linux_handle_scsi_status(ahc, dev, scb);
	} else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
		/* Nobody home at this ID; skip DV for normal commands. */
		dev->flags |= AHC_DEV_UNCONFIGURED;
		if (AHC_DV_CMD(cmd) == FALSE)
			dev->target->flags &= ~AHC_DV_REQUIRED;
	}

	/*
	 * Start DV for devices that require it assuming the first command
	 * sent does not result in a selection timeout.
	 */
	if (ahc_get_transaction_status(scb) != CAM_SEL_TIMEOUT
	 && (dev->target->flags & AHC_DV_REQUIRED) != 0)
		ahc_linux_start_dv(ahc);

	if (dev->openings == 1
	 && ahc_get_transaction_status(scb) == CAM_REQ_CMP
	 && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
		dev->tag_success_count++;

	/*
	 * Some devices deal with temporary internal resource
	 * shortages by returning queue full. When the queue
	 * full occurs, we throttle back. Slowly try to get
	 * back to our previous queue depth.
	 */
	if ((dev->openings + dev->active) < dev->maxtags
	 && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
		dev->tag_success_count = 0;
		dev->openings++;
	}

	if (dev->active == 0)
		dev->commands_since_idle_or_otag = 0;

	if (TAILQ_EMPTY(&dev->busyq)) {
		/* Reap an idle, unconfigured device with no timer pending. */
		if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0
		 && dev->active == 0
		 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
			ahc_linux_free_device(ahc, dev);
	} else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
		dev->flags |= AHC_DEV_ON_RUN_LIST;
	}

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		printf("Recovery SCB completes\n");
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		/* Wake up the error-handler thread waiting on this SCB. */
		if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
			ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
			up(&ahc->platform_data->eh_sem);
		}
	}

	ahc_free_scb(ahc, scb);
	ahc_linux_queue_cmd_complete(ahc, cmd);

	/* Wake the DV thread once the controller queue drains completely. */
	if ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_EMPTY) != 0
	 && LIST_FIRST(&ahc->pending_scbs) == NULL) {
		ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_EMPTY;
		up(&ahc->platform_data->dv_sem);
	}
}
/*
 * Post-process a command that completed with bad SCSI status: copy
 * sense data to the OS command, implement queue-full throttling, and
 * briefly defer new commands on BUSY.
 */
static void
ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
			     struct ahc_linux_device *dev, struct scb *scb)
{
	struct ahc_devinfo devinfo;

	ahc_compile_devinfo(&devinfo,
			    ahc->our_id,
			    dev->target->target, dev->lun,
			    dev->target->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);

	/*
	 * We don't currently trust the mid-layer to
	 * properly deal with queue full or busy. So,
	 * when one occurs, we tell the mid-layer to
	 * unconditionally requeue the command to us
	 * so that we can retry it ourselves. We also
	 * implement our own throttling mechanism so
	 * we don't clobber the device with too many
	 * commands.
	 */
	switch (ahc_get_scsi_status(scb)) {
	default:
		break;
	case SCSI_STATUS_CHECK_COND:
	case SCSI_STATUS_CMD_TERMINATED:
	{
		Scsi_Cmnd *cmd;

		/*
		 * Copy sense information to the OS's cmd
		 * structure if it is available.
		 */
		cmd = scb->io_ctx;
		if (scb->flags & SCB_SENSE) {
			u_int sense_size;

			/* Never copy more than the OS sense buffer holds. */
			sense_size = MIN(sizeof(struct scsi_sense_data)
				       - ahc_get_sense_residual(scb),
					 sizeof(cmd->sense_buffer));
			memcpy(cmd->sense_buffer,
			       ahc_get_sense_buf(ahc, scb), sense_size);
			/* Zero the tail so stale bytes can't look valid. */
			if (sense_size < sizeof(cmd->sense_buffer))
				memset(&cmd->sense_buffer[sense_size], 0,
				       sizeof(cmd->sense_buffer) - sense_size);
			cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				int i;

				printf("Copied %d bytes of sense data:",
				       sense_size);
				for (i = 0; i < sense_size; i++) {
					if ((i & 0xF) == 0)
						printf("\n");
					printf("0x%x ", cmd->sense_buffer[i]);
				}
				printf("\n");
			}
#endif
		}
		break;
	}
	case SCSI_STATUS_QUEUE_FULL:
	{
		/*
		 * By the time the core driver has returned this
		 * command, all other commands that were queued
		 * to us but not the device have been returned.
		 * This ensures that dev->active is equal to
		 * the number of commands actually queued to
		 * the device.
		 */
		dev->tag_success_count = 0;
		if (dev->active != 0) {
			/*
			 * Drop our opening count to the number
			 * of commands currently outstanding.
			 */
			dev->openings = 0;
			/*
			ahc_print_path(ahc, scb);
			printf("Dropping tag count to %d\n", dev->active);
			*/
			if (dev->active == dev->tags_on_last_queuefull) {
				dev->last_queuefull_same_count++;

				/*
				 * If we repeatedly see a queue full
				 * at the same queue depth, this
				 * device has a fixed number of tag
				 * slots. Lock in this tag depth
				 * so we stop seeing queue fulls from
				 * this device.
				 */
				if (dev->last_queuefull_same_count
				 == AHC_LOCK_TAGS_COUNT) {
					dev->maxtags = dev->active;
					ahc_print_path(ahc, scb);
					printf("Locking max tag count at %d\n",
					       dev->active);
				}
			} else {
				dev->tags_on_last_queuefull = dev->active;
				dev->last_queuefull_same_count = 0;
			}
			/* Requeue via the mid-layer so we can retry it. */
			ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahc_set_scsi_status(scb, SCSI_STATUS_OK);
			ahc_platform_set_tags(ahc, &devinfo,
					      (dev->flags & AHC_DEV_Q_BASIC)
					      ? AHC_QUEUE_BASIC
					      : AHC_QUEUE_TAGGED);
			break;
		}
		/*
		 * Drop down to a single opening, and treat this
		 * as if the target returned BUSY SCSI status.
		 */
		dev->openings = 1;
		ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
		ahc_platform_set_tags(ahc, &devinfo,
				      (dev->flags & AHC_DEV_Q_BASIC)
				      ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
	{
		/*
		 * Set a short timer to defer sending commands for
		 * a bit since Linux will not delay in this case.
		 */
		if ((dev->flags & AHC_DEV_TIMER_ACTIVE) != 0) {
			/*
			 * NOTE(review): "%c" is fed the numeric channel
			 * index, which prints a non-printable character;
			 * 'A' + dev->target->channel looks intended —
			 * confirm before changing.
			 */
			printf("%s:%c:%d: Device Timer still active during "
			       "busy processing\n", ahc_name(ahc),
			       dev->target->channel, dev->target->target);
			break;
		}
		dev->flags |= AHC_DEV_TIMER_ACTIVE;
		/* Freeze this device's queue until the timer unfreezes it. */
		dev->qfrozen++;
		init_timer(&dev->timer);
		dev->timer.data = (u_long)dev;
		dev->timer.expires = jiffies + (HZ/2);
		dev->timer.function = ahc_linux_dev_timed_unfreeze;
		add_timer(&dev->timer);
		break;
	}
	}
}
  3943. static void
  3944. ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
  3945. {
  3946. /*
  3947. * Typically, the complete queue has very few entries
  3948. * queued to it before the queue is emptied by
  3949. * ahc_linux_run_complete_queue, so sorting the entries
  3950. * by generation number should be inexpensive.
  3951. * We perform the sort so that commands that complete
  3952. * with an error are retuned in the order origionally
  3953. * queued to the controller so that any subsequent retries
  3954. * are performed in order. The underlying ahc routines do
  3955. * not guarantee the order that aborted commands will be
  3956. * returned to us.
  3957. */
  3958. struct ahc_completeq *completeq;
  3959. struct ahc_cmd *list_cmd;
  3960. struct ahc_cmd *acmd;
  3961. /*
  3962. * Map CAM error codes into Linux Error codes. We
  3963. * avoid the conversion so that the DV code has the
  3964. * full error information available when making
  3965. * state change decisions.
  3966. */
  3967. if (AHC_DV_CMD(cmd) == FALSE) {
  3968. u_int new_status;
  3969. switch (ahc_cmd_get_transaction_status(cmd)) {
  3970. case CAM_REQ_INPROG:
  3971. case CAM_REQ_CMP:
  3972. case CAM_SCSI_STATUS_ERROR:
  3973. new_status = DID_OK;
  3974. break;
  3975. case CAM_REQ_ABORTED:
  3976. new_status = DID_ABORT;
  3977. break;
  3978. case CAM_BUSY:
  3979. new_status = DID_BUS_BUSY;
  3980. break;
  3981. case CAM_REQ_INVALID:
  3982. case CAM_PATH_INVALID:
  3983. new_status = DID_BAD_TARGET;
  3984. break;
  3985. case CAM_SEL_TIMEOUT:
  3986. new_status = DID_NO_CONNECT;
  3987. break;
  3988. case CAM_SCSI_BUS_RESET:
  3989. case CAM_BDR_SENT:
  3990. new_status = DID_RESET;
  3991. break;
  3992. case CAM_UNCOR_PARITY:
  3993. new_status = DID_PARITY;
  3994. break;
  3995. case CAM_CMD_TIMEOUT:
  3996. new_status = DID_TIME_OUT;
  3997. break;
  3998. case CAM_UA_ABORT:
  3999. case CAM_REQ_CMP_ERR:
  4000. case CAM_AUTOSENSE_FAIL:
  4001. case CAM_NO_HBA:
  4002. case CAM_DATA_RUN_ERR:
  4003. case CAM_UNEXP_BUSFREE:
  4004. case CAM_SEQUENCE_FAIL:
  4005. case CAM_CCB_LEN_ERR:
  4006. case CAM_PROVIDE_FAIL:
  4007. case CAM_REQ_TERMIO:
  4008. case CAM_UNREC_HBA_ERROR:
  4009. case CAM_REQ_TOO_BIG:
  4010. new_status = DID_ERROR;
  4011. break;
  4012. case CAM_REQUEUE_REQ:
  4013. /*
  4014. * If we want the request requeued, make sure there
  4015. * are sufficent retries. In the old scsi error code,
  4016. * we used to be able to specify a result code that
  4017. * bypassed the retry count. Now we must use this
  4018. * hack. We also "fake" a check condition with
  4019. * a sense code of ABORTED COMMAND. This seems to
  4020. * evoke a retry even if this command is being sent
  4021. * via the eh thread. Ick! Ick! Ick!
  4022. */
  4023. if (cmd->retries > 0)
  4024. cmd->retries--;
  4025. new_status = DID_OK;
  4026. ahc_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
  4027. cmd->result |= (DRIVER_SENSE << 24);
  4028. memset(cmd->sense_buffer, 0,
  4029. sizeof(cmd->sense_buffer));
  4030. cmd->sense_buffer[0] = SSD_ERRCODE_VALID
  4031. | SSD_CURRENT_ERROR;
  4032. cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
  4033. break;
  4034. default:
  4035. /* We should never get here */
  4036. new_status = DID_ERROR;
  4037. break;
  4038. }
  4039. ahc_cmd_set_transaction_status(cmd, new_status);
  4040. }
  4041. completeq = &ahc->platform_data->completeq;
  4042. list_cmd = TAILQ_FIRST(completeq);
  4043. acmd = (struct ahc_cmd *)cmd;
  4044. while (list_cmd != NULL
  4045. && acmd_scsi_cmd(list_cmd).serial_number
  4046. < acmd_scsi_cmd(acmd).serial_number)
  4047. list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
  4048. if (list_cmd != NULL)
  4049. TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
  4050. else
  4051. TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
  4052. }
/*
 * Examine the INQUIRY data for a lun and clamp our user-requested
 * transfer negotiation settings (width, period, offset, PPR options,
 * transport/protocol version) down to what the device claims to
 * support, then install the filtered values as the negotiation goal.
 */
static void
ahc_linux_filter_inquiry(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	scsi_inquiry_data *sid;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_transinfo *user;
	struct	ahc_transinfo *goal;
	struct	ahc_transinfo *curr;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_syncrate *syncrate;
	struct	ahc_linux_device *dev;
	u_int	maxsync;
	u_int	width;
	u_int	period;
	u_int	offset;
	u_int	ppr_options;
	u_int	trans_version;
	u_int	prot_version;

	/*
	 * Determine if this lun actually exists.  If so,
	 * hold on to its corresponding device structure.
	 * If not, make sure we release the device and
	 * don't bother processing the rest of this inquiry
	 * command.
	 */
	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
				   devinfo->target, devinfo->lun,
				   /*alloc*/TRUE);

	sid = (struct scsi_inquiry_data *)dev->target->inq_data;
	if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {

		dev->flags &= ~AHC_DEV_UNCONFIGURED;
	} else {
		dev->flags |= AHC_DEV_UNCONFIGURED;
		return;
	}

	/*
	 * Update our notion of this device's transfer
	 * negotiation capabilities.
	 */
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	user = &tinfo->user;
	goal = &tinfo->goal;
	curr = &tinfo->curr;
	/* Start from the user's requested limits and filter down. */
	width = user->width;
	period = user->period;
	offset = user->offset;
	ppr_options = user->ppr_options;
	trans_version = user->transport_version;
	prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));

	/*
	 * Only attempt SPI3/4 once we've verified that
	 * the device claims to support SPI3/4 features.
	 */
	if (prot_version < SCSI_REV_2)
		trans_version = SID_ANSI_REV(sid);
	else
		trans_version = SCSI_REV_2;

	/* No wide bit in the INQUIRY flags -> narrow transfers only. */
	if ((sid->flags & SID_WBus16) == 0)
		width = MSG_EXT_WDTR_BUS_8_BIT;
	/* No sync support -> force asynchronous, no PPR options. */
	if ((sid->flags & SID_Sync) == 0) {
		period = 0;
		offset = 0;
		ppr_options = 0;
	}
	if ((sid->spi3data & SID_SPI_QAS) == 0)
		ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
	/*
	 * NOTE(review): without DT clocking this masks ppr_options down
	 * to the QAS bit alone (clearing DT and IU); likewise no IUS
	 * leaves only DT|QAS set.  This matches the stock aic7xxx
	 * driver, but the intent (keep vs. clear QAS here) cannot be
	 * confirmed from this file alone.
	 */
	if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
		ppr_options &= MSG_EXT_PPR_QAS_REQ;
	if ((sid->spi3data & SID_SPI_IUS) == 0)
		ppr_options &= (MSG_EXT_PPR_DT_REQ
			      | MSG_EXT_PPR_QAS_REQ);

	/* PPR options survived filtering -> allow the user's
	 * (SPI3/4) transport version after all. */
	if (prot_version > SCSI_REV_2
	 && ppr_options != 0)
		trans_version = user->transport_version;

	ahc_validate_width(ahc, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
	/* Pick the fastest sync rate class this controller supports. */
	if ((ahc->features & AHC_ULTRA2) != 0)
		maxsync = AHC_SYNCRATE_DT;
	else if ((ahc->features & AHC_ULTRA) != 0)
		maxsync = AHC_SYNCRATE_ULTRA;
	else
		maxsync = AHC_SYNCRATE_FAST;

	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
	ahc_validate_offset(ahc, /*tinfo limit*/NULL, syncrate,
			    &offset, width, ROLE_UNKNOWN);
	/* Either value hitting zero means async; clear all three. */
	if (offset == 0 || period == 0) {
		period = 0;
		offset = 0;
		ppr_options = 0;
	}
	/* Apply our filtered user settings. */
	curr->transport_version = trans_version;
	curr->protocol_version = prot_version;
	ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, /*paused*/FALSE);
	ahc_set_syncrate(ahc, devinfo, syncrate, period,
			 offset, ppr_options, AHC_TRANS_GOAL,
			 /*paused*/FALSE);
}
  4152. static void
  4153. ahc_linux_sem_timeout(u_long arg)
  4154. {
  4155. struct ahc_softc *ahc;
  4156. u_long s;
  4157. ahc = (struct ahc_softc *)arg;
  4158. ahc_lock(ahc, &s);
  4159. if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
  4160. ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
  4161. up(&ahc->platform_data->eh_sem);
  4162. }
  4163. ahc_unlock(ahc, &s);
  4164. }
  4165. static void
  4166. ahc_linux_freeze_simq(struct ahc_softc *ahc)
  4167. {
  4168. ahc->platform_data->qfrozen++;
  4169. if (ahc->platform_data->qfrozen == 1) {
  4170. scsi_block_requests(ahc->platform_data->host);
  4171. /* XXX What about Twin channels? */
  4172. ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
  4173. CAM_LUN_WILDCARD, SCB_LIST_NULL,
  4174. ROLE_INITIATOR, CAM_REQUEUE_REQ);
  4175. }
  4176. }
  4177. static void
  4178. ahc_linux_release_simq(u_long arg)
  4179. {
  4180. struct ahc_softc *ahc;
  4181. u_long s;
  4182. int unblock_reqs;
  4183. ahc = (struct ahc_softc *)arg;
  4184. unblock_reqs = 0;
  4185. ahc_lock(ahc, &s);
  4186. if (ahc->platform_data->qfrozen > 0)
  4187. ahc->platform_data->qfrozen--;
  4188. if (ahc->platform_data->qfrozen == 0)
  4189. unblock_reqs = 1;
  4190. if (AHC_DV_SIMQ_FROZEN(ahc)
  4191. && ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_RELEASE) != 0)) {
  4192. ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_RELEASE;
  4193. up(&ahc->platform_data->dv_sem);
  4194. }
  4195. ahc_schedule_runq(ahc);
  4196. ahc_unlock(ahc, &s);
  4197. /*
  4198. * There is still a race here. The mid-layer
  4199. * should keep its own freeze count and use
  4200. * a bottom half handler to run the queues
  4201. * so we can unblock with our own lock held.
  4202. */
  4203. if (unblock_reqs)
  4204. scsi_unblock_requests(ahc->platform_data->host);
  4205. }
  4206. static void
  4207. ahc_linux_dev_timed_unfreeze(u_long arg)
  4208. {
  4209. struct ahc_linux_device *dev;
  4210. struct ahc_softc *ahc;
  4211. u_long s;
  4212. dev = (struct ahc_linux_device *)arg;
  4213. ahc = dev->target->ahc;
  4214. ahc_lock(ahc, &s);
  4215. dev->flags &= ~AHC_DEV_TIMER_ACTIVE;
  4216. if (dev->qfrozen > 0)
  4217. dev->qfrozen--;
  4218. if (dev->qfrozen == 0
  4219. && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0)
  4220. ahc_linux_run_device_queue(ahc, dev);
  4221. if (TAILQ_EMPTY(&dev->busyq)
  4222. && dev->active == 0)
  4223. __ahc_linux_free_device(ahc, dev);
  4224. ahc_unlock(ahc, &s);
  4225. }
/*
 * Common worker for the Linux error-handler entry points: attempt to
 * deliver either an ABORT (flag == SCB_ABORT) or a TARGET RESET
 * (flag == SCB_DEVICE_RESET) message for the given command.
 *
 * Returns SUCCESS or FAILED per the Linux EH contract.  If the
 * command cannot be found anywhere (device queue, untagged queue,
 * pending SCB list), we assume no recovery is needed and report
 * SUCCESS.
 */
static int
ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
{
	struct ahc_softc *ahc;
	struct ahc_cmd *acmd;
	struct ahc_cmd *list_acmd;
	struct ahc_linux_device *dev;
	struct scb *pending_scb;
	u_long s;
	u_int  saved_scbptr;		/* SCBPTR to restore after card pokes */
	u_int  active_scb_index;
	u_int  last_phase;
	u_int  saved_scsiid;
	u_int  cdb_byte;
	int    retval;
	int    was_paused;
	int    paused;
	int    wait;			/* must sleep for message delivery */
	int    disconnected;

	pending_scb = NULL;
	paused = FALSE;
	wait = FALSE;
	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
	acmd = (struct ahc_cmd *)cmd;

	printf("%s:%d:%d:%d: Attempting to queue a%s message\n",
	       ahc_name(ahc), cmd->device->channel,
	       cmd->device->id, cmd->device->lun,
	       flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");

	/* Log the CDB for post-mortem debugging. */
	printf("CDB:");
	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
		printf(" 0x%x", cmd->cmnd[cdb_byte]);
	printf("\n");

	/*
	 * In all versions of Linux, we have to work around
	 * a major flaw in how the mid-layer is locked down
	 * if we are to sleep successfully in our error handler
	 * while allowing our interrupt handler to run.  Since
	 * the midlayer acquires either the io_request_lock or
	 * our lock prior to calling us, we must use the
	 * spin_unlock_irq() method for unlocking our lock.
	 * This will force interrupts to be enabled on the
	 * current CPU.  Since the EH thread should not have
	 * been running with CPU interrupts disabled other than
	 * by acquiring either the io_request_lock or our own
	 * lock, this *should* be safe.
	 */
	ahc_midlayer_entrypoint_lock(ahc, &s);

	/*
	 * First determine if we currently own this command.
	 * Start by searching the device queue.  If not found
	 * there, check the pending_scb list.  If not found
	 * at all, and the system wanted us to just abort the
	 * command, return success.
	 */
	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
				   cmd->device->lun, /*alloc*/FALSE);

	if (dev == NULL) {
		/*
		 * No target device for this command exists,
		 * so we must not still own the command.
		 */
		printf("%s:%d:%d:%d: Is not an active device\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = SUCCESS;
		goto no_cmd;
	}

	/* Still on our per-device busy queue?  It never reached the
	 * controller, so an abort is just a dequeue + complete. */
	TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
		if (list_acmd == acmd)
			break;
	}

	if (list_acmd != NULL) {
		printf("%s:%d:%d:%d: Command found on device queue\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		if (flag == SCB_ABORT) {
			TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
			cmd->result = DID_ABORT << 16;
			ahc_linux_queue_cmd_complete(ahc, cmd);
			retval = SUCCESS;
			goto done;
		}
	}

	/* Untagged commands may be parked in the controller's
	 * untagged queues instead of the pending SCB list. */
	if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
	 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
				       cmd->device->channel + 'A',
				       cmd->device->lun,
				       CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
		printf("%s:%d:%d:%d: Command found on untagged queue\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = SUCCESS;
		goto done;
	}

	/*
	 * See if we can find a matching cmd in the pending list.
	 */
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		if (pending_scb->io_ctx == cmd)
			break;
	}

	if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {

		/* Any SCB for this device will do for a target reset */
		LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		  	if (ahc_match_scb(ahc, pending_scb, cmd->device->id,
					  cmd->device->channel + 'A',
					  CAM_LUN_WILDCARD,
					  SCB_LIST_NULL, ROLE_INITIATOR) == 0)
				break;
		}
	}

	if (pending_scb == NULL) {
		printf("%s:%d:%d:%d: Command not found\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		goto no_cmd;
	}

	if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
		/*
		 * We can't queue two recovery actions using the same SCB
		 */
		retval = FAILED;
		goto done;
	}

	/*
	 * Ensure that the card doesn't do anything
	 * behind our back and that we didn't "just" miss
	 * an interrupt that would affect this cmd.
	 */
	was_paused = ahc_is_paused(ahc);
	ahc_pause_and_flushwork(ahc);
	paused = TRUE;

	if ((pending_scb->flags & SCB_ACTIVE) == 0) {
		printf("%s:%d:%d:%d: Command already completed\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		goto no_cmd;
	}

	printf("%s: At time of recovery, card was %spaused\n",
	       ahc_name(ahc), was_paused ? "" : "not ");
	ahc_dump_card_state(ahc);

	disconnected = TRUE;
	if (flag == SCB_ABORT) {
		/* Cheapest abort: pull the SCB back out of the
		 * qinfifo before the sequencer ever selects it. */
		if (ahc_search_qinfifo(ahc, cmd->device->id,
				       cmd->device->channel + 'A',
				       cmd->device->lun,
				       pending_scb->hscb->tag,
				       ROLE_INITIATOR, CAM_REQ_ABORTED,
				       SEARCH_COMPLETE) > 0) {
			printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
			       ahc_name(ahc), cmd->device->channel,
			       cmd->device->id, cmd->device->lun);
			retval = SUCCESS;
			goto done;
		}
	} else if (ahc_search_qinfifo(ahc, cmd->device->id,
				      cmd->device->channel + 'A',
				      cmd->device->lun, pending_scb->hscb->tag,
				      ROLE_INITIATOR, /*status*/0,
				      SEARCH_COUNT) > 0) {
		/* Still waiting in the qinfifo, so not disconnected. */
		disconnected = FALSE;
	}

	if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
		struct scb *bus_scb;

		/* The bus is mid-transaction; see whether the SCB on
		 * the bus is ours (or our target's, for a reset). */
		bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
		if (bus_scb == pending_scb)
			disconnected = FALSE;
		else if (flag != SCB_ABORT
		      && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
		      && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
			disconnected = FALSE;
	}

	/*
	 * At this point, pending_scb is the scb associated with the
	 * passed in command.  That command is currently active on the
	 * bus, is in the disconnected state, or we're hoping to find
	 * a command for the same target active on the bus to abuse to
	 * send a BDR.  Queue the appropriate message based on which of
	 * these states we are in.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);
	saved_scbptr = ahc_inb(ahc, SCBPTR);
	active_scb_index = ahc_inb(ahc, SCB_TAG);
	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	if (last_phase != P_BUSFREE
	 && (pending_scb->hscb->tag == active_scb_index
	  || (flag == SCB_DEVICE_RESET
	   && SCSIID_TARGET(ahc, saved_scsiid) == cmd->device->id))) {

		/*
		 * We're active on the bus, so assert ATN
		 * and hope that the target responds.
		 */
		pending_scb = ahc_lookup_scb(ahc, active_scb_index);
		pending_scb->flags |= SCB_RECOVERY_SCB|flag;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
		printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		wait = TRUE;
	} else if (disconnected) {

		/*
		 * Actually re-queue this SCB in an attempt
		 * to select the device before it reconnects.
		 * In either case (selection or reselection),
		 * we will now issue the approprate message
		 * to the timed-out device.
		 *
		 * Set the MK_MESSAGE control bit indicating
		 * that we desire to send a message.  We
		 * also set the disconnected flag since
		 * in the paging case there is no guarantee
		 * that our SCB control byte matches the
		 * version on the card.  We don't want the
		 * sequencer to abort the command thinking
		 * an unsolicited reselection occurred.
		 */
		pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
		pending_scb->flags |= SCB_RECOVERY_SCB|flag;

		/*
		 * Remove any cached copy of this SCB in the
		 * disconnected list in preparation for the
		 * queuing of our abort SCB.  We use the
		 * same element in the SCB, SCB_NEXT, for
		 * both the qinfifo and the disconnected list.
		 */
		ahc_search_disc_list(ahc, cmd->device->id,
				     cmd->device->channel + 'A',
				     cmd->device->lun, pending_scb->hscb->tag,
				     /*stop_on_first*/TRUE,
				     /*remove*/TRUE,
				     /*save_state*/FALSE);

		/*
		 * In the non-paging case, the sequencer will
		 * never re-reference the in-core SCB.
		 * To make sure we are notified during
		 * reslection, set the MK_MESSAGE flag in
		 * the card's copy of the SCB.
		 */
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag);
			ahc_outb(ahc, SCB_CONTROL,
				 ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE);
		}

		/*
		 * Clear out any entries in the QINFIFO first
		 * so we are the next SCB for this target
		 * to run.
		 */
		ahc_search_qinfifo(ahc, cmd->device->id,
				   cmd->device->channel + 'A',
				   cmd->device->lun, SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
		ahc_qinfifo_requeue_tail(ahc, pending_scb);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
		ahc_print_path(ahc, pending_scb);
		printf("Device is disconnected, re-queuing SCB\n");
		wait = TRUE;
	} else {
		printf("%s:%d:%d:%d: Unable to deliver message\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = FAILED;
		goto done;
	}

no_cmd:
	/*
	 * Our assumption is that if we don't have the command, no
	 * recovery action was required, so we return success.  Again,
	 * the semantics of the mid-layer recovery engine are not
	 * well defined, so this may change in time.
	 */
	retval = SUCCESS;
done:
	if (paused)
		ahc_unpause(ahc);
	if (wait) {
		struct timer_list timer;
		int ret;

		/*
		 * Sleep (with a 5-second timeout) until the command
		 * completion path ups eh_sem for us.  A timer firing
		 * first means the message was never delivered.
		 */
		ahc->platform_data->flags |= AHC_UP_EH_SEMAPHORE;
		spin_unlock_irq(&ahc->platform_data->spin_lock);
		init_timer(&timer);
		timer.data = (u_long)ahc;
		timer.expires = jiffies + (5 * HZ);
		timer.function = ahc_linux_sem_timeout;
		add_timer(&timer);
		printf("Recovery code sleeping\n");
		down(&ahc->platform_data->eh_sem);
		printf("Recovery code awake\n");
		ret = del_timer_sync(&timer);
		if (ret == 0) {
			printf("Timer Expired\n");
			retval = FAILED;
		}
		spin_lock_irq(&ahc->platform_data->spin_lock);
	}
	ahc_schedule_runq(ahc);
	ahc_linux_run_complete_queue(ahc);
	ahc_midlayer_entrypoint_unlock(ahc, &s);
	return (retval);
}
  4528. void
  4529. ahc_platform_dump_card_state(struct ahc_softc *ahc)
  4530. {
  4531. struct ahc_linux_device *dev;
  4532. int channel;
  4533. int maxchannel;
  4534. int target;
  4535. int maxtarget;
  4536. int lun;
  4537. int i;
  4538. maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0;
  4539. maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7;
  4540. for (channel = 0; channel <= maxchannel; channel++) {
  4541. for (target = 0; target <=maxtarget; target++) {
  4542. for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
  4543. struct ahc_cmd *acmd;
  4544. dev = ahc_linux_get_device(ahc, channel, target,
  4545. lun, /*alloc*/FALSE);
  4546. if (dev == NULL)
  4547. continue;
  4548. printf("DevQ(%d:%d:%d): ",
  4549. channel, target, lun);
  4550. i = 0;
  4551. TAILQ_FOREACH(acmd, &dev->busyq,
  4552. acmd_links.tqe) {
  4553. if (i++ > AHC_SCB_MAX)
  4554. break;
  4555. }
  4556. printf("%d waiting\n", i);
  4557. }
  4558. }
  4559. }
  4560. }
  4561. static void ahc_linux_exit(void);
  4562. static void ahc_linux_get_period(struct scsi_target *starget)
  4563. {
  4564. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4565. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4566. struct ahc_tmode_tstate *tstate;
  4567. struct ahc_initiator_tinfo *tinfo
  4568. = ahc_fetch_transinfo(ahc,
  4569. starget->channel + 'A',
  4570. shost->this_id, starget->id, &tstate);
  4571. spi_period(starget) = tinfo->curr.period;
  4572. }
  4573. static void ahc_linux_set_period(struct scsi_target *starget, int period)
  4574. {
  4575. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4576. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4577. struct ahc_tmode_tstate *tstate;
  4578. struct ahc_initiator_tinfo *tinfo
  4579. = ahc_fetch_transinfo(ahc,
  4580. starget->channel + 'A',
  4581. shost->this_id, starget->id, &tstate);
  4582. struct ahc_devinfo devinfo;
  4583. unsigned int ppr_options = tinfo->curr.ppr_options;
  4584. unsigned long flags;
  4585. unsigned long offset = tinfo->curr.offset;
  4586. struct ahc_syncrate *syncrate;
  4587. if (offset == 0)
  4588. offset = MAX_OFFSET;
  4589. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  4590. starget->channel + 'A', ROLE_INITIATOR);
  4591. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
  4592. ahc_lock(ahc, &flags);
  4593. ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
  4594. ppr_options, AHC_TRANS_GOAL, FALSE);
  4595. ahc_unlock(ahc, &flags);
  4596. }
  4597. static void ahc_linux_get_offset(struct scsi_target *starget)
  4598. {
  4599. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4600. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4601. struct ahc_tmode_tstate *tstate;
  4602. struct ahc_initiator_tinfo *tinfo
  4603. = ahc_fetch_transinfo(ahc,
  4604. starget->channel + 'A',
  4605. shost->this_id, starget->id, &tstate);
  4606. spi_offset(starget) = tinfo->curr.offset;
  4607. }
  4608. static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
  4609. {
  4610. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4611. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4612. struct ahc_tmode_tstate *tstate;
  4613. struct ahc_initiator_tinfo *tinfo
  4614. = ahc_fetch_transinfo(ahc,
  4615. starget->channel + 'A',
  4616. shost->this_id, starget->id, &tstate);
  4617. struct ahc_devinfo devinfo;
  4618. unsigned int ppr_options = 0;
  4619. unsigned int period = 0;
  4620. unsigned long flags;
  4621. struct ahc_syncrate *syncrate = NULL;
  4622. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  4623. starget->channel + 'A', ROLE_INITIATOR);
  4624. if (offset != 0) {
  4625. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
  4626. period = tinfo->curr.period;
  4627. ppr_options = tinfo->curr.ppr_options;
  4628. }
  4629. ahc_lock(ahc, &flags);
  4630. ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
  4631. ppr_options, AHC_TRANS_GOAL, FALSE);
  4632. ahc_unlock(ahc, &flags);
  4633. }
  4634. static void ahc_linux_get_width(struct scsi_target *starget)
  4635. {
  4636. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4637. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4638. struct ahc_tmode_tstate *tstate;
  4639. struct ahc_initiator_tinfo *tinfo
  4640. = ahc_fetch_transinfo(ahc,
  4641. starget->channel + 'A',
  4642. shost->this_id, starget->id, &tstate);
  4643. spi_width(starget) = tinfo->curr.width;
  4644. }
  4645. static void ahc_linux_set_width(struct scsi_target *starget, int width)
  4646. {
  4647. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4648. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4649. struct ahc_devinfo devinfo;
  4650. unsigned long flags;
  4651. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  4652. starget->channel + 'A', ROLE_INITIATOR);
  4653. ahc_lock(ahc, &flags);
  4654. ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
  4655. ahc_unlock(ahc, &flags);
  4656. }
  4657. static void ahc_linux_get_dt(struct scsi_target *starget)
  4658. {
  4659. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4660. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4661. struct ahc_tmode_tstate *tstate;
  4662. struct ahc_initiator_tinfo *tinfo
  4663. = ahc_fetch_transinfo(ahc,
  4664. starget->channel + 'A',
  4665. shost->this_id, starget->id, &tstate);
  4666. spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ;
  4667. }
  4668. static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
  4669. {
  4670. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4671. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4672. struct ahc_tmode_tstate *tstate;
  4673. struct ahc_initiator_tinfo *tinfo
  4674. = ahc_fetch_transinfo(ahc,
  4675. starget->channel + 'A',
  4676. shost->this_id, starget->id, &tstate);
  4677. struct ahc_devinfo devinfo;
  4678. unsigned int ppr_options = tinfo->curr.ppr_options
  4679. & ~MSG_EXT_PPR_DT_REQ;
  4680. unsigned int period = tinfo->curr.period;
  4681. unsigned long flags;
  4682. struct ahc_syncrate *syncrate;
  4683. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  4684. starget->channel + 'A', ROLE_INITIATOR);
  4685. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
  4686. dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
  4687. ahc_lock(ahc, &flags);
  4688. ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
  4689. ppr_options, AHC_TRANS_GOAL, FALSE);
  4690. ahc_unlock(ahc, &flags);
  4691. }
  4692. static void ahc_linux_get_qas(struct scsi_target *starget)
  4693. {
  4694. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4695. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4696. struct ahc_tmode_tstate *tstate;
  4697. struct ahc_initiator_tinfo *tinfo
  4698. = ahc_fetch_transinfo(ahc,
  4699. starget->channel + 'A',
  4700. shost->this_id, starget->id, &tstate);
  4701. spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ;
  4702. }
  4703. static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
  4704. {
  4705. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4706. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4707. struct ahc_tmode_tstate *tstate;
  4708. struct ahc_initiator_tinfo *tinfo
  4709. = ahc_fetch_transinfo(ahc,
  4710. starget->channel + 'A',
  4711. shost->this_id, starget->id, &tstate);
  4712. struct ahc_devinfo devinfo;
  4713. unsigned int ppr_options = tinfo->curr.ppr_options
  4714. & ~MSG_EXT_PPR_QAS_REQ;
  4715. unsigned int period = tinfo->curr.period;
  4716. unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
  4717. unsigned long flags;
  4718. struct ahc_syncrate *syncrate;
  4719. if (qas)
  4720. ppr_options |= MSG_EXT_PPR_QAS_REQ;
  4721. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  4722. starget->channel + 'A', ROLE_INITIATOR);
  4723. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
  4724. dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
  4725. ahc_lock(ahc, &flags);
  4726. ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
  4727. ppr_options, AHC_TRANS_GOAL, FALSE);
  4728. ahc_unlock(ahc, &flags);
  4729. }
  4730. static void ahc_linux_get_iu(struct scsi_target *starget)
  4731. {
  4732. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4733. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4734. struct ahc_tmode_tstate *tstate;
  4735. struct ahc_initiator_tinfo *tinfo
  4736. = ahc_fetch_transinfo(ahc,
  4737. starget->channel + 'A',
  4738. shost->this_id, starget->id, &tstate);
  4739. spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ;
  4740. }
  4741. static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
  4742. {
  4743. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
  4744. struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
  4745. struct ahc_tmode_tstate *tstate;
  4746. struct ahc_initiator_tinfo *tinfo
  4747. = ahc_fetch_transinfo(ahc,
  4748. starget->channel + 'A',
  4749. shost->this_id, starget->id, &tstate);
  4750. struct ahc_devinfo devinfo;
  4751. unsigned int ppr_options = tinfo->curr.ppr_options
  4752. & ~MSG_EXT_PPR_IU_REQ;
  4753. unsigned int period = tinfo->curr.period;
  4754. unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
  4755. unsigned long flags;
  4756. struct ahc_syncrate *syncrate;
  4757. if (iu)
  4758. ppr_options |= MSG_EXT_PPR_IU_REQ;
  4759. ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
  4760. starget->channel + 'A', ROLE_INITIATOR);
  4761. syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
  4762. dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
  4763. ahc_lock(ahc, &flags);
  4764. ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
  4765. ppr_options, AHC_TRANS_GOAL, FALSE);
  4766. ahc_unlock(ahc, &flags);
  4767. }
/*
 * SPI transport class hooks: get/set accessors for each negotiable
 * transfer attribute, plus show_* flags requesting a sysfs attribute
 * for each one.
 */
static struct spi_function_template ahc_linux_transport_functions = {
	.get_offset	= ahc_linux_get_offset,
	.set_offset	= ahc_linux_set_offset,
	.show_offset	= 1,
	.get_period	= ahc_linux_get_period,
	.set_period	= ahc_linux_set_period,
	.show_period	= 1,
	.get_width	= ahc_linux_get_width,
	.set_width	= ahc_linux_set_width,
	.show_width	= 1,
	.get_dt		= ahc_linux_get_dt,
	.set_dt		= ahc_linux_set_dt,
	.show_dt	= 1,
	.get_iu		= ahc_linux_get_iu,
	.set_iu		= ahc_linux_set_iu,
	.show_iu	= 1,
	.get_qas	= ahc_linux_get_qas,
	.set_qas	= ahc_linux_set_qas,
	.show_qas	= 1,
};
  4788. static int __init
  4789. ahc_linux_init(void)
  4790. {
  4791. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  4792. ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions);
  4793. if (!ahc_linux_transport_template)
  4794. return -ENODEV;
  4795. int rc = ahc_linux_detect(&aic7xxx_driver_template);
  4796. if (rc)
  4797. return rc;
  4798. spi_release_transport(ahc_linux_transport_template);
  4799. ahc_linux_exit();
  4800. return -ENODEV;
  4801. #else
  4802. scsi_register_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
  4803. if (aic7xxx_driver_template.present == 0) {
  4804. scsi_unregister_module(MODULE_SCSI_HA,
  4805. &aic7xxx_driver_template);
  4806. return (-ENODEV);
  4807. }
  4808. return (0);
  4809. #endif
  4810. }
/*
 * Module teardown: stop per-controller DV threads, detach from the
 * mid-layer (2.4 only), release bus-specific resources, and drop the
 * SPI transport registration.  The ordering here is deliberate.
 */
static void
ahc_linux_exit(void)
{
	struct ahc_softc *ahc;

	/*
	 * Shutdown DV threads before going into the SCSI mid-layer.
	 * This avoids situations where the mid-layer locks the entire
	 * kernel so that waiting for our DV threads to exit leads
	 * to deadlock.
	 */
	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
		ahc_linux_kill_dv_thread(ahc);
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/*
	 * In 2.4 we have to unregister from the PCI core _after_
	 * unregistering from the scsi midlayer to avoid dangling
	 * references.
	 */
	scsi_unregister_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
#endif
	/* Release bus front-ends, then the transport class template. */
	ahc_linux_pci_exit();
	ahc_linux_eisa_exit();
	spi_release_transport(ahc_linux_transport_template);
}
/* Register the driver's module entry and exit points with the kernel. */
module_init(ahc_linux_init);
module_exit(ahc_linux_exit);