/* dc395x.c */
  1. /*
  2. * dc395x.c
  3. *
  4. * Device Driver for Tekram DC395(U/UW/F), DC315(U)
  5. * PCI SCSI Bus Master Host Adapter
  6. * (SCSI chip set used Tekram ASIC TRM-S1040)
  7. *
  8. * Authors:
  9. * C.L. Huang <ching@tekram.com.tw>
  10. * Erich Chen <erich@tekram.com.tw>
  11. * (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
  12. *
  13. * Kurt Garloff <garloff@suse.de>
  14. * (C) 1999-2000 Kurt Garloff
  15. *
  16. * Oliver Neukum <oliver@neukum.name>
  17. * Ali Akcaagac <aliakc@web.de>
  18. * Jamie Lenehan <lenehan@twibble.org>
  19. * (C) 2003
  20. *
  21. * License: GNU GPL
  22. *
  23. *************************************************************************
  24. *
  25. * Redistribution and use in source and binary forms, with or without
  26. * modification, are permitted provided that the following conditions
  27. * are met:
  28. * 1. Redistributions of source code must retain the above copyright
  29. * notice, this list of conditions and the following disclaimer.
  30. * 2. Redistributions in binary form must reproduce the above copyright
  31. * notice, this list of conditions and the following disclaimer in the
  32. * documentation and/or other materials provided with the distribution.
  33. * 3. The name of the author may not be used to endorse or promote products
  34. * derived from this software without specific prior written permission.
  35. *
  36. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  37. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  38. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  39. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  40. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  41. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  42. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  43. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  44. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  45. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  46. *
  47. ************************************************************************
  48. */
  49. #include <linux/module.h>
  50. #include <linux/moduleparam.h>
  51. #include <linux/delay.h>
  52. #include <linux/ctype.h>
  53. #include <linux/blkdev.h>
  54. #include <linux/interrupt.h>
  55. #include <linux/init.h>
  56. #include <linux/spinlock.h>
  57. #include <linux/pci.h>
  58. #include <linux/list.h>
  59. #include <linux/vmalloc.h>
  60. #include <asm/io.h>
  61. #include <scsi/scsi.h>
  62. #include <scsi/scsicam.h> /* needed for scsicam_bios_param */
  63. #include <scsi/scsi_cmnd.h>
  64. #include <scsi/scsi_device.h>
  65. #include <scsi/scsi_host.h>
  66. #include "dc395x.h"
/* Driver identity strings used in log messages and the banner. */
#define DC395X_NAME	"dc395x"
#define DC395X_BANNER	"Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
#define DC395X_VERSION	"v2.05, 2004/03/08"

/*---------------------------------------------------------------------------
                                  Features
 ---------------------------------------------------------------------------*/
/*
 * Set to disable parts of the driver
 */
/*#define DC395x_NO_DISCONNECT*/
/*#define DC395x_NO_TAGQ*/
/*#define DC395x_NO_SYNC*/
/*#define DC395x_NO_WIDE*/

/*---------------------------------------------------------------------------
                                 Debugging
 ---------------------------------------------------------------------------*/
/*
 * Types of debugging that can be enabled and disabled.  Each DBG_* value
 * is a bit flag that may be or'ed into DEBUG_MASK (see below).
 */
#define DBG_KG		0x0001
#define DBG_0		0x0002
#define DBG_1		0x0004
#define DBG_SG		0x0020
#define DBG_FIFO	0x0040
#define DBG_PIO		0x0080
/*
 * Set the set of things to output debugging for.
 * Undefine to remove all debugging
 */
/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
/*#define DEBUG_MASK DBG_0*/

/*
 * Output a kernel message at the specified level and append the
 * driver name and a ": " to the start of the message
 */
#define dprintkl(level, format, arg...)  \
	printk(level DC395X_NAME ": " format , ## arg)

#ifdef DEBUG_MASK
/*
 * print a debug message - this is formatted with KERN_DEBUG, then the
 * driver name followed by a ": " and then the message is output.
 * This also checks that the specified debug level is enabled before
 * outputting the message
 */
#define dprintkdbg(type, format, arg...) \
	do { \
		if ((type) & (DEBUG_MASK)) \
			dprintkl(KERN_DEBUG , format , ## arg); \
	} while (0)

/*
 * Check if the specified type of debugging is enabled
 */
#define debug_enabled(type)	((DEBUG_MASK) & (type))

#else
/*
 * No debugging. Do nothing
 */
#define dprintkdbg(type, format, arg...) \
	do {} while (0)
#define debug_enabled(type)	(0)

#endif
/* Fallback PCI ids in case the system pci_ids header does not define them. */
#ifndef PCI_VENDOR_ID_TEKRAM
#define PCI_VENDOR_ID_TEKRAM			0x1DE1	/* Vendor ID    */
#endif
#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
#define PCI_DEVICE_ID_TEKRAM_TRMS1040		0x0391	/* Device ID    */
#endif

/* Serialise driver entry points via the host_lock of the Scsi_Host in dev. */
#define DC395x_LOCK_IO(dev,flags)	spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
#define DC395x_UNLOCK_IO(dev,flags)	spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)

/*
 * Port I/O accessors for the TRM-S1040 register window, addressed
 * relative to the adapter's io_port_base.
 */
#define DC395x_read8(acb,address)	(u8)(inb(acb->io_port_base + (address)))
#define DC395x_read16(acb,address)	(u16)(inw(acb->io_port_base + (address)))
#define DC395x_read32(acb,address)	(u32)(inl(acb->io_port_base + (address)))
#define DC395x_write8(acb,address,value)	outb((value), acb->io_port_base + (address))
#define DC395x_write16(acb,address,value)	outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value)	outl((value), acb->io_port_base + (address))
  142. /* cmd->result */
  143. #define RES_TARGET 0x000000FF /* Target State */
  144. #define RES_TARGET_LNX STATUS_MASK /* Only official ... */
  145. #define RES_ENDMSG 0x0000FF00 /* End Message */
  146. #define RES_DID 0x00FF0000 /* DID_ codes */
  147. #define RES_DRV 0xFF000000 /* DRIVER_ codes */
  148. #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
  149. #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
  150. #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
  151. #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
  152. #define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
  153. #define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
  154. #define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
  155. #define TAG_NONE 255
/*
 * srb->segment_x is the hw sg list. It is always allocated as a
 * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
 * cross a page boundary.
 */
#define SEGMENTX_LEN	(sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)

/* One hardware scatter/gather element (32-bit bus address + length). */
struct SGentry {
	u32 address;		/* bus! address */
	u32 length;
};
/* The SEEPROM structure for TRM_S1040 */
/* Per-target configuration as stored in the serial EEPROM (4 bytes/target). */
struct NVRamTarget {
	u8 cfg0;		/* Target configuration byte 0 */
	u8 period;		/* Target period */
	u8 cfg2;		/* Target configuration byte 2 */
	u8 cfg3;		/* Target configuration byte 3 */
};
/*
 * In-memory image of the adapter's 128-byte serial EEPROM contents.
 * The numbers in the field comments are byte offsets within the EEPROM.
 */
struct NvRamType {
	u8 sub_vendor_id[2];		/* 0,1  Sub Vendor ID   */
	u8 sub_sys_id[2];		/* 2,3  Sub System ID   */
	u8 sub_class;			/* 4    Sub Class       */
	u8 vendor_id[2];		/* 5,6  Vendor ID       */
	u8 device_id[2];		/* 7,8  Device ID       */
	u8 reserved;			/* 9    Reserved        */
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];
					/** 10,11,12,13
					 ** 14,15,16,17
					 ** ....
					 ** ....
					 ** 70,71,72,73
					 */
	u8 scsi_id;			/* 74 Host Adapter SCSI ID */
	u8 channel_cfg;			/* 75 Channel configuration */
	u8 delay_time;			/* 76 Power on delay time */
	u8 max_tag;			/* 77 Maximum tags */
	u8 reserved0;			/* 78 */
	u8 boot_target;			/* 79 */
	u8 boot_lun;			/* 80 */
	u8 reserved1;			/* 81 */
	u16 reserved2[22];		/* 82,..125 */
	u16 cksum;			/* 126,127 */
};
/*
 * One outstanding SCSI request: ties the midlayer's scsi_cmnd to the
 * hardware scatter/gather list and per-request transfer/message state.
 */
struct ScsiReqBlk {
	struct list_head list;		/* next/prev ptrs for srb lists */
	struct DeviceCtlBlk *dcb;	/* device this request belongs to */
	struct scsi_cmnd *cmd;		/* midlayer command being serviced */

	struct SGentry *segment_x;	/* Linear array of hw sg entries (up to 64 entries) */
	dma_addr_t sg_bus_addr;		/* Bus address of sg list (ie, of segment_x) */

	u8 sg_count;			/* No of HW sg entries for this request */
	u8 sg_index;			/* Index of HW sg entry for this request */
	size_t total_xfer_length;	/* Total number of bytes remaining to be transferred */
	size_t request_length;		/* Total number of bytes in this request */
	/*
	 * The sense buffer handling function, request_sense, uses
	 * the first hw sg entry (segment_x[0]) and the transfer
	 * length (total_xfer_length). While doing this it stores the
	 * original values into the last sg hw list
	 * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
	 * total_xfer_length in xferred. These values are restored in
	 * pci_unmap_srb_sense. This is the only place xferred is used.
	 */
	size_t xferred;			/* Saved copy of total_xfer_length */

	u16 state;			/* request state flags (bit values defined elsewhere) */

	u8 msgin_buf[6];		/* buffer for incoming SCSI messages */
	u8 msgout_buf[6];		/* buffer for outgoing SCSI messages */

	u8 adapter_status;
	u8 target_status;
	u8 msg_count;			/* number of valid bytes in msgout_buf */
	u8 end_message;

	u8 tag_number;			/* tagged-queueing tag, TAG_NONE if untagged */
	u8 status;
	u8 retry_count;
	u8 flag;

	u8 scsi_phase;			/* current SCSI bus phase for this request */
};
/*
 * Per-device (target/LUN) control block: request queues plus the
 * negotiated sync/wide/tagged-queueing settings for that device.
 */
struct DeviceCtlBlk {
	struct list_head list;		/* next/prev ptrs for the dcb list */
	struct AdapterCtlBlk *acb;	/* owning adapter */
	struct list_head srb_going_list;	/* head of going srb list */
	struct list_head srb_waiting_list;	/* head of waiting srb list */

	struct ScsiReqBlk *active_srb;	/* request currently being processed, if any */
	u32 tag_mask;			/* NOTE(review): presumably bitmask of tag numbers in use — confirm */

	u16 max_command;		/* queue depth limit for this device */

	u8 target_id;			/* SCSI Target ID (SCSI Only) */
	u8 target_lun;			/* SCSI Log. Unit (SCSI Only) */
	u8 identify_msg;
	u8 dev_mode;

	u8 inquiry7;			/* To store Inquiry flags */
	u8 sync_mode;			/* 0:async mode */
	u8 min_nego_period;		/* for nego. */
	u8 sync_period;			/* for reg. */

	u8 sync_offset;			/* for reg. and nego.(low nibble) */
	u8 flag;
	u8 dev_type;

	u8 init_tcq_flag;
};
/*
 * Per-adapter (host) control block: the chip's I/O window, the list of
 * known devices, the free srb pool, timers and a copy of the adapter's
 * eeprom configuration.
 */
struct AdapterCtlBlk {
	struct Scsi_Host *scsi_host;
	unsigned long io_port_base;	/* base of the TRM-S1040 I/O port window */
	unsigned long io_port_len;

	struct list_head dcb_list;	/* head of going dcb list */
	struct DeviceCtlBlk *dcb_run_robin;	/* round-robin cursor over dcb_list */
	struct DeviceCtlBlk *active_dcb;	/* device currently owning the bus, if any */

	struct list_head srb_free_list;	/* head of free srb list */
	struct ScsiReqBlk *tmp_srb;
	struct timer_list waiting_timer;
	struct timer_list selto_timer;	/* selection timeout timer */

	u16 srb_count;

	u8 sel_timeout;

	unsigned int irq_level;
	u8 tag_max_num;
	u8 acb_flag;
	u8 gmode2;

	u8 config;
	u8 lun_chk;
	u8 scan_devices;
	u8 hostid_bit;

	/* NOTE(review): dcb_map/children appear to index dcbs per target id
	 * (and per LUN for children) — confirm against lookup code. */
	u8 dcb_map[DC395x_MAX_SCSI_ID];
	struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];

	struct pci_dev *dev;

	u8 msg_len;

	struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
	struct ScsiReqBlk srb;		/* NOTE(review): spare srb, likely backing tmp_srb — confirm */

	struct NvRamType eeprom;	/* eeprom settings for this adapter */
};
/*---------------------------------------------------------------------------
                            Forward declarations
 ---------------------------------------------------------------------------*/

/* Phase-exit handlers (called when the chip leaves the named bus phase). */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);

/* Phase-entry handlers (called when the chip enters the named bus phase). */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);

/* Placeholder handlers for phases with nothing to do. */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);

/* Chip setup, bus control and data transfer helpers. */
static void set_basic_config(struct AdapterCtlBlk *acb);
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
static void reset_scsi_bus(struct AdapterCtlBlk *acb);
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir);
static void disconnect(struct AdapterCtlBlk *acb);
static void reselect(struct AdapterCtlBlk *acb);
static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);

/* Request construction, completion and error handling. */
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
		struct scsi_cmnd *cmd, u8 force);
static void scsi_reset_detect(struct AdapterCtlBlk *acb);
static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void set_xfer_rate(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb);
static void waiting_timeout(unsigned long ptr);
  339. /*---------------------------------------------------------------------------
  340. Static Data
  341. ---------------------------------------------------------------------------*/
  342. static u16 current_sync_offset = 0;
  343. static void *dc395x_scsi_phase0[] = {
  344. data_out_phase0,/* phase:0 */
  345. data_in_phase0, /* phase:1 */
  346. command_phase0, /* phase:2 */
  347. status_phase0, /* phase:3 */
  348. nop0, /* phase:4 PH_BUS_FREE .. initial phase */
  349. nop0, /* phase:5 PH_BUS_FREE .. initial phase */
  350. msgout_phase0, /* phase:6 */
  351. msgin_phase0, /* phase:7 */
  352. };
  353. static void *dc395x_scsi_phase1[] = {
  354. data_out_phase1,/* phase:0 */
  355. data_in_phase1, /* phase:1 */
  356. command_phase1, /* phase:2 */
  357. status_phase1, /* phase:3 */
  358. nop1, /* phase:4 PH_BUS_FREE .. initial phase */
  359. nop1, /* phase:5 PH_BUS_FREE .. initial phase */
  360. msgout_phase1, /* phase:6 */
  361. msgin_phase1, /* phase:7 */
  362. };
  363. /*
  364. *Fast20: 000 50ns, 20.0 MHz
  365. * 001 75ns, 13.3 MHz
  366. * 010 100ns, 10.0 MHz
  367. * 011 125ns, 8.0 MHz
  368. * 100 150ns, 6.6 MHz
  369. * 101 175ns, 5.7 MHz
  370. * 110 200ns, 5.0 MHz
  371. * 111 250ns, 4.0 MHz
  372. *
  373. *Fast40(LVDS): 000 25ns, 40.0 MHz
  374. * 001 50ns, 20.0 MHz
  375. * 010 75ns, 13.3 MHz
  376. * 011 100ns, 10.0 MHz
  377. * 100 125ns, 8.0 MHz
  378. * 101 150ns, 6.6 MHz
  379. * 110 175ns, 5.7 MHz
  380. * 111 200ns, 5.0 MHz
  381. */
  382. /*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
  383. /* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
  384. static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
  385. static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
  386. /*---------------------------------------------------------------------------
  387. Configuration
  388. ---------------------------------------------------------------------------*/
  389. /*
  390. * Module/boot parameters currently effect *all* instances of the
  391. * card in the system.
  392. */
  393. /*
  394. * Command line parameters are stored in a structure below.
  395. * These are the index's into the structure for the various
  396. * command line options.
  397. */
  398. #define CFG_ADAPTER_ID 0
  399. #define CFG_MAX_SPEED 1
  400. #define CFG_DEV_MODE 2
  401. #define CFG_ADAPTER_MODE 3
  402. #define CFG_TAGS 4
  403. #define CFG_RESET_DELAY 5
  404. #define CFG_NUM 6 /* number of configuration items */
  405. /*
  406. * Value used to indicate that a command line override
  407. * hasn't been used to modify the value.
  408. */
  409. #define CFG_PARAM_UNSET -1
  410. /*
  411. * Hold command line parameters.
  412. */
  413. struct ParameterData {
  414. int value; /* value of this setting */
  415. int min; /* minimum value */
  416. int max; /* maximum value */
  417. int def; /* default value */
  418. int safe; /* safe value */
  419. };
  420. static struct ParameterData __devinitdata cfg_data[] = {
  421. { /* adapter id */
  422. CFG_PARAM_UNSET,
  423. 0,
  424. 15,
  425. 7,
  426. 7
  427. },
  428. { /* max speed */
  429. CFG_PARAM_UNSET,
  430. 0,
  431. 7,
  432. 1, /* 13.3Mhz */
  433. 4, /* 6.7Hmz */
  434. },
  435. { /* dev mode */
  436. CFG_PARAM_UNSET,
  437. 0,
  438. 0x3f,
  439. NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
  440. NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
  441. NTC_DO_SEND_START,
  442. NTC_DO_PARITY_CHK | NTC_DO_SEND_START
  443. },
  444. { /* adapter mode */
  445. CFG_PARAM_UNSET,
  446. 0,
  447. 0x2f,
  448. #ifdef CONFIG_SCSI_MULTI_LUN
  449. NAC_SCANLUN |
  450. #endif
  451. NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
  452. /*| NAC_ACTIVE_NEG*/,
  453. NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
  454. },
  455. { /* tags */
  456. CFG_PARAM_UNSET,
  457. 0,
  458. 5,
  459. 3, /* 16 tags (??) */
  460. 2,
  461. },
  462. { /* reset delay */
  463. CFG_PARAM_UNSET,
  464. 0,
  465. 180,
  466. 1, /* 1 second */
  467. 10, /* 10 seconds */
  468. }
  469. };
  470. /*
  471. * Safe settings. If set to zero the the BIOS/default values with
  472. * command line overrides will be used. If set to 1 then safe and
  473. * slow settings will be used.
  474. */
  475. static int use_safe_settings = 0;
  476. module_param_named(safe, use_safe_settings, bool, 0);
  477. MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
  478. module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
  479. MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
  480. module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
  481. MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
  482. module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
  483. MODULE_PARM_DESC(dev_mode, "Device mode.");
  484. module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
  485. MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
  486. module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
  487. MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
  488. module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
  489. MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
  490. /**
  491. * set_safe_settings - if the use_safe_settings option is set then
  492. * set all values to the safe and slow values.
  493. **/
  494. static void __devinit set_safe_settings(void)
  495. {
  496. if (use_safe_settings)
  497. {
  498. int i;
  499. dprintkl(KERN_INFO, "Using safe settings.\n");
  500. for (i = 0; i < CFG_NUM; i++)
  501. {
  502. cfg_data[i].value = cfg_data[i].safe;
  503. }
  504. }
  505. }
  506. /**
  507. * fix_settings - reset any boot parameters which are out of range
  508. * back to the default values.
  509. **/
  510. static void __devinit fix_settings(void)
  511. {
  512. int i;
  513. dprintkdbg(DBG_1,
  514. "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
  515. "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
  516. cfg_data[CFG_ADAPTER_ID].value,
  517. cfg_data[CFG_MAX_SPEED].value,
  518. cfg_data[CFG_DEV_MODE].value,
  519. cfg_data[CFG_ADAPTER_MODE].value,
  520. cfg_data[CFG_TAGS].value,
  521. cfg_data[CFG_RESET_DELAY].value);
  522. for (i = 0; i < CFG_NUM; i++)
  523. {
  524. if (cfg_data[i].value < cfg_data[i].min
  525. || cfg_data[i].value > cfg_data[i].max)
  526. cfg_data[i].value = cfg_data[i].def;
  527. }
  528. }
  529. /*
  530. * Mapping from the eeprom delay index value (index into this array)
  531. * to the the number of actual seconds that the delay should be for.
  532. */
  533. static char __devinitdata eeprom_index_to_delay_map[] =
  534. { 1, 3, 5, 10, 16, 30, 60, 120 };
  535. /**
  536. * eeprom_index_to_delay - Take the eeprom delay setting and convert it
  537. * into a number of seconds.
  538. *
  539. * @eeprom: The eeprom structure in which we find the delay index to map.
  540. **/
  541. static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
  542. {
  543. eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
  544. }
  545. /**
  546. * delay_to_eeprom_index - Take a delay in seconds and return the
  547. * closest eeprom index which will delay for at least that amount of
  548. * seconds.
  549. *
  550. * @delay: The delay, in seconds, to find the eeprom index for.
  551. **/
  552. static int __devinit delay_to_eeprom_index(int delay)
  553. {
  554. u8 idx = 0;
  555. while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
  556. idx++;
  557. return idx;
  558. }
  559. /**
  560. * eeprom_override - Override the eeprom settings, in the provided
  561. * eeprom structure, with values that have been set on the command
  562. * line.
  563. *
  564. * @eeprom: The eeprom data to override with command line options.
  565. **/
  566. static void __devinit eeprom_override(struct NvRamType *eeprom)
  567. {
  568. u8 id;
  569. /* Adapter Settings */
  570. if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
  571. eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
  572. if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
  573. eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
  574. if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
  575. eeprom->delay_time = delay_to_eeprom_index(
  576. cfg_data[CFG_RESET_DELAY].value);
  577. if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
  578. eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
  579. /* Device Settings */
  580. for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
  581. if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
  582. eeprom->target[id].cfg0 =
  583. (u8)cfg_data[CFG_DEV_MODE].value;
  584. if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
  585. eeprom->target[id].period =
  586. (u8)cfg_data[CFG_MAX_SPEED].value;
  587. }
  588. }
  589. /*---------------------------------------------------------------------------
  590. ---------------------------------------------------------------------------*/
  591. static unsigned int list_size(struct list_head *head)
  592. {
  593. unsigned int count = 0;
  594. struct list_head *pos;
  595. list_for_each(pos, head)
  596. count++;
  597. return count;
  598. }
  599. static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
  600. struct DeviceCtlBlk *pos)
  601. {
  602. int use_next = 0;
  603. struct DeviceCtlBlk* next = NULL;
  604. struct DeviceCtlBlk* i;
  605. if (list_empty(head))
  606. return NULL;
  607. /* find supplied dcb and then select the next one */
  608. list_for_each_entry(i, head, list)
  609. if (use_next) {
  610. next = i;
  611. break;
  612. } else if (i == pos) {
  613. use_next = 1;
  614. }
  615. /* if no next one take the head one (ie, wraparound) */
  616. if (!next)
  617. list_for_each_entry(i, head, list) {
  618. next = i;
  619. break;
  620. }
  621. return next;
  622. }
  623. static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  624. {
  625. if (srb->tag_number < 255) {
  626. dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
  627. srb->tag_number = 255;
  628. }
  629. }
  630. /* Find cmd in SRB list */
  631. static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
  632. struct list_head *head)
  633. {
  634. struct ScsiReqBlk *i;
  635. list_for_each_entry(i, head, list)
  636. if (i->cmd == cmd)
  637. return i;
  638. return NULL;
  639. }
  640. static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
  641. {
  642. struct list_head *head = &acb->srb_free_list;
  643. struct ScsiReqBlk *srb = NULL;
  644. if (!list_empty(head)) {
  645. srb = list_entry(head->next, struct ScsiReqBlk, list);
  646. list_del(head->next);
  647. dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
  648. }
  649. return srb;
  650. }
  651. static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  652. {
  653. dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
  654. list_add_tail(&srb->list, &acb->srb_free_list);
  655. }
  656. static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
  657. struct ScsiReqBlk *srb)
  658. {
  659. dprintkdbg(DBG_0, "srb_waiting_insert: (pid#%li) <%02i-%i> srb=%p\n",
  660. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  661. list_add(&srb->list, &dcb->srb_waiting_list);
  662. }
  663. static void srb_waiting_append(struct DeviceCtlBlk *dcb,
  664. struct ScsiReqBlk *srb)
  665. {
  666. dprintkdbg(DBG_0, "srb_waiting_append: (pid#%li) <%02i-%i> srb=%p\n",
  667. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  668. list_add_tail(&srb->list, &dcb->srb_waiting_list);
  669. }
  670. static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  671. {
  672. dprintkdbg(DBG_0, "srb_going_append: (pid#%li) <%02i-%i> srb=%p\n",
  673. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  674. list_add_tail(&srb->list, &dcb->srb_going_list);
  675. }
  676. static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  677. {
  678. struct ScsiReqBlk *i;
  679. struct ScsiReqBlk *tmp;
  680. dprintkdbg(DBG_0, "srb_going_remove: (pid#%li) <%02i-%i> srb=%p\n",
  681. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  682. list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
  683. if (i == srb) {
  684. list_del(&srb->list);
  685. break;
  686. }
  687. }
  688. static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
  689. struct ScsiReqBlk *srb)
  690. {
  691. struct ScsiReqBlk *i;
  692. struct ScsiReqBlk *tmp;
  693. dprintkdbg(DBG_0, "srb_waiting_remove: (pid#%li) <%02i-%i> srb=%p\n",
  694. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  695. list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
  696. if (i == srb) {
  697. list_del(&srb->list);
  698. break;
  699. }
  700. }
  701. static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
  702. struct ScsiReqBlk *srb)
  703. {
  704. dprintkdbg(DBG_0,
  705. "srb_going_to_waiting_move: (pid#%li) <%02i-%i> srb=%p\n",
  706. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  707. list_move(&srb->list, &dcb->srb_waiting_list);
  708. }
  709. static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
  710. struct ScsiReqBlk *srb)
  711. {
  712. dprintkdbg(DBG_0,
  713. "srb_waiting_to_going_move: (pid#%li) <%02i-%i> srb=%p\n",
  714. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  715. list_move(&srb->list, &dcb->srb_going_list);
  716. }
/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
	/* an already-armed timer will fire soon enough; don't re-arm */
	if (timer_pending(&acb->waiting_timer))
		return;
	init_timer(&acb->waiting_timer);
	acb->waiting_timer.function = waiting_timeout;
	acb->waiting_timer.data = (unsigned long) acb;
	/* don't let the timer fire inside the post-reset settle window
	 * (last_reset - HZ/2); push the expiry just past it instead */
	if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2))
		acb->waiting_timer.expires =
		    acb->scsi_host->last_reset - HZ / 2 + 1;
	else
		acb->waiting_timer.expires = jiffies + to + 1;
	add_timer(&acb->waiting_timer);
}
/* Send the next command from the waiting list to the bus */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	/* nothing to do while a device owns the bus or a reset is live */
	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! (dcb_run_robin was removed) */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}

	/*
	 * Loop over the dcbs, but we start somewhere (potentially) in
	 * the middle of the list so we need to do this manually.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* Make sure the next/another device gets scheduled ... */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);

		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* nothing waiting, or device already saturated:
			 * move to next dcb */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);

			/* Try to send to the bus; on failure arm the
			 * timer and retry shortly */
			if (!start_scsi(acb, pos, srb))
				srb_waiting_to_going_move(pos, srb);
			else
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
  787. /* Wake up waiting queue */
  788. static void waiting_timeout(unsigned long ptr)
  789. {
  790. unsigned long flags;
  791. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
  792. dprintkdbg(DBG_1,
  793. "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
  794. DC395x_LOCK_IO(acb->scsi_host, flags);
  795. waiting_process_next(acb);
  796. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  797. }
  798. /* Get the DCB for a given ID/LUN combination */
  799. static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
  800. {
  801. return acb->children[id][lun];
  802. }
  803. /* Send SCSI Request Block (srb) to adapter (acb) */
  804. static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  805. {
  806. struct DeviceCtlBlk *dcb = srb->dcb;
  807. if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
  808. acb->active_dcb ||
  809. (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
  810. srb_waiting_append(dcb, srb);
  811. waiting_process_next(acb);
  812. return;
  813. }
  814. if (!start_scsi(acb, dcb, srb))
  815. srb_going_append(dcb, srb);
  816. else {
  817. srb_waiting_insert(dcb, srb);
  818. waiting_set_timer(acb, HZ / 50);
  819. }
  820. }
/* Prepare SRB for being sent to Device DCB w/ command *cmd */
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	enum dma_data_direction dir = cmd->sc_data_direction;
	dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n",
		cmd->pid, dcb->target_id, dcb->target_lun);

	/* reset all per-request state before (re)using this srb */
	srb->dcb = dcb;
	srb->cmd = cmd;
	srb->sg_count = 0;
	srb->total_xfer_length = 0;
	srb->sg_bus_addr = 0;
	srb->sg_index = 0;
	srb->adapter_status = 0;
	srb->target_status = 0;
	srb->msg_count = 0;
	srb->status = 0;
	srb->flag = 0;
	srb->state = 0;
	srb->retry_count = 0;
	srb->tag_number = TAG_NONE;
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	srb->end_message = 0;

	if (dir == PCI_DMA_NONE || !cmd->request_buffer) {
		/* no data transfer for this command - nothing to map */
		dprintkdbg(DBG_0,
			"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
			cmd->bufflen, cmd->request_buffer,
			cmd->use_sg, srb->segment_x[0].address);
	} else if (cmd->use_sg) {
		/* scatter/gather: map the sg list for DMA and mirror it
		 * into the adapter's own segment list */
		int i;
		u32 reqlen = cmd->request_bufflen;
		struct scatterlist *sl = (struct scatterlist *)
			cmd->request_buffer;
		struct SGentry *sgp = srb->segment_x;
		srb->sg_count = pci_map_sg(dcb->acb->dev, sl, cmd->use_sg,
			dir);
		dprintkdbg(DBG_0,
			"build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
			reqlen, cmd->request_buffer, cmd->use_sg,
			srb->sg_count);

		for (i = 0; i < srb->sg_count; i++) {
			u32 busaddr = (u32)sg_dma_address(&sl[i]);
			u32 seglen = (u32)sl[i].length;
			sgp[i].address = busaddr;
			sgp[i].length = seglen;
			srb->total_xfer_length += seglen;
		}
		sgp += srb->sg_count - 1;	/* last segment */

		/*
		 * adjust last page if too big as it is allocated
		 * on even page boundaries
		 */
		if (srb->total_xfer_length > reqlen) {
			sgp->length -= (srb->total_xfer_length - reqlen);
			srb->total_xfer_length = reqlen;
		}

		/* Fixup for WIDE padding - make sure length is even */
		if (dcb->sync_period & WIDE_SYNC &&
		    srb->total_xfer_length % 2) {
			srb->total_xfer_length++;
			sgp->length++;
		}

		/* map the segment list itself so the chip can fetch it */
		srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
				srb->segment_x,
				SEGMENTX_LEN,
				PCI_DMA_TODEVICE);

		dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
			srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
	} else {
		/* single flat buffer: map it as one segment */
		srb->total_xfer_length = cmd->request_bufflen;
		srb->sg_count = 1;
		srb->segment_x[0].address =
			pci_map_single(dcb->acb->dev, cmd->request_buffer,
				srb->total_xfer_length, dir);

		/* Fixup for WIDE padding - make sure length is even */
		if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2)
			srb->total_xfer_length++;

		srb->segment_x[0].length = srb->total_xfer_length;

		dprintkdbg(DBG_0,
			"build_srb: [1] len=%d buf=%p use_sg=%d map=%08x\n",
			srb->total_xfer_length, cmd->request_buffer,
			cmd->use_sg, srb->segment_x[0].address);
	}

	srb->request_length = srb->total_xfer_length;
}
/**
 * dc395x_queue_command - queue scsi command passed from the mid
 * layer, invoke 'done' on completion
 *
 * @cmd: pointer to scsi command object
 * @done: function pointer to be invoked on completion
 *
 * Returns 1 if the adapter (host) is busy, else returns 0. One
 * reason for an adapter to be busy is that the number
 * of outstanding queued commands is already equal to
 * struct Scsi_Host::can_queue .
 *
 * Required: if struct Scsi_Host::can_queue is ever non-zero
 *           then this function is required.
 *
 * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
 *        and is expected to be held on return.
 *
 **/
static int dc395x_queue_command(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkdbg(DBG_0, "queue_command: (pid#%li) <%02i-%i> cmnd=0x%02x\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);

	/* Assume BAD_TARGET; will be cleared later */
	cmd->result = DID_BAD_TARGET << 16;

	/* ignore invalid targets */
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun > 31) {
		goto complete;
	}

	/* does the specified lun on the specified device exist */
	if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
		dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}

	/* do we have a DCB for the device */
	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		/* should never happen */
		dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}

	/* set callback and clear result in the command */
	cmd->scsi_done = done;
	cmd->result = 0;

	srb = srb_get_free(acb);
	if (!srb)
	{
		/*
		 * Return 1 since we are unable to queue this command at this
		 * point in time.
		 */
		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
		return 1;
	}

	build_srb(cmd, dcb, srb);

	if (!list_empty(&dcb->srb_waiting_list)) {
		/* append to waiting queue - keep ordering behind commands
		 * already waiting for this device */
		srb_waiting_append(dcb, srb);
		waiting_process_next(acb);
	} else {
		/* process immediately */
		send_srb(acb, srb);
	}
	dprintkdbg(DBG_1, "queue_command: (pid#%li) done\n", cmd->pid);
	return 0;

complete:
	/*
	 * Complete the command immediately, and then return 0 to
	 * indicate that we have handled the command. This is usually
	 * done when the command is for things like non existent
	 * devices.
	 */
	done(cmd);
	return 0;
}
  989. /*
  990. * Return the disk geometry for the given SCSI device.
  991. */
  992. static int dc395x_bios_param(struct scsi_device *sdev,
  993. struct block_device *bdev, sector_t capacity, int *info)
  994. {
  995. #ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
  996. int heads, sectors, cylinders;
  997. struct AdapterCtlBlk *acb;
  998. int size = capacity;
  999. dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
  1000. acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
  1001. heads = 64;
  1002. sectors = 32;
  1003. cylinders = size / (heads * sectors);
  1004. if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
  1005. heads = 255;
  1006. sectors = 63;
  1007. cylinders = size / (heads * sectors);
  1008. }
  1009. geom[0] = heads;
  1010. geom[1] = sectors;
  1011. geom[2] = cylinders;
  1012. return 0;
  1013. #else
  1014. return scsicam_bios_param(bdev, capacity, info);
  1015. #endif
  1016. }
/* Dump the state of the active srb plus the chip's SCSI, DMA and
 * general register banks - a debugging aid for error conditions.
 * @dcb and/or @srb may be NULL; the currently active ones are
 * substituted when available. */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		/* an srb without a command attached indicates a driver bug */
		if (!srb->cmd)
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p (pid#%li) "
				 "cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd, srb->cmd->pid,
				srb->cmd->cmnd[0], srb->cmd->device->id,
				srb->cmd->device->lun);
		printk(" sglist=%p cnt=%i idx=%i len=%i\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	/* SCSI register bank */
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	/* DMA register bank */
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	/* general registers plus PCI status */
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
/* Clear the SCSI FIFO. @txt names the caller for the debug message.
 * With FIFO debugging enabled, logs when data is being discarded. */
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
#if debug_enabled(DBG_FIFO)
	u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
	/* low 6 bits hold the byte count; bit 0x40 presumably flags an
	 * empty FIFO - TODO confirm against the TRM-S1040 datasheet */
	if (!(fifocnt & 0x40))
		dprintkdbg(DBG_FIFO,
			"clear_fifo: (%i bytes) on phase %02x in %s\n",
			fifocnt & 0x3f, lines, txt);
#endif
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
  1093. static void reset_dev_param(struct AdapterCtlBlk *acb)
  1094. {
  1095. struct DeviceCtlBlk *dcb;
  1096. struct NvRamType *eeprom = &acb->eeprom;
  1097. dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
  1098. list_for_each_entry(dcb, &acb->dcb_list, list) {
  1099. u8 period_index;
  1100. dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
  1101. dcb->sync_period = 0;
  1102. dcb->sync_offset = 0;
  1103. dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
  1104. period_index = eeprom->target[dcb->target_id].period & 0x07;
  1105. dcb->min_nego_period = clock_period[period_index];
  1106. if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
  1107. || !(acb->config & HCC_WIDE_CARD))
  1108. dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
  1109. }
  1110. }
/*
 * perform a hard reset on the SCSI bus
 * @cmd - some command for this host (for fetching hooks)
 * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
 */
static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkl(KERN_INFO,
		"eh_bus_reset: (pid#%li) target=<%02i-%i> cmd=%p\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd);

	/* stop the requeue timer while we tear everything down */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/*
	 * disable interrupt
	 */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);

	reset_scsi_bus(acb);
	udelay(500);

	/* We may be in serious trouble. Wait some seconds */
	acb->scsi_host->last_reset =
	    jiffies + 3 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	/*
	 * re-enable interrupt
	 */
	/* Clear SCSI FIFO */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	clear_fifo(acb, "eh_bus_reset");
	/* Delete pending IRQ */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	set_basic_config(acb);

	reset_dev_param(acb);
	/* fail back all commands that were in flight */
	doing_srb_done(acb, DID_RESET, cmd, 0);
	acb->active_dcb = NULL;
	acb->acb_flag = 0;	/* clear RESET_DETECT, RESET_DONE, RESET_DEV */
	waiting_process_next(acb);

	return SUCCESS;
}
  1154. static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  1155. {
  1156. int rc;
  1157. spin_lock_irq(cmd->device->host->host_lock);
  1158. rc = __dc395x_eh_bus_reset(cmd);
  1159. spin_unlock_irq(cmd->device->host->host_lock);
  1160. return rc;
  1161. }
  1162. /*
  1163. * abort an errant SCSI command
  1164. * @cmd - command to be aborted
  1165. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  1166. */
static int dc395x_eh_abort(struct scsi_cmnd *cmd)
{
	/*
	 * Look into our command queues: If it has not been sent already,
	 * we remove it and return success. Otherwise fail.
	 */
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_INFO, "eh_abort: (pid#%li) target=<%02i-%i> cmd=%p\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd);

	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
		return FAILED;
	}

	/* Still on the waiting list: unqueue it, undo the DMA mappings
	 * and return the request block to the free list. */
	srb = find_cmd(cmd, &dcb->srb_waiting_list);
	if (srb) {
		srb_waiting_remove(dcb, srb);
		pci_unmap_srb_sense(acb, srb);
		pci_unmap_srb(acb, srb);
		free_tag(dcb, srb);
		srb_free_insert(acb, srb);
		dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
		/* NOTE(review): result is set but the command is not
		 * completed here; presumably the EH core finishes it once
		 * SUCCESS is returned — confirm against the mid-layer. */
		cmd->result = DID_ABORT << 16;
		return SUCCESS;
	}
	/* Already dispatched to the bus: no abort message is sent, so the
	 * only honest answer is FAILED (see the XXX below). */
	srb = find_cmd(cmd, &dcb->srb_going_list);
	if (srb) {
		dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
		/* XXX: Should abort the command here */
	} else {
		dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
	}
	return FAILED;
}
  1204. /* SDTR */
  1205. static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1206. struct ScsiReqBlk *srb)
  1207. {
  1208. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1209. if (srb->msg_count > 1) {
  1210. dprintkl(KERN_INFO,
  1211. "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1212. srb->msg_count, srb->msgout_buf[0],
  1213. srb->msgout_buf[1]);
  1214. return;
  1215. }
  1216. if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
  1217. dcb->sync_offset = 0;
  1218. dcb->min_nego_period = 200 >> 2;
  1219. } else if (dcb->sync_offset == 0)
  1220. dcb->sync_offset = SYNC_NEGO_OFFSET;
  1221. *ptr++ = MSG_EXTENDED; /* (01h) */
  1222. *ptr++ = 3; /* length */
  1223. *ptr++ = EXTENDED_SDTR; /* (01h) */
  1224. *ptr++ = dcb->min_nego_period; /* Transfer period (in 4ns) */
  1225. *ptr++ = dcb->sync_offset; /* Transfer period (max. REQ/ACK dist) */
  1226. srb->msg_count += 5;
  1227. srb->state |= SRB_DO_SYNC_NEGO;
  1228. }
  1229. /* WDTR */
  1230. static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1231. struct ScsiReqBlk *srb)
  1232. {
  1233. u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
  1234. (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
  1235. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1236. if (srb->msg_count > 1) {
  1237. dprintkl(KERN_INFO,
  1238. "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1239. srb->msg_count, srb->msgout_buf[0],
  1240. srb->msgout_buf[1]);
  1241. return;
  1242. }
  1243. *ptr++ = MSG_EXTENDED; /* (01h) */
  1244. *ptr++ = 2; /* length */
  1245. *ptr++ = EXTENDED_WDTR; /* (03h) */
  1246. *ptr++ = wide;
  1247. srb->msg_count += 4;
  1248. srb->state |= SRB_DO_WIDE_NEGO;
  1249. }
#if 0
/* Timer to work around chip flaw: When selecting and the bus is
 * busy, we sometimes miss a Selection timeout IRQ */
/* NOTE: this whole workaround is compiled out; kept for reference. */
void selection_timeout_missed(unsigned long ptr);
/* Sets the timer to wake us up */
static void selto_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	/* Do not expire sooner than half a second after the last reset */
	if (time_before
	    (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->scsi_host->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}

/* Timer callback: synthesize the selection-timeout handling the chip
 * failed to signal, by forcing a disconnect of the active command. */
void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
/*
 * start_scsi - try to start a command on the bus.
 * Returns 0 when the selection was started, 1 when the bus/chip is busy
 * and the caller should retry later.
 */
static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
		struct ScsiReqBlk* srb)
{
	u16 s_stat2, return_code;
	u8 s_stat, scsicommand, i, identify_message;
	u8 *ptr;
	dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	srb->tag_number = TAG_NONE;	/* acb->tag_max_num: had error read in eeprom */

	s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	s_stat2 = 0;
	s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
#if 1
	/* Bus signal lines indicate the bus is busy: don't even try. */
	if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
		dprintkdbg(DBG_KG, "start_scsi: (pid#%li) BUSY %02x %04x\n",
			srb->cmd->pid, s_stat, s_stat2);
		/*
		 * Try anyway?
		 *
		 * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
		 * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed!
		 * (This is likely to be a bug in the hardware. Obviously, most people
		 *  only have one initiator per SCSI bus.)
		 * Instead let this fail and have the timer make sure the command is
		 * tried again after a short time
		 */
		/*selto_timer (acb); */
		return 1;
	}
#endif
	/* Only one command may own the bus at a time. */
	/* NOTE(review): the two string pieces below concatenate without a
	 * space ("acommand") — runtime string left untouched here. */
	if (acb->active_dcb) {
		dprintkl(KERN_DEBUG, "start_scsi: (pid#%li) Attempt to start a"
			"command while another command (pid#%li) is active.",
			srb->cmd->pid,
			acb->active_dcb->active_srb ?
			acb->active_dcb->active_srb->cmd->pid : 0);
		return 1;
	}
	/* A pending interrupt must be serviced before starting anything. */
	if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
		dprintkdbg(DBG_KG, "start_scsi: (pid#%li) Failed (busy)\n",
			srb->cmd->pid);
		return 1;
	}
	/* Allow starting of SCSI commands half a second before we allow the mid-level
	 * to queue them again after a reset */
	if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) {
		dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
		return 1;
	}

	/* Flush FIFO */
	clear_fifo(acb, "start_scsi");
	/* Program IDs and the negotiated sync agreement for this target */
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	identify_message = dcb->identify_msg;
	/*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
	/* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
	if (srb->flag & AUTO_REQSENSE)
		identify_message &= 0xBF;

	/* For INQUIRY/REQUEST SENSE on LUN 0 with negotiation still
	 * pending, send IDENTIFY plus a WDTR/SDTR message instead of the
	 * command (selection with ATN-stop, no CDB yet). */
	if (((srb->cmd->cmnd[0] == INQUIRY)
	     || (srb->cmd->cmnd[0] == REQUEST_SENSE)
	     || (srb->flag & AUTO_REQSENSE))
	    && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
		 && !(dcb->sync_mode & WIDE_NEGO_DONE))
		|| ((dcb->sync_mode & SYNC_NEGO_ENABLE)
		    && !(dcb->sync_mode & SYNC_NEGO_DONE)))
	    && (dcb->target_lun == 0)) {
		srb->msgout_buf[0] = identify_message;
		srb->msg_count = 1;
		scsicommand = SCMD_SEL_ATNSTOP;
		srb->state = SRB_MSGOUT;
#ifndef SYNC_FIRST
		/* Wide negotiation takes precedence unless SYNC_FIRST */
		if (dcb->sync_mode & WIDE_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_WBUS16) {
			build_wdtr(acb, dcb, srb);
			goto no_cmd;
		}
#endif
		if (dcb->sync_mode & SYNC_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_SYNC) {
			build_sdtr(acb, dcb, srb);
			goto no_cmd;
		}
		if (dcb->sync_mode & WIDE_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_WBUS16) {
			build_wdtr(acb, dcb, srb);
			goto no_cmd;
		}
		srb->msg_count = 0;
	}
	/* Send identify message */
	DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);

	scsicommand = SCMD_SEL_ATN;
	srb->state = SRB_START_;
#ifndef DC395x_NO_TAGQ
	/* Tagged queueing: identify_message bit 6/7 set means the device
	 * may disconnect, so a tag is usable. */
	if ((dcb->sync_mode & EN_TAG_QUEUEING)
	    && (identify_message & 0xC0)) {
		/* Send Tag message */
		u32 tag_mask = 1;
		u8 tag_number = 0;
		/* find the lowest free tag bit */
		while (tag_mask & dcb->tag_mask
		       && tag_number <= dcb->max_command) {
			tag_mask = tag_mask << 1;
			tag_number++;
		}
		/* NOTE(review): loop condition uses <= but this check uses
		 * >=, so tag max_command itself is never used — looks like
		 * an off-by-one; confirm before changing. */
		if (tag_number >= dcb->max_command) {
			dprintkl(KERN_WARNING, "start_scsi: (pid#%li) "
				"Out of tags target=<%02i-%i>)\n",
				srb->cmd->pid, srb->cmd->device->id,
				srb->cmd->device->lun);
			srb->state = SRB_READY;
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_HWRESELECT);
			return 1;
		}
		/* Send Tag id */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
		dcb->tag_mask |= tag_mask;
		srb->tag_number = tag_number;
		scsicommand = SCMD_SEL_ATN3;
		srb->state = SRB_START_;
	}
#endif
/*polling:*/
	/* Send CDB ..command block ......... */
	dprintkdbg(DBG_KG, "start_scsi: (pid#%li) <%02i-%i> cmnd=0x%02x tag=%i\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun,
		srb->cmd->cmnd[0], srb->tag_number);
	if (srb->flag & AUTO_REQSENSE) {
		/* Build a REQUEST SENSE CDB by hand (6 bytes) */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
			      sizeof(srb->cmd->sense_buffer));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	} else {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++)
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	}
      no_cmd:
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
		       DO_HWRESELECT | DO_DATALATCH);
	if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
		/*
		 * If start_scsi return 1:
		 * we caught an interrupt (must be reset or reselection ... )
		 * : Let's process it first!
		 */
		dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> Failed - busy\n",
			srb->cmd->pid, dcb->target_id, dcb->target_lun);
		srb->state = SRB_READY;
		free_tag(dcb, srb);
		srb->msg_count = 0;
		return_code = 1;
		/* This IRQ should NOT get lost, as we did not acknowledge it */
	} else {
		/*
		 * If start_scsi returns 0:
		 * we know that the SCSI processor is free
		 */
		srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
		dcb->active_srb = srb;
		acb->active_dcb = dcb;
		return_code = 0;
		/* it's important for atn stop */
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
			       DO_DATALATCH | DO_HWRESELECT);
		/* SCSI command */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
	}
	return return_code;
}
  1462. #define DC395x_ENABLE_MSGOUT \
  1463. DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
  1464. srb->state |= SRB_MSGOUT
  1465. /* abort command */
  1466. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  1467. struct ScsiReqBlk *srb)
  1468. {
  1469. srb->msgout_buf[0] = ABORT;
  1470. srb->msg_count = 1;
  1471. DC395x_ENABLE_MSGOUT;
  1472. srb->state &= ~SRB_MSGIN;
  1473. srb->state |= SRB_MSGOUT;
  1474. }
  1475. /**
  1476. * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
  1477. * have been triggered for this card.
  1478. *
 * @acb: a pointer to the adapter control block
  1480. * @scsi_status: the status return when we checked the card
  1481. **/
static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
		u16 scsi_status)
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	u16 phase;
	u8 scsi_intstatus;
	unsigned long flags;
	void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
			      u16 *);

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* This acknowledges the IRQ */
	scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	if ((scsi_status & 0x2007) == 0x2002)
		dprintkl(KERN_DEBUG,
			"COP after COP completed? %04x\n", scsi_status);
	if (debug_enabled(DBG_KG)) {
		if (scsi_intstatus & INT_SELTIMEOUT)
			dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
	}
	/*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */

	/* any IRQ means the missed-selection-timeout timer is obsolete */
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	/* Handle the "whole bus" events first; each one consumes the IRQ */
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		disconnect(acb);	/* bus free interrupt */
		goto out_unlock;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		reselect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SELECT) {
		dprintkl(KERN_INFO, "Host does not support target mode!\n");
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		scsi_reset_detect(acb);
		goto out_unlock;
	}
	/* Phase change / command done: run the two-step phase machine */
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		dcb = acb->active_dcb;
		if (!dcb) {
			dprintkl(KERN_DEBUG,
				"Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
				scsi_status, scsi_intstatus);
			goto out_unlock;
		}
		srb = dcb->active_srb;
		if (dcb->flag & ABORT_DEV_) {
			dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
			enable_msgout_abort(acb, srb);
		}
		/* software sequential machine */
		phase = (u16)srb->scsi_phase;

		/*
		 * 62037 or 62137
		 * call dc395x_scsi_phase0[]... "phase entry"
		 * handle every phase before start transfer
		 */
		/* data_out_phase0,	phase:0 */
		/* data_in_phase0,	phase:1 */
		/* command_phase0,	phase:2 */
		/* status_phase0,	phase:3 */
		/* nop0,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop0,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase0,	phase:6 */
		/* msgin_phase0,	phase:7 */
		dc395x_statev = dc395x_scsi_phase0[phase];
		dc395x_statev(acb, srb, &scsi_status);

		/*
		 * if there were any exception occured scsi_status
		 * will be modify to bus free phase new scsi_status
		 * transfer out from ... previous dc395x_statev
		 */
		srb->scsi_phase = scsi_status & PHASEMASK;
		phase = (u16)scsi_status & PHASEMASK;

		/*
		 * call dc395x_scsi_phase1[]... "phase entry" handle
		 * every phase to do transfer
		 */
		/* data_out_phase1,	phase:0 */
		/* data_in_phase1,	phase:1 */
		/* command_phase1,	phase:2 */
		/* status_phase1,	phase:3 */
		/* nop1,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop1,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase1,	phase:6 */
		/* msgin_phase1,	phase:7 */
		dc395x_statev = dc395x_scsi_phase1[phase];
		dc395x_statev(acb, srb, &scsi_status);
	}
      out_unlock:
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
/*
 * dc395x_interrupt - top-level IRQ handler; may be called for a shared
 * line, so it returns IRQ_NONE when neither the SCSI nor the DMA engine
 * raised the interrupt.
 */
static irqreturn_t dc395x_interrupt(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)dev_id;
	u16 scsi_status;
	u8 dma_status;
	irqreturn_t handled = IRQ_NONE;

	/*
	 * Check for pending interrupt
	 */
	scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
	dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
	if (scsi_status & SCSIINTERRUPT) {
		/* interrupt pending - let's process it! */
		dc395x_handle_interrupt(acb, scsi_status);
		handled = IRQ_HANDLED;
	}
	else if (dma_status & 0x20) {
		/* Error from the DMA engine */
		dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
		dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
		if (acb->active_dcb) {
			acb->active_dcb-> flag |= ABORT_DEV_;
			if (acb->active_dcb->active_srb)
				enable_msgout_abort(acb, acb->active_dcb->active_srb);
		}
		DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
		dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
		/* dead store — the error is deliberately not handled */
		acb = NULL;
#endif
		handled = IRQ_HANDLED;
	}
	return handled;
}
/* Phase-exit hook for message-out: runs when the chip leaves MSGOUT. */
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgout_phase0: (pid#%li)\n", srb->cmd->pid);
	/* After an unexpected reselect or a sent ABORT, force the state
	 * machine back to the bus-free phase. */
	if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
		*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	srb->state &= ~SRB_MSGOUT;
}
/* Phase-entry hook for message-out: push the queued message bytes (or a
 * NOP if there are none) into the SCSI FIFO and start the transfer. */
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 i;
	u8 *ptr;
	dprintkdbg(DBG_0, "msgout_phase1: (pid#%li)\n", srb->cmd->pid);

	clear_fifo(acb, "msgout_phase1");
	if (!(srb->state & SRB_MSGOUT)) {
		srb->state |= SRB_MSGOUT;
		dprintkl(KERN_DEBUG,
			"msgout_phase1: (pid#%li) Phase unexpected\n",
			srb->cmd->pid);	/* So what ? */
	}
	/* Nothing queued: the target still wants a message, so send NOP */
	if (!srb->msg_count) {
		dprintkdbg(DBG_0, "msgout_phase1: (pid#%li) NOP msg\n",
			srb->cmd->pid);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
		return;
	}
	ptr = (u8 *)srb->msgout_buf;
	for (i = 0; i < srb->msg_count; i++)
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	srb->msg_count = 0;
	/* remember that an ABORT went out so phase0 can free the bus */
	if (srb->msgout_buf[0] == MSG_ABORT)
		srb->state = SRB_ABORT_SENT;

	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
/* Phase-exit hook for command phase: only latch the bus signals. */
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "command_phase0: (pid#%li)\n", srb->cmd->pid);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/* Phase-entry hook for command phase: push the CDB (or a hand-built
 * REQUEST SENSE for AUTO_REQSENSE) into the FIFO and start it. */
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb;
	u8 *ptr;
	u16 i;
	dprintkdbg(DBG_0, "command_phase1: (pid#%li)\n", srb->cmd->pid);

	clear_fifo(acb, "command_phase1");
	/* drop ATN: message-out is finished */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
	if (!(srb->flag & AUTO_REQSENSE)) {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++) {
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
			ptr++;
		}
	} else {
		/* 6-byte REQUEST SENSE CDB built in place */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		dcb = acb->active_dcb;
		/* target id */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
			      sizeof(srb->cmd->sense_buffer));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	}
	srb->state |= SRB_COMMAND;
	/* it's important for atn stop */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
  1688. /*
  1689. * Verify that the remaining space in the hw sg lists is the same as
  1690. * the count of remaining bytes in srb->total_xfer_length
  1691. */
  1692. static void sg_verify_length(struct ScsiReqBlk *srb)
  1693. {
  1694. if (debug_enabled(DBG_SG)) {
  1695. unsigned len = 0;
  1696. unsigned idx = srb->sg_index;
  1697. struct SGentry *psge = srb->segment_x + idx;
  1698. for (; idx < srb->sg_count; psge++, idx++)
  1699. len += psge->length;
  1700. if (len != srb->total_xfer_length)
  1701. dprintkdbg(DBG_SG,
  1702. "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
  1703. srb->total_xfer_length, len);
  1704. }
  1705. }
  1706. /*
  1707. * Compute the next Scatter Gather list index and adjust its length
  1708. * and address if necessary
  1709. */
/*
 * sg_update_list - account for a (possibly partial) transfer: set the
 * remaining byte count to @left and advance/trim the SG entries that
 * were (fully or partially) consumed.
 */
static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
{
	u8 idx;
	u32 xferred = srb->total_xfer_length - left;	/* bytes transferred */
	struct SGentry *psge = srb->segment_x + srb->sg_index;
	dprintkdbg(DBG_0,
		"sg_update_list: Transfered %i of %i bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
	if (xferred == 0) {
		/* nothing to update since we did not transfer any data */
		return;
	}

	sg_verify_length(srb);
	srb->total_xfer_length = left;	/* update remaining count */
	for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
		if (xferred >= psge->length) {
			/* Complete SG entries done */
			xferred -= psge->length;
		} else {
			/* Partial SG entry done */
			/* NOTE(review): sg_index is only advanced here, in
			 * the partial branch — if the transfer ends exactly
			 * on an entry boundary, sg_index is left behind;
			 * confirm whether callers tolerate that. */
			psge->length -= xferred;
			psge->address += xferred;
			srb->sg_index = idx;
			/* flush the modified SG entry back to the device-
			 * visible copy of the list */
			pci_dma_sync_single_for_device(srb->dcb->
					    acb->dev,
					    srb->sg_bus_addr,
					    SEGMENTX_LEN,
					    PCI_DMA_TODEVICE);
			break;
		}
		psge++;
	}
	sg_verify_length(srb);
}
  1744. /*
 * We have transferred a single byte (PIO mode?) and need to update
 * the count of bytes remaining (total_xfer_length) and update the sg
 * entry to either point to the next byte in the current sg entry, or if
 * already at the end, to point to the start of the next sg entry
  1749. */
  1750. static void sg_subtract_one(struct ScsiReqBlk *srb)
  1751. {
  1752. sg_update_list(srb, srb->total_xfer_length - 1);
  1753. }
  1754. /*
  1755. * cleanup_after_transfer
  1756. *
  1757. * Makes sure, DMA and SCSI engine are empty, after the transfer has finished
  1758. * KG: Currently called from StatusPhase1 ()
  1759. * Should probably also be called from other places
  1760. * Best might be to call it in DataXXPhase0, if new phase will differ
  1761. */
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */
	/* Direction bit of the last DMA command decides drain order:
	 * for a read, empty the SCSI FIFO before the DMA FIFO; for a
	 * write, the other way around.  Each FIFO is only cleared when
	 * its "empty" flag is not already set. */
	if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {	/* read */
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/in");
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	} else {		/* write */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/out");
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
  1779. /*
 * This number of bytes will be transferred with PIO through the SCSI FIFO
 * Seems to be needed for unknown reasons; could be a hardware bug :-(
  1782. */
  1783. #define DC395x_LASTPIO 4
/*
 * data_out_phase0 - phase-exit hook for data-out: stop the DMA engine,
 * work out how many bytes actually reached the target (residue in the
 * SCSI FIFO plus the SCSI transfer counter) and rewind the SG list by
 * that residue so the data can be retransmitted.
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_out_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);

	/*
	 * KG: We need to drain the buffers before we draw any conclusions!
	 * This means telling the DMA to push the rest into SCSI, telling
	 * SCSI to push the rest to the bus.
	 * However, the device might have been the one to stop us (phase
	 * change), and the data in transit just needs to be accounted so
	 * it can be retransmitted.)
	 */
	/*
	 * KG: Stop DMA engine pushing more data into the SCSI FIFO
	 * If we need more data, the DMA SG list will be freshly set up, anyway
	 */
	dprintkdbg(DBG_PIO, "data_out_phase0: "
		"DMA{fifocnt=0x%02x fifostat=0x%02x} "
		"SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
		srb->total_xfer_length);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);

	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			srb->status |= PARITY_ERROR;

		/*
		 * KG: Right, we can't just rely on the SCSI_COUNTER, because this
		 * is the no of bytes it got from the DMA engine not the no it
		 * transferred successfully to the device. (And the difference could
		 * be as much as the FIFO size, I guess ...)
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			/* wide bus: the FIFO count is in words, not bytes */
			if (dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;

			dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
				"SCSI{fifocnt=0x%02x cnt=0x%08x} "
				"DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				(dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
				DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
		}
		/*
		 * calculate all the residue data that not yet transferred
		 * SCSI transfer counter + left in SCSI FIFO data
		 *
		 * .....TRM_S1040_SCSI_COUNTER (24bits)
		 * The counter always decrement by one for every SCSI byte transfer.
		 * .....TRM_S1040_SCSI_FIFOCNT ( 5bits)
		 * The counter is SCSI FIFO offset counter (in units of bytes or! words)
		 */
		if (srb->total_xfer_length > DC395x_LASTPIO)
			d_left_counter +=
			    DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);

		/* Is this a good idea? */
		/*clear_fifo(acb, "DOP1"); */
		/* KG: What is this supposed to be useful for? WIDE padding stuff? */
		if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
		    && srb->cmd->request_bufflen % 2) {
			d_left_counter = 0;
			dprintkl(KERN_INFO,
				"data_out_phase0: Discard 1 byte (0x%02x)\n",
				scsi_status);
		}
		/*
		 * KG: Oops again. Same thinko as above: The SCSI might have been
		 * faster than the DMA engine, so that it ran out of data.
		 * In that case, we have to do just nothing!
		 * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or?
		 */
		/*
		 * KG: This is nonsense: We have been WRITING data to the bus
		 * If the SCSI engine has no bytes left, how should the DMA engine?
		 */
		if (d_left_counter == 0) {
			srb->total_xfer_length = 0;
		} else {
			/*
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			long oldxferred =
			    srb->total_xfer_length - d_left_counter;
			const int diff =
			    (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
			sg_update_list(srb, d_left_counter);
			/* KG: Most ugly hack! Apparently, this works around a chip bug */
			if ((srb->segment_x[srb->sg_index].length ==
			     diff && srb->cmd->use_sg)
			    || ((oldxferred & ~PAGE_MASK) ==
				(PAGE_SIZE - diff))
			    ) {
				dprintkl(KERN_INFO, "data_out_phase0: "
					"Work around chip bug (%i)?\n", diff);
				d_left_counter =
				    srb->total_xfer_length - diff;
				sg_update_list(srb, d_left_counter);
				/*srb->total_xfer_length -= diff; */
				/*srb->virt_addr += diff; */
				/*if (srb->cmd->use_sg) */
				/*	srb->sg_index++; */
			}
		}
	}
	/* target has moved on to another phase: drain and latch */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
		cleanup_after_transfer(acb, srb);
	}
}
/* Phase-entry hook for data-out: clear the FIFO and set up the DMA/PIO
 * transfer in the write direction. */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_out_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	clear_fifo(acb, "data_out_phase1");
	/* do prepare before transfer when data out phase */
	data_io_transfer(acb, srb, XFERDATAOUT);
}
/*
 * DATA IN phase completion: the target has left (or is leaving) the
 * DATA IN phase.  Accounts for bytes still sitting in the SCSI/DMA
 * FIFOs, drains small residues (<= DC395x_LASTPIO bytes) by PIO, and
 * updates the scatter/gather bookkeeping for whatever was transferred.
 */
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 scsi_status = *pscsi_status;
	dprintkdbg(DBG_0, "data_in_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	/*
	 * KG: DataIn is much more tricky than DataOut. When the device is finished
	 * and switches to another phase, the SCSI engine should be finished too.
	 * But: There might still be bytes left in its FIFO to be fetched by the DMA
	 * engine and transferred to memory.
	 * We should wait for the FIFOs to be emptied by that (is there any way to
	 * enforce this?) and then stop the DMA engine, because it might think, that
	 * there are more bytes to follow. Yes, the device might disconnect prior to
	 * having all bytes transferred!
	 * Also we should make sure that all data from the DMA engine buffer's really
	 * made its way to the system memory! Some documentation on this would not
	 * seem to be a bad idea, actually.
	 */
	if (!(srb->state & SRB_XFERPAD)) {
		u32 d_left_counter;
		unsigned int sc, fc;
		if (scsi_status & PARITYERROR) {
			dprintkl(KERN_INFO, "data_in_phase0: (pid#%li) "
				"Parity Error\n", srb->cmd->pid);
			srb->status |= PARITY_ERROR;
		}
		/*
		 * KG: We should wait for the DMA FIFO to be empty ...
		 * but: it would be better to wait first for the SCSI FIFO and then the
		 * the DMA FIFO to become empty? How do we know, that the device not already
		 * sent data to the FIFO in a MsgIn phase, eg.?
		 */
		/* Bit 0x80 in DMA_FIFOSTAT presumably flags "FIFO empty" —
		 * TODO confirm against the TRM-S1040 data sheet. */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
#if 0
			/* Dead code: busy-wait for the DMA FIFO to drain.
			 * NOTE(review): the second dprintkl below is missing a
			 * comma after KERN_DEBUG and would not compile if this
			 * block were ever enabled. */
			int ctr = 6000000;
			dprintkl(KERN_DEBUG,
				"DIP0: Wait for DMA FIFO to flush ...\n");
			/*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
			/*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
			while (!
			       (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
				0x80) && --ctr);
			if (ctr < 6000000 - 1)
				dprintkl(KERN_DEBUG
					"DIP0: Had to wait for DMA ...\n");
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
#endif
			dprintkdbg(DBG_KG, "data_in_phase0: "
				"DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
		}
		/* Now: Check remainig data: The SCSI counters should tell us ... */
		sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
		fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
		/* SCSI FIFO counts words on a wide bus, so double it there */
		d_left_counter = sc + ((fc & 0x1f)
		       << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
			   0));
		dprintkdbg(DBG_KG, "data_in_phase0: "
			"SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
			"DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
			"Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
			fc,
			(srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
			sc,
			fc,
			DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
			DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
			srb->total_xfer_length, d_left_counter);
#if DC395x_LASTPIO
		/* KG: Less than or equal to 4 bytes can not be transfered via DMA, it seems. */
		if (d_left_counter
		    && srb->total_xfer_length <= DC395x_LASTPIO) {
			size_t left_io = srb->total_xfer_length;
			/*u32 addr = (srb->segment_x[srb->sg_index].address); */
			/*sg_update_list (srb, d_left_counter); */
			dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
				"for remaining %i bytes:",
				fc & 0x1f,
				(srb->dcb->sync_period & WIDE_SYNC) ?
				"words" : "bytes",
				srb->total_xfer_length);
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					      CFG2_WIDEFIFO);
			/* Drain the SCSI FIFO byte-wise into the request
			 * buffer, mapping one S/G chunk at a time */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				if (srb->cmd->use_sg) {
					size_t offset = srb->request_length - left_io;
					local_irq_save(flags);
					/* Assumption: it's inside one page as it's at most 4 bytes and
					   I just assume it's on a 4-byte boundary */
					base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
								   srb->sg_count, &offset, &len);
					virt = base + offset;
				} else {
					virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
					len = left_io;
				}
				left_io -= len;
				while (len) {
					u8 byte;
					byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
					*virt++ = byte;
					if (debug_enabled(DBG_PIO))
						printk(" %02x", byte);
					d_left_counter--;
					sg_subtract_one(srb);
					len--;
					/* fifocnt value 0x40 presumably means
					 * "FIFO empty" — TODO confirm */
					fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
					if (fc == 0x40) {
						left_io = 0;
						break;
					}
				}
				/* FIFO empty and residue left, or FIFO not
				 * empty but nothing left — both inconsistent */
				WARN_ON((fc != 0x40) == !d_left_counter);
				if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
					/* Read the last byte ... */
					if (srb->total_xfer_length > 0) {
						u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
						*virt++ = byte;
						srb->total_xfer_length--;
						if (debug_enabled(DBG_PIO))
							printk(" %02x", byte);
					}
					DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
				}
				if (srb->cmd->use_sg) {
					scsi_kunmap_atomic_sg(base);
					local_irq_restore(flags);
				}
			}
			/*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
			/*srb->total_xfer_length = 0; */
			if (debug_enabled(DBG_PIO))
				printk("\n");
		}
#endif				/* DC395x_LASTPIO */
#if 0
		/*
		 * KG: This was in DATAOUT. Does it also belong here?
		 * Nobody seems to know what counter and fifo_cnt count exactly ...
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			if (srb->dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;
			/*
			 * if WIDE scsi SCSI FIFOCNT unit is word !!!
			 * so need to *= 2
			 * KG: Seems to be correct ...
			 */
		}
#endif
		/* KG: This should not be needed any more! */
		if (d_left_counter == 0
		    || (scsi_status & SCSIXFERCNT_2_ZERO)) {
#if 0
			int ctr = 6000000;
			u8 TempDMAstatus;
			do {
				TempDMAstatus =
				    DC395x_read8(acb, TRM_S1040_DMA_STATUS);
			} while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DataInPhase0 waiting for DMA!!\n");
			srb->total_xfer_length = 0;
#endif
			srb->total_xfer_length = d_left_counter;
		} else {	/* phase changed */
			/*
			 * parsing the case:
			 * when a transfer not yet complete
			 * but be disconnected by target
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			sg_update_list(srb, d_left_counter);
		}
	}
	/* KG: The target may decide to disconnect: Empty FIFO before! */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * DATA IN phase entry: program the DMA/FIFO engines for an incoming
 * transfer.  Unlike data_out_phase1() the FIFO is not cleared first.
 */
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_in_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	data_io_transfer(acb, srb, XFERDATAIN);
}
/*
 * Set up the actual data transfer for a DATA IN/OUT phase.
 *
 * Three cases, chosen by the remaining transfer length:
 *  - more than DC395x_LASTPIO bytes: program the DMA engine (with or
 *    without scatter/gather) and issue SCMD_DMA_IN/OUT;
 *  - 1..DC395x_LASTPIO bytes: transfer by PIO through the SCSI FIFO;
 *  - 0 bytes: the target wants a phase we have no data for — pad the
 *    transfer with dummy bytes and flag an over/under-run.
 *
 * @io_dir: XFERDATAIN or XFERDATAOUT (DMACMD_DIR set means read).
 * The register write order (counter, DO_DATALATCH, then commands)
 * follows the chip's expectations — do not reorder.
 */
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	dprintkdbg(DBG_0,
		"data_io_transfer: (pid#%li) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun,
		((io_dir & DMACMD_DIR) ? 'r' : 'w'),
		srb->total_xfer_length, srb->sg_index, srb->sg_count);
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
	if (srb->sg_index >= srb->sg_count) {
		/* can't happen? out of bounds error */
		return;
	}
	if (srb->total_xfer_length > DC395x_LASTPIO) {
		u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
		/*
		 * KG: What should we do: Use SCSI Cmd 0x90/0x92?
		 * Maybe, even ABORTXFER would be appropriate
		 */
		if (dma_status & XFERPENDING) {
			dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
				"Expect trouble!\n");
			dump_register_info(acb, dcb, srb);
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		}
		/* clear_fifo(acb, "IO"); */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
		if (srb->cmd->use_sg) {	/* with S/G */
			io_dir |= DMACMD_SG;
			/* Point the DMA engine at the remaining S/G entries */
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->sg_bus_addr +
				       sizeof(struct SGentry) *
				       srb->sg_index);
			/* load how many bytes in the sg list table */
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       ((u32)(srb->sg_count -
					      srb->sg_index) << 3));
		} else {	/* without S/G */
			io_dir &= ~DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->segment_x[0].address);
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       srb->segment_x[0].length);
		}
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* For reads the SCSI command goes first, for writes the
		 * DMA command — presumably so the consumer side is armed
		 * before the producer starts. */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_IN);
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
		} else {
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_OUT);
		}
	}
#if DC395x_LASTPIO
	else if (srb->total_xfer_length > 0) {	/* The last four bytes: Do PIO */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		if (io_dir & DMACMD_DIR) {	/* read */
			/* Incoming bytes are picked up later (data_in_phase0) */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_FIFO_IN);
		} else {	/* write */
			int ln = srb->total_xfer_length;
			size_t left_io = srb->total_xfer_length;
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				     CFG2_WIDEFIFO);
			/* Feed the remaining bytes into the SCSI FIFO,
			 * mapping one S/G chunk at a time */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				if (srb->cmd->use_sg) {
					size_t offset = srb->request_length - left_io;
					local_irq_save(flags);
					/* Again, max 4 bytes */
					base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
								   srb->sg_count, &offset, &len);
					virt = base + offset;
				} else {
					virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
					len = left_io;
				}
				left_io -= len;
				while (len--) {
					if (debug_enabled(DBG_PIO))
						printk(" %02x", *virt);
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
					sg_subtract_one(srb);
				}
				if (srb->cmd->use_sg) {
					scsi_kunmap_atomic_sg(base);
					local_irq_restore(flags);
				}
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
				/* Wide bus: pad an odd byte count to a
				 * full word before closing the FIFO */
				if (ln % 2) {
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
					if (debug_enabled(DBG_PIO))
						printk(" |00");
				}
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
			if (debug_enabled(DBG_PIO))
				printk("\n");
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
					  SCMD_FIFO_OUT);
		}
	}
#endif				/* DC395x_LASTPIO */
	else {		/* xfer pad */
		u8 data = 0, data2 = 0;
		if (srb->sg_count) {
			/* Target wants more/other data than we have left */
			srb->adapter_status = H_OVER_UNDER_RUN;
			srb->status |= OVER_RUN;
		}
		/*
		 * KG: despite the fact that we are using 16 bits I/O ops
		 * the SCSI FIFO is only 8 bits according to the docs
		 * (we can set bit 1 in 0x8f to serialize FIFO access ...)
		 */
		if (dcb->sync_period & WIDE_SYNC) {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				      CFG2_WIDEFIFO);
			if (io_dir & DMACMD_DIR) {
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			} else {
				/* Danger, Robinson: If you find KGs
				 * scattered over the wide disk, the driver
				 * or chip is to blame :-( */
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
			}
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
		} else {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
			/* Danger, Robinson: If you find a collection of Ks on your disk
			 * something broke :-( */
			if (io_dir & DMACMD_DIR)
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			else
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
		}
		srb->state |= SRB_XFERPAD;
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* SCSI command */
		bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
	}
}
/*
 * STATUS phase completion: the status byte and the following message
 * byte (normally COMMAND COMPLETE) are waiting in the SCSI FIFO.
 * Read both, mark the SRB completed, and acknowledge the message.
 */
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);	/* get message */
	srb->state = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * STATUS phase entry: tell the chip to complete the command, i.e.
 * fetch the status byte and the message that follows (SCMD_COMP).
 */
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	srb->state = SRB_STATUS;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
}
  2318. /* Check if the message is complete */
  2319. static inline u8 msgin_completed(u8 * msgbuf, u32 len)
  2320. {
  2321. if (*msgbuf == EXTENDED_MESSAGE) {
  2322. if (len < 2)
  2323. return 0;
  2324. if (len < msgbuf[1] + 2)
  2325. return 0;
  2326. } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */
  2327. if (len < 2)
  2328. return 0;
  2329. return 1;
  2330. }
  2331. /* reject_msg */
/*
 * reject_msg: queue a MESSAGE REJECT for the message just received and
 * switch the SRB from message-in to message-out state.
 */
static inline void msgin_reject(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	srb->msgout_buf[0] = MESSAGE_REJECT;
	srb->msg_count = 1;
	DC395x_ENABLE_MSGOUT;
	srb->state &= ~SRB_MSGIN;
	srb->state |= SRB_MSGOUT;
	dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
		srb->msgin_buf[0],
		srb->dcb->target_id, srb->dcb->target_lun);
}
  2344. static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
  2345. struct DeviceCtlBlk *dcb, u8 tag)
  2346. {
  2347. struct ScsiReqBlk *srb = NULL;
  2348. struct ScsiReqBlk *i;
  2349. dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) tag=%i srb=%p\n",
  2350. srb->cmd->pid, tag, srb);
  2351. if (!(dcb->tag_mask & (1 << tag)))
  2352. dprintkl(KERN_DEBUG,
  2353. "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
  2354. dcb->tag_mask, tag);
  2355. if (list_empty(&dcb->srb_going_list))
  2356. goto mingx0;
  2357. list_for_each_entry(i, &dcb->srb_going_list, list) {
  2358. if (i->tag_number == tag) {
  2359. srb = i;
  2360. break;
  2361. }
  2362. }
  2363. if (!srb)
  2364. goto mingx0;
  2365. dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) <%02i-%i>\n",
  2366. srb->cmd->pid, srb->dcb->target_id, srb->dcb->target_lun);
  2367. if (dcb->flag & ABORT_DEV_) {
  2368. /*srb->state = SRB_ABORT_SENT; */
  2369. enable_msgout_abort(acb, srb);
  2370. }
  2371. if (!(srb->state & SRB_DISCONNECT))
  2372. goto mingx0;
  2373. memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
  2374. srb->state |= dcb->active_srb->state;
  2375. srb->state |= SRB_DATA_XFER;
  2376. dcb->active_srb = srb;
  2377. /* How can we make the DORS happy? */
  2378. return srb;
  2379. mingx0:
  2380. srb = acb->tmp_srb;
  2381. srb->state = SRB_UNEXPECT_RESEL;
  2382. dcb->active_srb = srb;
  2383. srb->msgout_buf[0] = MSG_ABORT_TAG;
  2384. srb->msg_count = 1;
  2385. DC395x_ENABLE_MSGOUT;
  2386. dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
  2387. return srb;
  2388. }
/*
 * Write the device's negotiated target id, sync period and offset back
 * to the chip, then propagate the new rate via set_xfer_rate().
 * Called after every sync/wide negotiation result.
 */
static inline void reprogram_regs(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	set_xfer_rate(acb, dcb);
}
  2397. /* set async transfer mode */
/*
 * set async transfer mode: the target rejected our SDTR, so fall back
 * to asynchronous transfers (offset 0, 200ns period) and reprogram the
 * chip.  If a wide negotiation is still outstanding, try WDTR next.
 */
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);
	dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
	dcb->sync_mode |= SYNC_NEGO_DONE;
	/*dcb->sync_period &= 0; */
	dcb->sync_offset = 0;
	dcb->min_nego_period = 200 >> 2;	/* 200ns <=> 5 MHz */
	srb->state &= ~SRB_DO_SYNC_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
	    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
		build_wdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
	}
}
  2417. /* set sync transfer mode */
/*
 * set sync transfer mode: handle an incoming SDTR message.
 * msgin_buf[3] is the requested period (in 4ns units), msgin_buf[4]
 * the requested offset.  Clamp both to what we support, pick the
 * nearest entry of the clock_period[] table, program the chip, and —
 * if the target initiated the negotiation — answer with a corrected
 * SDTR message.
 */
static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	int fact;
	dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
		"(%02i.%01i MHz) Offset %i\n",
		dcb->target_id, srb->msgin_buf[3] << 2,
		(250 / srb->msgin_buf[3]),
		((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
		srb->msgin_buf[4]);
	/* Chip supports at most an offset of 15 */
	if (srb->msgin_buf[4] > 15)
		srb->msgin_buf[4] = 15;
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
		dcb->sync_offset = 0;
	else if (dcb->sync_offset == 0)
		dcb->sync_offset = srb->msgin_buf[4];
	/* Agree on the smaller of the two offsets */
	if (srb->msgin_buf[4] > dcb->sync_offset)
		srb->msgin_buf[4] = dcb->sync_offset;
	else
		dcb->sync_offset = srb->msgin_buf[4];
	/* Find the slowest clock divisor satisfying both the request
	 * and our own minimum negotiable period */
	bval = 0;
	while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
			    || dcb->min_nego_period >
			    clock_period[bval]))
		bval++;
	if (srb->msgin_buf[3] < clock_period[bval])
		dprintkl(KERN_INFO,
			"msgin_set_sync: Increase sync nego period to %ins\n",
			clock_period[bval] << 2);
	srb->msgin_buf[3] = clock_period[bval];
	dcb->sync_period &= 0xf0;
	dcb->sync_period |= ALT_SYNC | bval;
	dcb->min_nego_period = srb->msgin_buf[3];
	/* fact: 500 for wide (2 bytes/transfer), 250 for narrow; used
	 * only to compute the MB/s figure printed below */
	if (dcb->sync_period & WIDE_SYNC)
		fact = 500;
	else
		fact = 250;
	dprintkl(KERN_INFO,
		"Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
		dcb->target_id, (fact == 500) ? "Wide16" : "",
		dcb->min_nego_period << 2, dcb->sync_offset,
		(fact / dcb->min_nego_period),
		((fact % dcb->min_nego_period) * 10 +
		 dcb->min_nego_period / 2) / dcb->min_nego_period);
	if (!(srb->state & SRB_DO_SYNC_NEGO)) {
		/* Reply with corrected SDTR Message */
		dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
			srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
		memcpy(srb->msgout_buf, srb->msgin_buf, 5);
		srb->msg_count = 5;
		DC395x_ENABLE_MSGOUT;
		dcb->sync_mode |= SYNC_NEGO_DONE;
	} else {
		if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
		    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
			build_wdtr(acb, dcb, srb);
			DC395x_ENABLE_MSGOUT;
			dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
		}
	}
	srb->state &= ~SRB_DO_SYNC_NEGO;
	dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
	reprogram_regs(acb, dcb);
}
/*
 * The target rejected our WDTR: fall back to a narrow bus, mark the
 * wide negotiation done, and — if sync negotiation is still pending —
 * try SDTR next.
 */
static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
	dcb->sync_period &= ~WIDE_SYNC;
	dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
	dcb->sync_mode |= WIDE_NEGO_DONE;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
	}
}
/*
 * Handle an incoming WDTR message.  msgin_buf[3] is the requested
 * width exponent (0 = 8 bit, 1 = 16 bit); clamp it to what the device
 * and card support.  If the target initiated the exchange, answer
 * with our (possibly reduced) width; then program the chip and, if a
 * sync negotiation is still pending, try SDTR next.
 */
static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	/* 1 only when both device config and card allow wide transfers */
	u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
		   && acb->config & HCC_WIDE_CARD) ? 1 : 0;
	dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
	if (srb->msgin_buf[3] > wide)
		srb->msgin_buf[3] = wide;
	/* Completed */
	if (!(srb->state & SRB_DO_WIDE_NEGO)) {
		/* Target-initiated: echo back the corrected WDTR */
		dprintkl(KERN_DEBUG,
			"msgin_set_wide: Wide nego initiated <%02i>\n",
			dcb->target_id);
		memcpy(srb->msgout_buf, srb->msgin_buf, 4);
		srb->msg_count = 4;
		srb->state |= SRB_DO_WIDE_NEGO;
		DC395x_ENABLE_MSGOUT;
	}
	dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
	if (srb->msgin_buf[3] > 0)
		dcb->sync_period |= WIDE_SYNC;
	else
		dcb->sync_period &= ~WIDE_SYNC;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
	dprintkdbg(DBG_1,
		"msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
		(8 << srb->msgin_buf[3]), dcb->target_id);
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
	}
}
/*
 * extended message codes:
 *
 * code description
 *
 * 00h MODIFY DATA POINTER
 * 01h SYNCHRONOUS DATA TRANSFER REQUEST
 * 02h Reserved
 * 03h WIDE DATA TRANSFER REQUEST
 * 04h - 7Fh Reserved
 * 80h - FFh Vendor specific
 */
/*
 * MESSAGE IN phase: accumulate message bytes one per interrupt into
 * srb->msgin_buf and, once msgin_completed() says the message is
 * whole, dispatch on the message code (disconnect, queue tags,
 * reject, SDTR/WDTR, etc.).  Always ends by acknowledging the byte
 * with SCMD_MSGACCEPT.
 */
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	dprintkdbg(DBG_0, "msgin_phase0: (pid#%li)\n", srb->cmd->pid);
	srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
		/* Now eval the msg */
		switch (srb->msgin_buf[0]) {
		case DISCONNECT:
			srb->state = SRB_DISCONNECT;
			break;
		case SIMPLE_QUEUE_TAG:
		case HEAD_OF_QUEUE_TAG:
		case ORDERED_QUEUE_TAG:
			/* Reselection with a tag: switch to the SRB that
			 * owns this tag (may return the tmp_srb on error) */
			srb =
			    msgin_qtag(acb, dcb,
				       srb->msgin_buf[1]);
			break;
		case MESSAGE_REJECT:
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_CLRATN | DO_DATALATCH);
			/* A sync nego message was rejected ! */
			if (srb->state & SRB_DO_SYNC_NEGO) {
				msgin_set_async(acb, srb);
				break;
			}
			/* A wide nego message was rejected ! */
			if (srb->state & SRB_DO_WIDE_NEGO) {
				msgin_set_nowide(acb, srb);
				break;
			}
			enable_msgout_abort(acb, srb);
			/*srb->state |= SRB_ABORT_SENT */
			break;
		case EXTENDED_MESSAGE:
			/* SDTR */
			if (srb->msgin_buf[1] == 3
			    && srb->msgin_buf[2] == EXTENDED_SDTR) {
				msgin_set_sync(acb, srb);
				break;
			}
			/* WDTR */
			if (srb->msgin_buf[1] == 2
			    && srb->msgin_buf[2] == EXTENDED_WDTR
			    && srb->msgin_buf[3] <= 2) {	/* sanity check ... */
				msgin_set_wide(acb, srb);
				break;
			}
			msgin_reject(acb, srb);
			break;
		case MSG_IGNOREWIDE:
			/* Discard wide residual */
			dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
			break;
		case COMMAND_COMPLETE:
			/* nothing has to be done */
			break;
		case SAVE_POINTERS:
			/*
			 * SAVE POINTER may be ignored as we have the struct
			 * ScsiReqBlk* associated with the scsi command.
			 */
			dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) "
				"SAVE POINTER rem=%i Ignore\n",
				srb->cmd->pid, srb->total_xfer_length);
			break;
		case RESTORE_POINTERS:
			dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
			break;
		case ABORT:
			dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) "
				"<%02i-%i> ABORT msg\n",
				srb->cmd->pid, dcb->target_id,
				dcb->target_lun);
			dcb->flag |= ABORT_DEV_;
			enable_msgout_abort(acb, srb);
			break;
		default:
			/* reject unknown messages */
			if (srb->msgin_buf[0] & IDENTIFY_BASE) {
				/* Answer an IDENTIFY with our own, then
				 * still fall through to the reject below */
				dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
				srb->msg_count = 1;
				srb->msgout_buf[0] = dcb->identify_msg;
				DC395x_ENABLE_MSGOUT;
				srb->state |= SRB_MSGOUT;
				/*break; */
			}
			msgin_reject(acb, srb);
		}
		/* Clear counter and MsgIn state */
		srb->state &= ~SRB_MSGIN;
		acb->msg_len = 0;
	}
	*pscsi_status = PH_BUS_FREE;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important ... you know! */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * MESSAGE IN phase entry: prepare to receive one message byte.
 * Clears the FIFO, sets the counter to 1 byte, and enters the
 * message-in SRB state (dropping SRB_DISCONNECT on the first byte).
 */
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgin_phase1: (pid#%li)\n", srb->cmd->pid);
	clear_fifo(acb, "msgin_phase1");
	DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
	if (!(srb->state & SRB_MSGIN)) {
		srb->state &= ~SRB_DISCONNECT;
		srb->state |= SRB_MSGIN;
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
}
/* No-op phase handler (filler entry for the phase dispatch tables). */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/* No-op phase handler (filler entry for the phase dispatch tables). */
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/*
 * Propagate dcb's negotiated sync parameters to every DCB that shares
 * the same target id (all LUNs of the target use one physical link).
 * Only acts for LUN 0 (low 3 bits of the IDENTIFY message hold the
 * LUN).  During device scan only the global current_sync_offset is
 * recorded instead.
 */
static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
{
	struct DeviceCtlBlk *i;
	/* set all lun device's period, offset */
	if (dcb->identify_msg & 0x07)
		return;
	if (acb->scan_devices) {
		current_sync_offset = dcb->sync_offset;
		return;
	}
	list_for_each_entry(i, &acb->dcb_list, list)
		if (i->target_id == dcb->target_id) {
			i->sync_period = dcb->sync_period;
			i->sync_offset = dcb->sync_offset;
			i->sync_mode = dcb->sync_mode;
			i->min_nego_period = dcb->min_nego_period;
		}
}
/*
 * Bus-disconnect handler.  Called when the target releases the bus.
 * Depending on the SRB state this is:
 *  - an expected disconnect (SRB_DISCONNECT): just continue the queue;
 *  - an unexpected reselection abort (SRB_UNEXPECT_RESEL);
 *  - an aborted command (SRB_ABORT_SENT): complete with DID_ABORT;
 *  - command completion (SRB_COMPLETED): finish via srb_done();
 *  - a selection timeout (still SRB_START_/SRB_MSGOUT): retry up to
 *    DC395x_MAX_RETRIES, otherwise fail with SCSI_STAT_SEL_TIMEOUT.
 */
static void disconnect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb;
	if (!dcb) {
		/* Disconnect with no active device: reset the bus side
		 * and suspend the queue briefly */
		dprintkl(KERN_ERR, "disconnect: No such device\n");
		udelay(500);
		/* Suspend queue for a while */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;
		clear_fifo(acb, "disconnectEx");
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
		return;
	}
	srb = dcb->active_srb;
	acb->active_dcb = NULL;
	dprintkdbg(DBG_0, "disconnect: (pid#%li)\n", srb->cmd->pid);
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	clear_fifo(acb, "disconnect");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
	if (srb->state & SRB_UNEXPECT_RESEL) {
		dprintkl(KERN_ERR,
			"disconnect: Unexpected reselection <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
		srb->state = 0;
		waiting_process_next(acb);
	} else if (srb->state & SRB_ABORT_SENT) {
		dcb->flag &= ~ABORT_DEV_;
		acb->scsi_host->last_reset = jiffies + HZ / 2 + 1;
		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
		waiting_process_next(acb);
	} else {
		if ((srb->state & (SRB_START_ + SRB_MSGOUT))
		    || !(srb->
			 state & (SRB_DISCONNECT + SRB_COMPLETED))) {
			/*
			 * Selection time out
			 * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
			 */
			/* Unexp. Disc / Sel Timeout */
			if (srb->state != SRB_START_
			    && srb->state != SRB_MSGOUT) {
				srb->state = SRB_READY;
				dprintkl(KERN_DEBUG,
					"disconnect: (pid#%li) Unexpected\n",
					srb->cmd->pid);
				srb->target_status = SCSI_STAT_SEL_TIMEOUT;
				goto disc1;
			} else {
				/* Normal selection timeout */
				dprintkdbg(DBG_KG, "disconnect: (pid#%li) "
					"<%02i-%i> SelTO\n", srb->cmd->pid,
					dcb->target_id, dcb->target_lun);
				if (srb->retry_count++ > DC395x_MAX_RETRIES
				    || acb->scan_devices) {
					srb->target_status =
					    SCSI_STAT_SEL_TIMEOUT;
					goto disc1;
				}
				/* Requeue the command and retry shortly */
				free_tag(dcb, srb);
				srb_going_to_waiting_move(dcb, srb);
				dprintkdbg(DBG_KG,
					"disconnect: (pid#%li) Retry\n",
					srb->cmd->pid);
				waiting_set_timer(acb, HZ / 20);
			}
		} else if (srb->state & SRB_DISCONNECT) {
			u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
			/*
			 * SRB_DISCONNECT (This is what we expect!)
			 */
			if (bval & 0x40) {
				dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
					" 0x%02x: ACK set! Other controllers?\n",
					bval);
				/* It could come from another initiator, therefore don't do much ! */
			} else
				waiting_process_next(acb);
		} else if (srb->state & SRB_COMPLETED) {
		      disc1:
			/*
			 ** SRB_COMPLETED
			 */
			free_tag(dcb, srb);
			dcb->active_srb = NULL;
			srb->state = SRB_FREE;
			srb_done(acb, dcb, srb);
		}
	}
}
/*
 * reselect - handle reselection of this initiator by a previously
 * disconnected target.  Reads the reselecting target/LUN from the chip,
 * locates the matching DCB (and disconnected SRB), reprograms the chip's
 * host ID / target ID / sync registers for that device and accepts the
 * message.  Called from interrupt context on a reselection interrupt.
 */
static void reselect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb = NULL;
	u16 rsel_tar_lun_id;
	u8 id, lun;
	u8 arblostflag = 0;	/* set below but only read by commented-out code */
	dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
	clear_fifo(acb, "reselect");
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
	/* Read Reselected Target ID and LUN */
	rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
	if (dcb) {		/* Arbitration lost but Reselection win */
		srb = dcb->active_srb;
		if (!srb) {
			dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
				"but active_srb == NULL\n");
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
			return;
		}
		/* Why the if ? */
		if (!acb->scan_devices) {
			dprintkdbg(DBG_KG, "reselect: (pid#%li) <%02i-%i> "
				"Arb lost but Resel win rsel=%i stat=0x%04x\n",
				srb->cmd->pid, dcb->target_id,
				dcb->target_lun, rsel_tar_lun_id,
				DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
			arblostflag = 1;
			/*srb->state |= SRB_DISCONNECT; */
			/* Requeue the command that lost arbitration and
			 * retry it shortly via the waiting timer. */
			srb->state = SRB_READY;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);
			/* return; */
		}
	}
	/* Read Reselected Target Id and LUN */
	if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
		dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
			"Got %i!\n", rsel_tar_lun_id);
	/* Low byte is the target ID; bits 8..10 hold the LUN from the
	 * IDENTIFY message in the high byte. */
	id = rsel_tar_lun_id & 0xff;
	lun = (rsel_tar_lun_id >> 8) & 7;
	dcb = find_dcb(acb, id, lun);
	if (!dcb) {
		dprintkl(KERN_ERR, "reselect: From non existent device "
			"<%02i-%i>\n", id, lun);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		return;
	}
	acb->active_dcb = dcb;
	if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
		dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
			"disconnection? <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
	if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
		/* Tagged: real SRB is identified later by the tag message;
		 * use the temporary SRB until then. */
		srb = acb->tmp_srb;
		dcb->active_srb = srb;
	} else {
		/* There can be only one! */
		srb = dcb->active_srb;
		if (!srb || !(srb->state & SRB_DISCONNECT)) {
			/*
			 * abort command
			 */
			dprintkl(KERN_DEBUG,
				"reselect: w/o disconnected cmds <%02i-%i>\n",
				dcb->target_id, dcb->target_lun);
			srb = acb->tmp_srb;
			srb->state = SRB_UNEXPECT_RESEL;
			dcb->active_srb = srb;
			enable_msgout_abort(acb, srb);
		} else {
			if (dcb->flag & ABORT_DEV_) {
				/*srb->state = SRB_ABORT_SENT; */
				enable_msgout_abort(acb, srb);
			} else
				srb->state = SRB_DATA_XFER;
		}
	}
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	/* Program HA ID, target ID, period and offset */
	dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);	/* host ID */
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);	/* target ID */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);	/* offset */
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);	/* sync period, wide */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
  2868. static inline u8 tagq_blacklist(char *name)
  2869. {
  2870. #ifndef DC395x_NO_TAGQ
  2871. #if 0
  2872. u8 i;
  2873. for (i = 0; i < BADDEVCNT; i++)
  2874. if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
  2875. return 1;
  2876. #endif
  2877. return 0;
  2878. #else
  2879. return 1;
  2880. #endif
  2881. }
  2882. static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
  2883. {
  2884. /* Check for SCSI format (ANSI and Response data format) */
  2885. if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
  2886. if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
  2887. && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
  2888. /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
  2889. /* ((dcb->dev_type == TYPE_DISK)
  2890. || (dcb->dev_type == TYPE_MOD)) && */
  2891. !tagq_blacklist(((char *)ptr) + 8)) {
  2892. if (dcb->max_command == 1)
  2893. dcb->max_command =
  2894. dcb->acb->tag_max_num;
  2895. dcb->sync_mode |= EN_TAG_QUEUEING;
  2896. /*dcb->tag_mask = 0; */
  2897. } else
  2898. dcb->max_command = 1;
  2899. }
  2900. }
  2901. static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2902. struct ScsiInqData *ptr)
  2903. {
  2904. u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
  2905. dcb->dev_type = bval1;
  2906. /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */
  2907. disc_tagq_set(dcb, ptr);
  2908. }
/* unmap mapped pci regions from SRB */
/*
 * Undo the DMA mappings set up when the command was started: either the
 * driver's own SG-list table plus the caller's scatterlist, or the single
 * contiguous buffer mapping.  Commands without a data phase are left alone.
 */
static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	enum dma_data_direction dir = cmd->sc_data_direction;
	if (cmd->use_sg && dir != PCI_DMA_NONE) {
		/* unmap DC395x SG list */
		/* The driver's internal segment table was mapped TODEVICE
		 * so the chip could fetch it. */
		dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
			srb->sg_bus_addr, SEGMENTX_LEN);
		pci_unmap_single(acb->dev, srb->sg_bus_addr,
				 SEGMENTX_LEN,
				 PCI_DMA_TODEVICE);
		dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
			cmd->use_sg, cmd->request_buffer);
		/* unmap the sg segments */
		pci_unmap_sg(acb->dev,
			     (struct scatterlist *)cmd->request_buffer,
			     cmd->use_sg, dir);
	} else if (cmd->request_buffer && dir != PCI_DMA_NONE) {
		/* Single-buffer case: segment_x[0] holds the bus address. */
		dprintkdbg(DBG_SG, "pci_unmap_srb: buffer=%08x(%05x)\n",
			srb->segment_x[0].address, cmd->request_bufflen);
		pci_unmap_single(acb->dev, srb->segment_x[0].address,
				 cmd->request_bufflen, dir);
	}
}
/* unmap mapped pci sense buffer from SRB */
/*
 * Undo the sense-buffer mapping created by request_sense() and restore the
 * original first SG entry and transfer length, which request_sense() stashed
 * in the last segment_x slot.  No-op unless AUTO_REQSENSE is in progress.
 */
static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	if (!(srb->flag & AUTO_REQSENSE))
		return;
	/* Unmap sense buffer */
	dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
	       srb->segment_x[0].address);
	pci_unmap_single(acb->dev, srb->segment_x[0].address,
			 srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
	/* Restore SG stuff */
	/* request_sense() saved the original entry in the last SG slot
	 * and the original residual count in srb->xferred. */
	srb->total_xfer_length = srb->xferred;
	srb->segment_x[0].address =
	    srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
	srb->segment_x[0].length =
	    srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
}
/*
 * Complete execution of a SCSI command
 * Signal completion to the generic SCSI driver
 *
 * Handles auto-request-sense completion, target status (CHECK CONDITION
 * triggers request_sense(), QUEUE FULL throttles the queue depth), adapter
 * status, INQUIRY post-processing (tag-queueing setup), residual reporting
 * and finally hands the command back to the midlayer via scsi_done().
 */
static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 tempcnt, status;
	struct scsi_cmnd *cmd = srb->cmd;
	enum dma_data_direction dir = cmd->sc_data_direction;
	int ckc_only = 1;
	dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->pid,
		srb->cmd->device->id, srb->cmd->device->lun);
	dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
		srb, cmd->use_sg, srb->sg_index, srb->sg_count,
		cmd->request_buffer);
	status = srb->target_status;
	if (srb->flag & AUTO_REQSENSE) {
		/* A REQUEST SENSE we issued ourselves has completed. */
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
		pci_unmap_srb_sense(acb, srb);
		/*
		 ** target status..........................
		 */
		srb->flag &= ~AUTO_REQSENSE;
		srb->adapter_status = 0;
		srb->target_status = CHECK_CONDITION << 1;
		if (debug_enabled(DBG_1)) {
			/* Decode the sense key for debugging only. */
			switch (cmd->sense_buffer[2] & 0x0f) {
			case NOT_READY:
				dprintkl(KERN_DEBUG,
				    "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case UNIT_ATTENTION:
				dprintkl(KERN_DEBUG,
				    "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case ILLEGAL_REQUEST:
				dprintkl(KERN_DEBUG,
				    "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case MEDIUM_ERROR:
				dprintkl(KERN_DEBUG,
				    "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case HARDWARE_ERROR:
				dprintkl(KERN_DEBUG,
				    "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			}
			if (cmd->sense_buffer[7] >= 6)
				printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
				    "(0x%08x 0x%08x)\n",
				    cmd->sense_buffer[2], cmd->sense_buffer[12],
				    cmd->sense_buffer[13],
				    *((unsigned int *)(cmd->sense_buffer + 3)),
				    *((unsigned int *)(cmd->sense_buffer + 8)));
			else
				printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
				    cmd->sense_buffer[2],
				    *((unsigned int *)(cmd->sense_buffer + 3)));
		}
		if (status == (CHECK_CONDITION << 1)) {
			/* The REQUEST SENSE itself returned CHECK CONDITION:
			 * give up on this target. */
			cmd->result = DID_BAD_TARGET << 16;
			goto ckc_e;
		}
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
		/* NOTE(review): both branches below build the identical
		 * result; the commented-out SET_RES_DID suggests the
		 * underflow branch was meant to differ.  Kept as-is to
		 * preserve behavior -- confirm intent before changing. */
		if (srb->total_xfer_length
		    && srb->total_xfer_length >= cmd->underflow)
			cmd->result =
			    MK_RES_LNX(DRIVER_SENSE, DID_OK,
				       srb->end_message, CHECK_CONDITION);
		/*SET_RES_DID(cmd->result,DID_OK) */
		else
			cmd->result =
			    MK_RES_LNX(DRIVER_SENSE, DID_OK,
				       srb->end_message, CHECK_CONDITION);
		goto ckc_e;
	}
	/*************************************************************/
	if (status) {
		/*
		 * target status..........................
		 */
		if (status_byte(status) == CHECK_CONDITION) {
			/* Issue an auto REQUEST SENSE; this function will be
			 * re-entered when it completes. */
			request_sense(acb, dcb, srb);
			return;
		} else if (status_byte(status) == QUEUE_FULL) {
			/* Shrink the queue depth to the number of commands
			 * the device actually accepted, and retry. */
			tempcnt = (u8)list_size(&dcb->srb_going_list);
			dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
			     dcb->target_id, dcb->target_lun, tempcnt);
			if (tempcnt > 1)
				tempcnt--;
			dcb->max_command = tempcnt;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);
			srb->adapter_status = 0;
			srb->target_status = 0;
			return;
		} else if (status == SCSI_STAT_SEL_TIMEOUT) {
			srb->adapter_status = H_SEL_TIMEOUT;
			srb->target_status = 0;
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			srb->adapter_status = 0;
			SET_RES_DID(cmd->result, DID_ERROR);
			SET_RES_MSG(cmd->result, srb->end_message);
			SET_RES_TARGET(cmd->result, status);
		}
	} else {
		/*
		 ** process initiator status..........................
		 */
		status = srb->adapter_status;
		if (status & H_OVER_UNDER_RUN) {
			srb->target_status = 0;
			SET_RES_DID(cmd->result, DID_OK);
			SET_RES_MSG(cmd->result, srb->end_message);
		} else if (srb->status & PARITY_ERROR) {
			SET_RES_DID(cmd->result, DID_PARITY);
			SET_RES_MSG(cmd->result, srb->end_message);
		} else {	/* No error */
			srb->adapter_status = 0;
			srb->target_status = 0;
			SET_RES_DID(cmd->result, DID_OK);
		}
	}
	/* Sync DMA buffers back for the CPU before inspecting the data. */
	if (dir != PCI_DMA_NONE) {
		if (cmd->use_sg)
			pci_dma_sync_sg_for_cpu(acb->dev,
					(struct scatterlist *)cmd->
					request_buffer, cmd->use_sg, dir);
		else if (cmd->request_buffer)
			pci_dma_sync_single_for_cpu(acb->dev,
					    srb->segment_x[0].address,
					    cmd->request_bufflen, dir);
	}
	ckc_only = 0;
	/* Check Error Conditions */
      ckc_e:
	if (cmd->cmnd[0] == INQUIRY) {
		/* Peek at the INQUIRY response to set up tag queueing. */
		unsigned char *base = NULL;
		struct ScsiInqData *ptr;
		unsigned long flags = 0;
		if (cmd->use_sg) {
			/* Map the first SG element to reach the data. */
			struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
			size_t offset = 0, len = sizeof(struct ScsiInqData);
			local_irq_save(flags);
			base = scsi_kmap_atomic_sg(sg, cmd->use_sg, &offset, &len);
			ptr = (struct ScsiInqData *)(base + offset);
		} else
			ptr = (struct ScsiInqData *)(cmd->request_buffer);
		if (!ckc_only && (cmd->result & RES_DID) == 0
		    && cmd->cmnd[2] == 0 && cmd->request_bufflen >= 8
		    && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
			dcb->inquiry7 = ptr->Flags;
	/*if( srb->cmd->cmnd[0] == INQUIRY && */
	/*  (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
		if ((cmd->result == (DID_OK << 16)
		     || status_byte(cmd->result) &
		     CHECK_CONDITION)) {
			if (!dcb->init_tcq_flag) {
				add_dev(acb, dcb, ptr);
				dcb->init_tcq_flag = 1;
			}
		}
		if (cmd->use_sg) {
			scsi_kunmap_atomic_sg(base);
			local_irq_restore(flags);
		}
	}
	/* Here is the info for Doug Gilbert's sg3 ... */
	cmd->resid = srb->total_xfer_length;
	/* This may be interpreted by sb. or not ... */
	cmd->SCp.this_residual = srb->total_xfer_length;
	cmd->SCp.buffers_residual = 0;
	if (debug_enabled(DBG_KG)) {
		if (srb->total_xfer_length)
			dprintkdbg(DBG_KG, "srb_done: (pid#%li) <%02i-%i> "
				"cmnd=0x%02x Missed %i bytes\n",
				cmd->pid, cmd->device->id, cmd->device->lun,
				cmd->cmnd[0], srb->total_xfer_length);
	}
	srb_going_remove(dcb, srb);
	/* Add to free list */
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
	else {
		dprintkdbg(DBG_0, "srb_done: (pid#%li) done result=0x%08x\n",
			cmd->pid, cmd->result);
		srb_free_insert(acb, srb);
	}
	pci_unmap_srb(acb, srb);
	/* Hand the command back to the SCSI midlayer. */
	cmd->scsi_done(cmd);
	waiting_process_next(acb);
}
  3158. /* abort all cmds in our queues */
  3159. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
  3160. struct scsi_cmnd *cmd, u8 force)
  3161. {
  3162. struct DeviceCtlBlk *dcb;
  3163. dprintkl(KERN_INFO, "doing_srb_done: pids ");
  3164. list_for_each_entry(dcb, &acb->dcb_list, list) {
  3165. struct ScsiReqBlk *srb;
  3166. struct ScsiReqBlk *tmp;
  3167. struct scsi_cmnd *p;
  3168. list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
  3169. enum dma_data_direction dir;
  3170. int result;
  3171. p = srb->cmd;
  3172. dir = p->sc_data_direction;
  3173. result = MK_RES(0, did_flag, 0, 0);
  3174. printk("G:%li(%02i-%i) ", p->pid,
  3175. p->device->id, p->device->lun);
  3176. srb_going_remove(dcb, srb);
  3177. free_tag(dcb, srb);
  3178. srb_free_insert(acb, srb);
  3179. p->result = result;
  3180. pci_unmap_srb_sense(acb, srb);
  3181. pci_unmap_srb(acb, srb);
  3182. if (force) {
  3183. /* For new EH, we normally don't need to give commands back,
  3184. * as they all complete or all time out */
  3185. p->scsi_done(p);
  3186. }
  3187. }
  3188. if (!list_empty(&dcb->srb_going_list))
  3189. dprintkl(KERN_DEBUG,
  3190. "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
  3191. dcb->target_id, dcb->target_lun);
  3192. if (dcb->tag_mask)
  3193. dprintkl(KERN_DEBUG,
  3194. "tag_mask for <%02i-%i> should be empty, is %08x!\n",
  3195. dcb->target_id, dcb->target_lun,
  3196. dcb->tag_mask);
  3197. /* Waiting queue */
  3198. list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
  3199. int result;
  3200. p = srb->cmd;
  3201. result = MK_RES(0, did_flag, 0, 0);
  3202. printk("W:%li<%02i-%i>", p->pid, p->device->id,
  3203. p->device->lun);
  3204. srb_waiting_remove(dcb, srb);
  3205. srb_free_insert(acb, srb);
  3206. p->result = result;
  3207. pci_unmap_srb_sense(acb, srb);
  3208. pci_unmap_srb(acb, srb);
  3209. if (force) {
  3210. /* For new EH, we normally don't need to give commands back,
  3211. * as they all complete or all time out */
  3212. cmd->scsi_done(cmd);
  3213. }
  3214. }
  3215. if (!list_empty(&dcb->srb_waiting_list))
  3216. dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
  3217. list_size(&dcb->srb_waiting_list), dcb->target_id,
  3218. dcb->target_lun);
  3219. dcb->flag &= ~ABORT_DEV_;
  3220. }
  3221. printk("\n");
  3222. }
/*
 * reset_scsi_bus - assert SCSI bus reset on the chip and busy-wait until
 * the chip signals the reset interrupt.  Sets RESET_DEV so the subsequent
 * reset-detect path knows the reset was self-inflicted.
 */
static void reset_scsi_bus(struct AdapterCtlBlk *acb)
{
	dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
	acb->acb_flag |= RESET_DEV;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
	/* Spin until the chip reports the SCSI reset interrupt. */
	while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
		/* nothing */;
}
/*
 * set_basic_config - program the chip's baseline operating registers:
 * selection timeout, SCSI configuration (parity optional), host ID,
 * asynchronous transfer mode, LED off, DMA FIFO configuration, and
 * finally clear and re-enable interrupts.
 */
static void set_basic_config(struct AdapterCtlBlk *acb)
{
	u8 bval;
	u16 wval;
	DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
	if (acb->config & HCC_PARITY)
		bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
	else
		bval = PHASELATCH | INITIATOR | BLOCKRST;
	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
	/* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */
	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);	/* was 0x13: default */
	/* program Host ID */
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	/* set ansynchronous transfer */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
	/* Turn LED control off */
	wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
	DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
	/* DMA config */
	wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
	wval |=
	    DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ;
	DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
	/* Clear pending interrupt status */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	/* Enable SCSI interrupt */
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
		      /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */
	    );
}
/*
 * scsi_reset_detect - react to a SCSI bus reset detected by the chip.
 * Resets the SCSI and DMA modules, reprograms the basic configuration,
 * and either just marks the reset done (if we triggered it ourselves via
 * RESET_DEV) or aborts all outstanding commands with DID_RESET and
 * restarts command processing.
 */
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
	dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
	/* delay half a second */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
	udelay(500);
	/* Maybe we locked up the bus? Then lets wait even longer ... */
	/* Tell the midlayer not to touch the bus for a while. */
	acb->scsi_host->last_reset =
	    jiffies + 5 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;
	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);
	/*1.25 */
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */
	if (acb->acb_flag & RESET_DEV) {	/* RESET_DETECT, RESET_DONE, RESET_DEV */
		/* We caused this reset ourselves (reset_scsi_bus). */
		acb->acb_flag |= RESET_DONE;
	} else {
		/* Externally triggered reset: fail everything in flight. */
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);
		/*DC395x_RecoverSRB( acb ); */
		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}
/*
 * request_sense - issue an automatic REQUEST SENSE for a command that
 * returned CHECK CONDITION.  The SRB's first SG entry and transfer length
 * are stashed in the last segment_x slot (restored later by
 * pci_unmap_srb_sense()) and replaced with a mapping of the command's
 * sense buffer.  If the bus cannot be acquired the SRB is requeued and
 * retried via the waiting timer.
 */
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (pid#%li) <%02i-%i>\n",
		cmd->pid, cmd->device->id, cmd->device->lun);
	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;
	/* KG: Can this prevent crap sense data ? */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
	/* Save some data */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* srb->segment_x : a one entry of S/G list table */
	srb->total_xfer_length = sizeof(cmd->sense_buffer);
	srb->segment_x[0].length = sizeof(cmd->sense_buffer);
	/* Map sense buffer */
	srb->segment_x[0].address =
	    pci_map_single(acb->dev, cmd->sense_buffer,
			   sizeof(cmd->sense_buffer), PCI_DMA_FROMDEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
	       cmd->sense_buffer, srb->segment_x[0].address,
	       sizeof(cmd->sense_buffer));
	srb->sg_count = 1;
	srb->sg_index = 0;
	if (start_scsi(acb, dcb, srb)) {	/* Should only happen, if sb. else grabs the bus */
		dprintkl(KERN_DEBUG,
			"request_sense: (pid#%li) failed <%02i-%i>\n",
			srb->cmd->pid, dcb->target_id, dcb->target_lun);
		srb_going_to_waiting_move(dcb, srb);
		waiting_set_timer(acb, HZ / 100);
	}
}
  3330. /**
  3331. * device_alloc - Allocate a new device instance. This create the
  3332. * devices instance and sets up all the data items. The adapter
  3333. * instance is required to obtain confiuration information for this
  3334. * device. This does *not* add this device to the adapters device
  3335. * list.
  3336. *
  3337. * @acb: The adapter to obtain configuration information from.
  3338. * @target: The target for the new device.
  3339. * @lun: The lun for the new device.
  3340. *
  3341. * Return the new device if successful or NULL on failure.
  3342. **/
  3343. static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
  3344. u8 target, u8 lun)
  3345. {
  3346. struct NvRamType *eeprom = &acb->eeprom;
  3347. u8 period_index = eeprom->target[target].period & 0x07;
  3348. struct DeviceCtlBlk *dcb;
  3349. dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
  3350. dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
  3351. if (!dcb)
  3352. return NULL;
  3353. dcb->acb = NULL;
  3354. INIT_LIST_HEAD(&dcb->srb_going_list);
  3355. INIT_LIST_HEAD(&dcb->srb_waiting_list);
  3356. dcb->active_srb = NULL;
  3357. dcb->tag_mask = 0;
  3358. dcb->max_command = 1;
  3359. dcb->target_id = target;
  3360. dcb->target_lun = lun;
  3361. #ifndef DC395x_NO_DISCONNECT
  3362. dcb->identify_msg =
  3363. IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
  3364. #else
  3365. dcb->identify_msg = IDENTIFY(0, lun);
  3366. #endif
  3367. dcb->dev_mode = eeprom->target[target].cfg0;
  3368. dcb->inquiry7 = 0;
  3369. dcb->sync_mode = 0;
  3370. dcb->min_nego_period = clock_period[period_index];
  3371. dcb->sync_period = 0;
  3372. dcb->sync_offset = 0;
  3373. dcb->flag = 0;
  3374. #ifndef DC395x_NO_WIDE
  3375. if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
  3376. && (acb->config & HCC_WIDE_CARD))
  3377. dcb->sync_mode |= WIDE_NEGO_ENABLE;
  3378. #endif
  3379. #ifndef DC395x_NO_SYNC
  3380. if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
  3381. if (!(lun) || current_sync_offset)
  3382. dcb->sync_mode |= SYNC_NEGO_ENABLE;
  3383. #endif
  3384. if (dcb->target_lun != 0) {
  3385. /* Copy settings */
  3386. struct DeviceCtlBlk *p;
  3387. list_for_each_entry(p, &acb->dcb_list, list)
  3388. if (p->target_id == dcb->target_id)
  3389. break;
  3390. dprintkdbg(DBG_1,
  3391. "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
  3392. dcb->target_id, dcb->target_lun,
  3393. p->target_id, p->target_lun);
  3394. dcb->sync_mode = p->sync_mode;
  3395. dcb->sync_period = p->sync_period;
  3396. dcb->min_nego_period = p->min_nego_period;
  3397. dcb->sync_offset = p->sync_offset;
  3398. dcb->inquiry7 = p->inquiry7;
  3399. }
  3400. return dcb;
  3401. }
  3402. /**
  3403. * adapter_add_device - Adds the device instance to the adaptor instance.
  3404. *
  3405. * @acb: The adapter device to be updated
  3406. * @dcb: A newly created and intialised device instance to add.
  3407. **/
  3408. static void adapter_add_device(struct AdapterCtlBlk *acb,
  3409. struct DeviceCtlBlk *dcb)
  3410. {
  3411. /* backpointer to adapter */
  3412. dcb->acb = acb;
  3413. /* set run_robin to this device if it is currently empty */
  3414. if (list_empty(&acb->dcb_list))
  3415. acb->dcb_run_robin = dcb;
  3416. /* add device to list */
  3417. list_add_tail(&dcb->list, &acb->dcb_list);
  3418. /* update device maps */
  3419. acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
  3420. acb->children[dcb->target_id][dcb->target_lun] = dcb;
  3421. }
  3422. /**
  3423. * adapter_remove_device - Removes the device instance from the adaptor
  3424. * instance. The device instance is not check in any way or freed by this.
  3425. * The caller is expected to take care of that. This will simply remove the
  3426. * device from the adapters data strcutures.
  3427. *
  3428. * @acb: The adapter device to be updated
  3429. * @dcb: A device that has previously been added to the adapter.
  3430. **/
  3431. static void adapter_remove_device(struct AdapterCtlBlk *acb,
  3432. struct DeviceCtlBlk *dcb)
  3433. {
  3434. struct DeviceCtlBlk *i;
  3435. struct DeviceCtlBlk *tmp;
  3436. dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
  3437. dcb->target_id, dcb->target_lun);
  3438. /* fix up any pointers to this device that we have in the adapter */
  3439. if (acb->active_dcb == dcb)
  3440. acb->active_dcb = NULL;
  3441. if (acb->dcb_run_robin == dcb)
  3442. acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
  3443. /* unlink from list */
  3444. list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
  3445. if (dcb == i) {
  3446. list_del(&i->list);
  3447. break;
  3448. }
  3449. /* clear map and children */
  3450. acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
  3451. acb->children[dcb->target_id][dcb->target_lun] = NULL;
  3452. dcb->acb = NULL;
  3453. }
  3454. /**
  3455. * adapter_remove_and_free_device - Removes a single device from the adapter
  3456. * and then frees the device information.
  3457. *
  3458. * @acb: The adapter device to be updated
  3459. * @dcb: A device that has previously been added to the adapter.
  3460. */
  3461. static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
  3462. struct DeviceCtlBlk *dcb)
  3463. {
  3464. if (list_size(&dcb->srb_going_list) > 1) {
  3465. dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
  3466. "Won't remove because of %i active requests.\n",
  3467. dcb->target_id, dcb->target_lun,
  3468. list_size(&dcb->srb_going_list));
  3469. return;
  3470. }
  3471. adapter_remove_device(acb, dcb);
  3472. kfree(dcb);
  3473. }
  3474. /**
  3475. * adapter_remove_and_free_all_devices - Removes and frees all of the
  3476. * devices associated with the specified adapter.
  3477. *
  3478. * @acb: The adapter from which all devices should be removed.
  3479. **/
  3480. static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
  3481. {
  3482. struct DeviceCtlBlk *dcb;
  3483. struct DeviceCtlBlk *tmp;
  3484. dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
  3485. list_size(&acb->dcb_list));
  3486. list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
  3487. adapter_remove_and_free_device(acb, dcb);
  3488. }
  3489. /**
  3490. * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
  3491. * scsi device that we need to deal with. We allocate a new device and then
  3492. * insert that device into the adapters device list.
  3493. *
  3494. * @scsi_device: The new scsi device that we need to handle.
  3495. **/
  3496. static int dc395x_slave_alloc(struct scsi_device *scsi_device)
  3497. {
  3498. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3499. struct DeviceCtlBlk *dcb;
  3500. dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
  3501. if (!dcb)
  3502. return -ENOMEM;
  3503. adapter_add_device(acb, dcb);
  3504. return 0;
  3505. }
  3506. /**
  3507. * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
  3508. * device that is going away.
  3509. *
  3510. * @scsi_device: The new scsi device that we need to handle.
  3511. **/
  3512. static void dc395x_slave_destroy(struct scsi_device *scsi_device)
  3513. {
  3514. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3515. struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
  3516. if (dcb)
  3517. adapter_remove_and_free_device(acb, dcb);
  3518. }
  3519. /**
  3520. * trms1040_wait_30us: wait for 30 us
  3521. *
  3522. * Waits for 30us (using the chip by the looks of it..)
  3523. *
  3524. * @io_port: base I/O address
  3525. **/
static void __devinit trms1040_wait_30us(unsigned long io_port)
{
	/* ScsiPortStallExecution(30); wait 30 us */
	/* Program the chip's general-purpose timer and busy-wait for the
	 * timeout flag; used to pace the EEPROM bit-bang protocol. */
	outb(5, io_port + TRM_S1040_GEN_TIMER);
	while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
		/* nothing */ ;
}
  3533. /**
  3534. * trms1040_write_cmd - write the secified command and address to
  3535. * chip
  3536. *
  3537. * @io_port: base I/O address
  3538. * @cmd: SB + op code (command) to send
  3539. * @addr: address to send
  3540. **/
  3541. static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
  3542. {
  3543. int i;
  3544. u8 send_data;
  3545. /* program SB + OP code */
  3546. for (i = 0; i < 3; i++, cmd <<= 1) {
  3547. send_data = NVR_SELECT;
  3548. if (cmd & 0x04) /* Start from bit 2 */
  3549. send_data |= NVR_BITOUT;
  3550. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3551. trms1040_wait_30us(io_port);
  3552. outb((send_data | NVR_CLOCK),
  3553. io_port + TRM_S1040_GEN_NVRAM);
  3554. trms1040_wait_30us(io_port);
  3555. }
  3556. /* send address */
  3557. for (i = 0; i < 7; i++, addr <<= 1) {
  3558. send_data = NVR_SELECT;
  3559. if (addr & 0x40) /* Start from bit 6 */
  3560. send_data |= NVR_BITOUT;
  3561. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3562. trms1040_wait_30us(io_port);
  3563. outb((send_data | NVR_CLOCK),
  3564. io_port + TRM_S1040_GEN_NVRAM);
  3565. trms1040_wait_30us(io_port);
  3566. }
  3567. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3568. trms1040_wait_30us(io_port);
  3569. }
  3570. /**
  3571. * trms1040_set_data - store a single byte in the eeprom
  3572. *
  3573. * Called from write all to write a single byte into the SSEEPROM
  3574. * Which is done one bit at a time.
  3575. *
  3576. * @io_port: base I/O address
  3577. * @addr: offset into EEPROM
  3578. * @byte: bytes to write
  3579. **/
static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
{
	int i;
	u8 send_data;
	/* Send write command & address */
	trms1040_write_cmd(io_port, 0x05, addr);
	/* Write data */
	/* Clock out the data byte one bit at a time, MSB first. */
	for (i = 0; i < 8; i++, byte <<= 1) {
		send_data = NVR_SELECT;
		if (byte & 0x80)	/* Start from bit 7 */
			send_data |= NVR_BITOUT;
		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);
	/* Disable chip select */
	/* Cycle chip-select to start the EEPROM's internal write cycle. */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);
	/* Wait for write ready */
	/* Poll the data-in line until the EEPROM reports ready.  Note:
	 * no timeout -- a dead EEPROM would hang here (probe-time only). */
	while (1) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
			break;
	}
	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
}
/**
 * trms1040_write_all - write 128 bytes to the eeprom
 *
 * Write the supplied 128 bytes to the chip's SEEPROM
 *
 * @eeprom: the data to write
 * @io_port: the base io port
 **/
static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;	/* view the NvRam image as raw bytes */
	u8 addr;

	/* Enable SEEPROM access via the general control register */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/* write enable */
	trms1040_write_cmd(io_port, 0x04, 0xFF);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* write all 128 bytes, one byte at a time */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		trms1040_set_data(io_port, addr, *b_eeprom);

	/* write disable */
	trms1040_write_cmd(io_port, 0x04, 0x00);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Disable SEEPROM */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}
/**
 * trms1040_get_data - get a single byte from the eeprom
 *
 * Called from read all to read a single byte from the SEEPROM,
 * which is done one bit at a time.
 *
 * @io_port: base I/O address
 * @addr: offset into SEEPROM
 *
 * Returns the byte read.
 **/
static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
{
	int i;
	u8 read_byte;
	u8 result = 0;

	/* Send read command & address */
	trms1040_write_cmd(io_port, 0x06, addr);

	/* read data: clock in 8 bits, MSB first */
	for (i = 0; i < 8; i++) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);

		/* Get data bit while falling edge */
		read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
		result <<= 1;
		if (read_byte & NVR_BITIN)
			result |= 1;

		trms1040_wait_30us(io_port);
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	return result;
}
/**
 * trms1040_read_all - read all bytes from the eeprom
 *
 * Read the 128 bytes from the SEEPROM.
 *
 * @eeprom: where to store the data
 * @io_port: the base io port
 **/
static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;	/* fill the NvRam image as raw bytes */
	u8 addr;

	/* Enable SEEPROM access via the general control register */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/* read all 128 bytes, one byte at a time */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		*b_eeprom = trms1040_get_data(io_port, addr);

	/* Disable SEEPROM */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}
/**
 * check_eeprom - get and check contents of the eeprom
 *
 * Read seeprom 128 bytes into the memory provided in eeprom.
 * Checks the checksum and if it's not correct it uses a set of default
 * values.
 *
 * @eeprom: caller allocated structure to read the eeprom data into
 * @io_port: io port to read from
 **/
static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
	u16 *w_eeprom = (u16 *)eeprom;
	u16 w_addr;
	u16 cksum;
	u32 d_addr;
	u32 *d_eeprom;

	trms1040_read_all(eeprom, io_port);	/* read eeprom */

	/* a valid image: the 64 16-bit words sum to the magic 0x1234 */
	cksum = 0;
	for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
	     w_addr++, w_eeprom++)
		cksum += *w_eeprom;
	if (cksum != 0x1234) {
		/*
		 * Checksum is wrong.
		 * Load a set of defaults into the eeprom buffer
		 */
		dprintkl(KERN_WARNING,
			"EEProm checksum error: using default values and options.\n");
		eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->sub_sys_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->sub_class = 0x00;
		eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->device_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->reserved = 0x00;

		/* default per-target settings, filled 32 bits at a time */
		for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
		     d_addr < 16; d_addr++, d_eeprom++)
			*d_eeprom = 0x00000077;	/* cfg3,cfg2,period,cfg0 */

		/* adapter-level defaults follow directly after the targets */
		*d_eeprom++ = 0x04000F07;	/* max_tag,delay_time,channel_cfg,scsi_id */
		*d_eeprom++ = 0x00000015;	/* reserved1,boot_lun,boot_target,reserved0 */
		for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
			*d_eeprom = 0x00;

		/* Now load defaults (maybe set by boot/module params) */
		set_safe_settings();
		fix_settings();
		eeprom_override(eeprom);

		/* recompute the checksum word (the final u16 of the image)
		 * so the stored image again sums to 0x1234 */
		eeprom->cksum = 0x00;
		for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
		     w_addr < 63; w_addr++, w_eeprom++)
			cksum += *w_eeprom;

		*w_eeprom = 0x1234 - cksum;
		trms1040_write_all(eeprom, io_port);

		eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
	} else {
		set_safe_settings();
		eeprom_index_to_delay(eeprom);
		eeprom_override(eeprom);
	}
}
/**
 * print_eeprom_settings - output the eeprom settings
 * to the kernel log so people can see what they were.
 *
 * @eeprom: The eeprom data structure to show details for.
 **/
static void __devinit print_eeprom_settings(struct NvRamType *eeprom)
{
	/* clock_speed[] maps the eeprom period index to tenths of MHz */
	dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
		eeprom->scsi_id,
		eeprom->target[0].period,
		clock_speed[eeprom->target[0].period] / 10,
		clock_speed[eeprom->target[0].period] % 10,
		eeprom->target[0].cfg0);
	dprintkl(KERN_INFO, "               AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
		eeprom->channel_cfg, eeprom->max_tag,
		1 << eeprom->max_tag, eeprom->delay_time);
}
  3784. /* Free SG tables */
  3785. static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
  3786. {
  3787. int i;
  3788. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3789. for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
  3790. kfree(acb->srb_array[i].segment_x);
  3791. }
  3792. /*
  3793. * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
  3794. * should never cross a page boundary */
  3795. static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
  3796. {
  3797. const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
  3798. *SEGMENTX_LEN;
  3799. int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
  3800. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3801. int srb_idx = 0;
  3802. unsigned i = 0;
  3803. struct SGentry *ptr;
  3804. for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
  3805. acb->srb_array[i].segment_x = NULL;
  3806. dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
  3807. while (pages--) {
  3808. ptr = (struct SGentry *)kmalloc(PAGE_SIZE, GFP_KERNEL);
  3809. if (!ptr) {
  3810. adapter_sg_tables_free(acb);
  3811. return 1;
  3812. }
  3813. dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
  3814. PAGE_SIZE, ptr, srb_idx);
  3815. i = 0;
  3816. while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
  3817. acb->srb_array[srb_idx++].segment_x =
  3818. ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
  3819. }
  3820. if (i < srbs_per_page)
  3821. acb->srb.segment_x =
  3822. ptr + (i * DC395x_MAX_SG_LISTENTRY);
  3823. else
  3824. dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
  3825. return 0;
  3826. }
/**
 * adapter_print_config - print adapter connection and termination
 * config
 *
 * The io port in the adapter needs to have been set before calling
 * this function.
 *
 * @acb: The adapter to print the information for.
 **/
static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
{
	u8 bval;

	/* connector presence bits are active-low in GEN_STATUS */
	bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
	dprintkl(KERN_INFO, "%sConnectors: ",
		((bval & WIDESCSI) ? "(Wide) " : ""));
	if (!(bval & CON5068))
		printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
	if (!(bval & CON68))
		printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
	if (!(bval & CON50))
		printk("int50 ");
	/* all three bits clear means all three connectors "occupied" -
	 * physically impossible, so flag it */
	if ((bval & (CON5068 | CON50 | CON68)) ==
	    0 /*(CON5068 | CON50 | CON68) */ )
		printk(" Oops! (All 3?) ");

	bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
	printk(" Termination: ");
	if (bval & DIS_TERM)
		printk("Disabled\n");
	else {
		if (bval & AUTOTERM)
			printk("Auto ");
		if (bval & LOW8TERM)
			printk("Low ");
		if (bval & UP8TERM)
			printk("High ");
		printk("\n");
	}
}
/**
 * adapter_init_params - Initialize the various parameters in the
 * adapter structure. Note that the pointer to the scsi_host is set
 * early (when this instance is created) and the io_port and irq
 * values are set later after they have been reserved. This just gets
 * everything set to a good starting position.
 *
 * The eeprom structure in the adapter needs to have been set before
 * calling this function.
 *
 * @acb: The adapter to initialize.
 **/
static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;
	int i;

	/* NOTE: acb->scsi_host is set at scsi_host/acb creation time */
	/* NOTE: acb->io_port_base is set at port registration time */
	/* NOTE: acb->io_port_len is set at port registration time */

	INIT_LIST_HEAD(&acb->dcb_list);
	acb->dcb_run_robin = NULL;
	acb->active_dcb = NULL;

	INIT_LIST_HEAD(&acb->srb_free_list);
	/* temp SRB for Q tag used or abort command used */
	acb->tmp_srb = &acb->srb;
	init_timer(&acb->waiting_timer);
	init_timer(&acb->selto_timer);

	acb->srb_count = DC395x_MAX_SRB_CNT;

	acb->sel_timeout = DC395x_SEL_TIMEOUT;	/* timeout=250ms */
	/* NOTE: acb->irq_level is set at IRQ registration time */

	/* tag count comes from the eeprom as a power-of-two exponent,
	 * clamped to 30 */
	acb->tag_max_num = 1 << eeprom->max_tag;
	if (acb->tag_max_num > 30)
		acb->tag_max_num = 30;

	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	acb->gmode2 = eeprom->channel_cfg;
	acb->config = 0;	/* NOTE: actually set in adapter_init_chip */

	if (eeprom->channel_cfg & NAC_SCANLUN)
		acb->lun_chk = 1;
	acb->scan_devices = 1;

	acb->scsi_host->this_id = eeprom->scsi_id;
	acb->hostid_bit = (1 << acb->scsi_host->this_id);

	for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
		acb->dcb_map[i] = 0;

	acb->msg_len = 0;

	/* link static array of srbs into the srb free list */
	/* NOTE(review): the loop stops at srb_count - 1, so the last
	 * array entry never reaches the free list - confirm intentional */
	for (i = 0; i < acb->srb_count - 1; i++)
		srb_free_insert(acb, &acb->srb_array[i]);
}
  3913. /**
  3914. * adapter_init_host - Initialize the scsi host instance based on
  3915. * values that we have already stored in the adapter instance. There's
  3916. * some mention that a lot of these are deprecated, so we won't use
  3917. * them (we'll use the ones in the adapter instance) but we'll fill
  3918. * them in in case something else needs them.
  3919. *
  3920. * The eeprom structure, irq and io ports in the adapter need to have
  3921. * been set before calling this function.
  3922. *
  3923. * @host: The scsi host instance to fill in the values for.
  3924. **/
  3925. static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
  3926. {
  3927. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
  3928. struct NvRamType *eeprom = &acb->eeprom;
  3929. host->max_cmd_len = 24;
  3930. host->can_queue = DC395x_MAX_CMD_QUEUE;
  3931. host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
  3932. host->this_id = (int)eeprom->scsi_id;
  3933. host->io_port = acb->io_port_base;
  3934. host->n_io_port = acb->io_port_len;
  3935. host->dma_channel = -1;
  3936. host->unique_id = acb->io_port_base;
  3937. host->irq = acb->irq_level;
  3938. host->last_reset = jiffies;
  3939. host->max_id = 16;
  3940. if (host->max_id - 1 == eeprom->scsi_id)
  3941. host->max_id--;
  3942. #ifdef CONFIG_SCSI_MULTI_LUN
  3943. if (eeprom->channel_cfg & NAC_SCANLUN)
  3944. host->max_lun = 8;
  3945. else
  3946. host->max_lun = 1;
  3947. #else
  3948. host->max_lun = 1;
  3949. #endif
  3950. }
/**
 * adapter_init_chip - Get the chip into a known state and figure out
 * some of the settings that apply to this adapter.
 *
 * The io port in the adapter needs to have been set before calling
 * this function. The config will be configured correctly on return.
 *
 * @acb: The adapter which we are to init.
 **/
static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;

	/* Mask all the interrupt */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);

	/* Reset SCSI module */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);

	/* Reset PCI/DMA module */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	udelay(20);

	/* program configuration 0 */
	acb->config = HCC_AUTOTERM | HCC_PARITY;
	if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
		acb->config |= HCC_WIDE_CARD;
	if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
		acb->config |= HCC_SCSI_RESET;

	if (acb->config & HCC_SCSI_RESET) {
		dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
		DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

		/*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
		/*spin_unlock_irq (&io_request_lock); */
		udelay(500);

		/* hold off command processing until the bus has settled:
		 * half a second plus the eeprom-configured reset delay */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;

		/*spin_lock_irq (&io_request_lock); */
	}
}
/**
 * adapter_init - Grab the resources for the card, setup the adapter
 * information, set the card into a known state, create the various
 * tables etc etc. This basically gets all adapter information all up
 * to date, initialised and gets the chip in sync with it.
 *
 * @acb: This host's adapter control block
 * @io_port: The base I/O port
 * @io_port_len: The length of the I/O port region
 * @irq: IRQ
 *
 * Returns 0 if the initialization succeeds, any other value on
 * failure.
 **/
static int __devinit adapter_init(struct AdapterCtlBlk *acb,
	unsigned long io_port, u32 io_port_len, unsigned int irq)
{
	if (!request_region(io_port, io_port_len, DC395X_NAME)) {
		dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
		goto failed;
	}
	/* store port base to indicate we have registered it */
	acb->io_port_base = io_port;
	acb->io_port_len = io_port_len;

	if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
		/* release the region we just claimed */
		dprintkl(KERN_INFO, "Failed to register IRQ\n");
		goto failed;
	}
	/* store irq to indicate we have registered it */
	acb->irq_level = irq;

	/* get eeprom configuration information and command line settings etc */
	check_eeprom(&acb->eeprom, io_port);
	print_eeprom_settings(&acb->eeprom);

	/* setup adapter control block */
	adapter_init_params(acb);

	/* display card connectors/termination settings */
	adapter_print_config(acb);

	if (adapter_sg_tables_alloc(acb)) {
		dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
		goto failed;
	}
	adapter_init_scsi_host(acb->scsi_host);
	adapter_init_chip(acb);
	set_basic_config(acb);

	dprintkdbg(DBG_0,
		"adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
		"size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
		acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
		sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
	return 0;

failed:
	/* NOTE(review): this cleanup assumes irq_level/io_port_base are
	 * zero until the matching resource was claimed - presumably the
	 * hostdata area is zeroed at allocation; verify against the
	 * scsi_host_alloc() caller */
	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);

	adapter_sg_tables_free(acb);
	return 1;
}
/**
 * adapter_uninit_chip - cleanly shut down the scsi controller chip,
 * stopping all operations and disabling interrupt generation on the
 * card.
 *
 * @acb: The adapter which we are to shutdown.
 **/
static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
{
	/* disable interrupts */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);

	/* reset the scsi bus (only if we were the ones driving resets) */
	if (acb->config & HCC_SCSI_RESET)
		reset_scsi_bus(acb);

	/* clear any pending interrupt state (read has side effects) */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
}
  4065. /**
  4066. * adapter_uninit - Shut down the chip and release any resources that
  4067. * we had allocated. Once this returns the adapter should not be used
  4068. * anymore.
  4069. *
  4070. * @acb: The adapter which we are to un-initialize.
  4071. **/
  4072. static void adapter_uninit(struct AdapterCtlBlk *acb)
  4073. {
  4074. unsigned long flags;
  4075. DC395x_LOCK_IO(acb->scsi_host, flags);
  4076. /* remove timers */
  4077. if (timer_pending(&acb->waiting_timer))
  4078. del_timer(&acb->waiting_timer);
  4079. if (timer_pending(&acb->selto_timer))
  4080. del_timer(&acb->selto_timer);
  4081. adapter_uninit_chip(acb);
  4082. adapter_remove_and_free_all_devices(acb);
  4083. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  4084. if (acb->irq_level)
  4085. free_irq(acb->irq_level, acb);
  4086. if (acb->io_port_base)
  4087. release_region(acb->io_port_base, acb->io_port_len);
  4088. adapter_sg_tables_free(acb);
  4089. }
/* append formatted text at 'pos' (a local in dc395x_proc_info) */
#undef SPRINTF
#define SPRINTF(args...) pos += sprintf(pos, args)

/* emit a fixed-width Yes/No column */
#undef YESNO
#define YESNO(YN) \
	if (YN) SPRINTF(" Yes ");\
	else SPRINTF(" No ")

/*
 * dc395x_proc_info - /proc read/write handler for this host.
 * Formats adapter, per-device and per-request state into 'buffer'
 * using the legacy proc_info offset/length windowing protocol.
 * Writes (inout != 0) are rejected.
 */
static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
		char **start, off_t offset, int length, int inout)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	char *pos = buffer;
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	if (inout)		/* Has data been written to the file ? */
		return -EPERM;

	SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
	SPRINTF(" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	SPRINTF("SCSI Host Nr %i, ", host->host_no);
	SPRINTF("DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base);
	SPRINTF("irq_level 0x%04x, ", acb->irq_level);
	/* sel_timeout is in chip ticks of ~1.638ms; convert to ms */
	SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	SPRINTF("MaxID %i, MaxLUN %i, ", host->max_id, host->max_lun);
	SPRINTF("AdapterID %i\n", host->this_id);

	SPRINTF("tag_max_num %i", acb->tag_max_num);
	/*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
	SPRINTF(", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time);
	/*SPRINTF("\n"); */

	SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	SPRINTF
	    ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
	     acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
	     acb->dcb_map[6], acb->dcb_map[7]);
	SPRINTF
	    (" %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
	     acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
	     acb->dcb_map[14], acb->dcb_map[15]);

	SPRINTF
	    ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	/* one line per known device control block */
	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		SPRINTF("%02i %02i %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			SPRINTF(" %03i ns ", nego_period);
		else
			SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			/* derive MHz (integer and first decimal) from the
			 * negotiated period in ns */
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			SPRINTF(" %2i.%1i M %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			SPRINTF(" ");

		/* Add more info ... */
		SPRINTF(" %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		SPRINTF("Waiting queue timer running\n");
	else
		SPRINTF("\n");

	/* list pending and active request pids per device */
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			SPRINTF("DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			SPRINTF(" %li", srb->cmd->pid);
		if (!list_empty(&dcb->srb_going_list))
			SPRINTF("\nDCB (%02i-%i): Going : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			SPRINTF(" %li", srb->cmd->pid);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			SPRINTF("\n");
	}

	if (debug_enabled(DBG_1)) {
		SPRINTF("DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			SPRINTF("%p -> ", dcb);
		}
		SPRINTF("END\n");
	}

	/* legacy proc_info windowing: report how much of the requested
	 * [offset, offset+length) window we produced */
	*start = buffer + offset;
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	if (pos - buffer < offset)
		return 0;
	else if (pos - buffer - offset < length)
		return pos - buffer - offset;
	else
		return length;
}
/* SCSI mid-layer host template: entry points and capabilities */
static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.proc_info = dc395x_proc_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.bios_param = dc395x_bios_param,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	.unchecked_isa_dma = 0,
	.use_clustering = DISABLE_CLUSTERING,
};
  4220. /**
  4221. * banner_display - Display banner on first instance of driver
  4222. * initialized.
  4223. **/
  4224. static void banner_display(void)
  4225. {
  4226. static int banner_done = 0;
  4227. if (!banner_done)
  4228. {
  4229. dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
  4230. banner_done = 1;
  4231. }
  4232. }
/**
 * dc395x_init_one - Initialise a single instance of the adapter.
 *
 * The PCI layer will call this once for each instance of the adapter
 * that it finds in the system. The pci_dev structure indicates which
 * instance we are being called from.
 *
 * @dev: The PCI device to initialize.
 * @id: Looks like a pointer to the entry in our pci device table
 * that was actually matched by the PCI subsystem.
 *
 * Returns 0 on success, or an error code (-ve) on failure.
 **/
static int __devinit dc395x_init_one(struct pci_dev *dev,
		const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;

	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	if (pci_enable_device(dev))
	{
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* allocate scsi host information (includes out adapter) */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
	acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
	acb->scsi_host = scsi_host;
	acb->dev = dev;

	/* initialise the adapter and everything we need */
	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		goto fail;
	}

	pci_set_master(dev);

	/* get the scsi mid level to scan for new devices on the bus */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);

	return 0;

fail:
	/* adapter_uninit() tolerates the partially-initialised acb;
	 * scsi_host_put() drops the hostdata allocation */
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}
  4297. /**
  4298. * dc395x_remove_one - Called to remove a single instance of the
  4299. * adapter.
  4300. *
  4301. * @dev: The PCI device to intialize.
  4302. **/
  4303. static void __devexit dc395x_remove_one(struct pci_dev *dev)
  4304. {
  4305. struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
  4306. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
  4307. dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
  4308. scsi_remove_host(scsi_host);
  4309. adapter_uninit(acb);
  4310. pci_disable_device(dev);
  4311. scsi_host_put(scsi_host);
  4312. pci_set_drvdata(dev, NULL);
  4313. }
/* PCI IDs this driver binds to: any TRM-S1040 based board */
static struct pci_device_id dc395x_pci_table[] = {
	{
	 .vendor = PCI_VENDOR_ID_TEKRAM,
	 .device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{}			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
/* PCI driver glue: probe/remove entry points for the PCI core */
static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = __devexit_p(dc395x_remove_one),
};
  4330. /**
  4331. * dc395x_module_init - Module initialization function
  4332. *
  4333. * Used by both module and built-in driver to initialise this driver.
  4334. **/
  4335. static int __init dc395x_module_init(void)
  4336. {
  4337. return pci_module_init(&dc395x_driver);
  4338. }
/**
 * dc395x_module_exit - Module cleanup function.
 *
 * Unregisters the PCI driver; the core then calls dc395x_remove_one()
 * for every bound device.
 **/
static void __exit dc395x_module_exit(void)
{
	pci_unregister_driver(&dc395x_driver);
}
module_init(dc395x_module_init);
module_exit(dc395x_module_exit);

/* Module metadata */
MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");