/* (removed: HTML line-number navigation residue from the scraped listing) */
/*
 * dc395x.c
 *
 * Device Driver for Tekram DC395(U/UW/F), DC315(U)
 * PCI SCSI Bus Master Host Adapter
 * (SCSI chip set used Tekram ASIC TRM-S1040)
 *
 * Authors:
 *  C.L. Huang <ching@tekram.com.tw>
 *  Erich Chen <erich@tekram.com.tw>
 *  (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 *  Kurt Garloff <garloff@suse.de>
 *  (C) 1999-2000 Kurt Garloff
 *
 *  Oliver Neukum <oliver@neukum.name>
 *  Ali Akcaagac <aliakc@web.de>
 *  Jamie Lenehan <lenehan@twibble.org>
 *  (C) 2003
 *
 * License: GNU GPL
 *
 *************************************************************************
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ************************************************************************
 */
  49. #include <linux/module.h>
  50. #include <linux/moduleparam.h>
  51. #include <linux/delay.h>
  52. #include <linux/ctype.h>
  53. #include <linux/blkdev.h>
  54. #include <linux/interrupt.h>
  55. #include <linux/init.h>
  56. #include <linux/spinlock.h>
  57. #include <linux/pci.h>
  58. #include <linux/list.h>
  59. #include <linux/vmalloc.h>
  60. #include <asm/io.h>
  61. #include <scsi/scsi.h>
  62. #include <scsi/scsicam.h> /* needed for scsicam_bios_param */
  63. #include <scsi/scsi_cmnd.h>
  64. #include <scsi/scsi_device.h>
  65. #include <scsi/scsi_host.h>
  66. #include "dc395x.h"
/* Driver identification strings, used in the banner and log prefixes. */
#define DC395X_NAME	"dc395x"
#define DC395X_BANNER	"Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
#define DC395X_VERSION	"v2.05, 2004/03/08"
/*---------------------------------------------------------------------------
                                  Features
 ---------------------------------------------------------------------------*/
/*
 * Compile-time switches: define any of the following to disable the
 * corresponding optional SCSI feature in this driver (disconnect/reselect,
 * tagged command queueing, synchronous transfers, wide transfers).
 */
/*#define DC395x_NO_DISCONNECT*/
/*#define DC395x_NO_TAGQ*/
/*#define DC395x_NO_SYNC*/
/*#define DC395x_NO_WIDE*/
  80. /*---------------------------------------------------------------------------
  81. Debugging
  82. ---------------------------------------------------------------------------*/
  83. /*
  84. * Types of debugging that can be enabled and disabled
  85. */
  86. #define DBG_KG 0x0001
  87. #define DBG_0 0x0002
  88. #define DBG_1 0x0004
  89. #define DBG_SG 0x0020
  90. #define DBG_FIFO 0x0040
  91. #define DBG_PIO 0x0080
  92. /*
  93. * Set set of things to output debugging for.
  94. * Undefine to remove all debugging
  95. */
  96. /*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
  97. /*#define DEBUG_MASK DBG_0*/
  98. /*
  99. * Output a kernel mesage at the specified level and append the
  100. * driver name and a ": " to the start of the message
  101. */
  102. #define dprintkl(level, format, arg...) \
  103. printk(level DC395X_NAME ": " format , ## arg)
  104. #ifdef DEBUG_MASK
  105. /*
  106. * print a debug message - this is formated with KERN_DEBUG, then the
  107. * driver name followed by a ": " and then the message is output.
  108. * This also checks that the specified debug level is enabled before
  109. * outputing the message
  110. */
  111. #define dprintkdbg(type, format, arg...) \
  112. do { \
  113. if ((type) & (DEBUG_MASK)) \
  114. dprintkl(KERN_DEBUG , format , ## arg); \
  115. } while (0)
  116. /*
  117. * Check if the specified type of debugging is enabled
  118. */
  119. #define debug_enabled(type) ((DEBUG_MASK) & (type))
  120. #else
  121. /*
  122. * No debugging. Do nothing
  123. */
  124. #define dprintkdbg(type, format, arg...) \
  125. do {} while (0)
  126. #define debug_enabled(type) (0)
  127. #endif
  128. #ifndef PCI_VENDOR_ID_TEKRAM
  129. #define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */
  130. #endif
  131. #ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
  132. #define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391 /* Device ID */
  133. #endif
  134. #define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
  135. #define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
  136. #define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
  137. #define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
  138. #define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
  139. #define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
  140. #define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
  141. #define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
  142. /* cmd->result */
  143. #define RES_TARGET 0x000000FF /* Target State */
  144. #define RES_TARGET_LNX STATUS_MASK /* Only official ... */
  145. #define RES_ENDMSG 0x0000FF00 /* End Message */
  146. #define RES_DID 0x00FF0000 /* DID_ codes */
  147. #define RES_DRV 0xFF000000 /* DRIVER_ codes */
  148. #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
  149. #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
  150. #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
  151. #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
  152. #define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
  153. #define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
  154. #define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
  155. #define TAG_NONE 255
  156. /*
  157. * srb->segement_x is the hw sg list. It is always allocated as a
  158. * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
  159. * cross a page boundy.
  160. */
  161. #define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
/* One hardware scatter/gather list entry as consumed by the TRM-S1040. */
struct SGentry {
	u32 address;		/* bus! address */
	u32 length;		/* length of this segment in bytes */
};
/* The SEEPROM structure for TRM_S1040 */
/* Per-target settings block as stored in the adapter's SEEPROM (4 bytes). */
struct NVRamTarget {
	u8 cfg0;		/* Target configuration byte 0 */
	u8 period;		/* Target period */
	u8 cfg2;		/* Target configuration byte 2 */
	u8 cfg3;		/* Target configuration byte 3 */
};
/*
 * In-memory image of the TRM-S1040 SEEPROM contents.  The byte offsets
 * within the 128-byte EEPROM are given in the field comments, so the
 * field order and sizes here must not be changed.
 */
struct NvRamType {
	u8 sub_vendor_id[2];	/* 0,1 Sub Vendor ID */
	u8 sub_sys_id[2];	/* 2,3 Sub System ID */
	u8 sub_class;		/* 4 Sub Class */
	u8 vendor_id[2];	/* 5,6 Vendor ID */
	u8 device_id[2];	/* 7,8 Device ID */
	u8 reserved;		/* 9 Reserved */
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];
				/** 10,11,12,13
				 ** 14,15,16,17
				 ** ....
				 ** ....
				 ** 70,71,72,73
				 */
	u8 scsi_id;		/* 74 Host Adapter SCSI ID */
	u8 channel_cfg;		/* 75 Channel configuration */
	u8 delay_time;		/* 76 Power on delay time */
	u8 max_tag;		/* 77 Maximum tags */
	u8 reserved0;		/* 78 */
	u8 boot_target;		/* 79 */
	u8 boot_lun;		/* 80 */
	u8 reserved1;		/* 81 */
	u16 reserved2[22];	/* 82,..125 */
	u16 cksum;		/* 126,127 */
};
/*
 * Per-request state (srb): tracks one SCSI command through the driver,
 * including its hardware scatter/gather list and the message bytes
 * exchanged with the target.
 */
struct ScsiReqBlk {
	struct list_head list;		/* next/prev ptrs for srb lists */
	struct DeviceCtlBlk *dcb;	/* device this request belongs to */
	struct scsi_cmnd *cmd;		/* mid-layer command being serviced */
	struct SGentry *segment_x;	/* Linear array of hw sg entries (up to 64 entries) */
	u32 sg_bus_addr;		/* Bus address of sg list (ie, of segment_x) */
	u8 sg_count;			/* No of HW sg entries for this request */
	u8 sg_index;			/* Index of HW sg entry for this request */
	u32 total_xfer_length;		/* Total number of bytes remaining to be transfered */
	unsigned char *virt_addr;	/* Virtual address of current transfer position */
	/*
	 * The sense buffer handling function, request_sense, uses
	 * the first hw sg entry (segment_x[0]) and the transfer
	 * length (total_xfer_length). While doing this it stores the
	 * original values into the last sg hw list
	 * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
	 * total_xfer_length in xferred. These values are restored in
	 * pci_unmap_srb_sense. This is the only place xferred is used.
	 */
	unsigned char *virt_addr_req;	/* Saved virtual address of the request buffer */
	u32 xferred;			/* Saved copy of total_xfer_length */
	u16 state;			/* request state flags */
	u8 msgin_buf[6];		/* message bytes received from target */
	u8 msgout_buf[6];		/* message bytes queued for the target */
	u8 adapter_status;
	u8 target_status;
	u8 msg_count;			/* presumably valid bytes in msgout_buf — confirm at use sites */
	u8 end_message;
	u8 tag_number;			/* presumably TAG_NONE (255) when untagged — confirm */
	u8 status;
	u8 retry_count;
	u8 flag;
	u8 scsi_phase;			/* current SCSI bus phase for this request */
};
/*
 * Per-device state (dcb): one block per target/LUN, holding its request
 * queues and the transfer settings negotiated with that device.
 */
struct DeviceCtlBlk {
	struct list_head list;		/* next/prev ptrs for the dcb list */
	struct AdapterCtlBlk *acb;	/* owning adapter */
	struct list_head srb_going_list;	/* head of going srb list */
	struct list_head srb_waiting_list;	/* head of waiting srb list */
	struct ScsiReqBlk *active_srb;	/* request currently on the bus, if any */
	u32 tag_mask;			/* presumably a bitmask of tag numbers in flight — confirm */
	u16 max_command;
	u8 target_id;		/* SCSI Target ID  (SCSI Only) */
	u8 target_lun;		/* SCSI Log.  Unit (SCSI Only) */
	u8 identify_msg;
	u8 dev_mode;
	u8 inquiry7;		/* To store Inquiry flags */
	u8 sync_mode;		/* 0:async mode */
	u8 min_nego_period;	/* for nego. */
	u8 sync_period;		/* for reg.  */
	u8 sync_offset;		/* for reg. and nego.(low nibble) */
	u8 flag;
	u8 dev_type;
	u8 init_tcq_flag;
};
/*
 * Per-adapter state (acb): one block per TRM-S1040 instance, embedding
 * the srb pool and a copy of the adapter's eeprom settings.
 */
struct AdapterCtlBlk {
	struct Scsi_Host *scsi_host;	/* mid-layer host this adapter registered */
	unsigned long io_port_base;	/* base of the chip's I/O port window */
	unsigned long io_port_len;	/* length of the I/O port window */
	struct list_head dcb_list;	/* head of going dcb list */
	struct DeviceCtlBlk *dcb_run_robin;	/* cursor for cycling through dcb_list */
	struct DeviceCtlBlk *active_dcb;	/* device currently owning the bus, if any */
	struct list_head srb_free_list;	/* head of free srb list */
	struct ScsiReqBlk *tmp_srb;	/* presumably points at the embedded srb below — confirm */
	struct timer_list waiting_timer;	/* drives waiting_timeout() */
	struct timer_list selto_timer;	/* presumably the selection-timeout timer (see sel_timeout) — confirm */
	u16 srb_count;
	u8 sel_timeout;
	unsigned int irq_level;		/* IRQ line assigned to this adapter */
	u8 tag_max_num;
	u8 acb_flag;
	u8 gmode2;
	u8 config;
	u8 lun_chk;
	u8 scan_devices;
	u8 hostid_bit;
	u8 dcb_map[DC395x_MAX_SCSI_ID];	/* one entry per possible target id */
	struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
	struct pci_dev *dev;		/* underlying PCI device */
	u8 msg_len;
	struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];	/* statically allocated srb pool */
	struct ScsiReqBlk srb;
	struct NvRamType eeprom;	/* eeprom settings for this adapter */
};
/*---------------------------------------------------------------------------
                            Forward declarations
 ---------------------------------------------------------------------------*/
/*
 * SCSI bus-phase handlers.  Each phase has a "phase0" and a "phase1"
 * variant; NOTE(review): presumably phase0 handles leaving a phase and
 * phase1 entering one — confirm against the dispatch tables defined
 * later in this file.
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
/* Placeholder phase handlers that do nothing. */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
/* Chip setup, bus control and data-transfer helpers. */
static void set_basic_config(struct AdapterCtlBlk *acb);
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
static void reset_scsi_bus(struct AdapterCtlBlk *acb);
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir);
static void disconnect(struct AdapterCtlBlk *acb);
static void reselect(struct AdapterCtlBlk *acb);
static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
/* Request construction, completion and error handling. */
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
		struct scsi_cmnd *cmd, u8 force);
static void scsi_reset_detect(struct AdapterCtlBlk *acb);
static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void set_xfer_rate(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb);
static void waiting_timeout(unsigned long ptr);
  340. /*---------------------------------------------------------------------------
  341. Static Data
  342. ---------------------------------------------------------------------------*/
/* NOTE(review): file-scope scratch value; presumably tracks the most
 * recently negotiated sync offset for the phase handlers — its users
 * are outside this chunk, confirm before relying on it. */
static u16 current_sync_offset = 0;
  344. static void *dc395x_scsi_phase0[] = {
  345. data_out_phase0,/* phase:0 */
  346. data_in_phase0, /* phase:1 */
  347. command_phase0, /* phase:2 */
  348. status_phase0, /* phase:3 */
  349. nop0, /* phase:4 PH_BUS_FREE .. initial phase */
  350. nop0, /* phase:5 PH_BUS_FREE .. initial phase */
  351. msgout_phase0, /* phase:6 */
  352. msgin_phase0, /* phase:7 */
  353. };
  354. static void *dc395x_scsi_phase1[] = {
  355. data_out_phase1,/* phase:0 */
  356. data_in_phase1, /* phase:1 */
  357. command_phase1, /* phase:2 */
  358. status_phase1, /* phase:3 */
  359. nop1, /* phase:4 PH_BUS_FREE .. initial phase */
  360. nop1, /* phase:5 PH_BUS_FREE .. initial phase */
  361. msgout_phase1, /* phase:6 */
  362. msgin_phase1, /* phase:7 */
  363. };
  364. /*
  365. *Fast20: 000 50ns, 20.0 MHz
  366. * 001 75ns, 13.3 MHz
  367. * 010 100ns, 10.0 MHz
  368. * 011 125ns, 8.0 MHz
  369. * 100 150ns, 6.6 MHz
  370. * 101 175ns, 5.7 MHz
  371. * 110 200ns, 5.0 MHz
  372. * 111 250ns, 4.0 MHz
  373. *
  374. *Fast40(LVDS): 000 25ns, 40.0 MHz
  375. * 001 50ns, 20.0 MHz
  376. * 010 75ns, 13.3 MHz
  377. * 011 100ns, 10.0 MHz
  378. * 100 125ns, 8.0 MHz
  379. * 101 150ns, 6.6 MHz
  380. * 110 175ns, 5.7 MHz
  381. * 111 200ns, 5.0 MHz
  382. */
/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
/* chip clock-period codes, indexed by the 0-7 eeprom speed setting;
 * see the real-period note above for the ns value each code yields */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
/* matching bus speeds in units of 100 kHz (200 -> 20.0 MHz), same index */
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
  387. /*---------------------------------------------------------------------------
  388. Configuration
  389. ---------------------------------------------------------------------------*/
  390. /*
  391. * Module/boot parameters currently effect *all* instances of the
  392. * card in the system.
  393. */
  394. /*
  395. * Command line parameters are stored in a structure below.
  396. * These are the index's into the structure for the various
  397. * command line options.
  398. */
  399. #define CFG_ADAPTER_ID 0
  400. #define CFG_MAX_SPEED 1
  401. #define CFG_DEV_MODE 2
  402. #define CFG_ADAPTER_MODE 3
  403. #define CFG_TAGS 4
  404. #define CFG_RESET_DELAY 5
  405. #define CFG_NUM 6 /* number of configuration items */
  406. /*
  407. * Value used to indicate that a command line override
  408. * hasn't been used to modify the value.
  409. */
  410. #define CFG_PARAM_UNSET -1
  411. /*
  412. * Hold command line parameters.
  413. */
struct ParameterData {
	int value;	/* current value; CFG_PARAM_UNSET until overridden on the command line */
	int min;	/* minimum allowed value */
	int max;	/* maximum allowed value */
	int def;	/* default, installed by fix_settings() when value is out of range */
	int safe;	/* conservative value installed when the "safe" module option is set */
};
  421. static struct ParameterData __devinitdata cfg_data[] = {
  422. { /* adapter id */
  423. CFG_PARAM_UNSET,
  424. 0,
  425. 15,
  426. 7,
  427. 7
  428. },
  429. { /* max speed */
  430. CFG_PARAM_UNSET,
  431. 0,
  432. 7,
  433. 1, /* 13.3Mhz */
  434. 4, /* 6.7Hmz */
  435. },
  436. { /* dev mode */
  437. CFG_PARAM_UNSET,
  438. 0,
  439. 0x3f,
  440. NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
  441. NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
  442. NTC_DO_SEND_START,
  443. NTC_DO_PARITY_CHK | NTC_DO_SEND_START
  444. },
  445. { /* adapter mode */
  446. CFG_PARAM_UNSET,
  447. 0,
  448. 0x2f,
  449. #ifdef CONFIG_SCSI_MULTI_LUN
  450. NAC_SCANLUN |
  451. #endif
  452. NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
  453. /*| NAC_ACTIVE_NEG*/,
  454. NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
  455. },
  456. { /* tags */
  457. CFG_PARAM_UNSET,
  458. 0,
  459. 5,
  460. 3, /* 16 tags (??) */
  461. 2,
  462. },
  463. { /* reset delay */
  464. CFG_PARAM_UNSET,
  465. 0,
  466. 180,
  467. 1, /* 1 second */
  468. 10, /* 10 seconds */
  469. }
  470. };
  471. /*
* Safe settings. If set to zero the BIOS/default values with
  473. * command line overrides will be used. If set to 1 then safe and
  474. * slow settings will be used.
  475. */
  476. static int use_safe_settings = 0;
  477. module_param_named(safe, use_safe_settings, bool, 0);
  478. MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
  479. module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
  480. MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
  481. module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
  482. MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
  483. module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
  484. MODULE_PARM_DESC(dev_mode, "Device mode.");
  485. module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
  486. MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
  487. module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
  488. MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
  489. module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
  490. MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
  491. /**
  492. * set_safe_settings - if the use_safe_settings option is set then
  493. * set all values to the safe and slow values.
  494. **/
  495. static void __devinit set_safe_settings(void)
  496. {
  497. if (use_safe_settings)
  498. {
  499. int i;
  500. dprintkl(KERN_INFO, "Using safe settings.\n");
  501. for (i = 0; i < CFG_NUM; i++)
  502. {
  503. cfg_data[i].value = cfg_data[i].safe;
  504. }
  505. }
  506. }
  507. /**
  508. * fix_settings - reset any boot parameters which are out of range
  509. * back to the default values.
  510. **/
  511. static void __devinit fix_settings(void)
  512. {
  513. int i;
  514. dprintkdbg(DBG_1,
  515. "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
  516. "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
  517. cfg_data[CFG_ADAPTER_ID].value,
  518. cfg_data[CFG_MAX_SPEED].value,
  519. cfg_data[CFG_DEV_MODE].value,
  520. cfg_data[CFG_ADAPTER_MODE].value,
  521. cfg_data[CFG_TAGS].value,
  522. cfg_data[CFG_RESET_DELAY].value);
  523. for (i = 0; i < CFG_NUM; i++)
  524. {
  525. if (cfg_data[i].value < cfg_data[i].min
  526. || cfg_data[i].value > cfg_data[i].max)
  527. cfg_data[i].value = cfg_data[i].def;
  528. }
  529. }
  530. /*
  531. * Mapping from the eeprom delay index value (index into this array)
* to the number of actual seconds that the delay should be for.
  533. */
/* delay in seconds for each eeprom delay code 0-7 */
static char __devinitdata eeprom_index_to_delay_map[] =
	{ 1, 3, 5, 10, 16, 30, 60, 120 };
/**
 * eeprom_index_to_delay - Take the eeprom delay setting and convert it
 * into a number of seconds.
 *
 * @eeprom: The eeprom structure in which we find the delay index to map.
 **/
static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
{
	/* delay_time initially holds a 0-7 index; replace it in place
	 * with the number of seconds that index stands for */
	eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
}
  546. /**
  547. * delay_to_eeprom_index - Take a delay in seconds and return the
  548. * closest eeprom index which will delay for at least that amount of
  549. * seconds.
  550. *
  551. * @delay: The delay, in seconds, to find the eeprom index for.
  552. **/
  553. static int __devinit delay_to_eeprom_index(int delay)
  554. {
  555. u8 idx = 0;
  556. while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
  557. idx++;
  558. return idx;
  559. }
  560. /**
  561. * eeprom_override - Override the eeprom settings, in the provided
  562. * eeprom structure, with values that have been set on the command
  563. * line.
  564. *
  565. * @eeprom: The eeprom data to override with command line options.
  566. **/
  567. static void __devinit eeprom_override(struct NvRamType *eeprom)
  568. {
  569. u8 id;
  570. /* Adapter Settings */
  571. if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
  572. eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
  573. if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
  574. eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
  575. if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
  576. eeprom->delay_time = delay_to_eeprom_index(
  577. cfg_data[CFG_RESET_DELAY].value);
  578. if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
  579. eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
  580. /* Device Settings */
  581. for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
  582. if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
  583. eeprom->target[id].cfg0 =
  584. (u8)cfg_data[CFG_DEV_MODE].value;
  585. if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
  586. eeprom->target[id].period =
  587. (u8)cfg_data[CFG_MAX_SPEED].value;
  588. }
  589. }
  590. /*---------------------------------------------------------------------------
  591. ---------------------------------------------------------------------------*/
  592. static unsigned int list_size(struct list_head *head)
  593. {
  594. unsigned int count = 0;
  595. struct list_head *pos;
  596. list_for_each(pos, head)
  597. count++;
  598. return count;
  599. }
/*
 * dcb_get_next - return the DCB that follows @pos on @head, treating
 * the list as circular (round-robin helper).
 *
 * Returns NULL only if the list is empty. If @pos is the last entry,
 * or @pos is not found on the list at all, the first entry is
 * returned (wraparound).
 */
static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
		struct DeviceCtlBlk *pos)
{
	int use_next = 0;
	struct DeviceCtlBlk* next = NULL;
	struct DeviceCtlBlk* i;
	if (list_empty(head))
		return NULL;
	/* find supplied dcb and then select the next one */
	list_for_each_entry(i, head, list)
		if (use_next) {
			next = i;
			break;
		} else if (i == pos) {
			use_next = 1;
		}
	/* if no next one take the head one (ie, wraparound) */
	if (!next)
		list_for_each_entry(i, head, list) {
			next = i;
			break;
		}
	return next;
}
  624. static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  625. {
  626. if (srb->tag_number < 255) {
  627. dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
  628. srb->tag_number = 255;
  629. }
  630. }
  631. /* Find cmd in SRB list */
  632. static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
  633. struct list_head *head)
  634. {
  635. struct ScsiReqBlk *i;
  636. list_for_each_entry(i, head, list)
  637. if (i->cmd == cmd)
  638. return i;
  639. return NULL;
  640. }
  641. static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
  642. {
  643. struct list_head *head = &acb->srb_free_list;
  644. struct ScsiReqBlk *srb = NULL;
  645. if (!list_empty(head)) {
  646. srb = list_entry(head->next, struct ScsiReqBlk, list);
  647. list_del(head->next);
  648. dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
  649. }
  650. return srb;
  651. }
/* Return an SRB to the tail of the adapter's free list. */
static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
	list_add_tail(&srb->list, &acb->srb_free_list);
}
/* Put an SRB at the HEAD of the device's waiting list (so it is the
 * next one tried for this device). */
static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_insert: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_add(&srb->list, &dcb->srb_waiting_list);
}
/* Append an SRB to the TAIL of the device's waiting list (preserves
 * submission order). */
static void srb_waiting_append(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_append: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_waiting_list);
}
/* Append an SRB to the device's going list (commands issued to the bus). */
static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_going_append: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_going_list);
}
  677. static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  678. {
  679. struct ScsiReqBlk *i;
  680. struct ScsiReqBlk *tmp;
  681. dprintkdbg(DBG_0, "srb_going_remove: (pid#%li) <%02i-%i> srb=%p\n",
  682. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  683. list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
  684. if (i == srb) {
  685. list_del(&srb->list);
  686. break;
  687. }
  688. }
  689. static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
  690. struct ScsiReqBlk *srb)
  691. {
  692. struct ScsiReqBlk *i;
  693. struct ScsiReqBlk *tmp;
  694. dprintkdbg(DBG_0, "srb_waiting_remove: (pid#%li) <%02i-%i> srb=%p\n",
  695. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  696. list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
  697. if (i == srb) {
  698. list_del(&srb->list);
  699. break;
  700. }
  701. }
/* Move an SRB from the device's going list back onto its waiting list
 * (e.g. when the command must be retried later). */
static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_going_to_waiting_move: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_waiting_list);
}
/* Move an SRB from the device's waiting list onto its going list
 * (the command has been accepted by the bus). */
static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_waiting_to_going_move: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_going_list);
}
/*
 * waiting_set_timer - arm the adapter's waiting-queue timer so that
 * waiting_timeout() runs after roughly @to jiffies. Does nothing if
 * the timer is already pending. If a bus-reset settle period
 * (scsi_host->last_reset) reaches further into the future, the expiry
 * is pushed out to just past that period instead.
 */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
	if (timer_pending(&acb->waiting_timer))
		return;
	init_timer(&acb->waiting_timer);
	acb->waiting_timer.function = waiting_timeout;
	acb->waiting_timer.data = (unsigned long) acb;
	/* don't wake the queue while the post-reset delay is running */
	if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2))
		acb->waiting_timer.expires =
			acb->scsi_host->last_reset - HZ / 2 + 1;
	else
		acb->waiting_timer.expires = jiffies + to + 1;
	add_timer(&acb->waiting_timer);
}
/*
 * waiting_process_next - send the next command from the waiting lists
 * to the bus. Devices are scanned round-robin starting at
 * acb->dcb_run_robin so every device gets a fair share of the bus.
 * Does nothing while a device is active on the bus or a reset is in
 * progress.
 */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;
	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	if (list_empty(dcb_list_head))
		return;
	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! Fall back to the first dcb. */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}
	/*
	 * Loop over the dcbs, but we start somewhere (potentially) in
	 * the middle of the loop so we need to do the wraparound manually.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;
		/* Make sure another device gets scheduled next time ... */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);
		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* nothing waiting, or device already at its
			 * command limit: move to next dcb */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);
			/* Try to send to the bus */
			if (!start_scsi(acb, pos, srb))
				srb_waiting_to_going_move(pos, srb);
			else
				/* bus busy: retry shortly (HZ/50 = ~20ms) */
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
  788. /* Wake up waiting queue */
  789. static void waiting_timeout(unsigned long ptr)
  790. {
  791. unsigned long flags;
  792. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
  793. dprintkdbg(DBG_1,
  794. "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
  795. DC395x_LOCK_IO(acb->scsi_host, flags);
  796. waiting_process_next(acb);
  797. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  798. }
/* Get the DCB for a given ID/LUN combination; callers treat a NULL
 * entry in the children table as "no such device". */
static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
{
	return acb->children[id][lun];
}
  804. /* Send SCSI Request Block (srb) to adapter (acb) */
  805. static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  806. {
  807. struct DeviceCtlBlk *dcb = srb->dcb;
  808. if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
  809. acb->active_dcb ||
  810. (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
  811. srb_waiting_append(dcb, srb);
  812. waiting_process_next(acb);
  813. return;
  814. }
  815. if (!start_scsi(acb, dcb, srb))
  816. srb_going_append(dcb, srb);
  817. else {
  818. srb_waiting_insert(dcb, srb);
  819. waiting_set_timer(acb, HZ / 50);
  820. }
  821. }
  822. static inline void pio_trigger(void)
  823. {
  824. static int feedback_requested;
  825. if (!feedback_requested) {
  826. feedback_requested = 1;
  827. printk(KERN_WARNING "%s: Please, contact <linux-scsi@vger.kernel.org> "
  828. "to help improve support for your system.\n", __FILE__);
  829. }
  830. }
/*
 * build_srb - prepare the SRB for being sent to device dcb with
 * command *cmd: reset all per-request state and set up the DMA
 * mapping for the data phase (none, scatter-gather, or single buffer).
 */
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	enum dma_data_direction dir = cmd->sc_data_direction;
	dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n",
		cmd->pid, dcb->target_id, dcb->target_lun);
	/* reset all per-request state */
	srb->dcb = dcb;
	srb->cmd = cmd;
	srb->sg_count = 0;
	srb->total_xfer_length = 0;
	srb->sg_bus_addr = 0;
	srb->virt_addr = NULL;
	srb->sg_index = 0;
	srb->adapter_status = 0;
	srb->target_status = 0;
	srb->msg_count = 0;
	srb->status = 0;
	srb->flag = 0;
	srb->state = 0;
	srb->retry_count = 0;
	srb->tag_number = TAG_NONE;
	srb->scsi_phase = PH_BUS_FREE; /* initial phase */
	srb->end_message = 0;
	if (dir == PCI_DMA_NONE || !cmd->request_buffer) {
		/* no data transfer for this command */
		dprintkdbg(DBG_0,
			"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
			cmd->bufflen, cmd->request_buffer,
			cmd->use_sg, srb->segment_x[0].address);
	} else if (cmd->use_sg) {
		/* scatter-gather: map the sg list and copy each element's
		 * bus address/length into the adapter's segment_x table */
		int i;
		u32 reqlen = cmd->request_bufflen;
		struct scatterlist *sl = (struct scatterlist *)
			cmd->request_buffer;
		struct SGentry *sgp = srb->segment_x;
		srb->sg_count = pci_map_sg(dcb->acb->dev, sl, cmd->use_sg,
					   dir);
		dprintkdbg(DBG_0,
			"build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
			reqlen, cmd->request_buffer, cmd->use_sg,
			srb->sg_count);
		srb->virt_addr = page_address(sl->page);
		for (i = 0; i < srb->sg_count; i++) {
			u32 busaddr = (u32)sg_dma_address(&sl[i]);
			u32 seglen = (u32)sl[i].length;
			sgp[i].address = busaddr;
			sgp[i].length = seglen;
			srb->total_xfer_length += seglen;
		}
		/* sgp now points at the last segment */
		sgp += srb->sg_count - 1;
		/*
		 * adjust last page if too big as it is allocated
		 * on even page boundaries
		 */
		if (srb->total_xfer_length > reqlen) {
			sgp->length -= (srb->total_xfer_length - reqlen);
			srb->total_xfer_length = reqlen;
		}
		/* Fixup for WIDE padding - make sure length is even */
		if (dcb->sync_period & WIDE_SYNC &&
		    srb->total_xfer_length % 2) {
			srb->total_xfer_length++;
			sgp->length++;
		}
		/* map the segment table itself so the chip can fetch it */
		srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
						  srb->segment_x,
						  SEGMENTX_LEN,
						  PCI_DMA_TODEVICE);
		dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
			srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
	} else {
		/* single linear buffer: one segment_x entry */
		srb->total_xfer_length = cmd->request_bufflen;
		srb->sg_count = 1;
		srb->segment_x[0].address =
			pci_map_single(dcb->acb->dev, cmd->request_buffer,
				       srb->total_xfer_length, dir);
		/* Fixup for WIDE padding - make sure length is even */
		if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2)
			srb->total_xfer_length++;
		srb->segment_x[0].length = srb->total_xfer_length;
		srb->virt_addr = cmd->request_buffer;
		dprintkdbg(DBG_0,
			"build_srb: [1] len=%d buf=%p use_sg=%d map=%08x\n",
			srb->total_xfer_length, cmd->request_buffer,
			cmd->use_sg, srb->segment_x[0].address);
	}
}
/**
 * dc395x_queue_command - queue scsi command passed from the mid
 * layer, invoke 'done' on completion
 *
 * @cmd: pointer to scsi command object
 * @done: function pointer to be invoked on completion
 *
 * Returns 1 if the adapter (host) is busy, else returns 0. One
 * reason for an adapter to be busy is that the number
 * of outstanding queued commands is already equal to
 * struct Scsi_Host::can_queue .
 *
 * Required: if struct Scsi_Host::can_queue is ever non-zero
 * then this function is required.
 *
 * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
 * and is expected to be held on return.
 *
 **/
static int dc395x_queue_command(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkdbg(DBG_0, "queue_command: (pid#%li) <%02i-%i> cmnd=0x%02x\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
	/* Assume BAD_TARGET; will be cleared later */
	cmd->result = DID_BAD_TARGET << 16;
	/* ignore invalid targets */
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun > 31) {
		goto complete;
	}
	/* does the specified lun on the specified device exist */
	if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
		dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}
	/* do we have a DCB for the device */
	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		/* should never happen */
		dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}
	/* set callback and clear result in the command */
	cmd->scsi_done = done;
	cmd->result = 0;
	srb = srb_get_free(acb);
	if (!srb)
	{
		/*
		 * Return 1 since we are unable to queue this command at this
		 * point in time.
		 */
		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
		return 1;
	}
	build_srb(cmd, dcb, srb);
	if (!list_empty(&dcb->srb_waiting_list)) {
		/* append to waiting queue so earlier commands for this
		 * device keep their ordering */
		srb_waiting_append(dcb, srb);
		waiting_process_next(acb);
	} else {
		/* process immediately */
		send_srb(acb, srb);
	}
	dprintkdbg(DBG_1, "queue_command: (pid#%li) done\n", cmd->pid);
	return 0;
complete:
	/*
	 * Complete the command immediately, and then return 0 to
	 * indicate that we have handled the command. This is usually
	 * done when the command is for things like non existent
	 * devices.
	 */
	done(cmd);
	return 0;
}
  1001. /*
  1002. * Return the disk geometry for the given SCSI device.
  1003. */
  1004. static int dc395x_bios_param(struct scsi_device *sdev,
  1005. struct block_device *bdev, sector_t capacity, int *info)
  1006. {
  1007. #ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
  1008. int heads, sectors, cylinders;
  1009. struct AdapterCtlBlk *acb;
  1010. int size = capacity;
  1011. dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
  1012. acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
  1013. heads = 64;
  1014. sectors = 32;
  1015. cylinders = size / (heads * sectors);
  1016. if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
  1017. heads = 255;
  1018. sectors = 63;
  1019. cylinders = size / (heads * sectors);
  1020. }
  1021. geom[0] = heads;
  1022. geom[1] = sectors;
  1023. geom[2] = cylinders;
  1024. return 0;
  1025. #else
  1026. return scsicam_bios_param(bdev, capacity, info);
  1027. #endif
  1028. }
/*
 * dump_register_info - log a diagnostic dump of the current SRB state
 * plus the chip's SCSI, DMA and general register banks and the PCI
 * status word. @dcb and @srb may be NULL; the currently active ones
 * are substituted when available.
 */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		if (!srb->cmd)
			/* an SRB without a command indicates list corruption */
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p (pid#%li) "
				"cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd, srb->cmd->pid,
				srb->cmd->cmnd[0], srb->cmd->device->id,
				srb->cmd->device->lun);
		printk("  sglist=%p cnt=%i idx=%i len=%i\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk("  state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	/* SCSI core registers */
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	/* DMA engine registers */
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	/* general control/status registers and PCI status */
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
/*
 * clear_fifo - clear the SCSI FIFO via the DO_CLRFIFO control bit.
 * With DBG_FIFO debugging compiled in, first report how many bytes
 * are about to be discarded and the current bus signals; @txt names
 * the caller in that message.
 */
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
#if debug_enabled(DBG_FIFO)
	u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
	/* bit 0x40 of FIFOCNT set means the FIFO is already empty */
	if (!(fifocnt & 0x40))
		dprintkdbg(DBG_FIFO,
			"clear_fifo: (%i bytes) on phase %02x in %s\n",
			fifocnt & 0x3f, lines, txt);
#endif
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
  1105. static void reset_dev_param(struct AdapterCtlBlk *acb)
  1106. {
  1107. struct DeviceCtlBlk *dcb;
  1108. struct NvRamType *eeprom = &acb->eeprom;
  1109. dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
  1110. list_for_each_entry(dcb, &acb->dcb_list, list) {
  1111. u8 period_index;
  1112. dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
  1113. dcb->sync_period = 0;
  1114. dcb->sync_offset = 0;
  1115. dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
  1116. period_index = eeprom->target[dcb->target_id].period & 0x07;
  1117. dcb->min_nego_period = clock_period[period_index];
  1118. if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
  1119. || !(acb->config & HCC_WIDE_CARD))
  1120. dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
  1121. }
  1122. }
/*
 * __dc395x_eh_bus_reset - perform a hard reset on the SCSI bus:
 * reset the chip's SCSI and DMA modules, reset the bus, restore the
 * basic chip configuration and per-device parameters, and complete
 * all outstanding commands with DID_RESET.
 *
 * Caller must hold the host lock (see dc395x_eh_bus_reset wrapper).
 *
 * @cmd - some command for this host (for fetching hooks)
 * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
 */
static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkl(KERN_INFO,
		"eh_bus_reset: (pid#%li) target=<%02i-%i> cmd=%p\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd);
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	/*
	 * disable interrupt
	 */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	reset_scsi_bus(acb);
	udelay(500);
	/* We may be in serious trouble. Wait some seconds */
	acb->scsi_host->last_reset =
	    jiffies + 3 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;
	/*
	 * re-enable interrupt
	 */
	/* Clear SCSI FIFO */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	clear_fifo(acb, "eh_bus_reset");
	/* Delete pending IRQ (reading the status register acks it) */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	set_basic_config(acb);
	reset_dev_param(acb);
	/* fail everything that was in flight with DID_RESET */
	doing_srb_done(acb, DID_RESET, cmd, 0);
	acb->active_dcb = NULL;
	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE ,RESET_DEV */
	waiting_process_next(acb);
	return SUCCESS;
}
  1166. static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  1167. {
  1168. int rc;
  1169. spin_lock_irq(cmd->device->host->host_lock);
  1170. rc = __dc395x_eh_bus_reset(cmd);
  1171. spin_unlock_irq(cmd->device->host->host_lock);
  1172. return rc;
  1173. }
  1174. /*
  1175. * abort an errant SCSI command
  1176. * @cmd - command to be aborted
  1177. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  1178. */
  1179. static int dc395x_eh_abort(struct scsi_cmnd *cmd)
  1180. {
  1181. /*
  1182. * Look into our command queues: If it has not been sent already,
  1183. * we remove it and return success. Otherwise fail.
  1184. */
  1185. struct AdapterCtlBlk *acb =
  1186. (struct AdapterCtlBlk *)cmd->device->host->hostdata;
  1187. struct DeviceCtlBlk *dcb;
  1188. struct ScsiReqBlk *srb;
  1189. dprintkl(KERN_INFO, "eh_abort: (pid#%li) target=<%02i-%i> cmd=%p\n",
  1190. cmd->pid, cmd->device->id, cmd->device->lun, cmd);
  1191. dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
  1192. if (!dcb) {
  1193. dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
  1194. return FAILED;
  1195. }
  1196. srb = find_cmd(cmd, &dcb->srb_waiting_list);
  1197. if (srb) {
  1198. srb_waiting_remove(dcb, srb);
  1199. pci_unmap_srb_sense(acb, srb);
  1200. pci_unmap_srb(acb, srb);
  1201. free_tag(dcb, srb);
  1202. srb_free_insert(acb, srb);
  1203. dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
  1204. cmd->result = DID_ABORT << 16;
  1205. return SUCCESS;
  1206. }
  1207. srb = find_cmd(cmd, &dcb->srb_going_list);
  1208. if (srb) {
  1209. dprintkl(KERN_DEBUG, "eh_abort: Command in progress");
  1210. /* XXX: Should abort the command here */
  1211. } else {
  1212. dprintkl(KERN_DEBUG, "eh_abort: Command not found");
  1213. }
  1214. return FAILED;
  1215. }
/* SDTR */
/*
 * build_sdtr - append an extended SDTR (synchronous transfer) message
 * to srb->msgout_buf and mark the SRB as negotiating sync mode.
 */
static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	/* Buffer may only hold the identify message before us */
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
		/* Sync nego disabled: offer async (offset 0, 200ns period) */
		dcb->sync_offset = 0;
		dcb->min_nego_period = 200 >> 2;
	} else if (dcb->sync_offset == 0)
		dcb->sync_offset = SYNC_NEGO_OFFSET;

	*ptr++ = MSG_EXTENDED;	/* (01h) */
	*ptr++ = 3;		/* length */
	*ptr++ = EXTENDED_SDTR;	/* (01h) */
	*ptr++ = dcb->min_nego_period;	/* Transfer period (in 4ns) */
	*ptr++ = dcb->sync_offset;	/* Transfer offset (max. REQ/ACK dist) */
	srb->msg_count += 5;
	srb->state |= SRB_DO_SYNC_NEGO;
}
  1241. /* WDTR */
  1242. static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1243. struct ScsiReqBlk *srb)
  1244. {
  1245. u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
  1246. (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
  1247. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1248. if (srb->msg_count > 1) {
  1249. dprintkl(KERN_INFO,
  1250. "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1251. srb->msg_count, srb->msgout_buf[0],
  1252. srb->msgout_buf[1]);
  1253. return;
  1254. }
  1255. *ptr++ = MSG_EXTENDED; /* (01h) */
  1256. *ptr++ = 2; /* length */
  1257. *ptr++ = EXTENDED_WDTR; /* (03h) */
  1258. *ptr++ = wide;
  1259. srb->msg_count += 4;
  1260. srb->state |= SRB_DO_WIDE_NEGO;
  1261. }
#if 0
/* NOTE: this whole workaround is compiled out (#if 0); kept for reference. */
/* Timer to work around chip flaw: When selecting and the bus is
 * busy, we sometimes miss a Selection timeout IRQ */
void selection_timeout_missed(unsigned long ptr);
/* Sets the timer to wake us up */
static void selto_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	/* Don't fire before the post-reset quiet period has passed */
	if (time_before
	    (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->scsi_host->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}

/* Timer callback: fake the disconnect the chip failed to signal */
void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
/*
 * start_scsi - try to start a command on the SCSI bus
 * @acb: the adapter
 * @dcb: the target device
 * @srb: the request to start
 *
 * Programs the chip with target id, sync parameters, identify/tag/nego
 * messages and the CDB, then issues the selection command.
 * Returns 0 if the command was started (dcb/srb become active),
 * 1 if the bus/chip was busy and the caller should retry later.
 */
static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
		struct ScsiReqBlk* srb)
{
	u16 s_stat2, return_code;
	u8 s_stat, scsicommand, i, identify_message;
	u8 *ptr;
	dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	srb->tag_number = TAG_NONE;	/* acb->tag_max_num: had error read in eeprom */

	s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	s_stat2 = 0;
	s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
#if 1
	/* Bus signal says busy - do not even try to select */
	if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
		dprintkdbg(DBG_KG, "start_scsi: (pid#%li) BUSY %02x %04x\n",
			srb->cmd->pid, s_stat, s_stat2);
		/*
		 * Try anyway?
		 *
		 * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
		 * Timeout, a Disconnect or a Reselction IRQ, so we would be screwed!
		 * (This is likely to be a bug in the hardware. Obviously, most people
		 *  only have one initiator per SCSI bus.)
		 * Instead let this fail and have the timer make sure the command is
		 * tried again after a short time
		 */
		/*selto_timer (acb); */
		return 1;
	}
#endif
	/* Only one command may be active on the bus at a time */
	if (acb->active_dcb) {
		dprintkl(KERN_DEBUG, "start_scsi: (pid#%li) Attempt to start a"
			"command while another command (pid#%li) is active.",
			srb->cmd->pid,
			acb->active_dcb->active_srb ?
			acb->active_dcb->active_srb->cmd->pid : 0);
		return 1;
	}
	/* An unhandled interrupt is pending - process that first */
	if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
		dprintkdbg(DBG_KG, "start_scsi: (pid#%li) Failed (busy)\n",
			srb->cmd->pid);
		return 1;
	}
	/* Allow starting of SCSI commands half a second before we allow the mid-level
	 * to queue them again after a reset */
	if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) {
		dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
		return 1;
	}

	/* Flush FIFO */
	clear_fifo(acb, "start_scsi");
	/* Program host id, target id and the negotiated sync parameters */
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	identify_message = dcb->identify_msg;
	/*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
	/* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
	if (srb->flag & AUTO_REQSENSE)
		identify_message &= 0xBF;

	/*
	 * For INQUIRY/REQUEST_SENSE/auto-sense on LUN 0 with pending
	 * wide or sync negotiation: select with ATN and stop, so the
	 * nego message can be sent first (no CDB queued yet).
	 */
	if (((srb->cmd->cmnd[0] == INQUIRY)
	     || (srb->cmd->cmnd[0] == REQUEST_SENSE)
	     || (srb->flag & AUTO_REQSENSE))
	    && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
		 && !(dcb->sync_mode & WIDE_NEGO_DONE))
		|| ((dcb->sync_mode & SYNC_NEGO_ENABLE)
		    && !(dcb->sync_mode & SYNC_NEGO_DONE)))
	    && (dcb->target_lun == 0)) {
		srb->msgout_buf[0] = identify_message;
		srb->msg_count = 1;
		scsicommand = SCMD_SEL_ATNSTOP;
		srb->state = SRB_MSGOUT;
#ifndef SYNC_FIRST
		if (dcb->sync_mode & WIDE_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_WBUS16) {
			build_wdtr(acb, dcb, srb);
			goto no_cmd;
		}
#endif
		if (dcb->sync_mode & SYNC_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_SYNC) {
			build_sdtr(acb, dcb, srb);
			goto no_cmd;
		}
		if (dcb->sync_mode & WIDE_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_WBUS16) {
			build_wdtr(acb, dcb, srb);
			goto no_cmd;
		}
		srb->msg_count = 0;
	}
	/* Send identify message */
	DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);

	scsicommand = SCMD_SEL_ATN;
	srb->state = SRB_START_;
#ifndef DC395x_NO_TAGQ
	if ((dcb->sync_mode & EN_TAG_QUEUEING)
	    && (identify_message & 0xC0)) {
		/* Send Tag message: find the first free tag bit */
		u32 tag_mask = 1;
		u8 tag_number = 0;
		while (tag_mask & dcb->tag_mask
		       && tag_number <= dcb->max_command) {
			tag_mask = tag_mask << 1;
			tag_number++;
		}
		if (tag_number >= dcb->max_command) {
			dprintkl(KERN_WARNING, "start_scsi: (pid#%li) "
				"Out of tags target=<%02i-%i>)\n",
				srb->cmd->pid, srb->cmd->device->id,
				srb->cmd->device->lun);
			srb->state = SRB_READY;
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_HWRESELECT);
			return 1;
		}
		/* Send Tag id */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
		dcb->tag_mask |= tag_mask;
		srb->tag_number = tag_number;
		scsicommand = SCMD_SEL_ATN3;
		srb->state = SRB_START_;
	}
#endif
/*polling:*/
	/* Send CDB ..command block ......... */
	dprintkdbg(DBG_KG, "start_scsi: (pid#%li) <%02i-%i> cmnd=0x%02x tag=%i\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun,
		srb->cmd->cmnd[0], srb->tag_number);
	if (srb->flag & AUTO_REQSENSE) {
		/* Build a REQUEST SENSE CDB on the fly */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
			      sizeof(srb->cmd->sense_buffer));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	} else {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++)
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	}
      no_cmd:
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
		       DO_HWRESELECT | DO_DATALATCH);
	if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
		/*
		 * If start_scsi return 1:
		 * we caught an interrupt (must be reset or reselection ... )
		 * : Let's process it first!
		 */
		dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> Failed - busy\n",
			srb->cmd->pid, dcb->target_id, dcb->target_lun);
		srb->state = SRB_READY;
		free_tag(dcb, srb);
		srb->msg_count = 0;
		return_code = 1;
		/* This IRQ should NOT get lost, as we did not acknowledge it */
	} else {
		/*
		 * If start_scsi returns 0:
		 * we know that the SCSI processor is free
		 */
		srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
		dcb->active_srb = srb;
		acb->active_dcb = dcb;
		return_code = 0;
		/* it's important for atn stop */
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
			       DO_DATALATCH | DO_HWRESELECT);
		/* SCSI command */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
	}
	return return_code;
}
  1474. #define DC395x_ENABLE_MSGOUT \
  1475. DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
  1476. srb->state |= SRB_MSGOUT
  1477. /* abort command */
  1478. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  1479. struct ScsiReqBlk *srb)
  1480. {
  1481. srb->msgout_buf[0] = ABORT;
  1482. srb->msg_count = 1;
  1483. DC395x_ENABLE_MSGOUT;
  1484. srb->state &= ~SRB_MSGIN;
  1485. srb->state |= SRB_MSGOUT;
  1486. }
  1487. /**
  1488. * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
  1489. * have been triggered for this card.
  1490. *
1491. * @acb: a pointer to the adapter control block
  1492. * @scsi_status: the status return when we checked the card
  1493. **/
static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
		u16 scsi_status)
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	u16 phase;
	u8 scsi_intstatus;
	unsigned long flags;
	/* handler for the current SCSI phase, from the phase tables */
	void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
			      u16 *);

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* This acknowledges the IRQ */
	scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	if ((scsi_status & 0x2007) == 0x2002)
		dprintkl(KERN_DEBUG,
			"COP after COP completed? %04x\n", scsi_status);
	if (debug_enabled(DBG_KG)) {
		if (scsi_intstatus & INT_SELTIMEOUT)
			dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
	}
	/*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */

	/* The chip produced an IRQ, so the selection-timeout workaround
	 * timer (if armed) is no longer needed */
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	/* Bus-free style events are handled directly, not via the
	 * phase state machine */
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		disconnect(acb);	/* bus free interrupt */
		goto out_unlock;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		reselect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SELECT) {
		dprintkl(KERN_INFO, "Host does not support target mode!\n");
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		scsi_reset_detect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		dcb = acb->active_dcb;
		if (!dcb) {
			dprintkl(KERN_DEBUG,
				"Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
				scsi_status, scsi_intstatus);
			goto out_unlock;
		}
		srb = dcb->active_srb;
		if (dcb->flag & ABORT_DEV_) {
			dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
			enable_msgout_abort(acb, srb);
		}

		/* software sequential machine */
		phase = (u16)srb->scsi_phase;

		/*
		 * 62037 or 62137
		 * call dc395x_scsi_phase0[]... "phase entry"
		 * handle every phase before start transfer
		 */
		/* data_out_phase0,	phase:0 */
		/* data_in_phase0,	phase:1 */
		/* command_phase0,	phase:2 */
		/* status_phase0,	phase:3 */
		/* nop0,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop0,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase0,	phase:6 */
		/* msgin_phase0,	phase:7 */
		dc395x_statev = dc395x_scsi_phase0[phase];
		dc395x_statev(acb, srb, &scsi_status);

		/*
		 * if there were any exception occured scsi_status
		 * will be modify to bus free phase new scsi_status
		 * transfer out from ... previous dc395x_statev
		 */
		srb->scsi_phase = scsi_status & PHASEMASK;
		phase = (u16)scsi_status & PHASEMASK;

		/*
		 * call dc395x_scsi_phase1[]... "phase entry" handle
		 * every phase to do transfer
		 */
		/* data_out_phase1,	phase:0 */
		/* data_in_phase1,	phase:1 */
		/* command_phase1,	phase:2 */
		/* status_phase1,	phase:3 */
		/* nop1,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop1,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase1,	phase:6 */
		/* msgin_phase1,	phase:7 */
		dc395x_statev = dc395x_scsi_phase1[phase];
		dc395x_statev(acb, srb, &scsi_status);
	}
      out_unlock:
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
/*
 * dc395x_interrupt - top-level IRQ handler (possibly shared line).
 * Dispatches SCSI interrupts to dc395x_handle_interrupt and reports
 * (but currently ignores) DMA engine errors.
 */
static irqreturn_t dc395x_interrupt(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)dev_id;
	u16 scsi_status;
	u8 dma_status;
	irqreturn_t handled = IRQ_NONE;

	/*
	 * Check for pending interupt
	 */
	scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
	dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
	if (scsi_status & SCSIINTERRUPT) {
		/* interupt pending - let's process it! */
		dc395x_handle_interrupt(acb, scsi_status);
		handled = IRQ_HANDLED;
	}
	else if (dma_status & 0x20) {
		/* Error from the DMA engine */
		dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
		dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
		if (acb->active_dcb) {
			acb->active_dcb-> flag |= ABORT_DEV_;
			if (acb->active_dcb->active_srb)
				enable_msgout_abort(acb, acb->active_dcb->active_srb);
		}
		DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
		dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
		/* acb is only NULLed to mark the error path; it is a
		 * local copy and not used again below */
		acb = NULL;
#endif
		handled = IRQ_HANDLED;
	}

	return handled;
}
/*
 * msgout_phase0 - leave the MESSAGE OUT phase: ack the data latch and
 * clear the MSGOUT state bit.
 */
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgout_phase0: (pid#%li)\n", srb->cmd->pid);
	/* After an unexpected reselect or a sent ABORT the bus goes free */
	if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
		*pscsi_status = PH_BUS_FREE;	/*.. initial phase */

	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	srb->state &= ~SRB_MSGOUT;
}
/*
 * msgout_phase1 - enter the MESSAGE OUT phase: push the queued
 * message bytes (or a NOP if none) into the SCSI FIFO and start the
 * FIFO-out transfer.
 */
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 i;
	u8 *ptr;
	dprintkdbg(DBG_0, "msgout_phase1: (pid#%li)\n", srb->cmd->pid);

	clear_fifo(acb, "msgout_phase1");
	if (!(srb->state & SRB_MSGOUT)) {
		srb->state |= SRB_MSGOUT;
		dprintkl(KERN_DEBUG,
			"msgout_phase1: (pid#%li) Phase unexpected\n",
			srb->cmd->pid);	/* So what ? */
	}
	if (!srb->msg_count) {
		/* Nothing queued - answer the target with a NOP message */
		dprintkdbg(DBG_0, "msgout_phase1: (pid#%li) NOP msg\n",
			srb->cmd->pid);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
		return;
	}
	ptr = (u8 *)srb->msgout_buf;
	for (i = 0; i < srb->msg_count; i++)
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	srb->msg_count = 0;
	/* Remember that an ABORT went out so phase0 can free the bus */
	if (srb->msgout_buf[0] == MSG_ABORT)
		srb->state = SRB_ABORT_SENT;

	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
/* command_phase0 - leave the COMMAND phase: just ack the data latch. */
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "command_phase0: (pid#%li)\n", srb->cmd->pid);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * command_phase1 - enter the COMMAND phase: load the CDB (or a
 * synthesized REQUEST SENSE for auto-sense) into the SCSI FIFO and
 * start the FIFO-out transfer.
 */
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb;
	u8 *ptr;
	u16 i;
	dprintkdbg(DBG_0, "command_phase1: (pid#%li)\n", srb->cmd->pid);

	clear_fifo(acb, "command_phase1");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
	if (!(srb->flag & AUTO_REQSENSE)) {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++) {
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
			ptr++;
		}
	} else {
		/* Build a REQUEST SENSE CDB on the fly */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		dcb = acb->active_dcb;
		/* target id */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
			      sizeof(srb->cmd->sense_buffer));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	}
	srb->state |= SRB_COMMAND;
	/* it's important for atn stop */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
  1700. /*
  1701. * Verify that the remaining space in the hw sg lists is the same as
  1702. * the count of remaining bytes in srb->total_xfer_length
  1703. */
  1704. static void sg_verify_length(struct ScsiReqBlk *srb)
  1705. {
  1706. if (debug_enabled(DBG_SG)) {
  1707. unsigned len = 0;
  1708. unsigned idx = srb->sg_index;
  1709. struct SGentry *psge = srb->segment_x + idx;
  1710. for (; idx < srb->sg_count; psge++, idx++)
  1711. len += psge->length;
  1712. if (len != srb->total_xfer_length)
  1713. dprintkdbg(DBG_SG,
  1714. "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
  1715. srb->total_xfer_length, len);
  1716. }
  1717. }
/*
 * Compute the next Scatter Gather list index and adjust its length
 * and address if necessary; also compute virt_addr
 */
static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
{
	u8 idx;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd = srb->cmd;
	int segment = cmd->use_sg;
	u32 xferred = srb->total_xfer_length - left; /* bytes transfered */
	struct SGentry *psge = srb->segment_x + srb->sg_index;

	dprintkdbg(DBG_0,
		"sg_update_list: Transfered %i of %i bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
	if (xferred == 0) {
		/* nothing to update since we did not transfer any data */
		return;
	}

	sg_verify_length(srb);
	srb->total_xfer_length = left;	/* update remaining count */
	for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
		if (xferred >= psge->length) {
			/* Complete SG entries done */
			xferred -= psge->length;
		} else {
			/* Partial SG entry done: shrink it in place and
			 * push the updated hw SG list back to the device */
			psge->length -= xferred;
			psge->address += xferred;
			srb->sg_index = idx;
			pci_dma_sync_single_for_device(srb->dcb->
					    acb->dev,
					    srb->sg_bus_addr,
					    SEGMENTX_LEN,
					    PCI_DMA_TODEVICE);
			break;
		}
		psge++;
	}
	sg_verify_length(srb);

	/* we need the corresponding virtual address */
	if (!segment || (srb->flag & AUTO_REQSENSE)) {
		/* Non-SG transfer: the buffer is linear, just advance */
		srb->virt_addr += xferred;
		return;
	}

	/* We have to walk the scatterlist to find it */
	sg = (struct scatterlist *)cmd->request_buffer;
	while (segment--) {
		/* Mask out the in-entry offset to compare page frames */
		unsigned long mask =
		    ~((unsigned long)sg->length - 1) & PAGE_MASK;
		if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
			srb->virt_addr = (page_address(sg->page)
					   + psge->address -
					   (psge->address & PAGE_MASK));
			return;
		}
		++sg;
	}

	dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");
	srb->virt_addr = NULL;
}
  1779. /*
  1780. * We have transfered a single byte (PIO mode?) and need to update
  1781. * the count of bytes remaining (total_xfer_length) and update the sg
  1782. * entry to either point to next byte in the current sg entry, or of
  1783. * already at the end to point to the start of the next sg entry
  1784. */
  1785. static void sg_subtract_one(struct ScsiReqBlk *srb)
  1786. {
  1787. srb->total_xfer_length--;
  1788. srb->segment_x[srb->sg_index].length--;
  1789. if (srb->total_xfer_length &&
  1790. !srb->segment_x[srb->sg_index].length) {
  1791. if (debug_enabled(DBG_PIO))
  1792. printk(" (next segment)");
  1793. srb->sg_index++;
  1794. sg_update_list(srb, srb->total_xfer_length);
  1795. }
  1796. }
/*
 * cleanup_after_transfer
 *
 * Makes sure, DMA and SCSI engine are empty, after the transfer has finished
 * KG: Currently called from StatusPhase1 ()
 * Should probably also be called from other places
 * Best might be to call it in DataXXPhase0, if new phase will differ
 */
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */
	if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {	/* read */
		/* Drain the SCSI FIFO first, then the DMA FIFO */
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/in");
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	} else {		/* write */
		/* Opposite order for writes: DMA FIFO, then SCSI FIFO */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/out");
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * Those no of bytes will be transfered w/ PIO through the SCSI FIFO
 * Seems to be needed for unknown reasons; could be a hardware bug :-(
 */
#define DC395x_LASTPIO 4

/*
 * data_out_phase0 - leave the DATA OUT phase: stop the DMA engine,
 * work out how many bytes actually reached the device, and update the
 * SG list / remaining-transfer count to match.
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_out_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);

	/*
	 * KG: We need to drain the buffers before we draw any conclusions!
	 * This means telling the DMA to push the rest into SCSI, telling
	 * SCSI to push the rest to the bus.
	 * However, the device might have been the one to stop us (phase
	 * change), and the data in transit just needs to be accounted so
	 * it can be retransmitted.)
	 */

	/*
	 * KG: Stop DMA engine pushing more data into the SCSI FIFO
	 * If we need more data, the DMA SG list will be freshly set up, anyway
	 */
	dprintkdbg(DBG_PIO, "data_out_phase0: "
		"DMA{fifcnt=0x%02x fifostat=0x%02x} "
		"SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
		srb->total_xfer_length);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);

	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			srb->status |= PARITY_ERROR;

		/*
		 * KG: Right, we can't just rely on the SCSI_COUNTER, because this
		 * is the no of bytes it got from the DMA engine not the no it
		 * transferred successfully to the device. (And the difference could
		 * be as much as the FIFO size, I guess ...)
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			/* FIFO counts words, not bytes, in wide mode */
			if (dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;

			dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
				"SCSI{fifocnt=0x%02x cnt=0x%08x} "
				"DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				(dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
				DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
		}
		/*
		 * calculate all the residue data that not yet tranfered
		 * SCSI transfer counter + left in SCSI FIFO data
		 *
		 * .....TRM_S1040_SCSI_COUNTER (24bits)
		 * The counter always decrement by one for every SCSI byte transfer.
		 * .....TRM_S1040_SCSI_FIFOCNT ( 5bits)
		 * The counter is SCSI FIFO offset counter (in units of bytes or! words)
		 */
		if (srb->total_xfer_length > DC395x_LASTPIO)
			d_left_counter +=
			    DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);

		/* Is this a good idea? */
		/*clear_fifo(acb, "DOP1"); */
		/* KG: What is this supposed to be useful for? WIDE padding stuff? */
		if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
		    && srb->cmd->request_bufflen % 2) {
			d_left_counter = 0;
			dprintkl(KERN_INFO,
				"data_out_phase0: Discard 1 byte (0x%02x)\n",
				scsi_status);
		}
		/*
		 * KG: Oops again. Same thinko as above: The SCSI might have been
		 * faster than the DMA engine, so that it ran out of data.
		 * In that case, we have to do just nothing!
		 * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or?
		 */
		/*
		 * KG: This is nonsense: We have been WRITING data to the bus
		 * If the SCSI engine has no bytes left, how should the DMA engine?
		 */
		if (d_left_counter == 0) {
			srb->total_xfer_length = 0;
		} else {
			/*
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			long oldxferred =
			    srb->total_xfer_length - d_left_counter;
			const int diff =
			    (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
			sg_update_list(srb, d_left_counter);
			/* KG: Most ugly hack! Apparently, this works around a chip bug */
			if ((srb->segment_x[srb->sg_index].length ==
			     diff && srb->cmd->use_sg)
			    || ((oldxferred & ~PAGE_MASK) ==
				(PAGE_SIZE - diff))
			    ) {
				dprintkl(KERN_INFO, "data_out_phase0: "
					"Work around chip bug (%i)?\n", diff);
				d_left_counter =
				    srb->total_xfer_length - diff;
				sg_update_list(srb, d_left_counter);
				/*srb->total_xfer_length -= diff; */
				/*srb->virt_addr += diff; */
				/*if (srb->cmd->use_sg) */
				/*      srb->sg_index++; */
			}
		}
	}
	/* Phase changed away from DATA OUT: drain and latch */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * data_out_phase1 - enter the DATA OUT phase: flush the SCSI FIFO and
 * set up the outgoing transfer.
 */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_out_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	clear_fifo(acb, "data_out_phase1");
	/* do prepare before transfer when data out phase */
	data_io_transfer(acb, srb, XFERDATAOUT);
}
/*
 * Data-in phase, part 0: the chip interrupted at the end of (or a
 * phase change away from) a DATA IN transfer.  Determine how many
 * bytes are still outstanding from the SCSI transfer counter and
 * FIFO count, drain the last few bytes by PIO when the remainder is
 * small (<= DC395x_LASTPIO), and update the scatter-gather
 * bookkeeping via sg_update_list() when the target changed phase
 * before the transfer completed.
 */
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_in_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	/*
	 * KG: DataIn is much more tricky than DataOut. When the device is finished
	 * and switches to another phase, the SCSI engine should be finished too.
	 * But: There might still be bytes left in its FIFO to be fetched by the DMA
	 * engine and transferred to memory.
	 * We should wait for the FIFOs to be emptied by that (is there any way to
	 * enforce this?) and then stop the DMA engine, because it might think, that
	 * there are more bytes to follow. Yes, the device might disconnect prior to
	 * having all bytes transferred!
	 * Also we should make sure that all data from the DMA engine buffer's really
	 * made its way to the system memory! Some documentation on this would not
	 * seem to be a bad idea, actually.
	 */
	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR) {
			dprintkl(KERN_INFO, "data_in_phase0: (pid#%li) "
				"Parity Error\n", srb->cmd->pid);
			srb->status |= PARITY_ERROR;
		}
		/*
		 * KG: We should wait for the DMA FIFO to be empty ...
		 * but: it would be better to wait first for the SCSI FIFO and then the
		 * the DMA FIFO to become empty? How do we know, that the device not already
		 * sent data to the FIFO in a MsgIn phase, eg.?
		 */
		/* Bit 7 of DMA_FIFOSTAT apparently means "FIFO empty" here */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
#if 0
			int ctr = 6000000;
			dprintkl(KERN_DEBUG,
				"DIP0: Wait for DMA FIFO to flush ...\n");
			/*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
			/*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
			while (!
			       (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
				0x80) && --ctr);
			if (ctr < 6000000 - 1)
				/* NOTE(review): missing comma after KERN_DEBUG;
				 * harmless while this stays inside #if 0 */
				dprintkl(KERN_DEBUG
					"DIP0: Had to wait for DMA ...\n");
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
#endif
			dprintkdbg(DBG_KG, "data_in_phase0: "
				"DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
		}
		/* Now: Check remainig data: The SCSI counters should tell us ... */
		/* FIFO count is in words when wide, hence the conditional shift */
		d_left_counter = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER)
		    + ((DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x1f)
		       << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
			   0));
		dprintkdbg(DBG_KG, "data_in_phase0: "
			"SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
			"DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
			"Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
			DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
			(srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
			DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
			DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
			DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
			DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
			srb->total_xfer_length, d_left_counter);
#if DC395x_LASTPIO
		/* KG: Less than or equal to 4 bytes can not be transfered via DMA, it seems. */
		if (d_left_counter
		    && srb->total_xfer_length <= DC395x_LASTPIO) {
			/*u32 addr = (srb->segment_x[srb->sg_index].address); */
			/*sg_update_list (srb, d_left_counter); */
			dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) to "
				"%p for remaining %i bytes:",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x1f,
				(srb->dcb->sync_period & WIDE_SYNC) ?
				    "words" : "bytes",
				srb->virt_addr,
				srb->total_xfer_length);
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					      CFG2_WIDEFIFO);
			/* 0x40 presumably flags "SCSI FIFO empty" -- drain until then */
			while (DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) != 0x40) {
				u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				pio_trigger();
				*(srb->virt_addr)++ = byte;
				if (debug_enabled(DBG_PIO))
					printk(" %02x", byte);
				d_left_counter--;
				sg_subtract_one(srb);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
#if 1
				/* Read the last byte ... */
				if (srb->total_xfer_length > 0) {
					u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
					pio_trigger();
					*(srb->virt_addr)++ = byte;
					srb->total_xfer_length--;
					if (debug_enabled(DBG_PIO))
						printk(" %02x", byte);
				}
#endif
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
			/*srb->total_xfer_length = 0; */
			if (debug_enabled(DBG_PIO))
				printk("\n");
		}
#endif				/* DC395x_LASTPIO */
#if 0
		/*
		 * KG: This was in DATAOUT. Does it also belong here?
		 * Nobody seems to know what counter and fifo_cnt count exactly ...
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			if (srb->dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;
			/*
			 * if WIDE scsi SCSI FIFOCNT unit is word !!!
			 * so need to *= 2
			 * KG: Seems to be correct ...
			 */
		}
#endif
		/* KG: This should not be needed any more! */
		if (d_left_counter == 0
		    || (scsi_status & SCSIXFERCNT_2_ZERO)) {
#if 0
			int ctr = 6000000;
			u8 TempDMAstatus;
			do {
				TempDMAstatus =
				    DC395x_read8(acb, TRM_S1040_DMA_STATUS);
			} while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DataInPhase0 waiting for DMA!!\n");
			srb->total_xfer_length = 0;
#endif
			srb->total_xfer_length = d_left_counter;
		} else {	/* phase changed */
			/*
			 * parsing the case:
			 * when a transfer not yet complete
			 * but be disconnected by target
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			sg_update_list(srb, d_left_counter);
		}
	}
	/* KG: The target may decide to disconnect: Empty FIFO before! */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * Data-in phase, part 1: start the incoming transfer.  Unlike the
 * data-out path this does not clear the FIFO first; it just programs
 * and kicks off the DMA/PIO transfer in the read direction.
 */
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_in_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	data_io_transfer(acb, srb, XFERDATAIN);
}
/*
 * Program the chip for a data transfer in the direction given by
 * io_dir (DMACMD_DIR set = read) and start it.  Three cases:
 *   - more than DC395x_LASTPIO bytes left: set up a DMA transfer,
 *     either scatter/gather (from srb->sg_bus_addr at the current
 *     sg_index) or single-segment;
 *   - 1..DC395x_LASTPIO bytes left: transfer by PIO through the
 *     SCSI FIFO (writes are pushed here; reads are fetched later in
 *     data_in_phase0);
 *   - 0 bytes left: the target wants more data than we have (or
 *     vice versa) -- feed/drain pad bytes and flag an over/under run.
 */
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	dprintkdbg(DBG_0,
		"data_io_transfer: (pid#%li) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun,
		((io_dir & DMACMD_DIR) ? 'r' : 'w'),
		srb->total_xfer_length, srb->sg_index, srb->sg_count);
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
	if (srb->sg_index >= srb->sg_count) {
		/* can't happen? out of bounds error */
		return;
	}
	if (srb->total_xfer_length > DC395x_LASTPIO) {
		u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
		/*
		 * KG: What should we do: Use SCSI Cmd 0x90/0x92?
		 * Maybe, even ABORTXFER would be appropriate
		 */
		if (dma_status & XFERPENDING) {
			dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
				"Expect trouble!\n");
			dump_register_info(acb, dcb, srb);
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		}
		/* clear_fifo(acb, "IO"); */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
		if (srb->cmd->use_sg) {	/* with S/G */
			io_dir |= DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->sg_bus_addr +
				       sizeof(struct SGentry) *
				       srb->sg_index);
			/* load how many bytes in the sg list table */
			/* each SGentry is 8 bytes, hence << 3 */
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       ((u32)(srb->sg_count -
					      srb->sg_index) << 3));
		} else {	/* without S/G */
			io_dir &= ~DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->segment_x[0].address);
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       srb->segment_x[0].length);
		}
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* NOTE: command ordering differs by direction on purpose:
		 * SCSI engine first for reads, DMA engine first for writes */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_IN);
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
		} else {
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_OUT);
		}
	}
#if DC395x_LASTPIO
	else if (srb->total_xfer_length > 0) {	/* The last four bytes: Do PIO */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_FIFO_IN);
		} else {	/* write */
			int ln = srb->total_xfer_length;
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					      CFG2_WIDEFIFO);
			dprintkdbg(DBG_PIO,
				"data_io_transfer: PIO %i bytes from %p:",
				srb->total_xfer_length, srb->virt_addr);
			while (srb->total_xfer_length) {
				if (debug_enabled(DBG_PIO))
					printk(" %02x", (unsigned char) *(srb->virt_addr));
				pio_trigger();
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
					      *(srb->virt_addr)++);
				sg_subtract_one(srb);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
				if (ln % 2) {
					/* odd byte count: pad the wide FIFO */
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
					if (debug_enabled(DBG_PIO))
						printk(" |00");
				}
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
			if (debug_enabled(DBG_PIO))
				printk("\n");
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_FIFO_OUT);
		}
	}
#endif				/* DC395x_LASTPIO */
	else {		/* xfer pad */
		u8 data = 0, data2 = 0;
		if (srb->sg_count) {
			/* we still had segments queued: record over/under run */
			srb->adapter_status = H_OVER_UNDER_RUN;
			srb->status |= OVER_RUN;
		}
		/*
		 * KG: despite the fact that we are using 16 bits I/O ops
		 * the SCSI FIFO is only 8 bits according to the docs
		 * (we can set bit 1 in 0x8f to serialize FIFO access ...)
		 */
		if (dcb->sync_period & WIDE_SYNC) {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				      CFG2_WIDEFIFO);
			if (io_dir & DMACMD_DIR) {
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			} else {
				/* Danger, Robinson: If you find KGs
				 * scattered over the wide disk, the driver
				 * or chip is to blame :-( */
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
			}
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
		} else {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
			/* Danger, Robinson: If you find a collection of Ks on your disk
			 * something broke :-( */
			if (io_dir & DMACMD_DIR)
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			else
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
		}
		srb->state |= SRB_XFERPAD;
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* SCSI command */
		bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
	}
}
/*
 * Status phase, part 0: fetch the target's STATUS byte and the
 * following message byte (normally COMMAND COMPLETE) from the SCSI
 * FIFO, mark the SRB completed and tell the chip to accept the
 * message; the next expected phase is bus-free.
 */
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);	/* get message */
	srb->state = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Status phase, part 1: the bus entered STATUS phase; issue the
 * "complete sequence" command (SCMD_COMP) so the chip collects the
 * status and message bytes that status_phase0 will read.
 */
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	srb->state = SRB_STATUS;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
}
  2316. /* Check if the message is complete */
  2317. static inline u8 msgin_completed(u8 * msgbuf, u32 len)
  2318. {
  2319. if (*msgbuf == EXTENDED_MESSAGE) {
  2320. if (len < 2)
  2321. return 0;
  2322. if (len < msgbuf[1] + 2)
  2323. return 0;
  2324. } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */
  2325. if (len < 2)
  2326. return 0;
  2327. return 1;
  2328. }
  2329. /* reject_msg */
/* reject_msg */
/*
 * Queue a MESSAGE REJECT to be sent during the next message-out
 * phase (DC395x_ENABLE_MSGOUT schedules it) and flip the SRB state
 * from message-in to message-out.
 */
static inline void msgin_reject(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	srb->msgout_buf[0] = MESSAGE_REJECT;
	srb->msg_count = 1;
	DC395x_ENABLE_MSGOUT;
	srb->state &= ~SRB_MSGIN;
	srb->state |= SRB_MSGOUT;
	/* Log the rejected message byte and the target it came from */
	dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
		srb->msgin_buf[0],
		srb->dcb->target_id, srb->dcb->target_lun);
}
  2342. static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
  2343. struct DeviceCtlBlk *dcb, u8 tag)
  2344. {
  2345. struct ScsiReqBlk *srb = NULL;
  2346. struct ScsiReqBlk *i;
  2347. dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) tag=%i srb=%p\n",
  2348. srb->cmd->pid, tag, srb);
  2349. if (!(dcb->tag_mask & (1 << tag)))
  2350. dprintkl(KERN_DEBUG,
  2351. "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
  2352. dcb->tag_mask, tag);
  2353. if (list_empty(&dcb->srb_going_list))
  2354. goto mingx0;
  2355. list_for_each_entry(i, &dcb->srb_going_list, list) {
  2356. if (i->tag_number == tag) {
  2357. srb = i;
  2358. break;
  2359. }
  2360. }
  2361. if (!srb)
  2362. goto mingx0;
  2363. dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) <%02i-%i>\n",
  2364. srb->cmd->pid, srb->dcb->target_id, srb->dcb->target_lun);
  2365. if (dcb->flag & ABORT_DEV_) {
  2366. /*srb->state = SRB_ABORT_SENT; */
  2367. enable_msgout_abort(acb, srb);
  2368. }
  2369. if (!(srb->state & SRB_DISCONNECT))
  2370. goto mingx0;
  2371. memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
  2372. srb->state |= dcb->active_srb->state;
  2373. srb->state |= SRB_DATA_XFER;
  2374. dcb->active_srb = srb;
  2375. /* How can we make the DORS happy? */
  2376. return srb;
  2377. mingx0:
  2378. srb = acb->tmp_srb;
  2379. srb->state = SRB_UNEXPECT_RESEL;
  2380. dcb->active_srb = srb;
  2381. srb->msgout_buf[0] = MSG_ABORT_TAG;
  2382. srb->msg_count = 1;
  2383. DC395x_ENABLE_MSGOUT;
  2384. dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
  2385. return srb;
  2386. }
/*
 * Re-program the chip's per-target registers (target id, sync
 * period, sync offset) from the DCB's current settings and propagate
 * the transfer rate to sibling LUNs via set_xfer_rate().
 */
static inline void reprogram_regs(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	set_xfer_rate(acb, dcb);
}
  2395. /* set async transfer mode */
/* set async transfer mode */
/*
 * The target rejected our SDTR: fall back to asynchronous transfers
 * (offset 0, 200ns minimum period), mark sync negotiation done and
 * reprogram the chip.  If wide negotiation is still pending, try
 * WDTR anyway.
 */
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);
	dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
	dcb->sync_mode |= SYNC_NEGO_DONE;
	/*dcb->sync_period &= 0; */
	dcb->sync_offset = 0;
	dcb->min_nego_period = 200 >> 2;	/* 200ns <=> 5 MHz */
	srb->state &= ~SRB_DO_SYNC_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
	    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
		build_wdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
	}
}
  2415. /* set sync transfer mode */
/* set sync transfer mode */
/*
 * Handle an incoming SDTR message (msgin_buf[3] = period in 4ns
 * units, msgin_buf[4] = offset): clamp the offer to our limits, pick
 * the matching clock divisor, update the DCB and chip registers, and
 * answer with a corrected SDTR if the target initiated negotiation.
 */
static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	int fact;
	/*
	 * NOTE(review): this debug print divides by msgin_buf[3]; a
	 * (bogus) SDTR period of 0 from the target would divide by
	 * zero when DBG_1 logging is enabled -- worth guarding.
	 */
	dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
		"(%02i.%01i MHz) Offset %i\n",
		dcb->target_id, srb->msgin_buf[3] << 2,
		(250 / srb->msgin_buf[3]),
		((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
		srb->msgin_buf[4]);
	/* Clamp the offered offset to the chip maximum and our setting */
	if (srb->msgin_buf[4] > 15)
		srb->msgin_buf[4] = 15;
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
		dcb->sync_offset = 0;
	else if (dcb->sync_offset == 0)
		dcb->sync_offset = srb->msgin_buf[4];
	if (srb->msgin_buf[4] > dcb->sync_offset)
		srb->msgin_buf[4] = dcb->sync_offset;
	else
		dcb->sync_offset = srb->msgin_buf[4];
	/* Find the first clock divisor that satisfies both the target's
	 * requested period and our own minimum negotiation period */
	bval = 0;
	while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
			    || dcb->min_nego_period >
			    clock_period[bval]))
		bval++;
	/*
	 * NOTE(review): if no entry matches, bval ends up as 7 and
	 * clock_period[bval] below indexes one past a 7-entry table --
	 * TODO confirm clock_period[] size and clamp if needed.
	 */
	if (srb->msgin_buf[3] < clock_period[bval])
		dprintkl(KERN_INFO,
			"msgin_set_sync: Increase sync nego period to %ins\n",
			clock_period[bval] << 2);
	srb->msgin_buf[3] = clock_period[bval];
	dcb->sync_period &= 0xf0;
	dcb->sync_period |= ALT_SYNC | bval;
	dcb->min_nego_period = srb->msgin_buf[3];
	/* 500 for wide (2 bytes/cycle), 250 for narrow: MB/s numerator */
	if (dcb->sync_period & WIDE_SYNC)
		fact = 500;
	else
		fact = 250;
	dprintkl(KERN_INFO,
		"Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
		dcb->target_id, (fact == 500) ? "Wide16" : "",
		dcb->min_nego_period << 2, dcb->sync_offset,
		(fact / dcb->min_nego_period),
		((fact % dcb->min_nego_period) * 10 +
		 dcb->min_nego_period / 2) / dcb->min_nego_period);
	if (!(srb->state & SRB_DO_SYNC_NEGO)) {
		/* Reply with corrected SDTR Message */
		dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
			srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
		memcpy(srb->msgout_buf, srb->msgin_buf, 5);
		srb->msg_count = 5;
		DC395x_ENABLE_MSGOUT;
		dcb->sync_mode |= SYNC_NEGO_DONE;
	} else {
		/* We initiated; if wide nego is still pending, try it now */
		if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
		    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
			build_wdtr(acb, dcb, srb);
			DC395x_ENABLE_MSGOUT;
			dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
		}
	}
	srb->state &= ~SRB_DO_SYNC_NEGO;
	dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
	reprogram_regs(acb, dcb);
}
/*
 * The target rejected our WDTR: drop back to narrow transfers, mark
 * wide negotiation done and reprogram the chip.  If sync negotiation
 * is still pending, send an SDTR anyway.
 */
static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
	dcb->sync_period &= ~WIDE_SYNC;
	dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
	dcb->sync_mode |= WIDE_NEGO_DONE;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
	}
}
/*
 * Handle an incoming WDTR message (msgin_buf[3] = width exponent,
 * 0 = 8 bit, 1 = 16 bit).  Clamp the offer to what we support, reply
 * with a WDTR if the target initiated negotiation, set or clear the
 * WIDE_SYNC bit accordingly and reprogram the chip.  If sync
 * negotiation is still pending, follow up with an SDTR.
 */
static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	/* wide=1 only if both the device setting and the adapter allow it */
	u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
		   && acb->config & HCC_WIDE_CARD) ? 1 : 0;
	dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
	if (srb->msgin_buf[3] > wide)
		srb->msgin_buf[3] = wide;
	/* Completed */
	if (!(srb->state & SRB_DO_WIDE_NEGO)) {
		/* Target initiated: echo back the (clamped) WDTR */
		dprintkl(KERN_DEBUG,
			"msgin_set_wide: Wide nego initiated <%02i>\n",
			dcb->target_id);
		memcpy(srb->msgout_buf, srb->msgin_buf, 4);
		srb->msg_count = 4;
		srb->state |= SRB_DO_WIDE_NEGO;
		DC395x_ENABLE_MSGOUT;
	}
	dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
	if (srb->msgin_buf[3] > 0)
		dcb->sync_period |= WIDE_SYNC;
	else
		dcb->sync_period &= ~WIDE_SYNC;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
	dprintkdbg(DBG_1,
		"msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
		(8 << srb->msgin_buf[3]), dcb->target_id);
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
	}
}
  2534. /*
  2535. * extended message codes:
  2536. *
  2537. * code description
  2538. *
  2539. * 02h Reserved
  2540. * 00h MODIFY DATA POINTER
  2541. * 01h SYNCHRONOUS DATA TRANSFER REQUEST
  2542. * 03h WIDE DATA TRANSFER REQUEST
  2543. * 04h - 7Fh Reserved
  2544. * 80h - FFh Vendor specific
  2545. */
/*
 * Message-in phase, part 0: one message byte has arrived in the SCSI
 * FIFO.  Accumulate bytes in srb->msgin_buf until msgin_completed()
 * says the message is complete, then dispatch on the message code
 * (disconnect, tagged reconnect, reject handling, SDTR/WDTR, ...)
 * and finally acknowledge the message.  Note that the tagged-queue
 * cases may swap the local srb for the reconnected one.
 */
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	dprintkdbg(DBG_0, "msgin_phase0: (pid#%li)\n", srb->cmd->pid);
	srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
		/* Now eval the msg */
		switch (srb->msgin_buf[0]) {
		case DISCONNECT:
			srb->state = SRB_DISCONNECT;
			break;
		case SIMPLE_QUEUE_TAG:
		case HEAD_OF_QUEUE_TAG:
		case ORDERED_QUEUE_TAG:
			/* Tagged reconnect: look up the SRB for this tag */
			srb =
			    msgin_qtag(acb, dcb,
				       srb->msgin_buf[1]);
			break;
		case MESSAGE_REJECT:
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_CLRATN | DO_DATALATCH);
			/* A sync nego message was rejected ! */
			if (srb->state & SRB_DO_SYNC_NEGO) {
				msgin_set_async(acb, srb);
				break;
			}
			/* A wide nego message was rejected ! */
			if (srb->state & SRB_DO_WIDE_NEGO) {
				msgin_set_nowide(acb, srb);
				break;
			}
			enable_msgout_abort(acb, srb);
			/*srb->state |= SRB_ABORT_SENT */
			break;
		case EXTENDED_MESSAGE:
			/* SDTR */
			if (srb->msgin_buf[1] == 3
			    && srb->msgin_buf[2] == EXTENDED_SDTR) {
				msgin_set_sync(acb, srb);
				break;
			}
			/* WDTR */
			if (srb->msgin_buf[1] == 2
			    && srb->msgin_buf[2] == EXTENDED_WDTR
			    && srb->msgin_buf[3] <= 2) {	/* sanity check ... */
				msgin_set_wide(acb, srb);
				break;
			}
			msgin_reject(acb, srb);
			break;
		case MSG_IGNOREWIDE:
			/* Discard  wide residual */
			dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
			break;
		case COMMAND_COMPLETE:
			/* nothing has to be done */
			break;
		case SAVE_POINTERS:
			/*
			 * SAVE POINTER may be ignored as we have the struct
			 * ScsiReqBlk* associated with the scsi command.
			 */
			dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) "
				"SAVE POINTER rem=%i Ignore\n",
				srb->cmd->pid, srb->total_xfer_length);
			break;
		case RESTORE_POINTERS:
			dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
			break;
		case ABORT:
			dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) "
				"<%02i-%i> ABORT msg\n",
				srb->cmd->pid, dcb->target_id,
				dcb->target_lun);
			dcb->flag |= ABORT_DEV_;
			enable_msgout_abort(acb, srb);
			break;
		default:
			/* reject unknown messages */
			if (srb->msgin_buf[0] & IDENTIFY_BASE) {
				dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
				srb->msg_count = 1;
				srb->msgout_buf[0] = dcb->identify_msg;
				DC395x_ENABLE_MSGOUT;
				srb->state |= SRB_MSGOUT;
				/*break; */
			}
			msgin_reject(acb, srb);
		}
		/* Clear counter and MsgIn state */
		srb->state &= ~SRB_MSGIN;
		acb->msg_len = 0;
	}
	*pscsi_status = PH_BUS_FREE;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important ... you know! */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Message-in phase, part 1: prepare to receive one message byte.
 * Clears the FIFO, sets the transfer counter to 1 and enters the
 * message-in state (clearing any disconnect expectation), then
 * issues a FIFO read command.
 */
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgin_phase1: (pid#%li)\n", srb->cmd->pid);
	clear_fifo(acb, "msgin_phase1");
	DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
	if (!(srb->state & SRB_MSGIN)) {
		srb->state &= ~SRB_DISCONNECT;
		srb->state |= SRB_MSGIN;
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
}
/* Intentionally empty phase handler (nothing to do in this half) */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/* Intentionally empty phase handler (nothing to do in this half) */
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
  2666. static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
  2667. {
  2668. struct DeviceCtlBlk *i;
  2669. /* set all lun device's period, offset */
  2670. if (dcb->identify_msg & 0x07)
  2671. return;
  2672. if (acb->scan_devices) {
  2673. current_sync_offset = dcb->sync_offset;
  2674. return;
  2675. }
  2676. list_for_each_entry(i, &acb->dcb_list, list)
  2677. if (i->target_id == dcb->target_id) {
  2678. i->sync_period = dcb->sync_period;
  2679. i->sync_offset = dcb->sync_offset;
  2680. i->sync_mode = dcb->sync_mode;
  2681. i->min_nego_period = dcb->min_nego_period;
  2682. }
  2683. }
/*
 * Handle a bus-free (disconnect) interrupt.  Depending on the active
 * SRB's state this is: an unexpected reselection clean-up, the
 * completion of an abort, a selection timeout (optionally retried),
 * an expected disconnect of a tagged command, or normal command
 * completion -- each branch releases the bus/tag resources it owns
 * and re-arms the chip for hardware reselection.
 */
static void disconnect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb;
	if (!dcb) {
		/* Bus went free with no device selected: just recover */
		dprintkl(KERN_ERR, "disconnect: No such device\n");
		udelay(500);
		/* Suspend queue for a while */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;
		clear_fifo(acb, "disconnectEx");
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
		return;
	}
	srb = dcb->active_srb;
	acb->active_dcb = NULL;
	dprintkdbg(DBG_0, "disconnect: (pid#%li)\n", srb->cmd->pid);
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	clear_fifo(acb, "disconnect");
	/* Re-enable hardware reselection for the disconnected target */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
	if (srb->state & SRB_UNEXPECT_RESEL) {
		dprintkl(KERN_ERR,
			"disconnect: Unexpected reselection <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
		srb->state = 0;
		waiting_process_next(acb);
	} else if (srb->state & SRB_ABORT_SENT) {
		dcb->flag &= ~ABORT_DEV_;
		acb->scsi_host->last_reset = jiffies + HZ / 2 + 1;
		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
		waiting_process_next(acb);
	} else {
		if ((srb->state & (SRB_START_ + SRB_MSGOUT))
		    || !(srb->
			 state & (SRB_DISCONNECT + SRB_COMPLETED))) {
			/*
			 * Selection time out
			 * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
			 */
			/* Unexp. Disc / Sel Timeout */
			if (srb->state != SRB_START_
			    && srb->state != SRB_MSGOUT) {
				/* Unexpected disconnect mid-command */
				srb->state = SRB_READY;
				dprintkl(KERN_DEBUG,
					"disconnect: (pid#%li) Unexpected\n",
					srb->cmd->pid);
				srb->target_status = SCSI_STAT_SEL_TIMEOUT;
				goto disc1;
			} else {
				/* Normal selection timeout */
				dprintkdbg(DBG_KG, "disconnect: (pid#%li) "
					"<%02i-%i> SelTO\n", srb->cmd->pid,
					dcb->target_id, dcb->target_lun);
				if (srb->retry_count++ > DC395x_MAX_RETRIES
				    || acb->scan_devices) {
					/* Out of retries (or scanning): fail it */
					srb->target_status =
					    SCSI_STAT_SEL_TIMEOUT;
					goto disc1;
				}
				/* Re-queue the command and retry shortly */
				free_tag(dcb, srb);
				srb_going_to_waiting_move(dcb, srb);
				dprintkdbg(DBG_KG,
					"disconnect: (pid#%li) Retry\n",
					srb->cmd->pid);
				waiting_set_timer(acb, HZ / 20);
			}
		} else if (srb->state & SRB_DISCONNECT) {
			u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
			/*
			 * SRB_DISCONNECT (This is what we expect!)
			 */
			/* Bit 0x40 presumably mirrors the ACK line -- if set,
			 * another initiator may be on the bus, so do nothing */
			if (bval & 0x40) {
				dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
					" 0x%02x: ACK set! Other controllers?\n",
					bval);
				/* It could come from another initiator, therefore don't do much ! */
			} else
				waiting_process_next(acb);
		} else if (srb->state & SRB_COMPLETED) {
		      disc1:
			/*
			 ** SRB_COMPLETED
			 */
			free_tag(dcb, srb);
			dcb->active_srb = NULL;
			srb->state = SRB_FREE;
			srb_done(acb, dcb, srb);
		}
	}
}
/*
 * reselect - handle a reselection by a target.
 *
 * Identifies which device reconnected (from the chip's TARGETID
 * register), restores that device's sync/offset parameters into the
 * chip, and accepts the identify message.  If we were in the middle of
 * a selection that lost arbitration, the interrupted command is pushed
 * back onto the waiting queue to be retried via timer.
 */
static void reselect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb = NULL;
	u16 rsel_tar_lun_id;
	u8 id, lun;
	u8 arblostflag = 0;	/* set below but only referenced in disabled code */
	dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);

	clear_fifo(acb, "reselect");
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
	/* Read Reselected Target ID and LUN */
	rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
	if (dcb) {		/* Arbitration lost but Reselection win */
		srb = dcb->active_srb;
		if (!srb) {
			dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
				"but active_srb == NULL\n");
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
			return;
		}
		/* Why the if ? */
		if (!acb->scan_devices) {
			dprintkdbg(DBG_KG, "reselect: (pid#%li) <%02i-%i> "
				"Arb lost but Resel win rsel=%i stat=0x%04x\n",
				srb->cmd->pid, dcb->target_id,
				dcb->target_lun, rsel_tar_lun_id,
				DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
			arblostflag = 1;
			/*srb->state |= SRB_DISCONNECT; */

			/* Requeue the interrupted command and retry shortly */
			srb->state = SRB_READY;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);

			/* return; */
		}
	}
	/* Read Reselected Target Id and LUN */
	if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
		dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
			"Got %i!\n", rsel_tar_lun_id);
	id = rsel_tar_lun_id & 0xff;
	lun = (rsel_tar_lun_id >> 8) & 7;
	dcb = find_dcb(acb, id, lun);
	if (!dcb) {
		dprintkl(KERN_ERR, "reselect: From non existent device "
			"<%02i-%i>\n", id, lun);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		return;
	}
	acb->active_dcb = dcb;

	if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
		dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
			"disconnection? <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);

	if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
		/* Tagged queueing: the real srb is only known once the tag
		 * message arrives, so park the temporary srb here for now */
		srb = acb->tmp_srb;
		dcb->active_srb = srb;
	} else {
		/* There can be only one! */
		srb = dcb->active_srb;
		if (!srb || !(srb->state & SRB_DISCONNECT)) {
			/*
			 * abort command
			 */
			dprintkl(KERN_DEBUG,
				"reselect: w/o disconnected cmds <%02i-%i>\n",
				dcb->target_id, dcb->target_lun);
			srb = acb->tmp_srb;
			srb->state = SRB_UNEXPECT_RESEL;
			dcb->active_srb = srb;
			enable_msgout_abort(acb, srb);
		} else {
			if (dcb->flag & ABORT_DEV_) {
				/*srb->state = SRB_ABORT_SENT; */
				enable_msgout_abort(acb, srb);
			} else
				srb->state = SRB_DATA_XFER;
		}
	}
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	/* Program HA ID, target ID, period and offset */
	dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);	/* host ID */
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);	/* target ID */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);	/* offset */
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);	/* sync period, wide */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
  2866. static inline u8 tagq_blacklist(char *name)
  2867. {
  2868. #ifndef DC395x_NO_TAGQ
  2869. #if 0
  2870. u8 i;
  2871. for (i = 0; i < BADDEVCNT; i++)
  2872. if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
  2873. return 1;
  2874. #endif
  2875. return 0;
  2876. #else
  2877. return 1;
  2878. #endif
  2879. }
  2880. static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
  2881. {
  2882. /* Check for SCSI format (ANSI and Response data format) */
  2883. if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
  2884. if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
  2885. && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
  2886. /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
  2887. /* ((dcb->dev_type == TYPE_DISK)
  2888. || (dcb->dev_type == TYPE_MOD)) && */
  2889. !tagq_blacklist(((char *)ptr) + 8)) {
  2890. if (dcb->max_command == 1)
  2891. dcb->max_command =
  2892. dcb->acb->tag_max_num;
  2893. dcb->sync_mode |= EN_TAG_QUEUEING;
  2894. /*dcb->tag_mask = 0; */
  2895. } else
  2896. dcb->max_command = 1;
  2897. }
  2898. }
  2899. static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2900. struct ScsiInqData *ptr)
  2901. {
  2902. u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
  2903. dcb->dev_type = bval1;
  2904. /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */
  2905. disc_tagq_set(dcb, ptr);
  2906. }
  2907. /* unmap mapped pci regions from SRB */
  2908. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  2909. {
  2910. struct scsi_cmnd *cmd = srb->cmd;
  2911. enum dma_data_direction dir = cmd->sc_data_direction;
  2912. if (cmd->use_sg && dir != PCI_DMA_NONE) {
  2913. /* unmap DC395x SG list */
  2914. dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
  2915. srb->sg_bus_addr, SEGMENTX_LEN);
  2916. pci_unmap_single(acb->dev, srb->sg_bus_addr,
  2917. SEGMENTX_LEN,
  2918. PCI_DMA_TODEVICE);
  2919. dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
  2920. cmd->use_sg, cmd->request_buffer);
  2921. /* unmap the sg segments */
  2922. pci_unmap_sg(acb->dev,
  2923. (struct scatterlist *)cmd->request_buffer,
  2924. cmd->use_sg, dir);
  2925. } else if (cmd->request_buffer && dir != PCI_DMA_NONE) {
  2926. dprintkdbg(DBG_SG, "pci_unmap_srb: buffer=%08x(%05x)\n",
  2927. srb->segment_x[0].address, cmd->request_bufflen);
  2928. pci_unmap_single(acb->dev, srb->segment_x[0].address,
  2929. cmd->request_bufflen, dir);
  2930. }
  2931. }
  2932. /* unmap mapped pci sense buffer from SRB */
  2933. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  2934. struct ScsiReqBlk *srb)
  2935. {
  2936. if (!(srb->flag & AUTO_REQSENSE))
  2937. return;
  2938. /* Unmap sense buffer */
  2939. dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
  2940. srb->segment_x[0].address);
  2941. pci_unmap_single(acb->dev, srb->segment_x[0].address,
  2942. srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
  2943. /* Restore SG stuff */
  2944. srb->total_xfer_length = srb->xferred;
  2945. srb->segment_x[0].address =
  2946. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
  2947. srb->segment_x[0].length =
  2948. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
  2949. srb->virt_addr = srb->virt_addr_req;
  2950. }
/*
 * Complete execution of a SCSI command
 * Signal completion to the generic SCSI driver
 */
static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 tempcnt, status;
	struct scsi_cmnd *cmd = srb->cmd;
	struct ScsiInqData *ptr;
	enum dma_data_direction dir = cmd->sc_data_direction;

	/* Get a CPU-visible pointer to the start of the data buffer so the
	 * INQUIRY response can be examined further down */
	if (cmd->use_sg) {
		struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
		ptr = (struct ScsiInqData *)(page_address(sg->page) + sg->offset);
	} else {
		ptr = (struct ScsiInqData *)(cmd->request_buffer);
	}

	dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->pid,
		srb->cmd->device->id, srb->cmd->device->lun);
	dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p addr=%p\n",
		srb, cmd->use_sg, srb->sg_index, srb->sg_count,
		cmd->request_buffer, ptr);

	status = srb->target_status;
	if (srb->flag & AUTO_REQSENSE) {
		/* This completion is for the REQUEST SENSE the driver issued
		 * itself after a CHECK CONDITION (see request_sense()) */
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
		pci_unmap_srb_sense(acb, srb);
		/*
		 ** target status..........................
		 */
		srb->flag &= ~AUTO_REQSENSE;
		srb->adapter_status = 0;
		srb->target_status = CHECK_CONDITION << 1;
		if (debug_enabled(DBG_1)) {
			/* dump the sense key for the common error classes */
			switch (cmd->sense_buffer[2] & 0x0f) {
			case NOT_READY:
				dprintkl(KERN_DEBUG,
				     "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case UNIT_ATTENTION:
				dprintkl(KERN_DEBUG,
				     "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case ILLEGAL_REQUEST:
				dprintkl(KERN_DEBUG,
				     "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case MEDIUM_ERROR:
				dprintkl(KERN_DEBUG,
				     "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case HARDWARE_ERROR:
				dprintkl(KERN_DEBUG,
				     "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			}
			/* additional sense bytes present? (byte 7 is the
			 * additional sense length) */
			if (cmd->sense_buffer[7] >= 6)
				printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
					"(0x%08x 0x%08x)\n",
					cmd->sense_buffer[2], cmd->sense_buffer[12],
					cmd->sense_buffer[13],
					*((unsigned int *)(cmd->sense_buffer + 3)),
					*((unsigned int *)(cmd->sense_buffer + 8)));
			else
				printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
					cmd->sense_buffer[2],
					*((unsigned int *)(cmd->sense_buffer + 3)));
		}
		/* 'status' is the status of the REQUEST SENSE command itself;
		 * CHECK CONDITION on that means the target is unusable */
		if (status == (CHECK_CONDITION << 1)) {
			cmd->result = DID_BAD_TARGET << 16;
			goto ckc_e;
		}
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");

		/* NOTE(review): both branches below build an identical result;
		 * the intended underflow distinction appears lost — confirm
		 * against history before "fixing" */
		if (srb->total_xfer_length
		    && srb->total_xfer_length >= cmd->underflow)
			cmd->result =
			    MK_RES_LNX(DRIVER_SENSE, DID_OK,
				       srb->end_message, CHECK_CONDITION);
		/*SET_RES_DID(cmd->result,DID_OK) */
		else
			cmd->result =
			    MK_RES_LNX(DRIVER_SENSE, DID_OK,
				       srb->end_message, CHECK_CONDITION);

		goto ckc_e;
	}

	/*************************************************************/
	if (status) {
		/*
		 * target status..........................
		 */
		if (status_byte(status) == CHECK_CONDITION) {
			/* fetch sense data; completion continues when the
			 * REQUEST SENSE finishes (AUTO_REQSENSE path above) */
			request_sense(acb, dcb, srb);
			return;
		} else if (status_byte(status) == QUEUE_FULL) {
			/* target queue is full: shrink our queue depth and
			 * retry this command via the waiting queue */
			tempcnt = (u8)list_size(&dcb->srb_going_list);
			dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
			     dcb->target_id, dcb->target_lun, tempcnt);
			if (tempcnt > 1)
				tempcnt--;
			dcb->max_command = tempcnt;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);
			srb->adapter_status = 0;
			srb->target_status = 0;
			return;
		} else if (status == SCSI_STAT_SEL_TIMEOUT) {
			/* nobody home at that id/lun */
			srb->adapter_status = H_SEL_TIMEOUT;
			srb->target_status = 0;
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			/* any other target status is reported as-is */
			srb->adapter_status = 0;
			SET_RES_DID(cmd->result, DID_ERROR);
			SET_RES_MSG(cmd->result, srb->end_message);
			SET_RES_TARGET(cmd->result, status);
		}
	} else {
		/*
		 ** process initiator status..........................
		 */
		status = srb->adapter_status;
		if (status & H_OVER_UNDER_RUN) {
			srb->target_status = 0;
			SET_RES_DID(cmd->result, DID_OK);
			SET_RES_MSG(cmd->result, srb->end_message);
		} else if (srb->status & PARITY_ERROR) {
			SET_RES_DID(cmd->result, DID_PARITY);
			SET_RES_MSG(cmd->result, srb->end_message);
		} else {	/* No error */
			srb->adapter_status = 0;
			srb->target_status = 0;
			SET_RES_DID(cmd->result, DID_OK);
		}
	}

	/* make the DMA'd data visible to the CPU before inspecting it */
	if (dir != PCI_DMA_NONE) {
		if (cmd->use_sg)
			pci_dma_sync_sg_for_cpu(acb->dev,
					(struct scatterlist *)cmd->
					request_buffer, cmd->use_sg, dir);
		else if (cmd->request_buffer)
			pci_dma_sync_single_for_cpu(acb->dev,
					    srb->segment_x[0].address,
					    cmd->request_bufflen, dir);
	}

	/* cache byte 7 (capability flags) of a successful standard INQUIRY
	 * response — used later for tagged queueing decisions */
	if ((cmd->result & RES_DID) == 0 && cmd->cmnd[0] == INQUIRY
	    && cmd->cmnd[2] == 0 && cmd->request_bufflen >= 8
	    && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
		dcb->inquiry7 = ptr->Flags;

	/* Check Error Conditions */
      ckc_e:
	/*if( srb->cmd->cmnd[0] == INQUIRY && */
	/*  (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
	if (cmd->cmnd[0] == INQUIRY && (cmd->result == (DID_OK << 16)
					|| status_byte(cmd->
						       result) &
					CHECK_CONDITION)) {
		/* first INQUIRY completion for this device: evaluate the
		 * device type and tagged-queueing capability once */
		if (!dcb->init_tcq_flag) {
			add_dev(acb, dcb, ptr);
			dcb->init_tcq_flag = 1;
		}
	}

	/* Here is the info for Doug Gilbert's sg3 ... */
	cmd->resid = srb->total_xfer_length;
	/* This may be interpreted by sb. or not ... */
	cmd->SCp.this_residual = srb->total_xfer_length;
	cmd->SCp.buffers_residual = 0;
	if (debug_enabled(DBG_KG)) {
		if (srb->total_xfer_length)
			dprintkdbg(DBG_KG, "srb_done: (pid#%li) <%02i-%i> "
				"cmnd=0x%02x Missed %i bytes\n",
				cmd->pid, cmd->device->id, cmd->device->lun,
				cmd->cmnd[0], srb->total_xfer_length);
	}

	srb_going_remove(dcb, srb);
	/* Add to free list */
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
	else {
		dprintkdbg(DBG_0, "srb_done: (pid#%li) done result=0x%08x\n",
			cmd->pid, cmd->result);
		srb_free_insert(acb, srb);
	}
	pci_unmap_srb(acb, srb);

	/* hand the command back to the midlayer and kick off the next one */
	cmd->scsi_done(cmd);
	waiting_process_next(acb);
}
  3146. /* abort all cmds in our queues */
  3147. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
  3148. struct scsi_cmnd *cmd, u8 force)
  3149. {
  3150. struct DeviceCtlBlk *dcb;
  3151. dprintkl(KERN_INFO, "doing_srb_done: pids ");
  3152. list_for_each_entry(dcb, &acb->dcb_list, list) {
  3153. struct ScsiReqBlk *srb;
  3154. struct ScsiReqBlk *tmp;
  3155. struct scsi_cmnd *p;
  3156. list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
  3157. enum dma_data_direction dir;
  3158. int result;
  3159. p = srb->cmd;
  3160. dir = p->sc_data_direction;
  3161. result = MK_RES(0, did_flag, 0, 0);
  3162. printk("G:%li(%02i-%i) ", p->pid,
  3163. p->device->id, p->device->lun);
  3164. srb_going_remove(dcb, srb);
  3165. free_tag(dcb, srb);
  3166. srb_free_insert(acb, srb);
  3167. p->result = result;
  3168. pci_unmap_srb_sense(acb, srb);
  3169. pci_unmap_srb(acb, srb);
  3170. if (force) {
  3171. /* For new EH, we normally don't need to give commands back,
  3172. * as they all complete or all time out */
  3173. p->scsi_done(p);
  3174. }
  3175. }
  3176. if (!list_empty(&dcb->srb_going_list))
  3177. dprintkl(KERN_DEBUG,
  3178. "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
  3179. dcb->target_id, dcb->target_lun);
  3180. if (dcb->tag_mask)
  3181. dprintkl(KERN_DEBUG,
  3182. "tag_mask for <%02i-%i> should be empty, is %08x!\n",
  3183. dcb->target_id, dcb->target_lun,
  3184. dcb->tag_mask);
  3185. /* Waiting queue */
  3186. list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
  3187. int result;
  3188. p = srb->cmd;
  3189. result = MK_RES(0, did_flag, 0, 0);
  3190. printk("W:%li<%02i-%i>", p->pid, p->device->id,
  3191. p->device->lun);
  3192. srb_waiting_remove(dcb, srb);
  3193. srb_free_insert(acb, srb);
  3194. p->result = result;
  3195. pci_unmap_srb_sense(acb, srb);
  3196. pci_unmap_srb(acb, srb);
  3197. if (force) {
  3198. /* For new EH, we normally don't need to give commands back,
  3199. * as they all complete or all time out */
  3200. cmd->scsi_done(cmd);
  3201. }
  3202. }
  3203. if (!list_empty(&dcb->srb_waiting_list))
  3204. dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
  3205. list_size(&dcb->srb_waiting_list), dcb->target_id,
  3206. dcb->target_lun);
  3207. dcb->flag &= ~ABORT_DEV_;
  3208. }
  3209. printk("\n");
  3210. }
  3211. static void reset_scsi_bus(struct AdapterCtlBlk *acb)
  3212. {
  3213. dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
  3214. acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */
  3215. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
  3216. while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
  3217. /* nothing */;
  3218. }
  3219. static void set_basic_config(struct AdapterCtlBlk *acb)
  3220. {
  3221. u8 bval;
  3222. u16 wval;
  3223. DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
  3224. if (acb->config & HCC_PARITY)
  3225. bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
  3226. else
  3227. bval = PHASELATCH | INITIATOR | BLOCKRST;
  3228. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
  3229. /* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */
  3230. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03); /* was 0x13: default */
  3231. /* program Host ID */
  3232. DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
  3233. /* set ansynchronous transfer */
  3234. DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
  3235. /* Turn LED control off */
  3236. wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
  3237. DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
  3238. /* DMA config */
  3239. wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
  3240. wval |=
  3241. DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ;
  3242. DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
  3243. /* Clear pending interrupt status */
  3244. DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
  3245. /* Enable SCSI interrupt */
  3246. DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
  3247. DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
  3248. /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */
  3249. );
  3250. }
/*
 * scsi_reset_detect - a SCSI bus reset was observed on the wire.
 *
 * Resets the chip modules and reprograms the basic configuration.
 * If we triggered the reset ourselves (RESET_DEV set), just note that
 * it completed; otherwise flush every queued command with DID_RESET
 * and restart processing.
 */
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
	dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
	/* delay half a second */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/* reset both the SCSI and DMA modules of the chip */
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
	udelay(500);
	/* Maybe we locked up the bus? Then lets wait even longer ... */
	acb->scsi_host->last_reset =
	    jiffies + 5 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);
	/*1.25 */
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */

	if (acb->acb_flag & RESET_DEV) {	/* RESET_DETECT, RESET_DONE, RESET_DEV */
		/* the reset was requested by us — just mark it done */
		acb->acb_flag |= RESET_DONE;
	} else {
		/* reset came from outside: fail all in-flight commands
		 * with DID_RESET and start over */
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);
		/*DC395x_RecoverSRB( acb ); */
		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}
/*
 * request_sense - issue an internal REQUEST SENSE for a command that
 * returned CHECK CONDITION.
 *
 * The srb is reused for the sense transfer: its current first SG entry
 * and residual length are stashed in the (otherwise unused) last SG
 * slot and in srb->xferred, and are restored afterwards by
 * pci_unmap_srb_sense().
 */
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (pid#%li) <%02i-%i>\n",
		cmd->pid, cmd->device->id, cmd->device->lun);

	/* flag tells srb_done() that this completion carries sense data */
	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;

	/* KG: Can this prevent crap sense data ? */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	/* Save some data */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* srb->segment_x : a one entry of S/G list table */
	srb->total_xfer_length = sizeof(cmd->sense_buffer);
	srb->virt_addr_req = srb->virt_addr;
	srb->virt_addr = cmd->sense_buffer;
	srb->segment_x[0].length = sizeof(cmd->sense_buffer);

	/* Map sense buffer */
	srb->segment_x[0].address =
	    pci_map_single(acb->dev, cmd->sense_buffer,
			   sizeof(cmd->sense_buffer), PCI_DMA_FROMDEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
		cmd->sense_buffer, srb->segment_x[0].address,
		sizeof(cmd->sense_buffer));
	srb->sg_count = 1;
	srb->sg_index = 0;

	if (start_scsi(acb, dcb, srb)) {	/* Should only happen, if sb. else grabs the bus */
		/* couldn't get the bus: queue the srb again and retry later
		 * via the waiting timer */
		dprintkl(KERN_DEBUG,
			"request_sense: (pid#%li) failed <%02i-%i>\n",
			srb->cmd->pid, dcb->target_id, dcb->target_lun);
		srb_going_to_waiting_move(dcb, srb);
		waiting_set_timer(acb, HZ / 100);
	}
}
  3320. /**
  3321. * device_alloc - Allocate a new device instance. This create the
  3322. * devices instance and sets up all the data items. The adapter
  3323. * instance is required to obtain confiuration information for this
  3324. * device. This does *not* add this device to the adapters device
  3325. * list.
  3326. *
  3327. * @acb: The adapter to obtain configuration information from.
  3328. * @target: The target for the new device.
  3329. * @lun: The lun for the new device.
  3330. *
  3331. * Return the new device if succesfull or NULL on failure.
  3332. **/
  3333. static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
  3334. u8 target, u8 lun)
  3335. {
  3336. struct NvRamType *eeprom = &acb->eeprom;
  3337. u8 period_index = eeprom->target[target].period & 0x07;
  3338. struct DeviceCtlBlk *dcb;
  3339. dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
  3340. dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
  3341. if (!dcb)
  3342. return NULL;
  3343. dcb->acb = NULL;
  3344. INIT_LIST_HEAD(&dcb->srb_going_list);
  3345. INIT_LIST_HEAD(&dcb->srb_waiting_list);
  3346. dcb->active_srb = NULL;
  3347. dcb->tag_mask = 0;
  3348. dcb->max_command = 1;
  3349. dcb->target_id = target;
  3350. dcb->target_lun = lun;
  3351. #ifndef DC395x_NO_DISCONNECT
  3352. dcb->identify_msg =
  3353. IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
  3354. #else
  3355. dcb->identify_msg = IDENTIFY(0, lun);
  3356. #endif
  3357. dcb->dev_mode = eeprom->target[target].cfg0;
  3358. dcb->inquiry7 = 0;
  3359. dcb->sync_mode = 0;
  3360. dcb->min_nego_period = clock_period[period_index];
  3361. dcb->sync_period = 0;
  3362. dcb->sync_offset = 0;
  3363. dcb->flag = 0;
  3364. #ifndef DC395x_NO_WIDE
  3365. if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
  3366. && (acb->config & HCC_WIDE_CARD))
  3367. dcb->sync_mode |= WIDE_NEGO_ENABLE;
  3368. #endif
  3369. #ifndef DC395x_NO_SYNC
  3370. if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
  3371. if (!(lun) || current_sync_offset)
  3372. dcb->sync_mode |= SYNC_NEGO_ENABLE;
  3373. #endif
  3374. if (dcb->target_lun != 0) {
  3375. /* Copy settings */
  3376. struct DeviceCtlBlk *p;
  3377. list_for_each_entry(p, &acb->dcb_list, list)
  3378. if (p->target_id == dcb->target_id)
  3379. break;
  3380. dprintkdbg(DBG_1,
  3381. "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
  3382. dcb->target_id, dcb->target_lun,
  3383. p->target_id, p->target_lun);
  3384. dcb->sync_mode = p->sync_mode;
  3385. dcb->sync_period = p->sync_period;
  3386. dcb->min_nego_period = p->min_nego_period;
  3387. dcb->sync_offset = p->sync_offset;
  3388. dcb->inquiry7 = p->inquiry7;
  3389. }
  3390. return dcb;
  3391. }
  3392. /**
  3393. * adapter_add_device - Adds the device instance to the adaptor instance.
  3394. *
  3395. * @acb: The adapter device to be updated
  3396. * @dcb: A newly created and intialised device instance to add.
  3397. **/
  3398. static void adapter_add_device(struct AdapterCtlBlk *acb,
  3399. struct DeviceCtlBlk *dcb)
  3400. {
  3401. /* backpointer to adapter */
  3402. dcb->acb = acb;
  3403. /* set run_robin to this device if it is currently empty */
  3404. if (list_empty(&acb->dcb_list))
  3405. acb->dcb_run_robin = dcb;
  3406. /* add device to list */
  3407. list_add_tail(&dcb->list, &acb->dcb_list);
  3408. /* update device maps */
  3409. acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
  3410. acb->children[dcb->target_id][dcb->target_lun] = dcb;
  3411. }
  3412. /**
  3413. * adapter_remove_device - Removes the device instance from the adaptor
  3414. * instance. The device instance is not check in any way or freed by this.
  3415. * The caller is expected to take care of that. This will simply remove the
  3416. * device from the adapters data strcutures.
  3417. *
  3418. * @acb: The adapter device to be updated
  3419. * @dcb: A device that has previously been added to the adapter.
  3420. **/
  3421. static void adapter_remove_device(struct AdapterCtlBlk *acb,
  3422. struct DeviceCtlBlk *dcb)
  3423. {
  3424. struct DeviceCtlBlk *i;
  3425. struct DeviceCtlBlk *tmp;
  3426. dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
  3427. dcb->target_id, dcb->target_lun);
  3428. /* fix up any pointers to this device that we have in the adapter */
  3429. if (acb->active_dcb == dcb)
  3430. acb->active_dcb = NULL;
  3431. if (acb->dcb_run_robin == dcb)
  3432. acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
  3433. /* unlink from list */
  3434. list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
  3435. if (dcb == i) {
  3436. list_del(&i->list);
  3437. break;
  3438. }
  3439. /* clear map and children */
  3440. acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
  3441. acb->children[dcb->target_id][dcb->target_lun] = NULL;
  3442. dcb->acb = NULL;
  3443. }
  3444. /**
  3445. * adapter_remove_and_free_device - Removes a single device from the adapter
  3446. * and then frees the device information.
  3447. *
  3448. * @acb: The adapter device to be updated
  3449. * @dcb: A device that has previously been added to the adapter.
  3450. */
  3451. static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
  3452. struct DeviceCtlBlk *dcb)
  3453. {
  3454. if (list_size(&dcb->srb_going_list) > 1) {
  3455. dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
  3456. "Won't remove because of %i active requests.\n",
  3457. dcb->target_id, dcb->target_lun,
  3458. list_size(&dcb->srb_going_list));
  3459. return;
  3460. }
  3461. adapter_remove_device(acb, dcb);
  3462. kfree(dcb);
  3463. }
  3464. /**
  3465. * adapter_remove_and_free_all_devices - Removes and frees all of the
  3466. * devices associated with the specified adapter.
  3467. *
  3468. * @acb: The adapter from which all devices should be removed.
  3469. **/
  3470. static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
  3471. {
  3472. struct DeviceCtlBlk *dcb;
  3473. struct DeviceCtlBlk *tmp;
  3474. dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
  3475. list_size(&acb->dcb_list));
  3476. list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
  3477. adapter_remove_and_free_device(acb, dcb);
  3478. }
  3479. /**
  3480. * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
  3481. * scsi device that we need to deal with. We allocate a new device and then
  3482. * insert that device into the adapters device list.
  3483. *
  3484. * @scsi_device: The new scsi device that we need to handle.
  3485. **/
  3486. static int dc395x_slave_alloc(struct scsi_device *scsi_device)
  3487. {
  3488. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3489. struct DeviceCtlBlk *dcb;
  3490. dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
  3491. if (!dcb)
  3492. return -ENOMEM;
  3493. adapter_add_device(acb, dcb);
  3494. return 0;
  3495. }
  3496. /**
  3497. * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
  3498. * device that is going away.
  3499. *
  3500. * @scsi_device: The new scsi device that we need to handle.
  3501. **/
  3502. static void dc395x_slave_destroy(struct scsi_device *scsi_device)
  3503. {
  3504. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3505. struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
  3506. if (dcb)
  3507. adapter_remove_and_free_device(acb, dcb);
  3508. }
  3509. /**
  3510. * trms1040_wait_30us: wait for 30 us
  3511. *
  3512. * Waits for 30us (using the chip by the looks of it..)
  3513. *
  3514. * @io_port: base I/O address
  3515. **/
  3516. static void __devinit trms1040_wait_30us(unsigned long io_port)
  3517. {
  3518. /* ScsiPortStallExecution(30); wait 30 us */
  3519. outb(5, io_port + TRM_S1040_GEN_TIMER);
  3520. while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
  3521. /* nothing */ ;
  3522. }
  3523. /**
  3524. * trms1040_write_cmd - write the secified command and address to
  3525. * chip
  3526. *
  3527. * @io_port: base I/O address
  3528. * @cmd: SB + op code (command) to send
  3529. * @addr: address to send
  3530. **/
  3531. static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
  3532. {
  3533. int i;
  3534. u8 send_data;
  3535. /* program SB + OP code */
  3536. for (i = 0; i < 3; i++, cmd <<= 1) {
  3537. send_data = NVR_SELECT;
  3538. if (cmd & 0x04) /* Start from bit 2 */
  3539. send_data |= NVR_BITOUT;
  3540. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3541. trms1040_wait_30us(io_port);
  3542. outb((send_data | NVR_CLOCK),
  3543. io_port + TRM_S1040_GEN_NVRAM);
  3544. trms1040_wait_30us(io_port);
  3545. }
  3546. /* send address */
  3547. for (i = 0; i < 7; i++, addr <<= 1) {
  3548. send_data = NVR_SELECT;
  3549. if (addr & 0x40) /* Start from bit 6 */
  3550. send_data |= NVR_BITOUT;
  3551. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3552. trms1040_wait_30us(io_port);
  3553. outb((send_data | NVR_CLOCK),
  3554. io_port + TRM_S1040_GEN_NVRAM);
  3555. trms1040_wait_30us(io_port);
  3556. }
  3557. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3558. trms1040_wait_30us(io_port);
  3559. }
/**
 * trms1040_set_data - store a single byte in the eeprom
 *
 * Called from write all to write a single byte into the SEEPROM.
 * The byte is clocked out one bit at a time, MSB first, then the
 * routine polls the part until it reports the write has completed.
 *
 * @io_port: base I/O address
 * @addr: offset into EEPROM
 * @byte: the byte to write
 **/
static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
{
	int i;
	u8 send_data;

	/* Send write command & address */
	trms1040_write_cmd(io_port, 0x05, addr);

	/* Write data, one bit per clock pulse, MSB first */
	for (i = 0; i < 8; i++, byte <<= 1) {
		send_data = NVR_SELECT;
		if (byte & 0x80)	/* Start from bit 7 */
			send_data |= NVR_BITOUT;

		/* present the data bit, then raise the clock to latch it */
		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Disable chip select (deselect pulse starts the internal write) */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Re-select so we can poll the ready/busy status bit */
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/*
	 * Wait for write ready: clock until the part drives NVR_BITIN high.
	 * NOTE(review): there is no timeout here — a dead or absent EEPROM
	 * would hang this loop forever.  Confirm this is acceptable for the
	 * probe path.
	 */
	while (1) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
			break;
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
}
/**
 * trms1040_write_all - write 128 bytes to the eeprom
 *
 * Write the supplied 128 bytes to the chip's SEEPROM, bracketed by a
 * write-enable / write-disable command pair.
 *
 * @eeprom: the data to write
 * @io_port: the base io port
 **/
static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;	/* byte view of the NvRam structure */
	u8 addr;

	/* Enable SEEPROM access in the chip's general control register */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/*
	 * write enable (cmd 0x04, addr 0xFF — presumably the EWEN opcode of
	 * a 93C46-style serial EEPROM; verify against the part's datasheet)
	 */
	trms1040_write_cmd(io_port, 0x04, 0xFF);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* write all 128 bytes, one byte per command */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		trms1040_set_data(io_port, addr, *b_eeprom);

	/* write disable (cmd 0x04, addr 0x00) */
	trms1040_write_cmd(io_port, 0x04, 0x00);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Disable SEEPROM access */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}
/**
 * trms1040_get_data - get a single byte from the eeprom
 *
 * Called from read all to read a single byte from the SEEPROM,
 * which is done one bit at a time, MSB first.
 *
 * @io_port: base I/O address
 * @addr: offset into SEEPROM
 *
 * Returns the byte read.
 **/
static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
{
	int i;
	u8 read_byte;
	u8 result = 0;

	/* Send read command & address */
	trms1040_write_cmd(io_port, 0x06, addr);

	/* read data: pulse the clock and sample one bit per pulse */
	for (i = 0; i < 8; i++) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);

		/* Get data bit while falling edge */
		read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
		result <<= 1;
		if (read_byte & NVR_BITIN)
			result |= 1;

		trms1040_wait_30us(io_port);
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	return result;
}
/**
 * trms1040_read_all - read all bytes from the eeprom
 *
 * Read the 128 bytes from the SEEPROM into the supplied structure.
 *
 * @eeprom: where to store the data
 * @io_port: the base io port
 **/
static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;	/* byte view of the NvRam structure */
	u8 addr;

	/* Enable SEEPROM access in the chip's general control register */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/* read all 128 bytes, one byte per command */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		*b_eeprom = trms1040_get_data(io_port, addr);

	/* Disable SEEPROM access */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}
/**
 * check_eeprom - get and check contents of the eeprom
 *
 * Read seeprom 128 bytes into the memory provided in eeprom.
 * Checks the checksum and if it's not correct it uses a set of default
 * values.
 *
 * @eeprom: caller allocated structure to read the eeprom data into
 * @io_port: io port to read from
 **/
static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
	/* The 128-byte image is viewed as u16[64] for checksumming and as
	 * u32 words when filling in defaults. */
	u16 *w_eeprom = (u16 *)eeprom;
	u16 w_addr;
	u16 cksum;
	u32 d_addr;
	u32 *d_eeprom;

	trms1040_read_all(eeprom, io_port);	/* read eeprom */

	/* Sum all 64 16-bit words; a valid image sums to the magic 0x1234 */
	cksum = 0;
	for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
	     w_addr++, w_eeprom++)
		cksum += *w_eeprom;
	if (cksum != 0x1234) {
		/*
		 * Checksum is wrong.
		 * Load a set of defaults into the eeprom buffer
		 */
		dprintkl(KERN_WARNING,
			"EEProm checksum error: using default values and options.\n");
		eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->sub_sys_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->sub_class = 0x00;
		eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->device_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->reserved = 0x00;

		/* One packed 32-bit default per target (16 targets) */
		for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
		     d_addr < 16; d_addr++, d_eeprom++)
			*d_eeprom = 0x00000077;	/* cfg3,cfg2,period,cfg0 */

		/* d_eeprom now points just past the target array; the next
		 * two words cover the adapter-wide fields */
		*d_eeprom++ = 0x04000F07;	/* max_tag,delay_time,channel_cfg,scsi_id */
		*d_eeprom++ = 0x00000015;	/* reserved1,boot_lun,boot_target,reserved0 */
		for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
			*d_eeprom = 0x00;

		/* Now load defaults (maybe set by boot/module params) */
		set_safe_settings();
		fix_settings();
		eeprom_override(eeprom);

		/* Recompute the checksum word so the image sums to 0x1234;
		 * the final w_eeprom points at word 63, the cksum slot */
		eeprom->cksum = 0x00;
		for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
		     w_addr < 63; w_addr++, w_eeprom++)
			cksum += *w_eeprom;

		*w_eeprom = 0x1234 - cksum;
		trms1040_write_all(eeprom, io_port);

		eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
	} else {
		set_safe_settings();
		eeprom_index_to_delay(eeprom);
		eeprom_override(eeprom);
	}
}
/**
 * print_eeprom_settings - output the eeprom settings
 * to the kernel log so people can see what they were.
 *
 * Only target 0's period/config is shown, plus the adapter-wide
 * settings (mode, tag depth, reset delay).
 *
 * @eeprom: The eeprom data structure to show details for.
 **/
static void __devinit print_eeprom_settings(struct NvRamType *eeprom)
{
	dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
		eeprom->scsi_id,
		eeprom->target[0].period,
		clock_speed[eeprom->target[0].period] / 10,
		clock_speed[eeprom->target[0].period] % 10,
		eeprom->target[0].cfg0);
	/* max_tag is stored as a power-of-two exponent, hence 1 << max_tag */
	dprintkl(KERN_INFO, "               AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
		eeprom->channel_cfg, eeprom->max_tag,
		1 << eeprom->max_tag, eeprom->delay_time);
}
  3774. /* Free SG tables */
  3775. static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
  3776. {
  3777. int i;
  3778. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3779. for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
  3780. kfree(acb->srb_array[i].segment_x);
  3781. }
  3782. /*
  3783. * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
  3784. * should never cross a page boundary */
  3785. static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
  3786. {
  3787. const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
  3788. *SEGMENTX_LEN;
  3789. int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
  3790. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3791. int srb_idx = 0;
  3792. unsigned i = 0;
  3793. struct SGentry *ptr;
  3794. for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
  3795. acb->srb_array[i].segment_x = NULL;
  3796. dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
  3797. while (pages--) {
  3798. ptr = (struct SGentry *)kmalloc(PAGE_SIZE, GFP_KERNEL);
  3799. if (!ptr) {
  3800. adapter_sg_tables_free(acb);
  3801. return 1;
  3802. }
  3803. dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
  3804. PAGE_SIZE, ptr, srb_idx);
  3805. i = 0;
  3806. while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
  3807. acb->srb_array[srb_idx++].segment_x =
  3808. ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
  3809. }
  3810. if (i < srbs_per_page)
  3811. acb->srb.segment_x =
  3812. ptr + (i * DC395x_MAX_SG_LISTENTRY);
  3813. else
  3814. dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
  3815. return 0;
  3816. }
/**
 * adapter_print_config - print adapter connection and termination
 * config
 *
 * The io port in the adapter needs to have been set before calling
 * this function.
 *
 * @acb: The adapter to print the information for.
 **/
static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
{
	u8 bval;

	/* connector status bits are active-low in GEN_STATUS */
	bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
	dprintkl(KERN_INFO, "%sConnectors: ",
		((bval & WIDESCSI) ? "(Wide) " : ""));
	if (!(bval & CON5068))
		printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
	if (!(bval & CON68))
		printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
	if (!(bval & CON50))
		printk("int50 ");
	/* all three "connected" at once would be inconsistent hardware state */
	if ((bval & (CON5068 | CON50 | CON68)) ==
	    0 /*(CON5068 | CON50 | CON68) */ )
		printk(" Oops! (All 3?) ");

	bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
	printk(" Termination: ");
	if (bval & DIS_TERM)
		printk("Disabled\n");
	else {
		if (bval & AUTOTERM)
			printk("Auto ");
		if (bval & LOW8TERM)
			printk("Low ");
		if (bval & UP8TERM)
			printk("High ");
		printk("\n");
	}
}
/**
 * adapter_init_params - Initialize the various parameters in the
 * adapter structure. Note that the pointer to the scsi_host is set
 * early (when this instance is created) and the io_port and irq
 * values are set later after they have been reserved. This just gets
 * everything set to a good starting position.
 *
 * The eeprom structure in the adapter needs to have been set before
 * calling this function.
 *
 * @acb: The adapter to initialize.
 **/
static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;
	int i;

	/* NOTE: acb->scsi_host is set at scsi_host/acb creation time */
	/* NOTE: acb->io_port_base is set at port registration time */
	/* NOTE: acb->io_port_len is set at port registration time */

	INIT_LIST_HEAD(&acb->dcb_list);
	acb->dcb_run_robin = NULL;
	acb->active_dcb = NULL;

	INIT_LIST_HEAD(&acb->srb_free_list);
	/* temp SRB for Q tag used or abort command used */
	acb->tmp_srb = &acb->srb;
	init_timer(&acb->waiting_timer);
	init_timer(&acb->selto_timer);

	acb->srb_count = DC395x_MAX_SRB_CNT;

	acb->sel_timeout = DC395x_SEL_TIMEOUT;	/* timeout=250ms */
	/* NOTE: acb->irq_level is set at IRQ registration time */

	/* eeprom stores the tag depth as a power-of-two exponent; cap at 30 */
	acb->tag_max_num = 1 << eeprom->max_tag;
	if (acb->tag_max_num > 30)
		acb->tag_max_num = 30;

	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	acb->gmode2 = eeprom->channel_cfg;
	acb->config = 0;	/* NOTE: actually set in adapter_init_chip */

	if (eeprom->channel_cfg & NAC_SCANLUN)
		acb->lun_chk = 1;
	acb->scan_devices = 1;

	acb->scsi_host->this_id = eeprom->scsi_id;
	acb->hostid_bit = (1 << acb->scsi_host->this_id);

	for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
		acb->dcb_map[i] = 0;

	acb->msg_len = 0;

	/* link static array of srbs into the srb free list */
	/* NOTE(review): only srb_count - 1 SRBs go on the free list;
	 * confirm the last slot is deliberately held back */
	for (i = 0; i < acb->srb_count - 1; i++)
		srb_free_insert(acb, &acb->srb_array[i]);
}
/**
 * adapter_init_scsi_host - Initialize the scsi host instance based on
 * values that we have already stored in the adapter instance. There's
 * some mention that a lot of these are deprecated, so we won't use
 * them (we'll use the ones in the adapter instance) but we'll fill
 * them in in case something else needs them.
 *
 * The eeprom structure, irq and io ports in the adapter need to have
 * been set before calling this function.
 *
 * @host: The scsi host instance to fill in the values for.
 **/
static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	struct NvRamType *eeprom = &acb->eeprom;

	host->max_cmd_len = 24;
	host->can_queue = DC395x_MAX_CMD_QUEUE;
	host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
	host->this_id = (int)eeprom->scsi_id;
	host->io_port = acb->io_port_base;
	host->n_io_port = acb->io_port_len;
	host->dma_channel = -1;		/* -1: no (ISA) DMA channel */
	host->unique_id = acb->io_port_base;
	host->irq = acb->irq_level;
	host->last_reset = jiffies;

	host->max_id = 16;
	/* if the adapter itself occupies the top ID, don't scan that ID */
	if (host->max_id - 1 == eeprom->scsi_id)
		host->max_id--;
#ifdef CONFIG_SCSI_MULTI_LUN
	/* multiple LUNs only when the eeprom scan-LUN option is set */
	if (eeprom->channel_cfg & NAC_SCANLUN)
		host->max_lun = 8;
	else
		host->max_lun = 1;
#else
	host->max_lun = 1;
#endif
}
/**
 * adapter_init_chip - Get the chip into a known state and figure out
 * some of the settings that apply to this adapter.
 *
 * The io port in the adapter needs to have been set before calling
 * this function. The config will be configured correctly on return.
 *
 * @acb: The adapter which we are to init.
 **/
static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;

	/* Mask all the interrupt */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);

	/* Reset SCSI module */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);

	/* Reset PCI/DMA module */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	udelay(20);

	/* program configuration 0 */
	acb->config = HCC_AUTOTERM | HCC_PARITY;
	if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
		acb->config |= HCC_WIDE_CARD;

	/* bus reset at power-on is an eeprom option */
	if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
		acb->config |= HCC_SCSI_RESET;

	if (acb->config & HCC_SCSI_RESET) {
		dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
		DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

		/*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
		/*spin_unlock_irq (&io_request_lock); */
		udelay(500);

		/* hold off new commands until the post-reset delay elapses */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;

		/*spin_lock_irq (&io_request_lock); */
	}
}
  3979. /**
  3980. * init_adapter - Grab the resource for the card, setup the adapter
  3981. * information, set the card into a known state, create the various
  3982. * tables etc etc. This basically gets all adapter information all up
  3983. * to date, intialised and gets the chip in sync with it.
  3984. *
  3985. * @host: This hosts adapter structure
  3986. * @io_port: The base I/O port
  3987. * @irq: IRQ
  3988. *
  3989. * Returns 0 if the initialization succeeds, any other value on
  3990. * failure.
  3991. **/
  3992. static int __devinit adapter_init(struct AdapterCtlBlk *acb,
  3993. unsigned long io_port, u32 io_port_len, unsigned int irq)
  3994. {
  3995. if (!request_region(io_port, io_port_len, DC395X_NAME)) {
  3996. dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
  3997. goto failed;
  3998. }
  3999. /* store port base to indicate we have registered it */
  4000. acb->io_port_base = io_port;
  4001. acb->io_port_len = io_port_len;
  4002. if (request_irq(irq, dc395x_interrupt, SA_SHIRQ, DC395X_NAME, acb)) {
  4003. /* release the region we just claimed */
  4004. dprintkl(KERN_INFO, "Failed to register IRQ\n");
  4005. goto failed;
  4006. }
  4007. /* store irq to indicate we have registered it */
  4008. acb->irq_level = irq;
  4009. /* get eeprom configuration information and command line settings etc */
  4010. check_eeprom(&acb->eeprom, io_port);
  4011. print_eeprom_settings(&acb->eeprom);
  4012. /* setup adapter control block */
  4013. adapter_init_params(acb);
  4014. /* display card connectors/termination settings */
  4015. adapter_print_config(acb);
  4016. if (adapter_sg_tables_alloc(acb)) {
  4017. dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
  4018. goto failed;
  4019. }
  4020. adapter_init_scsi_host(acb->scsi_host);
  4021. adapter_init_chip(acb);
  4022. set_basic_config(acb);
  4023. dprintkdbg(DBG_0,
  4024. "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
  4025. "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
  4026. acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
  4027. sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
  4028. return 0;
  4029. failed:
  4030. if (acb->irq_level)
  4031. free_irq(acb->irq_level, acb);
  4032. if (acb->io_port_base)
  4033. release_region(acb->io_port_base, acb->io_port_len);
  4034. adapter_sg_tables_free(acb);
  4035. return 1;
  4036. }
/**
 * adapter_uninit_chip - cleanly shut down the scsi controller chip,
 * stopping all operations and disabling interrupt generation on the
 * card.
 *
 * @acb: The adapter which we are to shutdown.
 **/
static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
{
	/* disable interrupts */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);

	/* reset the scsi bus (only if we asserted it at init time) */
	if (acb->config & HCC_SCSI_RESET)
		reset_scsi_bus(acb);

	/* clear any pending interrupt state by reading the status register */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
}
/**
 * adapter_uninit - Shut down the chip and release any resources that
 * we had allocated. Once this returns the adapter should not be used
 * anymore.
 *
 * @acb: The adapter which we are to un-initialize.
 **/
static void adapter_uninit(struct AdapterCtlBlk *acb)
{
	unsigned long flags;
	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* remove timers */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	adapter_uninit_chip(acb);
	adapter_remove_and_free_all_devices(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	/* irq_level/io_port_base are only set once successfully registered,
	 * so these guards skip resources that were never acquired */
	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);

	adapter_sg_tables_free(acb);
}
  4080. #undef SPRINTF
  4081. #define SPRINTF(args...) pos += sprintf(pos, args)
  4082. #undef YESNO
  4083. #define YESNO(YN) \
  4084. if (YN) SPRINTF(" Yes ");\
  4085. else SPRINTF(" No ")
/*
 * dc395x_proc_info - /proc read handler: dump adapter settings, the
 * per-device (DCB) negotiation state, and the waiting/going queues
 * into @buffer.  Write access (@inout true) is rejected with -EPERM.
 * Returns the number of bytes available from @offset, per the old
 * proc_info contract.
 */
static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
		char **start, off_t offset, int length, int inout)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	char *pos = buffer;	/* SPRINTF/YESNO append through 'pos' */
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	if (inout)		/* Has data been written to the file ? */
		return -EPERM;

	SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
	SPRINTF(" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	SPRINTF("SCSI Host Nr %i, ", host->host_no);
	SPRINTF("DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base);
	SPRINTF("irq_level 0x%04x, ", acb->irq_level);
	SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	SPRINTF("MaxID %i, MaxLUN %i, ", host->max_id, host->max_lun);
	SPRINTF("AdapterID %i\n", host->this_id);

	SPRINTF("tag_max_num %i", acb->tag_max_num);
	/*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
	SPRINTF(", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time);
	/*SPRINTF("\n"); */

	SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	SPRINTF
	    ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
	     acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
	     acb->dcb_map[6], acb->dcb_map[7]);
	SPRINTF
	    ("                      %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
	     acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
	     acb->dcb_map[14], acb->dcb_map[15]);

	/* one table row per attached device (DCB) */
	SPRINTF
	    ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		SPRINTF("%02i %02i  %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		/* sync_period low bits index the clock table; <<2 gives ns */
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			SPRINTF("  %03i ns ", nego_period);
		else
			SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			/* derive MHz (integer + one decimal) from the period */
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			SPRINTF("   %2i.%1i M     %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			SPRINTF("                 ");

		/* Add more info ... */
		SPRINTF("     %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		SPRINTF("Waiting queue timer running\n");
	else
		SPRINTF("\n");

	/* dump per-device waiting and going command queues */
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			SPRINTF("DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			SPRINTF(" %li", srb->cmd->pid);
		if (!list_empty(&dcb->srb_going_list))
			SPRINTF("\nDCB (%02i-%i): Going  : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			SPRINTF(" %li", srb->cmd->pid);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			SPRINTF("\n");
	}

	if (debug_enabled(DBG_1)) {
		SPRINTF("DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			SPRINTF("%p -> ", dcb);
		}
		SPRINTF("END\n");
	}

	*start = buffer + offset;
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	/* standard proc_info windowing on (offset, length) */
	if (pos - buffer < offset)
		return 0;
	else if (pos - buffer - offset < length)
		return pos - buffer - offset;
	else
		return length;
}
/* SCSI mid-layer entry points and static limits for this driver */
static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.proc_info = dc395x_proc_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.bios_param = dc395x_bios_param,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	.unchecked_isa_dma = 0,
	.use_clustering = DISABLE_CLUSTERING,
};
  4210. /**
  4211. * banner_display - Display banner on first instance of driver
  4212. * initialized.
  4213. **/
  4214. static void banner_display(void)
  4215. {
  4216. static int banner_done = 0;
  4217. if (!banner_done)
  4218. {
  4219. dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
  4220. banner_done = 1;
  4221. }
  4222. }
  4223. /**
  4224. * dc395x_init_one - Initialise a single instance of the adapter.
  4225. *
  4226. * The PCI layer will call this once for each instance of the adapter
  4227. * that it finds in the system. The pci_dev strcuture indicates which
  4228. * instance we are being called from.
  4229. *
  4230. * @dev: The PCI device to intialize.
  4231. * @id: Looks like a pointer to the entry in our pci device table
  4232. * that was actually matched by the PCI subsystem.
  4233. *
  4234. * Returns 0 on success, or an error code (-ve) on failure.
  4235. **/
  4236. static int __devinit dc395x_init_one(struct pci_dev *dev,
  4237. const struct pci_device_id *id)
  4238. {
  4239. struct Scsi_Host *scsi_host = NULL;
  4240. struct AdapterCtlBlk *acb = NULL;
  4241. unsigned long io_port_base;
  4242. unsigned int io_port_len;
  4243. unsigned int irq;
  4244. dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
  4245. banner_display();
  4246. if (pci_enable_device(dev))
  4247. {
  4248. dprintkl(KERN_INFO, "PCI Enable device failed.\n");
  4249. return -ENODEV;
  4250. }
  4251. io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
  4252. io_port_len = pci_resource_len(dev, 0);
  4253. irq = dev->irq;
  4254. dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);
  4255. /* allocate scsi host information (includes out adapter) */
  4256. scsi_host = scsi_host_alloc(&dc395x_driver_template,
  4257. sizeof(struct AdapterCtlBlk));
  4258. if (!scsi_host) {
  4259. dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
  4260. goto fail;
  4261. }
  4262. acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
  4263. acb->scsi_host = scsi_host;
  4264. acb->dev = dev;
  4265. /* initialise the adapter and everything we need */
  4266. if (adapter_init(acb, io_port_base, io_port_len, irq)) {
  4267. dprintkl(KERN_INFO, "adapter init failed\n");
  4268. goto fail;
  4269. }
  4270. pci_set_master(dev);
  4271. /* get the scsi mid level to scan for new devices on the bus */
  4272. if (scsi_add_host(scsi_host, &dev->dev)) {
  4273. dprintkl(KERN_ERR, "scsi_add_host failed\n");
  4274. goto fail;
  4275. }
  4276. pci_set_drvdata(dev, scsi_host);
  4277. scsi_scan_host(scsi_host);
  4278. return 0;
  4279. fail:
  4280. if (acb != NULL)
  4281. adapter_uninit(acb);
  4282. if (scsi_host != NULL)
  4283. scsi_host_put(scsi_host);
  4284. pci_disable_device(dev);
  4285. return -ENODEV;
  4286. }
/**
 * dc395x_remove_one - Called to remove a single instance of the
 * adapter.
 *
 * Unregisters the host from the SCSI mid-layer, releases the
 * adapter's resources and drops the host reference.
 *
 * @dev: The PCI device to remove.
 **/
static void __devexit dc395x_remove_one(struct pci_dev *dev)
{
	struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);

	dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);

	/* detach from the mid-layer before tearing the adapter down */
	scsi_remove_host(scsi_host);
	adapter_uninit(acb);
	pci_disable_device(dev);
	scsi_host_put(scsi_host);
	pci_set_drvdata(dev, NULL);
}
/* PCI IDs handled by this driver: any board built on the TRM-S1040 */
static struct pci_device_id dc395x_pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TEKRAM,
		.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{}			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);

/* PCI driver glue: probe/remove callbacks for matching devices */
static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = __devexit_p(dc395x_remove_one),
};
/**
 * dc395x_module_init - Module initialization function
 *
 * Used by both module and built-in driver to initialise this driver.
 * Registers the PCI driver; returns 0 on success or a negative errno.
 **/
static int __init dc395x_module_init(void)
{
	return pci_module_init(&dc395x_driver);
}
/**
 * dc395x_module_exit - Module cleanup function.
 *
 * Unregisters the PCI driver; the PCI core then calls
 * dc395x_remove_one() for each bound device.
 **/
static void __exit dc395x_module_exit(void)
{
	pci_unregister_driver(&dc395x_driver);
}
/* Module entry points and metadata */
module_init(dc395x_module_init);
module_exit(dc395x_module_exit);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");