/* (removed: web-scrape residue — file-size banner and line-number gutter) */
/*
 * dc395x.c
 *
 * Device Driver for Tekram DC395(U/UW/F), DC315(U)
 * PCI SCSI Bus Master Host Adapter
 * (SCSI chip set used Tekram ASIC TRM-S1040)
 *
 * Authors:
 *  C.L. Huang <ching@tekram.com.tw>
 *  Erich Chen <erich@tekram.com.tw>
 *  (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 *  Kurt Garloff <garloff@suse.de>
 *  (C) 1999-2000 Kurt Garloff
 *
 *  Oliver Neukum <oliver@neukum.name>
 *  Ali Akcaagac <aliakc@web.de>
 *  Jamie Lenehan <lenehan@twibble.org>
 *  (C) 2003
 *
 * License: GNU GPL
 *
 *************************************************************************
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ************************************************************************
 */
  49. #include <linux/module.h>
  50. #include <linux/moduleparam.h>
  51. #include <linux/delay.h>
  52. #include <linux/ctype.h>
  53. #include <linux/blkdev.h>
  54. #include <linux/interrupt.h>
  55. #include <linux/init.h>
  56. #include <linux/spinlock.h>
  57. #include <linux/pci.h>
  58. #include <linux/list.h>
  59. #include <linux/vmalloc.h>
  60. #include <asm/io.h>
  61. #include <scsi/scsi.h>
  62. #include <scsi/scsicam.h> /* needed for scsicam_bios_param */
  63. #include <scsi/scsi_cmnd.h>
  64. #include <scsi/scsi_device.h>
  65. #include <scsi/scsi_host.h>
  66. #include "dc395x.h"
/* Driver identification strings (used in log messages and module info) */
#define DC395X_NAME "dc395x"
#define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
#define DC395X_VERSION "v2.05, 2004/03/08"

/*---------------------------------------------------------------------------
                                  Features
 ---------------------------------------------------------------------------*/
/*
 * Set to disable parts of the driver
 * (uncomment a line below to compile out the corresponding feature)
 */
/*#define DC395x_NO_DISCONNECT*/
/*#define DC395x_NO_TAGQ*/
/*#define DC395x_NO_SYNC*/
/*#define DC395x_NO_WIDE*/
/*---------------------------------------------------------------------------
                                 Debugging
 ---------------------------------------------------------------------------*/
/*
 * Types of debugging that can be enabled and disabled
 * (bit flags, OR-ed together into DEBUG_MASK below)
 */
#define DBG_KG 0x0001
#define DBG_0 0x0002
#define DBG_1 0x0004
#define DBG_SG 0x0020
#define DBG_FIFO 0x0040
#define DBG_PIO 0x0080

/*
 * Set of things to output debugging for.
 * Undefine to remove all debugging
 */
/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
/*#define DEBUG_MASK DBG_0*/

/*
 * Output a kernel message at the specified level and append the
 * driver name and a ": " to the start of the message
 */
#define dprintkl(level, format, arg...) \
	printk(level DC395X_NAME ": " format , ## arg)

#ifdef DEBUG_MASK
/*
 * print a debug message - this is formatted with KERN_DEBUG, then the
 * driver name followed by a ": " and then the message is output.
 * This also checks that the specified debug level is enabled before
 * outputting the message
 */
#define dprintkdbg(type, format, arg...) \
	do { \
		if ((type) & (DEBUG_MASK)) \
			dprintkl(KERN_DEBUG , format , ## arg); \
	} while (0)

/*
 * Check if the specified type of debugging is enabled
 */
#define debug_enabled(type) ((DEBUG_MASK) & (type))

#else
/*
 * No debugging. Do nothing
 */
#define dprintkdbg(type, format, arg...) \
	do {} while (0)
#define debug_enabled(type) (0)

#endif
/*
 * PCI IDs for the TRM-S1040 based boards, defined here in case the
 * kernel's pci_ids.h does not already provide them.
 */
#ifndef PCI_VENDOR_ID_TEKRAM
#define PCI_VENDOR_ID_TEKRAM 0x1DE1	/* Vendor ID */
#endif
#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
#define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391	/* Device ID */
#endif

/* Take/release the host lock of the Scsi_Host that 'dev' points to */
#define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
#define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)

/*
 * Port-I/O accessors for the chip's register window; 'address' is a
 * register offset from the adapter's io_port_base.
 */
#define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
#define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
#define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
#define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
  142. /* cmd->result */
  143. #define RES_TARGET 0x000000FF /* Target State */
  144. #define RES_TARGET_LNX STATUS_MASK /* Only official ... */
  145. #define RES_ENDMSG 0x0000FF00 /* End Message */
  146. #define RES_DID 0x00FF0000 /* DID_ codes */
  147. #define RES_DRV 0xFF000000 /* DRIVER_ codes */
  148. #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
  149. #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
  150. #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
  151. #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
  152. #define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
  153. #define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
  154. #define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
  155. #define TAG_NONE 255
/*
 * srb->segment_x is the hw sg list. It is always allocated as a
 * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
 * cross a page boundary.
 */
#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
#define VIRTX_LEN (sizeof(void *) * DC395x_MAX_SG_LISTENTRY)

/* One hardware scatter/gather list entry as consumed by the chip */
struct SGentry {
	u32 address;		/* bus! address */
	u32 length;
};
/* The SEEPROM structure for TRM_S1040 */

/* Per-target configuration bytes stored in the adapter's serial EEPROM */
struct NVRamTarget {
	u8 cfg0;		/* Target configuration byte 0 */
	u8 period;		/* Target period */
	u8 cfg2;		/* Target configuration byte 2 */
	u8 cfg3;		/* Target configuration byte 3 */
};
/*
 * In-memory image of the adapter's serial EEPROM (byte offsets 0..127
 * within the EEPROM are noted in the field comments).
 */
struct NvRamType {
	u8 sub_vendor_id[2];	/* 0,1 Sub Vendor ID */
	u8 sub_sys_id[2];	/* 2,3 Sub System ID */
	u8 sub_class;		/* 4 Sub Class */
	u8 vendor_id[2];	/* 5,6 Vendor ID */
	u8 device_id[2];	/* 7,8 Device ID */
	u8 reserved;		/* 9 Reserved */
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];
				/** 10,11,12,13
				 ** 14,15,16,17
				 ** ....
				 ** ....
				 ** 70,71,72,73
				 */
	u8 scsi_id;		/* 74 Host Adapter SCSI ID */
	u8 channel_cfg;		/* 75 Channel configuration */
	u8 delay_time;		/* 76 Power on delay time */
	u8 max_tag;		/* 77 Maximum tags */
	u8 reserved0;		/* 78 */
	u8 boot_target;		/* 79 */
	u8 boot_lun;		/* 80 */
	u8 reserved1;		/* 81 */
	u16 reserved2[22];	/* 82,..125 */
	u16 cksum;		/* 126,127 */
};
/*
 * Per-request state: one ScsiReqBlk ("srb") carries a single scsi_cmnd
 * through the driver, together with its hardware sg list and the
 * current transfer position.
 */
struct ScsiReqBlk {
	struct list_head list;		/* next/prev ptrs for srb lists */
	struct DeviceCtlBlk *dcb;	/* device (target/lun) this request belongs to */
	struct scsi_cmnd *cmd;		/* mid-layer command being serviced */
	struct SGentry *segment_x;	/* Linear array of hw sg entries (up to 64 entries) */
	u32 sg_bus_addr;	/* Bus address of sg list (ie, of segment_x) */
	u8 sg_count;		/* No of HW sg entries for this request */
	u8 sg_index;		/* Index of HW sg entry for this request */
	u32 total_xfer_length;	/* Total number of bytes remaining to be transferred */
	void **virt_map;	/* presumably one virtual address per sg entry
				 * (VIRTX_LEN-sized array) - TODO confirm against
				 * the mapping code */
	unsigned char *virt_addr;	/* Virtual address of current transfer position */
	/*
	 * The sense buffer handling function, request_sense, uses
	 * the first hw sg entry (segment_x[0]) and the transfer
	 * length (total_xfer_length). While doing this it stores the
	 * original values into the last sg hw list
	 * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
	 * total_xfer_length in xferred. These values are restored in
	 * pci_unmap_srb_sense. This is the only place xferred is used.
	 */
	u32 xferred;		/* Saved copy of total_xfer_length */
	u16 state;		/* srb state flags (SRB_* values; definitions not
				 * visible in this chunk) */
	u8 msgin_buf[6];	/* bytes collected during SCSI message-in phase */
	u8 msgout_buf[6];	/* bytes to send during SCSI message-out phase */
	u8 adapter_status;
	u8 target_status;
	u8 msg_count;		/* number of valid bytes in msgout_buf */
	u8 end_message;
	u8 tag_number;		/* queue tag; presumably TAG_NONE when untagged */
	u8 status;
	u8 retry_count;
	u8 flag;
	u8 scsi_phase;		/* current SCSI bus phase for this request */
};
/*
 * Per-device (target/LUN) state: request queues plus the negotiated
 * sync transfer settings for that device.
 */
struct DeviceCtlBlk {
	struct list_head list;		/* next/prev ptrs for the dcb list */
	struct AdapterCtlBlk *acb;	/* adapter this device hangs off */
	struct list_head srb_going_list;	/* head of going srb list */
	struct list_head srb_waiting_list;	/* head of waiting srb list */
	struct ScsiReqBlk *active_srb;	/* srb currently on the bus, if any */
	u32 tag_mask;		/* presumably a bitmask of tag numbers in
				 * flight - TODO confirm in tag allocation code */
	u16 max_command;	/* max outstanding commands for this device */
	u8 target_id;		/* SCSI Target ID (SCSI Only) */
	u8 target_lun;		/* SCSI Log. Unit (SCSI Only) */
	u8 identify_msg;	/* IDENTIFY message byte sent at selection */
	u8 dev_mode;
	u8 inquiry7;		/* To store Inquiry flags */
	u8 sync_mode;		/* 0:async mode */
	u8 min_nego_period;	/* for nego. */
	u8 sync_period;		/* for reg. */
	u8 sync_offset;		/* for reg. and nego.(low nibble) */
	u8 flag;
	u8 dev_type;
	u8 init_tcq_flag;	/* tagged-command-queueing init state */
};
/*
 * Per-adapter state: one AdapterCtlBlk per TRM-S1040 based board.
 */
struct AdapterCtlBlk {
	struct Scsi_Host *scsi_host;	/* mid-layer host registered for this board */
	unsigned long io_port_base;	/* base of the chip's I/O port window */
	unsigned long io_port_len;	/* length of the I/O port window */
	struct list_head dcb_list;	/* head of going dcb list */
	struct DeviceCtlBlk *dcb_run_robin;	/* next dcb to service (round robin) */
	struct DeviceCtlBlk *active_dcb;	/* device currently owning the bus, if any */
	struct list_head srb_free_list;	/* head of free srb list */
	struct ScsiReqBlk *tmp_srb;	/* NOTE(review): presumably points at 'srb'
					 * below for internal requests - confirm */
	struct timer_list waiting_timer;	/* kicks the waiting srb queue */
	struct timer_list selto_timer;	/* presumably selection-timeout timer - confirm */
	u16 srb_count;		/* number of srbs in srb_array */
	u8 sel_timeout;		/* selection timeout value */
	unsigned int irq_level;	/* IRQ number used by this adapter */
	u8 tag_max_num;
	u8 acb_flag;
	u8 gmode2;
	u8 config;
	u8 lun_chk;
	u8 scan_devices;
	u8 hostid_bit;		/* bit corresponding to the adapter's own SCSI id */
	u8 dcb_map[DC395x_MAX_SCSI_ID];	/* one entry per target id */
	struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];	/* dcb lookup by [id][lun] */
	struct pci_dev *dev;	/* underlying PCI device */
	u8 msg_len;
	struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];	/* preallocated srb pool */
	struct ScsiReqBlk srb;	/* extra srb for driver-internal use */
	struct NvRamType eeprom;	/* eeprom settings for this adapter */
};
  283. /*---------------------------------------------------------------------------
  284. Forward declarations
  285. ---------------------------------------------------------------------------*/
  286. static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  287. u16 *pscsi_status);
  288. static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  289. u16 *pscsi_status);
  290. static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  291. u16 *pscsi_status);
  292. static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  293. u16 *pscsi_status);
  294. static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  295. u16 *pscsi_status);
  296. static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  297. u16 *pscsi_status);
  298. static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  299. u16 *pscsi_status);
  300. static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  301. u16 *pscsi_status);
  302. static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  303. u16 *pscsi_status);
  304. static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  305. u16 *pscsi_status);
  306. static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  307. u16 *pscsi_status);
  308. static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  309. u16 *pscsi_status);
  310. static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  311. u16 *pscsi_status);
  312. static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  313. u16 *pscsi_status);
  314. static void set_basic_config(struct AdapterCtlBlk *acb);
  315. static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
  316. struct ScsiReqBlk *srb);
  317. static void reset_scsi_bus(struct AdapterCtlBlk *acb);
  318. static void data_io_transfer(struct AdapterCtlBlk *acb,
  319. struct ScsiReqBlk *srb, u16 io_dir);
  320. static void disconnect(struct AdapterCtlBlk *acb);
  321. static void reselect(struct AdapterCtlBlk *acb);
  322. static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  323. struct ScsiReqBlk *srb);
  324. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  325. struct ScsiReqBlk *srb);
  326. static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  327. struct ScsiReqBlk *srb);
  328. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
  329. struct scsi_cmnd *cmd, u8 force);
  330. static void scsi_reset_detect(struct AdapterCtlBlk *acb);
  331. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
  332. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  333. struct ScsiReqBlk *srb);
  334. static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  335. struct ScsiReqBlk *srb);
  336. static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  337. struct ScsiReqBlk *srb);
  338. static void set_xfer_rate(struct AdapterCtlBlk *acb,
  339. struct DeviceCtlBlk *dcb);
  340. static void waiting_timeout(unsigned long ptr);
  341. /*---------------------------------------------------------------------------
  342. Static Data
  343. ---------------------------------------------------------------------------*/
  344. static u16 current_sync_offset = 0;
  345. static void *dc395x_scsi_phase0[] = {
  346. data_out_phase0,/* phase:0 */
  347. data_in_phase0, /* phase:1 */
  348. command_phase0, /* phase:2 */
  349. status_phase0, /* phase:3 */
  350. nop0, /* phase:4 PH_BUS_FREE .. initial phase */
  351. nop0, /* phase:5 PH_BUS_FREE .. initial phase */
  352. msgout_phase0, /* phase:6 */
  353. msgin_phase0, /* phase:7 */
  354. };
  355. static void *dc395x_scsi_phase1[] = {
  356. data_out_phase1,/* phase:0 */
  357. data_in_phase1, /* phase:1 */
  358. command_phase1, /* phase:2 */
  359. status_phase1, /* phase:3 */
  360. nop1, /* phase:4 PH_BUS_FREE .. initial phase */
  361. nop1, /* phase:5 PH_BUS_FREE .. initial phase */
  362. msgout_phase1, /* phase:6 */
  363. msgin_phase1, /* phase:7 */
  364. };
  365. /*
  366. *Fast20: 000 50ns, 20.0 MHz
  367. * 001 75ns, 13.3 MHz
  368. * 010 100ns, 10.0 MHz
  369. * 011 125ns, 8.0 MHz
  370. * 100 150ns, 6.6 MHz
  371. * 101 175ns, 5.7 MHz
  372. * 110 200ns, 5.0 MHz
  373. * 111 250ns, 4.0 MHz
  374. *
  375. *Fast40(LVDS): 000 25ns, 40.0 MHz
  376. * 001 50ns, 20.0 MHz
  377. * 010 75ns, 13.3 MHz
  378. * 011 100ns, 10.0 MHz
  379. * 100 125ns, 8.0 MHz
  380. * 101 150ns, 6.6 MHz
  381. * 110 175ns, 5.7 MHz
  382. * 111 200ns, 5.0 MHz
  383. */
/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
/* Transfer period per speed index, in 4 ns units (12 -> 48 ns). */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
/* Matching bus speed in 100 kHz units (200 -> 20.0 MHz, see table above). */
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
  388. /*---------------------------------------------------------------------------
  389. Configuration
  390. ---------------------------------------------------------------------------*/
  391. /*
  392. * Module/boot parameters currently effect *all* instances of the
  393. * card in the system.
  394. */
  395. /*
  396. * Command line parameters are stored in a structure below.
  397. * These are the index's into the structure for the various
  398. * command line options.
  399. */
/* Index of each command line option within the cfg_data[] table. */
#define CFG_ADAPTER_ID 0
#define CFG_MAX_SPEED 1
#define CFG_DEV_MODE 2
#define CFG_ADAPTER_MODE 3
#define CFG_TAGS 4
#define CFG_RESET_DELAY 5
#define CFG_NUM 6 /* number of configuration items */
/*
 * Value used to indicate that a command line override
 * hasn't been used to modify the value.
 */
#define CFG_PARAM_UNSET -1
/*
 * Holds one command line parameter together with its legal range, its
 * default, and the conservative value used when the "safe" option is set.
 */
struct ParameterData {
	int value;		/* value of this setting */
	int min;		/* minimum value */
	int max;		/* maximum value */
	int def;		/* default value */
	int safe;		/* safe value */
};
  422. static struct ParameterData __devinitdata cfg_data[] = {
  423. { /* adapter id */
  424. CFG_PARAM_UNSET,
  425. 0,
  426. 15,
  427. 7,
  428. 7
  429. },
  430. { /* max speed */
  431. CFG_PARAM_UNSET,
  432. 0,
  433. 7,
  434. 1, /* 13.3Mhz */
  435. 4, /* 6.7Hmz */
  436. },
  437. { /* dev mode */
  438. CFG_PARAM_UNSET,
  439. 0,
  440. 0x3f,
  441. NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
  442. NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
  443. NTC_DO_SEND_START,
  444. NTC_DO_PARITY_CHK | NTC_DO_SEND_START
  445. },
  446. { /* adapter mode */
  447. CFG_PARAM_UNSET,
  448. 0,
  449. 0x2f,
  450. #ifdef CONFIG_SCSI_MULTI_LUN
  451. NAC_SCANLUN |
  452. #endif
  453. NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
  454. /*| NAC_ACTIVE_NEG*/,
  455. NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
  456. },
  457. { /* tags */
  458. CFG_PARAM_UNSET,
  459. 0,
  460. 5,
  461. 3, /* 16 tags (??) */
  462. 2,
  463. },
  464. { /* reset delay */
  465. CFG_PARAM_UNSET,
  466. 0,
  467. 180,
  468. 1, /* 1 second */
  469. 10, /* 10 seconds */
  470. }
  471. };
/*
 * Safe settings. If set to zero then the BIOS/default values with
 * command line overrides will be used. If set to 1 then safe and
 * slow settings will be used.
 */
/* Backing store for the "safe" module option described above. */
static int use_safe_settings = 0;
module_param_named(safe, use_safe_settings, bool, 0);
/* NOTE(review): registered as param type "bool" but declared int - this
 * era's module_param accepts an int backing store; verify before porting. */
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");

/* Per-item overrides; CFG_PARAM_UNSET (-1) means "no override given". */
module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");

module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");

module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
MODULE_PARM_DESC(dev_mode, "Device mode.");

module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
MODULE_PARM_DESC(adapter_mode, "Adapter mode.");

module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");

module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
  492. /**
  493. * set_safe_settings - if the use_safe_settings option is set then
  494. * set all values to the safe and slow values.
  495. **/
  496. static void __devinit set_safe_settings(void)
  497. {
  498. if (use_safe_settings)
  499. {
  500. int i;
  501. dprintkl(KERN_INFO, "Using safe settings.\n");
  502. for (i = 0; i < CFG_NUM; i++)
  503. {
  504. cfg_data[i].value = cfg_data[i].safe;
  505. }
  506. }
  507. }
  508. /**
  509. * fix_settings - reset any boot parameters which are out of range
  510. * back to the default values.
  511. **/
  512. static void __devinit fix_settings(void)
  513. {
  514. int i;
  515. dprintkdbg(DBG_1,
  516. "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
  517. "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
  518. cfg_data[CFG_ADAPTER_ID].value,
  519. cfg_data[CFG_MAX_SPEED].value,
  520. cfg_data[CFG_DEV_MODE].value,
  521. cfg_data[CFG_ADAPTER_MODE].value,
  522. cfg_data[CFG_TAGS].value,
  523. cfg_data[CFG_RESET_DELAY].value);
  524. for (i = 0; i < CFG_NUM; i++)
  525. {
  526. if (cfg_data[i].value < cfg_data[i].min
  527. || cfg_data[i].value > cfg_data[i].max)
  528. cfg_data[i].value = cfg_data[i].def;
  529. }
  530. }
/*
 * Mapping from the eeprom delay index value (index into this array)
 * to the number of actual seconds that the delay should be for.
 */
static char __devinitdata eeprom_index_to_delay_map[] =
	{ 1, 3, 5, 10, 16, 30, 60, 120 };
  537. /**
  538. * eeprom_index_to_delay - Take the eeprom delay setting and convert it
  539. * into a number of seconds.
  540. *
  541. * @eeprom: The eeprom structure in which we find the delay index to map.
  542. **/
  543. static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
  544. {
  545. eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
  546. }
  547. /**
  548. * delay_to_eeprom_index - Take a delay in seconds and return the
  549. * closest eeprom index which will delay for at least that amount of
  550. * seconds.
  551. *
  552. * @delay: The delay, in seconds, to find the eeprom index for.
  553. **/
  554. static int __devinit delay_to_eeprom_index(int delay)
  555. {
  556. u8 idx = 0;
  557. while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
  558. idx++;
  559. return idx;
  560. }
  561. /**
  562. * eeprom_override - Override the eeprom settings, in the provided
  563. * eeprom structure, with values that have been set on the command
  564. * line.
  565. *
  566. * @eeprom: The eeprom data to override with command line options.
  567. **/
  568. static void __devinit eeprom_override(struct NvRamType *eeprom)
  569. {
  570. u8 id;
  571. /* Adapter Settings */
  572. if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
  573. eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
  574. if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
  575. eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
  576. if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
  577. eeprom->delay_time = delay_to_eeprom_index(
  578. cfg_data[CFG_RESET_DELAY].value);
  579. if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
  580. eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
  581. /* Device Settings */
  582. for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
  583. if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
  584. eeprom->target[id].cfg0 =
  585. (u8)cfg_data[CFG_DEV_MODE].value;
  586. if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
  587. eeprom->target[id].period =
  588. (u8)cfg_data[CFG_MAX_SPEED].value;
  589. }
  590. }
  591. /*---------------------------------------------------------------------------
  592. ---------------------------------------------------------------------------*/
  593. static unsigned int list_size(struct list_head *head)
  594. {
  595. unsigned int count = 0;
  596. struct list_head *pos;
  597. list_for_each(pos, head)
  598. count++;
  599. return count;
  600. }
  601. static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
  602. struct DeviceCtlBlk *pos)
  603. {
  604. int use_next = 0;
  605. struct DeviceCtlBlk* next = NULL;
  606. struct DeviceCtlBlk* i;
  607. if (list_empty(head))
  608. return NULL;
  609. /* find supplied dcb and then select the next one */
  610. list_for_each_entry(i, head, list)
  611. if (use_next) {
  612. next = i;
  613. break;
  614. } else if (i == pos) {
  615. use_next = 1;
  616. }
  617. /* if no next one take the head one (ie, wraparound) */
  618. if (!next)
  619. list_for_each_entry(i, head, list) {
  620. next = i;
  621. break;
  622. }
  623. return next;
  624. }
  625. static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  626. {
  627. if (srb->tag_number < 255) {
  628. dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
  629. srb->tag_number = 255;
  630. }
  631. }
  632. /* Find cmd in SRB list */
  633. inline static struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
  634. struct list_head *head)
  635. {
  636. struct ScsiReqBlk *i;
  637. list_for_each_entry(i, head, list)
  638. if (i->cmd == cmd)
  639. return i;
  640. return NULL;
  641. }
  642. static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
  643. {
  644. struct list_head *head = &acb->srb_free_list;
  645. struct ScsiReqBlk *srb = NULL;
  646. if (!list_empty(head)) {
  647. srb = list_entry(head->next, struct ScsiReqBlk, list);
  648. list_del(head->next);
  649. dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
  650. }
  651. return srb;
  652. }
/* Return a no-longer-used srb to the adapter's free pool. */
static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
	list_add_tail(&srb->list, &acb->srb_free_list);
}

/* Queue srb at the FRONT of the device's waiting list (picked up first). */
static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_insert: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_add(&srb->list, &dcb->srb_waiting_list);
}

/* Queue srb at the tail of the device's waiting list. */
static void srb_waiting_append(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_append: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_waiting_list);
}

/* Add srb to the tail of the device's going list. */
static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_going_append: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_going_list);
}
  678. static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  679. {
  680. struct ScsiReqBlk *i;
  681. struct ScsiReqBlk *tmp;
  682. dprintkdbg(DBG_0, "srb_going_remove: (pid#%li) <%02i-%i> srb=%p\n",
  683. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  684. list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
  685. if (i == srb) {
  686. list_del(&srb->list);
  687. break;
  688. }
  689. }
  690. static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
  691. struct ScsiReqBlk *srb)
  692. {
  693. struct ScsiReqBlk *i;
  694. struct ScsiReqBlk *tmp;
  695. dprintkdbg(DBG_0, "srb_waiting_remove: (pid#%li) <%02i-%i> srb=%p\n",
  696. srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
  697. list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
  698. if (i == srb) {
  699. list_del(&srb->list);
  700. break;
  701. }
  702. }
/* Move srb from the device's going list back onto its waiting list. */
static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_going_to_waiting_move: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_waiting_list);
}

/* Move srb from the device's waiting list onto its going list. */
static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_waiting_to_going_move: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_going_list);
}
/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
	/* a wakeup is already scheduled - nothing to do */
	if (timer_pending(&acb->waiting_timer))
		return;
	init_timer(&acb->waiting_timer);
	acb->waiting_timer.function = waiting_timeout;
	acb->waiting_timer.data = (unsigned long) acb;
	/*
	 * If the requested expiry would land before (last_reset - HZ/2),
	 * push the wakeup out until just after that point - last_reset is
	 * set into the future by the bus-reset path as a recovery window.
	 */
	if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2))
		acb->waiting_timer.expires =
		    acb->scsi_host->last_reset - HZ / 2 + 1;
	else
		acb->waiting_timer.expires = jiffies + to + 1;
	add_timer(&acb->waiting_timer);
}
/* Send the next command from the waiting list to the bus */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	/* nothing to do while a device owns the bus or a reset is in flight */
	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	/* we are running now, so cancel any deferred wakeup */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}

	/*
	 * Loop over the dcbs, but we start somewhere (potentially) in
	 * the middle of the loop so we need to manually do this.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* advance the round-robin pointer so another device is next */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);

		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* nothing waiting or device saturated: next dcb */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);

			/* Try to send to the bus */
			if (!start_scsi(acb, pos, srb))
				srb_waiting_to_going_move(pos, srb);
			else
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
  789. /* Wake up waiting queue */
  790. static void waiting_timeout(unsigned long ptr)
  791. {
  792. unsigned long flags;
  793. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
  794. dprintkdbg(DBG_1,
  795. "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
  796. DC395x_LOCK_IO(acb->scsi_host, flags);
  797. waiting_process_next(acb);
  798. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  799. }
/* Get the DCB for a given ID/LUN combination */
static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
{
	/* may be NULL for an unknown device - callers check */
	return acb->children[id][lun];
}
  805. /* Send SCSI Request Block (srb) to adapter (acb) */
  806. static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  807. {
  808. struct DeviceCtlBlk *dcb = srb->dcb;
  809. if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
  810. acb->active_dcb ||
  811. (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
  812. srb_waiting_append(dcb, srb);
  813. waiting_process_next(acb);
  814. return;
  815. }
  816. if (!start_scsi(acb, dcb, srb))
  817. srb_going_append(dcb, srb);
  818. else {
  819. srb_waiting_insert(dcb, srb);
  820. waiting_set_timer(acb, HZ / 50);
  821. }
  822. }
/* Prepare SRB for being sent to Device DCB w/ command *cmd */
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	enum dma_data_direction dir = cmd->sc_data_direction;
	dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n",
		cmd->pid, dcb->target_id, dcb->target_lun);

	/* reset the srb to a clean initial state for this command */
	srb->dcb = dcb;
	srb->cmd = cmd;
	srb->sg_count = 0;
	srb->total_xfer_length = 0;
	srb->sg_bus_addr = 0;
	srb->virt_addr = NULL;
	srb->sg_index = 0;
	srb->adapter_status = 0;
	srb->target_status = 0;
	srb->msg_count = 0;
	srb->status = 0;
	srb->flag = 0;
	srb->state = 0;
	srb->retry_count = 0;
	srb->tag_number = TAG_NONE;
	srb->scsi_phase = PH_BUS_FREE; /* initial phase */
	srb->end_message = 0;

	if (dir == PCI_DMA_NONE || !cmd->request_buffer) {
		/* no data transfer for this command */
		dprintkdbg(DBG_0,
			"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
			cmd->bufflen, cmd->request_buffer,
			cmd->use_sg, srb->segment_x[0].address);
	} else if (cmd->use_sg) {
		/* scatter/gather: DMA-map each segment and mirror it into
		 * the adapter's SGentry table */
		int i;
		u32 reqlen = cmd->request_bufflen;
		struct scatterlist *sl = (struct scatterlist *)
			cmd->request_buffer;
		struct SGentry *sgp = srb->segment_x;
		srb->sg_count = pci_map_sg(dcb->acb->dev, sl, cmd->use_sg,
			dir);
		dprintkdbg(DBG_0,
			"build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
			reqlen, cmd->request_buffer, cmd->use_sg,
			srb->sg_count);

		for (i = 0; i < srb->sg_count; i++) {
			u32 seglen = (u32)sg_dma_len(sl + i);
			sgp[i].address = (u32)sg_dma_address(sl + i);
			sgp[i].length = seglen;
			srb->total_xfer_length += seglen;
			/* NOTE(review): kernel-virtual mappings kept,
			 * presumably for a PIO fallback path - confirm */
			srb->virt_map[i] = kmap(sl[i].page);
		}
		srb->virt_addr = srb->virt_map[0];

		sgp += srb->sg_count - 1;
		/*
		 * adjust last page if too big as it is allocated
		 * on even page boundaries
		 */
		if (srb->total_xfer_length > reqlen) {
			sgp->length -= (srb->total_xfer_length - reqlen);
			srb->total_xfer_length = reqlen;
		}

		/* Fixup for WIDE padding - make sure length is even */
		if (dcb->sync_period & WIDE_SYNC &&
		    srb->total_xfer_length % 2) {
			srb->total_xfer_length++;
			sgp->length++;
		}

		/* map the SGentry table itself so the chip can fetch it */
		srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
						  srb->segment_x,
						  SEGMENTX_LEN,
						  PCI_DMA_TODEVICE);
		dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
			srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
	} else {
		/* single contiguous buffer: one mapped segment */
		srb->total_xfer_length = cmd->request_bufflen;
		srb->sg_count = 1;
		srb->segment_x[0].address =
			pci_map_single(dcb->acb->dev, cmd->request_buffer,
				       srb->total_xfer_length, dir);

		/* Fixup for WIDE padding - make sure length is even */
		if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2)
			srb->total_xfer_length++;

		srb->segment_x[0].length = srb->total_xfer_length;
		srb->virt_addr = cmd->request_buffer;
		dprintkdbg(DBG_0,
			"build_srb: [1] len=%d buf=%p use_sg=%d map=%08x\n",
			srb->total_xfer_length, cmd->request_buffer,
			cmd->use_sg, srb->segment_x[0].address);
	}
}
/**
 * dc395x_queue_command - queue scsi command passed from the mid
 * layer, invoke 'done' on completion
 *
 * @cmd: pointer to scsi command object
 * @done: function pointer to be invoked on completion
 *
 * Returns 1 if the adapter (host) is busy, else returns 0. One
 * reason for an adapter to be busy is that the number
 * of outstanding queued commands is already equal to
 * struct Scsi_Host::can_queue .
 *
 * Required: if struct Scsi_Host::can_queue is ever non-zero
 * then this function is required.
 *
 * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
 * and is expected to be held on return.
 *
 **/
static int dc395x_queue_command(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkdbg(DBG_0, "queue_command: (pid#%li) <%02i-%i> cmnd=0x%02x\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);

	/* Assume BAD_TARGET; will be cleared later */
	cmd->result = DID_BAD_TARGET << 16;

	/* ignore invalid targets */
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun >31) {
		goto complete;
	}

	/* does the specified lun on the specified device exist */
	if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
		dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}

	/* do we have a DCB for the device */
	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		/* should never happen */
		dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}

	/* set callback and clear result in the command */
	cmd->scsi_done = done;
	cmd->result = 0;

	srb = srb_get_free(acb);
	if (!srb)
	{
		/*
		 * Return 1 since we are unable to queue this command at this
		 * point in time.
		 */
		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
		return 1;
	}

	build_srb(cmd, dcb, srb);

	if (!list_empty(&dcb->srb_waiting_list)) {
		/* append to waiting queue so earlier commands keep their
		 * ordering */
		srb_waiting_append(dcb, srb);
		waiting_process_next(acb);
	} else {
		/* process immediately */
		send_srb(acb, srb);
	}
	dprintkdbg(DBG_1, "queue_command: (pid#%li) done\n", cmd->pid);
	return 0;

complete:
	/*
	 * Complete the command immediately, and then return 0 to
	 * indicate that we have handled the command. This is usually
	 * done when the command is for things like non existent
	 * devices.
	 */
	done(cmd);
	return 0;
}
  993. /*
  994. * Return the disk geometry for the given SCSI device.
  995. */
  996. static int dc395x_bios_param(struct scsi_device *sdev,
  997. struct block_device *bdev, sector_t capacity, int *info)
  998. {
  999. #ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
  1000. int heads, sectors, cylinders;
  1001. struct AdapterCtlBlk *acb;
  1002. int size = capacity;
  1003. dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
  1004. acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
  1005. heads = 64;
  1006. sectors = 32;
  1007. cylinders = size / (heads * sectors);
  1008. if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
  1009. heads = 255;
  1010. sectors = 63;
  1011. cylinders = size / (heads * sectors);
  1012. }
  1013. geom[0] = heads;
  1014. geom[1] = sectors;
  1015. geom[2] = cylinders;
  1016. return 0;
  1017. #else
  1018. return scsicam_bios_param(bdev, capacity, info);
  1019. #endif
  1020. }
/*
 * Dump the srb state plus the chip's SCSI, DMA and general register
 * sets (and the PCI status word) to the kernel log for debugging.
 * @dcb/@srb may be NULL; the currently active ones are then used.
 */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		if (!srb->cmd)
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p (pid#%li) "
				 "cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd, srb->cmd->pid,
				srb->cmd->cmnd[0], srb->cmd->device->id,
				srb->cmd->device->lun);
		printk(" sglist=%p cnt=%i idx=%i len=%i\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	/* SCSI core register set */
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	/* DMA engine register set */
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	/* general-purpose registers and PCI status */
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
/*
 * Clear the chip's SCSI FIFO.  With FIFO debugging enabled, first log
 * how many bytes are about to be discarded.
 * @txt: caller identification for the debug message.
 */
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
#if debug_enabled(DBG_FIFO)
	u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
	/* NOTE(review): bit 0x40 appears to flag an empty FIFO - confirm
	 * against the TRM-S1040 datasheet */
	if (!(fifocnt & 0x40))
		dprintkdbg(DBG_FIFO,
			"clear_fifo: (%i bytes) on phase %02x in %s\n",
			fifocnt & 0x3f, lines, txt);
#endif
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
  1097. static void reset_dev_param(struct AdapterCtlBlk *acb)
  1098. {
  1099. struct DeviceCtlBlk *dcb;
  1100. struct NvRamType *eeprom = &acb->eeprom;
  1101. dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
  1102. list_for_each_entry(dcb, &acb->dcb_list, list) {
  1103. u8 period_index;
  1104. dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
  1105. dcb->sync_period = 0;
  1106. dcb->sync_offset = 0;
  1107. dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
  1108. period_index = eeprom->target[dcb->target_id].period & 0x07;
  1109. dcb->min_nego_period = clock_period[period_index];
  1110. if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
  1111. || !(acb->config & HCC_WIDE_CARD))
  1112. dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
  1113. }
  1114. }
/*
 * perform a hard reset on the SCSI bus
 * @cmd - some command for this host (for fetching hooks)
 * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
 */
static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkl(KERN_INFO,
		"eh_bus_reset: (pid#%li) target=<%02i-%i> cmd=%p\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd);

	/* no scheduled wakeups while the bus is being torn down */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/*
	 * disable interrupt
	 */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);

	reset_scsi_bus(acb);
	udelay(500);

	/* We may be in serious trouble. Wait some seconds */
	acb->scsi_host->last_reset =
	    jiffies + 3 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	/*
	 * re-enable interrupt
	 */
	/* Clear SCSI FIFO */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	clear_fifo(acb, "eh_bus_reset");
	/* Delete pending IRQ */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	set_basic_config(acb);

	reset_dev_param(acb);
	/* fail all in-flight commands back to the midlayer with DID_RESET */
	doing_srb_done(acb, DID_RESET, cmd, 0);
	acb->active_dcb = NULL;

	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE ,RESET_DEV */
	waiting_process_next(acb);

	return SUCCESS;
}
  1158. /*
  1159. * abort an errant SCSI command
  1160. * @cmd - command to be aborted
  1161. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  1162. */
/*
 * Abort a single errant SCSI command (midlayer error recovery hook).
 *
 * If the command is still on the per-device waiting list it was never
 * sent to the hardware, so it is unlinked, unmapped and returned to
 * the free list (SUCCESS).  A command already active on the bus cannot
 * be aborted here, so FAILED is returned for it.
 */
static int dc395x_eh_abort(struct scsi_cmnd *cmd)
{
	/*
	 * Look into our command queues: If it has not been sent already,
	 * we remove it and return success. Otherwise fail.
	 */
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_INFO, "eh_abort: (pid#%li) target=<%02i-%i> cmd=%p\n",
		cmd->pid, cmd->device->id, cmd->device->lun, cmd);

	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
		return FAILED;
	}

	srb = find_cmd(cmd, &dcb->srb_waiting_list);
	if (srb) {
		/* Never started: release every resource tied to the SRB */
		srb_waiting_remove(dcb, srb);
		pci_unmap_srb_sense(acb, srb);
		pci_unmap_srb(acb, srb);
		free_tag(dcb, srb);
		srb_free_insert(acb, srb);
		dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
		cmd->result = DID_ABORT << 16;
		return SUCCESS;
	}
	srb = find_cmd(cmd, &dcb->srb_going_list);
	if (srb) {
		dprintkl(KERN_DEBUG, "eh_abort: Command in progress");
		/* XXX: Should abort the command here */
	} else {
		dprintkl(KERN_DEBUG, "eh_abort: Command not found");
	}
	return FAILED;
}
/* SDTR: queue an extended synchronous-transfer request in msgout_buf */
static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	/* Only the IDENTIFY byte may already be queued ahead of us */
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
		/* Sync nego disabled: request async (offset 0) */
		dcb->sync_offset = 0;
		dcb->min_nego_period = 200 >> 2;
	} else if (dcb->sync_offset == 0)
		dcb->sync_offset = SYNC_NEGO_OFFSET;

	/* Extended message per the SCSI spec: 01h, len, SDTR, period, offset */
	*ptr++ = MSG_EXTENDED;	/* (01h) */
	*ptr++ = 3;		/* length */
	*ptr++ = EXTENDED_SDTR;	/* (01h) */
	*ptr++ = dcb->min_nego_period;	/* Transfer period (in 4ns) */
	*ptr++ = dcb->sync_offset;	/* Transfer offset (max. REQ/ACK dist) */
	srb->msg_count += 5;
	srb->state |= SRB_DO_SYNC_NEGO;
}
  1225. /* WDTR */
  1226. static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1227. struct ScsiReqBlk *srb)
  1228. {
  1229. u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
  1230. (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
  1231. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1232. if (srb->msg_count > 1) {
  1233. dprintkl(KERN_INFO,
  1234. "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1235. srb->msg_count, srb->msgout_buf[0],
  1236. srb->msgout_buf[1]);
  1237. return;
  1238. }
  1239. *ptr++ = MSG_EXTENDED; /* (01h) */
  1240. *ptr++ = 2; /* length */
  1241. *ptr++ = EXTENDED_WDTR; /* (03h) */
  1242. *ptr++ = wide;
  1243. srb->msg_count += 4;
  1244. srb->state |= SRB_DO_WIDE_NEGO;
  1245. }
#if 0
/* Timer to work around chip flaw: When selecting and the bus is
 * busy, we sometimes miss a Selection timeout IRQ */
/* NOTE: disabled workaround, kept for reference; see start_scsi() where
 * the selto_timer() call site is commented out as well. */
void selection_timeout_missed(unsigned long ptr);
/* Sets the timer to wake us up */
static void selto_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	/* Do not let the timer fire inside the post-reset quiet period */
	if (time_before
	    (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->scsi_host->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}

/* Timer callback: treat the missed selection timeout as a disconnect */
void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
/*
 * Try to start a command on the SCSI bus.
 *
 * Programs the TRM-S1040 for selection of dcb's target: target id,
 * sync parameters, IDENTIFY message, optional WDTR/SDTR negotiation
 * message, optional tag message, and the CDB, then issues the
 * appropriate SCMD_SEL_* command.
 *
 * Returns 0 if the selection was started (srb becomes the active
 * request), or 1 if the bus/chip was busy and the caller should retry
 * later via the waiting queue.
 */
static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
		struct ScsiReqBlk* srb)
{
	u16 s_stat2, return_code;
	u8 s_stat, scsicommand, i, identify_message;
	u8 *ptr;
	dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> srb=%p\n",
		srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
	srb->tag_number = TAG_NONE;	/* acb->tag_max_num: had error read in eeprom */

	s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	s_stat2 = 0;
	s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
#if 1
	/* Bus signal line indicates the bus is busy: do not even try */
	if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
		dprintkdbg(DBG_KG, "start_scsi: (pid#%li) BUSY %02x %04x\n",
			srb->cmd->pid, s_stat, s_stat2);
		/*
		 * Try anyway?
		 *
		 * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
		 * Timeout, a Disconnect or a Reselction IRQ, so we would be screwed!
		 * (This is likely to be a bug in the hardware. Obviously, most people
		 *  only have one initiator per SCSI bus.)
		 * Instead let this fail and have the timer make sure the command is
		 * tried again after a short time
		 */
		/*selto_timer (acb); */
		return 1;
	}
#endif
	/* Only one selection may be in flight at a time */
	if (acb->active_dcb) {
		dprintkl(KERN_DEBUG, "start_scsi: (pid#%li) Attempt to start a"
			"command while another command (pid#%li) is active.",
			srb->cmd->pid,
			acb->active_dcb->active_srb ?
			acb->active_dcb->active_srb->cmd->pid : 0);
		return 1;
	}
	/* A pending interrupt must be serviced before we select */
	if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
		dprintkdbg(DBG_KG, "start_scsi: (pid#%li) Failed (busy)\n",
			srb->cmd->pid);
		return 1;
	}
	/* Allow starting of SCSI commands half a second before we allow the mid-level
	 * to queue them again after a reset */
	if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) {
		dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
		return 1;
	}

	/* Flush FIFO */
	clear_fifo(acb, "start_scsi");
	/* Program selection parameters into the chip */
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	identify_message = dcb->identify_msg;
	/*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
	/* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
	if (srb->flag & AUTO_REQSENSE)
		identify_message &= 0xBF;

	/*
	 * Piggy-back a WDTR/SDTR negotiation onto INQUIRY/REQUEST SENSE
	 * commands to LUN 0 while negotiation is enabled but not done yet.
	 */
	if (((srb->cmd->cmnd[0] == INQUIRY)
	     || (srb->cmd->cmnd[0] == REQUEST_SENSE)
	     || (srb->flag & AUTO_REQSENSE))
	    && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
		 && !(dcb->sync_mode & WIDE_NEGO_DONE))
		|| ((dcb->sync_mode & SYNC_NEGO_ENABLE)
		    && !(dcb->sync_mode & SYNC_NEGO_DONE)))
	    && (dcb->target_lun == 0)) {
		srb->msgout_buf[0] = identify_message;
		srb->msg_count = 1;
		/* Stop after ATN so the message bytes go out under our control */
		scsicommand = SCMD_SEL_ATNSTOP;
		srb->state = SRB_MSGOUT;
#ifndef SYNC_FIRST
		if (dcb->sync_mode & WIDE_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_WBUS16) {
			build_wdtr(acb, dcb, srb);
			goto no_cmd;
		}
#endif
		if (dcb->sync_mode & SYNC_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_SYNC) {
			build_sdtr(acb, dcb, srb);
			goto no_cmd;
		}
		if (dcb->sync_mode & WIDE_NEGO_ENABLE
		    && dcb->inquiry7 & SCSI_INQ_WBUS16) {
			build_wdtr(acb, dcb, srb);
			goto no_cmd;
		}
		srb->msg_count = 0;
	}

	/* Send identify message */
	DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
	scsicommand = SCMD_SEL_ATN;
	srb->state = SRB_START_;
#ifndef DC395x_NO_TAGQ
	/* Tagged queueing: find a free tag bit and queue a SIMPLE_QTAG msg */
	if ((dcb->sync_mode & EN_TAG_QUEUEING)
	    && (identify_message & 0xC0)) {
		/* Send Tag message */
		u32 tag_mask = 1;
		u8 tag_number = 0;
		while (tag_mask & dcb->tag_mask
		       && tag_number <= dcb->max_command) {
			tag_mask = tag_mask << 1;
			tag_number++;
		}
		if (tag_number >= dcb->max_command) {
			dprintkl(KERN_WARNING, "start_scsi: (pid#%li) "
				"Out of tags target=<%02i-%i>)\n",
				srb->cmd->pid, srb->cmd->device->id,
				srb->cmd->device->lun);
			srb->state = SRB_READY;
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_HWRESELECT);
			return 1;
		}
		/* Send Tag id */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
		dcb->tag_mask |= tag_mask;
		srb->tag_number = tag_number;
		/* SEL_ATN3: selection with 3 message bytes queued */
		scsicommand = SCMD_SEL_ATN3;
		srb->state = SRB_START_;
	}
#endif
/*polling:*/
	/* Send CDB ..command block ......... */
	dprintkdbg(DBG_KG, "start_scsi: (pid#%li) <%02i-%i> cmnd=0x%02x tag=%i\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun,
		srb->cmd->cmnd[0], srb->tag_number);
	if (srb->flag & AUTO_REQSENSE) {
		/* Internally generated REQUEST SENSE CDB */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
			      sizeof(srb->cmd->sense_buffer));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	} else {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++)
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	}
      no_cmd:
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
		       DO_HWRESELECT | DO_DATALATCH);
	if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
		/*
		 * If start_scsi return 1:
		 * we caught an interrupt (must be reset or reselection ... )
		 * : Let's process it first!
		 */
		dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> Failed - busy\n",
			srb->cmd->pid, dcb->target_id, dcb->target_lun);
		srb->state = SRB_READY;
		free_tag(dcb, srb);
		srb->msg_count = 0;
		return_code = 1;
		/* This IRQ should NOT get lost, as we did not acknowledge it */
	} else {
		/*
		 * If start_scsi returns 0:
		 * we know that the SCSI processor is free
		 */
		srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
		dcb->active_srb = srb;
		acb->active_dcb = dcb;
		return_code = 0;
		/* it's important for atn stop */
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
			       DO_DATALATCH | DO_HWRESELECT);
		/* SCSI command */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
	}
	return return_code;
}
  1458. #define DC395x_ENABLE_MSGOUT \
  1459. DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
  1460. srb->state |= SRB_MSGOUT
/*
 * Queue an ABORT message for the current SRB and raise ATN so the
 * target switches to MSG OUT phase.
 */
static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	srb->msgout_buf[0] = ABORT;
	srb->msg_count = 1;
	DC395x_ENABLE_MSGOUT;	/* sets ATN; also sets SRB_MSGOUT */
	srb->state &= ~SRB_MSGIN;
	/* NOTE: redundant — DC395x_ENABLE_MSGOUT already set SRB_MSGOUT */
	srb->state |= SRB_MSGOUT;
}
/**
 * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
 * have been triggered for this card.
 *
 * Acknowledges the SCSI interrupt, dispatches bus-level events
 * (selection timeout, disconnect, reselection, bus reset) and, for
 * bus-service/command-done interrupts, runs the two-stage software
 * phase machine: the phase0 handler for the phase just left, then the
 * phase1 handler for the phase just entered.
 *
 * @acb: a pointer to the adpter control block
 * @scsi_status: the status return when we checked the card
 **/
static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
		u16 scsi_status)
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	u16 phase;
	u8 scsi_intstatus;
	unsigned long flags;
	void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
			      u16 *);

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* This acknowledges the IRQ */
	scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	if ((scsi_status & 0x2007) == 0x2002)
		dprintkl(KERN_DEBUG,
			"COP after COP completed? %04x\n", scsi_status);
	if (debug_enabled(DBG_KG)) {
		if (scsi_intstatus & INT_SELTIMEOUT)
			dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
	}
	/*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */

	/* The IRQ arrived, so the missed-SelTO workaround timer can go */
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	/* Bus-level events are handled without the phase machine */
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		disconnect(acb);	/* bus free interrupt */
		goto out_unlock;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		reselect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SELECT) {
		dprintkl(KERN_INFO, "Host does not support target mode!\n");
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		scsi_reset_detect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		dcb = acb->active_dcb;
		if (!dcb) {
			dprintkl(KERN_DEBUG,
				"Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
				scsi_status, scsi_intstatus);
			goto out_unlock;
		}
		srb = dcb->active_srb;
		if (dcb->flag & ABORT_DEV_) {
			dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
			enable_msgout_abort(acb, srb);
		}

		/* software sequential machine */
		phase = (u16)srb->scsi_phase;

		/*
		 * 62037 or 62137
		 * call dc395x_scsi_phase0[]... "phase entry"
		 * handle every phase before start transfer
		 */
		/* data_out_phase0,	phase:0 */
		/* data_in_phase0,	phase:1 */
		/* command_phase0,	phase:2 */
		/* status_phase0,	phase:3 */
		/* nop0,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop0,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase0,	phase:6 */
		/* msgin_phase0,	phase:7 */
		dc395x_statev = dc395x_scsi_phase0[phase];
		dc395x_statev(acb, srb, &scsi_status);

		/*
		 * if there were any exception occured scsi_status
		 * will be modify to bus free phase new scsi_status
		 * transfer out from ... previous dc395x_statev
		 */
		srb->scsi_phase = scsi_status & PHASEMASK;
		phase = (u16)scsi_status & PHASEMASK;

		/*
		 * call dc395x_scsi_phase1[]... "phase entry" handle
		 * every phase to do transfer
		 */
		/* data_out_phase1,	phase:0 */
		/* data_in_phase1,	phase:1 */
		/* command_phase1,	phase:2 */
		/* status_phase1,	phase:3 */
		/* nop1,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop1,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase1,	phase:6 */
		/* msgin_phase1,	phase:7 */
		dc395x_statev = dc395x_scsi_phase1[phase];
		dc395x_statev(acb, srb, &scsi_status);
	}
      out_unlock:
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
/*
 * Top-level (possibly shared) interrupt handler.  Checks whether this
 * card raised the interrupt; SCSI interrupts go to
 * dc395x_handle_interrupt(), DMA-engine errors are currently only
 * logged.  Returns IRQ_HANDLED when the interrupt was ours.
 */
static irqreturn_t dc395x_interrupt(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)dev_id;
	u16 scsi_status;
	u8 dma_status;
	irqreturn_t handled = IRQ_NONE;

	/*
	 * Check for pending interupt
	 */
	scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
	dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
	if (scsi_status & SCSIINTERRUPT) {
		/* interupt pending - let's process it! */
		dc395x_handle_interrupt(acb, scsi_status);
		handled = IRQ_HANDLED;
	}
	else if (dma_status & 0x20) {
		/* Error from the DMA engine */
		dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
		dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
		if (acb->active_dcb) {
			acb->active_dcb-> flag |= ABORT_DEV_;
			if (acb->active_dcb->active_srb)
				enable_msgout_abort(acb, acb->active_dcb->active_srb);
		}
		DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
		dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
		/* dead store; only documents that acb is deliberately not used here */
		acb = NULL;
#endif
		handled = IRQ_HANDLED;
	}
	return handled;
}
/*
 * Called on leaving MSG OUT phase.  If we had sent an abort (or were
 * unexpectedly reselected) force the state machine to bus-free;
 * otherwise just latch the data signals and clear SRB_MSGOUT.
 */
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgout_phase0: (pid#%li)\n", srb->cmd->pid);
	if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
		*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	srb->state &= ~SRB_MSGOUT;
}
/*
 * Called on entering MSG OUT phase: push the queued message bytes
 * (or a NOP if none are queued) into the SCSI FIFO and start the
 * FIFO-out transfer.  Marks the SRB as SRB_ABORT_SENT when the
 * message being sent is an ABORT.
 */
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 i;
	u8 *ptr;
	dprintkdbg(DBG_0, "msgout_phase1: (pid#%li)\n", srb->cmd->pid);

	clear_fifo(acb, "msgout_phase1");
	if (!(srb->state & SRB_MSGOUT)) {
		srb->state |= SRB_MSGOUT;
		dprintkl(KERN_DEBUG,
			"msgout_phase1: (pid#%li) Phase unexpected\n",
			srb->cmd->pid);	/* So what ? */
	}
	if (!srb->msg_count) {
		/* Target wants a message but we have none: answer with NOP */
		dprintkdbg(DBG_0, "msgout_phase1: (pid#%li) NOP msg\n",
			srb->cmd->pid);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
		return;
	}
	ptr = (u8 *)srb->msgout_buf;
	for (i = 0; i < srb->msg_count; i++)
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	srb->msg_count = 0;
	if (srb->msgout_buf[0] == MSG_ABORT)
		srb->state = SRB_ABORT_SENT;

	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
/*
 * Called on leaving COMMAND phase: nothing to account for, just latch
 * the data signals.
 */
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "command_phase0: (pid#%li)\n", srb->cmd->pid);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * Called on entering COMMAND phase: push the CDB bytes into the SCSI
 * FIFO (either the midlayer's CDB or an internally built REQUEST
 * SENSE for AUTO_REQSENSE) and start the FIFO-out transfer.
 */
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb;
	u8 *ptr;
	u16 i;
	dprintkdbg(DBG_0, "command_phase1: (pid#%li)\n", srb->cmd->pid);

	clear_fifo(acb, "command_phase1");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
	if (!(srb->flag & AUTO_REQSENSE)) {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++) {
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
			ptr++;
		}
	} else {
		/* Internally generated REQUEST SENSE CDB */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		dcb = acb->active_dcb;
		/* lun in CDB byte 1, bits 5-7 */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
			      sizeof(srb->cmd->sense_buffer));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	}
	srb->state |= SRB_COMMAND;
	/* it's important for atn stop */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
  1684. /*
  1685. * Verify that the remaining space in the hw sg lists is the same as
  1686. * the count of remaining bytes in srb->total_xfer_length
  1687. */
  1688. static void sg_verify_length(struct ScsiReqBlk *srb)
  1689. {
  1690. if (debug_enabled(DBG_SG)) {
  1691. unsigned len = 0;
  1692. unsigned idx = srb->sg_index;
  1693. struct SGentry *psge = srb->segment_x + idx;
  1694. for (; idx < srb->sg_count; psge++, idx++)
  1695. len += psge->length;
  1696. if (len != srb->total_xfer_length)
  1697. dprintkdbg(DBG_SG,
  1698. "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
  1699. srb->total_xfer_length, len);
  1700. }
  1701. }
/*
 * Compute the next Scatter Gather list index and adjust its length
 * and address if necessary; also compute virt_addr
 */
static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
{
	u8 idx;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd = srb->cmd;
	int segment = cmd->use_sg;
	u32 xferred = srb->total_xfer_length - left;	/* bytes transfered */
	struct SGentry *psge = srb->segment_x + srb->sg_index;
	void **virt = srb->virt_map;

	dprintkdbg(DBG_0,
		"sg_update_list: Transfered %i of %i bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
	if (xferred == 0) {
		/* nothing to update since we did not transfer any data */
		return;
	}

	sg_verify_length(srb);
	srb->total_xfer_length = left;	/* update remaining count */
	/* Consume whole entries; adjust the first partially-done one */
	for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
		if (xferred >= psge->length) {
			/* Complete SG entries done */
			xferred -= psge->length;
		} else {
			/* Partial SG entry done */
			psge->length -= xferred;
			psge->address += xferred;
			srb->sg_index = idx;
			/* The hardware reads the SG table from DMA memory:
			 * write our update back before it is used again */
			pci_dma_sync_single_for_device(srb->dcb->
						       acb->dev,
						       srb->sg_bus_addr,
						       SEGMENTX_LEN,
						       PCI_DMA_TODEVICE);
			break;
		}
		psge++;
	}
	sg_verify_length(srb);

	/* we need the corresponding virtual address */
	if (!segment) {
		/* single linear buffer: just advance the pointer */
		srb->virt_addr += xferred;
		return;
	}

	/* We have to walk the scatterlist to find it */
	sg = (struct scatterlist *)cmd->request_buffer;
	idx = 0;
	while (segment--) {
		/* match the entry whose mapped range contains psge->address */
		unsigned long mask =
		    ~((unsigned long)sg->length - 1) & PAGE_MASK;
		if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
			srb->virt_addr = virt[idx] + (psge->address & ~PAGE_MASK);
			return;
		}
		++sg;
		++idx;
	}

	dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");
	srb->virt_addr = NULL;
}
  1764. /*
  1765. * We have transfered a single byte (PIO mode?) and need to update
  1766. * the count of bytes remaining (total_xfer_length) and update the sg
  1767. * entry to either point to next byte in the current sg entry, or of
  1768. * already at the end to point to the start of the next sg entry
  1769. */
  1770. static void sg_subtract_one(struct ScsiReqBlk *srb)
  1771. {
  1772. srb->total_xfer_length--;
  1773. srb->segment_x[srb->sg_index].length--;
  1774. if (srb->total_xfer_length &&
  1775. !srb->segment_x[srb->sg_index].length) {
  1776. if (debug_enabled(DBG_PIO))
  1777. printk(" (next segment)");
  1778. srb->sg_index++;
  1779. sg_update_list(srb, srb->total_xfer_length);
  1780. }
  1781. }
/*
 * cleanup_after_transfer
 *
 * Makes sure, DMA and SCSI engine are empty, after the transfer has finished
 * KG: Currently called from StatusPhase1 ()
 * Should probably also be called from other places
 * Best might be to call it in DataXXPhase0, if new phase will differ
 */
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */
	/* Direction decides the flush order: drain the downstream FIFO
	 * first.  Bit 0x40 of SCSI_FIFOCNT / 0x80 of DMA_FIFOSTAT read
	 * as "FIFO empty" here, so only non-empty FIFOs are cleared. */
	if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {	/* read */
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/in");
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	} else {	/* write */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/out");
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * Those no of bytes will be transfered w/ PIO through the SCSI FIFO
 * Seems to be needed for unknown reasons; could be a hardware bug :-(
 */
#define DC395x_LASTPIO 4

/*
 * Called on leaving DATA OUT phase: stop the DMA engine, work out how
 * many bytes did NOT make it to the target (SCSI transfer counter plus
 * whatever is still sitting in the SCSI FIFO) and rewind the SRB's
 * scatter-gather bookkeeping by that amount so the residue can be
 * retransmitted later.
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_out_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);

	/*
	 * KG: We need to drain the buffers before we draw any conclusions!
	 * This means telling the DMA to push the rest into SCSI, telling
	 * SCSI to push the rest to the bus.
	 * However, the device might have been the one to stop us (phase
	 * change), and the data in transit just needs to be accounted so
	 * it can be retransmitted.)
	 */

	/*
	 * KG: Stop DMA engine pushing more data into the SCSI FIFO
	 * If we need more data, the DMA SG list will be freshly set up, anyway
	 */
	dprintkdbg(DBG_PIO, "data_out_phase0: "
		"DMA{fifcnt=0x%02x fifostat=0x%02x} "
		"SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
		srb->total_xfer_length);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);

	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			srb->status |= PARITY_ERROR;

		/*
		 * KG: Right, we can't just rely on the SCSI_COUNTER, because this
		 * is the no of bytes it got from the DMA engine not the no it
		 * transferred successfully to the device. (And the difference could
		 * be as much as the FIFO size, I guess ...)
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			/* wide bus: FIFO count is in words, not bytes */
			if (dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;

			dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
				"SCSI{fifocnt=0x%02x cnt=0x%08x} "
				"DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				(dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
				DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
		}
		/*
		 * calculate all the residue data that not yet transfered
		 * SCSI transfer counter + left in SCSI FIFO data
		 *
		 * .....TRM_S1040_SCSI_COUNTER (24bits)
		 * The counter always decrement by one for every SCSI byte transfer.
		 * .....TRM_S1040_SCSI_FIFOCNT ( 5bits)
		 * The counter is SCSI FIFO offset counter (in units of bytes or! words)
		 */
		if (srb->total_xfer_length > DC395x_LASTPIO)
			d_left_counter +=
			    DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);

		/* Is this a good idea? */
		/*clear_fifo(acb, "DOP1"); */
		/* KG: What is this supposed to be useful for? WIDE padding stuff? */
		if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
		    && srb->cmd->request_bufflen % 2) {
			d_left_counter = 0;
			dprintkl(KERN_INFO,
				"data_out_phase0: Discard 1 byte (0x%02x)\n",
				scsi_status);
		}
		/*
		 * KG: Oops again. Same thinko as above: The SCSI might have been
		 * faster than the DMA engine, so that it ran out of data.
		 * In that case, we have to do just nothing!
		 * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or?
		 */
		/*
		 * KG: This is nonsense: We have been WRITING data to the bus
		 * If the SCSI engine has no bytes left, how should the DMA engine?
		 */
		if (d_left_counter == 0) {
			srb->total_xfer_length = 0;
		} else {
			/*
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			long oldxferred =
			    srb->total_xfer_length - d_left_counter;
			const int diff =
			    (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
			sg_update_list(srb, d_left_counter);
			/* KG: Most ugly hack! Apparently, this works around a chip bug */
			if ((srb->segment_x[srb->sg_index].length ==
			     diff && srb->cmd->use_sg)
			    || ((oldxferred & ~PAGE_MASK) ==
				(PAGE_SIZE - diff))
			    ) {
				dprintkl(KERN_INFO, "data_out_phase0: "
					"Work around chip bug (%i)?\n", diff);
				d_left_counter =
				    srb->total_xfer_length - diff;
				sg_update_list(srb, d_left_counter);
				/*srb->total_xfer_length -= diff; */
				/*srb->virt_addr += diff; */
				/*if (srb->cmd->use_sg) */
				/*	srb->sg_index++; */
			}
		}
	}
	/* Leaving data phase for good: make sure both engines are drained */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * Called on entering DATA OUT phase: flush the SCSI FIFO and set up
 * the actual outbound transfer via data_io_transfer().
 */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_out_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	clear_fifo(acb, "data_out_phase1");
	/* do prepare before transfer when data out phase */
	data_io_transfer(acb, srb, XFERDATAOUT);
}
/*
 * Data-in phase, part 0: invoked when the data-in phase ends (the target
 * switches to another phase).  Records parity errors, drains any bytes
 * still left in the SCSI FIFO (a small tail is moved by PIO because the
 * DMA engine apparently cannot handle it - see DC395x_LASTPIO), updates
 * the scatter-gather bookkeeping with the residual byte count, and cleans
 * up the transfer engines once the bus has really left data-in phase.
 */
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_in_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);

	/*
	 * KG: DataIn is much more tricky than DataOut. When the device is finished
	 * and switches to another phase, the SCSI engine should be finished too.
	 * But: There might still be bytes left in its FIFO to be fetched by the DMA
	 * engine and transferred to memory.
	 * We should wait for the FIFOs to be emptied by that (is there any way to
	 * enforce this?) and then stop the DMA engine, because it might think, that
	 * there are more bytes to follow. Yes, the device might disconnect prior to
	 * having all bytes transferred!
	 * Also we should make sure that all data from the DMA engine buffer's really
	 * made its way to the system memory! Some documentation on this would not
	 * seem to be a bad idea, actually.
	 */
	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR) {
			dprintkl(KERN_INFO, "data_in_phase0: (pid#%li) "
				"Parity Error\n", srb->cmd->pid);
			srb->status |= PARITY_ERROR;
		}
		/*
		 * KG: We should wait for the DMA FIFO to be empty ...
		 * but: it would be better to wait first for the SCSI FIFO and then the
		 * the DMA FIFO to become empty? How do we know, that the device not already
		 * sent data to the FIFO in a MsgIn phase, eg.?
		 */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
#if 0
			int ctr = 6000000;
			dprintkl(KERN_DEBUG,
				"DIP0: Wait for DMA FIFO to flush ...\n");
			/*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
			/*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
			while (!
			       (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
				0x80) && --ctr);
			if (ctr < 6000000 - 1)
				dprintkl(KERN_DEBUG
					"DIP0: Had to wait for DMA ...\n");
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
#endif
			dprintkdbg(DBG_KG, "data_in_phase0: "
				"DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
		}
		/* Now: Check remainig data: The SCSI counters should tell us ... */
		/* The SCSI FIFO counts words (not bytes) in wide mode, hence the shift */
		d_left_counter = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER)
		    + ((DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x1f)
		       << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
			   0));
		dprintkdbg(DBG_KG, "data_in_phase0: "
			"SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
			"DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
			"Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
			DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
			(srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
			DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
			DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
			DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
			DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
			srb->total_xfer_length, d_left_counter);
#if DC395x_LASTPIO
		/* KG: Less than or equal to 4 bytes can not be transfered via DMA, it seems. */
		if (d_left_counter
		    && srb->total_xfer_length <= DC395x_LASTPIO) {
			/*u32 addr = (srb->segment_x[srb->sg_index].address); */
			/*sg_update_list (srb, d_left_counter); */
			dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) to "
				"%p for remaining %i bytes:",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x1f,
				(srb->dcb->sync_period & WIDE_SYNC) ?
				    "words" : "bytes",
				srb->virt_addr,
				srb->total_xfer_length);
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					CFG2_WIDEFIFO);
			/* Drain the SCSI FIFO by PIO; FIFOCNT == 0x40 apparently
			 * marks an empty FIFO - TODO confirm against the chip docs */
			while (DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) != 0x40) {
				u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				*(srb->virt_addr)++ = byte;
				if (debug_enabled(DBG_PIO))
					printk(" %02x", byte);
				d_left_counter--;
				sg_subtract_one(srb);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
#if 1
				/* Read the last byte ... */
				if (srb->total_xfer_length > 0) {
					u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
					*(srb->virt_addr)++ = byte;
					srb->total_xfer_length--;
					if (debug_enabled(DBG_PIO))
						printk(" %02x", byte);
				}
#endif
				/* Leave wide-FIFO mode again */
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
			/*srb->total_xfer_length = 0; */
			if (debug_enabled(DBG_PIO))
				printk("\n");
		}
#endif				/* DC395x_LASTPIO */

#if 0
		/*
		 * KG: This was in DATAOUT. Does it also belong here?
		 * Nobody seems to know what counter and fifo_cnt count exactly ...
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			if (srb->dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;
			/*
			 * if WIDE scsi SCSI FIFOCNT unit is word !!!
			 * so need to *= 2
			 * KG: Seems to be correct ...
			 */
		}
#endif
		/* KG: This should not be needed any more! */
		if (d_left_counter == 0
		    || (scsi_status & SCSIXFERCNT_2_ZERO)) {
#if 0
			int ctr = 6000000;
			u8 TempDMAstatus;
			do {
				TempDMAstatus =
				    DC395x_read8(acb, TRM_S1040_DMA_STATUS);
			} while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DataInPhase0 waiting for DMA!!\n");
			srb->total_xfer_length = 0;
#endif
			/* Whole transfer consumed: remember the residual count */
			srb->total_xfer_length = d_left_counter;
		} else {	/* phase changed */
			/*
			 * parsing the case:
			 * when a transfer not yet complete
			 * but be disconnected by target
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			sg_update_list(srb, d_left_counter);
		}
	}
	/* KG: The target may decide to disconnect: Empty FIFO before! */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * Data-in phase, part 1: invoked on entry into the data-in phase.
 * Just programs the DMA/PIO machinery for the incoming transfer.
 */
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_in_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	data_io_transfer(acb, srb, XFERDATAIN);
}
/*
 * Program the chip for the next data transfer in the direction given by
 * io_dir (XFERDATAIN/XFERDATAOUT).  Three cases, chosen by the remaining
 * length:
 *  - more than DC395x_LASTPIO bytes: set up a (scatter-gather) DMA
 *    transfer;
 *  - 1..DC395x_LASTPIO bytes: move the tail by PIO through the SCSI FIFO;
 *  - 0 bytes but the target still wants data: feed/discard pad bytes
 *    and flag an over/under-run.
 */
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	dprintkdbg(DBG_0,
		"data_io_transfer: (pid#%li) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun,
		((io_dir & DMACMD_DIR) ? 'r' : 'w'),
		srb->total_xfer_length, srb->sg_index, srb->sg_count);
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
	if (srb->sg_index >= srb->sg_count) {
		/* can't happen? out of bounds error */
		return;
	}

	if (srb->total_xfer_length > DC395x_LASTPIO) {
		u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
		/*
		 * KG: What should we do: Use SCSI Cmd 0x90/0x92?
		 * Maybe, even ABORTXFER would be appropriate
		 */
		if (dma_status & XFERPENDING) {
			dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
				"Expect trouble!\n");
			dump_register_info(acb, dcb, srb);
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		}
		/* clear_fifo(acb, "IO"); */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
		if (srb->cmd->use_sg) {	/* with S/G */
			io_dir |= DMACMD_SG;
			/* Point the DMA engine at the remaining SG entries */
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				srb->sg_bus_addr +
				sizeof(struct SGentry) *
				srb->sg_index);
			/* load how many bytes in the sg list table */
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				((u32)(srb->sg_count -
				       srb->sg_index) << 3));
		} else {	/* without S/G */
			io_dir &= ~DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				srb->segment_x[0].address);
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				srb->segment_x[0].length);
		}
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* NOTE(review): the SCSI vs DMA command write order is swapped
		 * between read and write on purpose - keep it that way */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_DMA_IN);
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
		} else {
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_DMA_OUT);
		}
	}
#if DC395x_LASTPIO
	else if (srb->total_xfer_length > 0) {	/* The last four bytes: Do PIO */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_FIFO_IN);
		} else {	/* write */
			int ln = srb->total_xfer_length;
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					CFG2_WIDEFIFO);
			dprintkdbg(DBG_PIO,
				"data_io_transfer: PIO %i bytes from %p:",
				srb->total_xfer_length, srb->virt_addr);
			/* Push the remaining bytes into the SCSI FIFO */
			while (srb->total_xfer_length) {
				if (debug_enabled(DBG_PIO))
					printk(" %02x", (unsigned char) *(srb->virt_addr));
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
					*(srb->virt_addr)++);
				sg_subtract_one(srb);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
				/* Odd byte count in wide mode: pad to a full word */
				if (ln % 2) {
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
					if (debug_enabled(DBG_PIO))
						printk(" |00");
				}
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
			if (debug_enabled(DBG_PIO))
				printk("\n");
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_FIFO_OUT);
		}
	}
#endif				/* DC395x_LASTPIO */
	else {		/* xfer pad */
		u8 data = 0, data2 = 0;
		if (srb->sg_count) {
			/* Target wants more data than we have: flag overrun */
			srb->adapter_status = H_OVER_UNDER_RUN;
			srb->status |= OVER_RUN;
		}
		/*
		 * KG: despite the fact that we are using 16 bits I/O ops
		 * the SCSI FIFO is only 8 bits according to the docs
		 * (we can set bit 1 in 0x8f to serialize FIFO access ...)
		 */
		if (dcb->sync_period & WIDE_SYNC) {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				CFG2_WIDEFIFO);
			if (io_dir & DMACMD_DIR) {
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			} else {
				/* Danger, Robinson: If you find KGs
				 * scattered over the wide disk, the driver
				 * or chip is to blame :-( */
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
			}
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
		} else {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
			/* Danger, Robinson: If you find a collection of Ks on your disk
			 * something broke :-( */
			if (io_dir & DMACMD_DIR)
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			else
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
		}
		srb->state |= SRB_XFERPAD;
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* SCSI command */
		bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
	}
}
/*
 * Status phase, part 0: read the status byte and the following message
 * byte from the SCSI FIFO, mark the SRB completed and acknowledge the
 * message so the target can release the bus.
 */
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase0: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);	/* get message */
	srb->state = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Status phase, part 1: invoked on entry into the status phase.
 * Marks the SRB as waiting for status and issues the "complete
 * sequence" SCSI command to fetch status + message.
 */
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase1: (pid#%li) <%02i-%i>\n",
		srb->cmd->pid, srb->cmd->device->id, srb->cmd->device->lun);
	srb->state = SRB_STATUS;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
}
  2298. /* Check if the message is complete */
  2299. static inline u8 msgin_completed(u8 * msgbuf, u32 len)
  2300. {
  2301. if (*msgbuf == EXTENDED_MESSAGE) {
  2302. if (len < 2)
  2303. return 0;
  2304. if (len < msgbuf[1] + 2)
  2305. return 0;
  2306. } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */
  2307. if (len < 2)
  2308. return 0;
  2309. return 1;
  2310. }
  2311. /* reject_msg */
  2312. static inline void msgin_reject(struct AdapterCtlBlk *acb,
  2313. struct ScsiReqBlk *srb)
  2314. {
  2315. srb->msgout_buf[0] = MESSAGE_REJECT;
  2316. srb->msg_count = 1;
  2317. DC395x_ENABLE_MSGOUT;
  2318. srb->state &= ~SRB_MSGIN;
  2319. srb->state |= SRB_MSGOUT;
  2320. dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
  2321. srb->msgin_buf[0],
  2322. srb->dcb->target_id, srb->dcb->target_lun);
  2323. }
  2324. static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
  2325. struct DeviceCtlBlk *dcb, u8 tag)
  2326. {
  2327. struct ScsiReqBlk *srb = NULL;
  2328. struct ScsiReqBlk *i;
  2329. dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) tag=%i srb=%p\n",
  2330. srb->cmd->pid, tag, srb);
  2331. if (!(dcb->tag_mask & (1 << tag)))
  2332. dprintkl(KERN_DEBUG,
  2333. "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
  2334. dcb->tag_mask, tag);
  2335. if (list_empty(&dcb->srb_going_list))
  2336. goto mingx0;
  2337. list_for_each_entry(i, &dcb->srb_going_list, list) {
  2338. if (i->tag_number == tag) {
  2339. srb = i;
  2340. break;
  2341. }
  2342. }
  2343. if (!srb)
  2344. goto mingx0;
  2345. dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) <%02i-%i>\n",
  2346. srb->cmd->pid, srb->dcb->target_id, srb->dcb->target_lun);
  2347. if (dcb->flag & ABORT_DEV_) {
  2348. /*srb->state = SRB_ABORT_SENT; */
  2349. enable_msgout_abort(acb, srb);
  2350. }
  2351. if (!(srb->state & SRB_DISCONNECT))
  2352. goto mingx0;
  2353. memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
  2354. srb->state |= dcb->active_srb->state;
  2355. srb->state |= SRB_DATA_XFER;
  2356. dcb->active_srb = srb;
  2357. /* How can we make the DORS happy? */
  2358. return srb;
  2359. mingx0:
  2360. srb = acb->tmp_srb;
  2361. srb->state = SRB_UNEXPECT_RESEL;
  2362. dcb->active_srb = srb;
  2363. srb->msgout_buf[0] = MSG_ABORT_TAG;
  2364. srb->msg_count = 1;
  2365. DC395x_ENABLE_MSGOUT;
  2366. dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
  2367. return srb;
  2368. }
/*
 * Push the device's negotiated target id, sync period and sync offset
 * into the chip registers and recompute the transfer rate settings.
 * Called after every (re)negotiation and on reselection.
 */
static inline void reprogram_regs(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	set_xfer_rate(acb, dcb);
}
/*
 * set async transfer mode: the target rejected our SDTR, so fall back
 * to asynchronous transfers (offset 0, 200ns minimum period), reprogram
 * the chip, and - if wide negotiation is still pending - try WDTR next.
 */
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);
	dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
	dcb->sync_mode |= SYNC_NEGO_DONE;
	/*dcb->sync_period &= 0; */
	dcb->sync_offset = 0;
	dcb->min_nego_period = 200 >> 2;	/* 200ns <=> 5 MHz */
	srb->state &= ~SRB_DO_SYNC_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
	    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
		build_wdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
	}
}
/*
 * set sync transfer mode: evaluate a received SDTR message.  Clamps the
 * requested offset and period to what our configuration allows, programs
 * the chip, and - when the target (not us) initiated the negotiation -
 * replies with the corrected SDTR.
 */
static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	int fact;
	dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
		"(%02i.%01i MHz) Offset %i\n",
		dcb->target_id, srb->msgin_buf[3] << 2,
		(250 / srb->msgin_buf[3]),
		((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
		srb->msgin_buf[4]);
	/* Clamp the requested offset (msgin_buf[4]) to 15 and to our own
	 * configured limit; disable sync entirely if the EEPROM says so */
	if (srb->msgin_buf[4] > 15)
		srb->msgin_buf[4] = 15;
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
		dcb->sync_offset = 0;
	else if (dcb->sync_offset == 0)
		dcb->sync_offset = srb->msgin_buf[4];
	if (srb->msgin_buf[4] > dcb->sync_offset)
		srb->msgin_buf[4] = dcb->sync_offset;
	else
		dcb->sync_offset = srb->msgin_buf[4];
	/* Find the smallest clock divisor index satisfying both the
	 * target's requested period and our minimum negotiation period */
	bval = 0;
	while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
			    || dcb->min_nego_period >
			    clock_period[bval]))
		bval++;
	if (srb->msgin_buf[3] < clock_period[bval])
		dprintkl(KERN_INFO,
			"msgin_set_sync: Increase sync nego period to %ins\n",
			clock_period[bval] << 2);
	srb->msgin_buf[3] = clock_period[bval];
	dcb->sync_period &= 0xf0;
	dcb->sync_period |= ALT_SYNC | bval;
	dcb->min_nego_period = srb->msgin_buf[3];
	/* fact 500 = wide (16 bit), 250 = narrow; only used for the
	 * MB/s estimate printed below */
	if (dcb->sync_period & WIDE_SYNC)
		fact = 500;
	else
		fact = 250;
	dprintkl(KERN_INFO,
		"Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
		dcb->target_id, (fact == 500) ? "Wide16" : "",
		dcb->min_nego_period << 2, dcb->sync_offset,
		(fact / dcb->min_nego_period),
		((fact % dcb->min_nego_period) * 10 +
		 dcb->min_nego_period / 2) / dcb->min_nego_period);
	if (!(srb->state & SRB_DO_SYNC_NEGO)) {
		/* Reply with corrected SDTR Message */
		dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
			srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
		memcpy(srb->msgout_buf, srb->msgin_buf, 5);
		srb->msg_count = 5;
		DC395x_ENABLE_MSGOUT;
		dcb->sync_mode |= SYNC_NEGO_DONE;
	} else {
		/* We initiated the SDTR; optionally follow up with WDTR */
		if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
		    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
			build_wdtr(acb, dcb, srb);
			DC395x_ENABLE_MSGOUT;
			dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
		}
	}
	srb->state &= ~SRB_DO_SYNC_NEGO;
	dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
	reprogram_regs(acb, dcb);
}
/*
 * The target rejected our WDTR: fall back to narrow transfers,
 * reprogram the chip, and - if sync negotiation is still pending -
 * try SDTR next.
 */
static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
	dcb->sync_period &= ~WIDE_SYNC;
	dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
	dcb->sync_mode |= WIDE_NEGO_DONE;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
	}
}
/*
 * Evaluate a received WDTR message: clamp the requested width to what
 * adapter and EEPROM settings allow, answer with our own WDTR when the
 * target initiated the exchange, program the chip, and - if sync
 * negotiation is still pending - try SDTR next.
 */
static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	/* wide = 1 only if both the device config and the card support it */
	u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
		   && acb->config & HCC_WIDE_CARD) ? 1 : 0;
	dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
	if (srb->msgin_buf[3] > wide)
		srb->msgin_buf[3] = wide;
	/* Completed */
	if (!(srb->state & SRB_DO_WIDE_NEGO)) {
		/* Target started the negotiation: send back the clamped WDTR */
		dprintkl(KERN_DEBUG,
			"msgin_set_wide: Wide nego initiated <%02i>\n",
			dcb->target_id);
		memcpy(srb->msgout_buf, srb->msgin_buf, 4);
		srb->msg_count = 4;
		srb->state |= SRB_DO_WIDE_NEGO;
		DC395x_ENABLE_MSGOUT;
	}
	dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
	if (srb->msgin_buf[3] > 0)
		dcb->sync_period |= WIDE_SYNC;
	else
		dcb->sync_period &= ~WIDE_SYNC;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
	dprintkdbg(DBG_1,
		"msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
		(8 << srb->msgin_buf[3]), dcb->target_id);
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
	}
}
/*
 * extended message codes:
 *
 * code        description
 *
 * 00h         MODIFY DATA POINTER
 * 01h         SYNCHRONOUS DATA TRANSFER REQUEST
 * 02h         Reserved
 * 03h         WIDE DATA TRANSFER REQUEST
 * 04h - 7Fh   Reserved
 * 80h - FFh   Vendor specific
 */
/*
 * Message-in phase, part 0: one message byte has arrived in the SCSI
 * FIFO.  Collect it; once msgin_completed() says the message is whole,
 * dispatch on the message code (disconnect, queue tags, reject,
 * SDTR/WDTR, pointer messages, abort, ...) and finally acknowledge the
 * message so the target can continue.
 */
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	dprintkdbg(DBG_0, "msgin_phase0: (pid#%li)\n", srb->cmd->pid);

	srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
		/* Now eval the msg */
		switch (srb->msgin_buf[0]) {
		case DISCONNECT:
			srb->state = SRB_DISCONNECT;
			break;

		case SIMPLE_QUEUE_TAG:
		case HEAD_OF_QUEUE_TAG:
		case ORDERED_QUEUE_TAG:
			/* May swap the active SRB for the tagged one */
			srb =
			    msgin_qtag(acb, dcb,
				       srb->msgin_buf[1]);
			break;

		case MESSAGE_REJECT:
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_CLRATN | DO_DATALATCH);
			/* A sync nego message was rejected ! */
			if (srb->state & SRB_DO_SYNC_NEGO) {
				msgin_set_async(acb, srb);
				break;
			}
			/* A wide nego message was rejected ! */
			if (srb->state & SRB_DO_WIDE_NEGO) {
				msgin_set_nowide(acb, srb);
				break;
			}
			enable_msgout_abort(acb, srb);
			/*srb->state |= SRB_ABORT_SENT */
			break;

		case EXTENDED_MESSAGE:
			/* SDTR */
			if (srb->msgin_buf[1] == 3
			    && srb->msgin_buf[2] == EXTENDED_SDTR) {
				msgin_set_sync(acb, srb);
				break;
			}
			/* WDTR */
			if (srb->msgin_buf[1] == 2
			    && srb->msgin_buf[2] == EXTENDED_WDTR
			    && srb->msgin_buf[3] <= 2) {	/* sanity check ... */
				msgin_set_wide(acb, srb);
				break;
			}
			msgin_reject(acb, srb);
			break;

		case MSG_IGNOREWIDE:
			/* Discard  wide residual */
			dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
			break;

		case COMMAND_COMPLETE:
			/* nothing has to be done */
			break;

		case SAVE_POINTERS:
			/*
			 * SAVE POINTER may be ignored as we have the struct
			 * ScsiReqBlk* associated with the scsi command.
			 */
			dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) "
				"SAVE POINTER rem=%i Ignore\n",
				srb->cmd->pid, srb->total_xfer_length);
			break;

		case RESTORE_POINTERS:
			dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
			break;

		case ABORT:
			dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) "
				"<%02i-%i> ABORT msg\n",
				srb->cmd->pid, dcb->target_id,
				dcb->target_lun);
			dcb->flag |= ABORT_DEV_;
			enable_msgout_abort(acb, srb);
			break;

		default:
			/* reject unknown messages */
			if (srb->msgin_buf[0] & IDENTIFY_BASE) {
				dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
				srb->msg_count = 1;
				srb->msgout_buf[0] = dcb->identify_msg;
				DC395x_ENABLE_MSGOUT;
				srb->state |= SRB_MSGOUT;
				/*break; */
			}
			msgin_reject(acb, srb);
		}

		/* Clear counter and MsgIn state */
		srb->state &= ~SRB_MSGIN;
		acb->msg_len = 0;
	}
	*pscsi_status = PH_BUS_FREE;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important ... you know! */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Message-in phase, part 1: invoked on entry into the message-in phase.
 * Prepares to receive exactly one message byte into an empty FIFO and
 * switches the SRB from disconnected to message-in state.
 */
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgin_phase1: (pid#%li)\n", srb->cmd->pid);
	clear_fifo(acb, "msgin_phase1");
	DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
	if (!(srb->state & SRB_MSGIN)) {
		srb->state &= ~SRB_DISCONNECT;
		srb->state |= SRB_MSGIN;
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
}
/* Phase handler placeholder: nothing to do for this phase on "part 0". */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/* Phase handler placeholder: nothing to do for this phase on "part 1". */
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
  2648. static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
  2649. {
  2650. struct DeviceCtlBlk *i;
  2651. /* set all lun device's period, offset */
  2652. if (dcb->identify_msg & 0x07)
  2653. return;
  2654. if (acb->scan_devices) {
  2655. current_sync_offset = dcb->sync_offset;
  2656. return;
  2657. }
  2658. list_for_each_entry(i, &acb->dcb_list, list)
  2659. if (i->target_id == dcb->target_id) {
  2660. i->sync_period = dcb->sync_period;
  2661. i->sync_offset = dcb->sync_offset;
  2662. i->sync_mode = dcb->sync_mode;
  2663. i->min_nego_period = dcb->min_nego_period;
  2664. }
  2665. }
/*
 * Handle a bus-free/disconnect interrupt.  Depending on the active SRB's
 * state this is: an unexpected reselection being aborted, a sent abort
 * completing, a selection timeout (possibly retried), an expected
 * disconnect in the middle of a command, or normal command completion.
 */
static void disconnect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb;

	if (!dcb) {
		/* Disconnect without an active device: re-arm reselection
		 * and hold off the queue for a while */
		dprintkl(KERN_ERR, "disconnect: No such device\n");
		udelay(500);
		/* Suspend queue for a while */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;
		clear_fifo(acb, "disconnectEx");
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
		return;
	}
	srb = dcb->active_srb;
	acb->active_dcb = NULL;
	dprintkdbg(DBG_0, "disconnect: (pid#%li)\n", srb->cmd->pid);

	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	clear_fifo(acb, "disconnect");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
	if (srb->state & SRB_UNEXPECT_RESEL) {
		dprintkl(KERN_ERR,
			"disconnect: Unexpected reselection <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
		srb->state = 0;
		waiting_process_next(acb);
	} else if (srb->state & SRB_ABORT_SENT) {
		dcb->flag &= ~ABORT_DEV_;
		acb->scsi_host->last_reset = jiffies + HZ / 2 + 1;
		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
		waiting_process_next(acb);
	} else {
		if ((srb->state & (SRB_START_ + SRB_MSGOUT))
		    || !(srb->
			 state & (SRB_DISCONNECT + SRB_COMPLETED))) {
			/*
			 * Selection time out
			 * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
			 */
			/* Unexp. Disc / Sel Timeout */
			if (srb->state != SRB_START_
			    && srb->state != SRB_MSGOUT) {
				srb->state = SRB_READY;
				dprintkl(KERN_DEBUG,
					"disconnect: (pid#%li) Unexpected\n",
					srb->cmd->pid);
				srb->target_status = SCSI_STAT_SEL_TIMEOUT;
				goto disc1;
			} else {
				/* Normal selection timeout */
				dprintkdbg(DBG_KG, "disconnect: (pid#%li) "
					"<%02i-%i> SelTO\n", srb->cmd->pid,
					dcb->target_id, dcb->target_lun);
				if (srb->retry_count++ > DC395x_MAX_RETRIES
				    || acb->scan_devices) {
					srb->target_status =
					    SCSI_STAT_SEL_TIMEOUT;
					goto disc1;
				}
				/* Requeue the command and retry shortly */
				free_tag(dcb, srb);
				srb_going_to_waiting_move(dcb, srb);
				dprintkdbg(DBG_KG,
					"disconnect: (pid#%li) Retry\n",
					srb->cmd->pid);
				waiting_set_timer(acb, HZ / 20);
			}
		} else if (srb->state & SRB_DISCONNECT) {
			u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
			/*
			 * SRB_DISCONNECT (This is what we expect!)
			 */
			if (bval & 0x40) {
				dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
					" 0x%02x: ACK set! Other controllers?\n",
					bval);
				/* It could come from another initiator, therefore don't do much ! */
			} else
				waiting_process_next(acb);
		} else if (srb->state & SRB_COMPLETED) {
		      disc1:
			/*
			 ** SRB_COMPLETED
			 */
			free_tag(dcb, srb);
			dcb->active_srb = NULL;
			srb->state = SRB_FREE;
			srb_done(acb, dcb, srb);
		}
	}
}
/*
 * reselect - handle reselection of this initiator by a target that had
 * previously disconnected.  Reads the reselecting target/LUN from the
 * chip, re-associates the matching DCB and SRB, and reprograms the SCSI
 * core (host id, target id, sync parameters) for the reselected device.
 */
static void reselect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb = NULL;
	u16 rsel_tar_lun_id;
	u8 id, lun;
	u8 arblostflag = 0;	/* set below but only used in commented-out code */
	dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);

	clear_fifo(acb, "reselect");
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
	/* Read Reselected Target ID and LUN */
	rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
	if (dcb) {		/* Arbitration lost but Reselection win */
		srb = dcb->active_srb;
		if (!srb) {
			dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
				"but active_srb == NULL\n");
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
			return;
		}
		/* Why the if ? */
		if (!acb->scan_devices) {
			dprintkdbg(DBG_KG, "reselect: (pid#%li) <%02i-%i> "
				"Arb lost but Resel win rsel=%i stat=0x%04x\n",
				srb->cmd->pid, dcb->target_id,
				dcb->target_lun, rsel_tar_lun_id,
				DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
			arblostflag = 1;
			/*srb->state |= SRB_DISCONNECT; */

			/* the interrupted command is requeued and retried
			 * shortly via the waiting timer */
			srb->state = SRB_READY;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);

			/* return; */
		}
	}
	/* Read Reselected Target Id and LUN */
	if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
		dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
			"Got %i!\n", rsel_tar_lun_id);
	id = rsel_tar_lun_id & 0xff;
	lun = (rsel_tar_lun_id >> 8) & 7;
	dcb = find_dcb(acb, id, lun);
	if (!dcb) {
		dprintkl(KERN_ERR, "reselect: From non existent device "
			"<%02i-%i>\n", id, lun);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		return;
	}
	acb->active_dcb = dcb;

	if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
		dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
			"disconnection? <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);

	if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
		/* tagged: real SRB is identified later by the tag message,
		 * use the scratch SRB until then */
		srb = acb->tmp_srb;
		dcb->active_srb = srb;
	} else {
		/* There can be only one! */
		srb = dcb->active_srb;
		if (!srb || !(srb->state & SRB_DISCONNECT)) {
			/*
			 * abort command
			 */
			dprintkl(KERN_DEBUG,
				"reselect: w/o disconnected cmds <%02i-%i>\n",
				dcb->target_id, dcb->target_lun);
			srb = acb->tmp_srb;
			srb->state = SRB_UNEXPECT_RESEL;
			dcb->active_srb = srb;
			enable_msgout_abort(acb, srb);
		} else {
			if (dcb->flag & ABORT_DEV_) {
				/*srb->state = SRB_ABORT_SENT; */
				enable_msgout_abort(acb, srb);
			} else
				srb->state = SRB_DATA_XFER;
		}
	}
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	/* Program HA ID, target ID, period and offset */
	dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);	/* host   ID */
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);		/* target ID */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);		/* offset    */
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);		/* sync period, wide */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);		/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
  2848. static inline u8 tagq_blacklist(char *name)
  2849. {
  2850. #ifndef DC395x_NO_TAGQ
  2851. #if 0
  2852. u8 i;
  2853. for (i = 0; i < BADDEVCNT; i++)
  2854. if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
  2855. return 1;
  2856. #endif
  2857. return 0;
  2858. #else
  2859. return 1;
  2860. #endif
  2861. }
  2862. static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
  2863. {
  2864. /* Check for SCSI format (ANSI and Response data format) */
  2865. if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
  2866. if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
  2867. && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
  2868. /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
  2869. /* ((dcb->dev_type == TYPE_DISK)
  2870. || (dcb->dev_type == TYPE_MOD)) && */
  2871. !tagq_blacklist(((char *)ptr) + 8)) {
  2872. if (dcb->max_command == 1)
  2873. dcb->max_command =
  2874. dcb->acb->tag_max_num;
  2875. dcb->sync_mode |= EN_TAG_QUEUEING;
  2876. /*dcb->tag_mask = 0; */
  2877. } else
  2878. dcb->max_command = 1;
  2879. }
  2880. }
  2881. static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2882. struct ScsiInqData *ptr)
  2883. {
  2884. u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
  2885. dcb->dev_type = bval1;
  2886. /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */
  2887. disc_tagq_set(dcb, ptr);
  2888. }
  2889. /* unmap mapped pci regions from SRB */
  2890. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  2891. {
  2892. struct scsi_cmnd *cmd = srb->cmd;
  2893. enum dma_data_direction dir = cmd->sc_data_direction;
  2894. if (cmd->use_sg && dir != PCI_DMA_NONE) {
  2895. int i;
  2896. /* unmap DC395x SG list */
  2897. dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
  2898. srb->sg_bus_addr, SEGMENTX_LEN);
  2899. pci_unmap_single(acb->dev, srb->sg_bus_addr,
  2900. SEGMENTX_LEN,
  2901. PCI_DMA_TODEVICE);
  2902. dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
  2903. cmd->use_sg, cmd->request_buffer);
  2904. /* unmap the sg segments */
  2905. for (i = 0; i < srb->sg_count; i++)
  2906. kunmap(virt_to_page(srb->virt_map[i]));
  2907. pci_unmap_sg(acb->dev,
  2908. (struct scatterlist *)cmd->request_buffer,
  2909. cmd->use_sg, dir);
  2910. } else if (cmd->request_buffer && dir != PCI_DMA_NONE) {
  2911. dprintkdbg(DBG_SG, "pci_unmap_srb: buffer=%08x(%05x)\n",
  2912. srb->segment_x[0].address, cmd->request_bufflen);
  2913. pci_unmap_single(acb->dev, srb->segment_x[0].address,
  2914. cmd->request_bufflen, dir);
  2915. }
  2916. }
  2917. /* unmap mapped pci sense buffer from SRB */
  2918. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  2919. struct ScsiReqBlk *srb)
  2920. {
  2921. if (!(srb->flag & AUTO_REQSENSE))
  2922. return;
  2923. /* Unmap sense buffer */
  2924. dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
  2925. srb->segment_x[0].address);
  2926. pci_unmap_single(acb->dev, srb->segment_x[0].address,
  2927. srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
  2928. /* Restore SG stuff */
  2929. srb->total_xfer_length = srb->xferred;
  2930. srb->segment_x[0].address =
  2931. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
  2932. srb->segment_x[0].length =
  2933. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
  2934. }
/*
 * Complete execution of a SCSI command
 * Signal completion to the generic SCSI driver
 */
static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 tempcnt, status;
	struct scsi_cmnd *cmd = srb->cmd;
	struct ScsiInqData *ptr;
	enum dma_data_direction dir = cmd->sc_data_direction;

	/* locate the data buffer (possibly via the kmapped first SG page)
	 * so INQUIRY response data can be inspected below */
	if (cmd->use_sg) {
		struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
		ptr = (struct ScsiInqData *)(srb->virt_map[0] + sg->offset);
	} else {
		ptr = (struct ScsiInqData *)(cmd->request_buffer);
	}

	dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->pid,
		srb->cmd->device->id, srb->cmd->device->lun);
	dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p addr=%p\n",
		srb, cmd->use_sg, srb->sg_index, srb->sg_count,
		cmd->request_buffer, ptr);

	status = srb->target_status;
	if (srb->flag & AUTO_REQSENSE) {
		/* this completion belongs to the auto-issued REQUEST SENSE:
		 * restore the original transfer state and report the original
		 * CHECK_CONDITION together with the gathered sense data */
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
		pci_unmap_srb_sense(acb, srb);
		/*
		 ** target status..........................
		 */
		srb->flag &= ~AUTO_REQSENSE;
		srb->adapter_status = 0;
		srb->target_status = CHECK_CONDITION << 1;
		if (debug_enabled(DBG_1)) {
			/* decode the sense key for debugging output */
			switch (cmd->sense_buffer[2] & 0x0f) {
			case NOT_READY:
				dprintkl(KERN_DEBUG,
				     "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case UNIT_ATTENTION:
				dprintkl(KERN_DEBUG,
				     "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case ILLEGAL_REQUEST:
				dprintkl(KERN_DEBUG,
				     "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case MEDIUM_ERROR:
				dprintkl(KERN_DEBUG,
				     "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			case HARDWARE_ERROR:
				dprintkl(KERN_DEBUG,
				     "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				     cmd->cmnd[0], dcb->target_id,
				     dcb->target_lun, status, acb->scan_devices);
				break;
			}
			/* byte 7 = additional sense length; ASC/ASCQ valid
			 * only if at least 6 additional bytes present */
			if (cmd->sense_buffer[7] >= 6)
				printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
					"(0x%08x 0x%08x)\n",
					cmd->sense_buffer[2], cmd->sense_buffer[12],
					cmd->sense_buffer[13],
					*((unsigned int *)(cmd->sense_buffer + 3)),
					*((unsigned int *)(cmd->sense_buffer + 8)));
			else
				printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
					cmd->sense_buffer[2],
					*((unsigned int *)(cmd->sense_buffer + 3)));
		}
		/* 'status' still holds the REQUEST SENSE command's own
		 * target status; if that too was CHECK_CONDITION, give up */
		if (status == (CHECK_CONDITION << 1)) {
			cmd->result = DID_BAD_TARGET << 16;
			goto ckc_e;
		}
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");

		/* NOTE(review): both branches build an identical result; the
		 * commented-out SET_RES_DID suggests the underflow branch was
		 * once meant to differ — left as-is */
		if (srb->total_xfer_length
		    && srb->total_xfer_length >= cmd->underflow)
			cmd->result =
			    MK_RES_LNX(DRIVER_SENSE, DID_OK,
				       srb->end_message, CHECK_CONDITION);
		/*SET_RES_DID(cmd->result,DID_OK) */
		else
			cmd->result =
			    MK_RES_LNX(DRIVER_SENSE, DID_OK,
				       srb->end_message, CHECK_CONDITION);

		goto ckc_e;
	}

	/*************************************************************/
	if (status) {
		/*
		 * target status..........................
		 */
		if (status_byte(status) == CHECK_CONDITION) {
			/* kick off an auto REQUEST SENSE; this SRB completes
			 * later, so bail out now */
			request_sense(acb, dcb, srb);
			return;
		} else if (status_byte(status) == QUEUE_FULL) {
			/* shrink the per-device command limit to what the
			 * target currently accepts and retry via the timer */
			tempcnt = (u8)list_size(&dcb->srb_going_list);
			dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
			     dcb->target_id, dcb->target_lun, tempcnt);
			if (tempcnt > 1)
				tempcnt--;
			dcb->max_command = tempcnt;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);
			srb->adapter_status = 0;
			srb->target_status = 0;
			return;
		} else if (status == SCSI_STAT_SEL_TIMEOUT) {
			srb->adapter_status = H_SEL_TIMEOUT;
			srb->target_status = 0;
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			srb->adapter_status = 0;
			SET_RES_DID(cmd->result, DID_ERROR);
			SET_RES_MSG(cmd->result, srb->end_message);
			SET_RES_TARGET(cmd->result, status);
		}
	} else {
		/*
		 ** process initiator status..........................
		 */
		status = srb->adapter_status;
		if (status & H_OVER_UNDER_RUN) {
			srb->target_status = 0;
			SET_RES_DID(cmd->result, DID_OK);
			SET_RES_MSG(cmd->result, srb->end_message);
		} else if (srb->status & PARITY_ERROR) {
			SET_RES_DID(cmd->result, DID_PARITY);
			SET_RES_MSG(cmd->result, srb->end_message);
		} else {	/* No error */

			srb->adapter_status = 0;
			srb->target_status = 0;
			SET_RES_DID(cmd->result, DID_OK);
		}
	}

	/* sync the DMA buffers back for the CPU before inspecting them */
	if (dir != PCI_DMA_NONE) {
		if (cmd->use_sg)
			pci_dma_sync_sg_for_cpu(acb->dev,
					(struct scatterlist *)cmd->
					request_buffer, cmd->use_sg, dir);
		else if (cmd->request_buffer)
			pci_dma_sync_single_for_cpu(acb->dev,
					srb->segment_x[0].address,
					cmd->request_bufflen, dir);
	}

	/* cache INQUIRY byte 7 (capability flags) for this device */
	if ((cmd->result & RES_DID) == 0 && cmd->cmnd[0] == INQUIRY
	    && cmd->cmnd[2] == 0 && cmd->request_bufflen >= 8
	    && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
		dcb->inquiry7 = ptr->Flags;

	/* Check Error Conditions */
      ckc_e:

	/*if( srb->cmd->cmnd[0] == INQUIRY && */
	/*  (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
	if (cmd->cmnd[0] == INQUIRY && (cmd->result == (DID_OK << 16)
					|| status_byte(cmd->
						       result) &
					CHECK_CONDITION)) {
		/* first successful INQUIRY: set up TCQ for the device */
		if (!dcb->init_tcq_flag) {
			add_dev(acb, dcb, ptr);
			dcb->init_tcq_flag = 1;
		}
	}

	/* Here is the info for Doug Gilbert's sg3 ... */
	cmd->resid = srb->total_xfer_length;
	/* This may be interpreted by sb. or not ... */
	cmd->SCp.this_residual = srb->total_xfer_length;
	cmd->SCp.buffers_residual = 0;
	if (debug_enabled(DBG_KG)) {
		if (srb->total_xfer_length)
			dprintkdbg(DBG_KG, "srb_done: (pid#%li) <%02i-%i> "
				"cmnd=0x%02x Missed %i bytes\n",
				cmd->pid, cmd->device->id, cmd->device->lun,
				cmd->cmnd[0], srb->total_xfer_length);
	}

	srb_going_remove(dcb, srb);
	/* Add to free list */
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
	else {
		dprintkdbg(DBG_0, "srb_done: (pid#%li) done result=0x%08x\n",
			cmd->pid, cmd->result);
		srb_free_insert(acb, srb);
	}
	pci_unmap_srb(acb, srb);

	cmd->scsi_done(cmd);
	waiting_process_next(acb);
}
  3130. /* abort all cmds in our queues */
  3131. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
  3132. struct scsi_cmnd *cmd, u8 force)
  3133. {
  3134. struct DeviceCtlBlk *dcb;
  3135. dprintkl(KERN_INFO, "doing_srb_done: pids ");
  3136. list_for_each_entry(dcb, &acb->dcb_list, list) {
  3137. struct ScsiReqBlk *srb;
  3138. struct ScsiReqBlk *tmp;
  3139. struct scsi_cmnd *p;
  3140. list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
  3141. enum dma_data_direction dir;
  3142. int result;
  3143. p = srb->cmd;
  3144. dir = p->sc_data_direction;
  3145. result = MK_RES(0, did_flag, 0, 0);
  3146. printk("G:%li(%02i-%i) ", p->pid,
  3147. p->device->id, p->device->lun);
  3148. srb_going_remove(dcb, srb);
  3149. free_tag(dcb, srb);
  3150. srb_free_insert(acb, srb);
  3151. p->result = result;
  3152. pci_unmap_srb_sense(acb, srb);
  3153. pci_unmap_srb(acb, srb);
  3154. if (force) {
  3155. /* For new EH, we normally don't need to give commands back,
  3156. * as they all complete or all time out */
  3157. p->scsi_done(p);
  3158. }
  3159. }
  3160. if (!list_empty(&dcb->srb_going_list))
  3161. dprintkl(KERN_DEBUG,
  3162. "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
  3163. dcb->target_id, dcb->target_lun);
  3164. if (dcb->tag_mask)
  3165. dprintkl(KERN_DEBUG,
  3166. "tag_mask for <%02i-%i> should be empty, is %08x!\n",
  3167. dcb->target_id, dcb->target_lun,
  3168. dcb->tag_mask);
  3169. /* Waiting queue */
  3170. list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
  3171. int result;
  3172. p = srb->cmd;
  3173. result = MK_RES(0, did_flag, 0, 0);
  3174. printk("W:%li<%02i-%i>", p->pid, p->device->id,
  3175. p->device->lun);
  3176. srb_waiting_remove(dcb, srb);
  3177. srb_free_insert(acb, srb);
  3178. p->result = result;
  3179. pci_unmap_srb_sense(acb, srb);
  3180. pci_unmap_srb(acb, srb);
  3181. if (force) {
  3182. /* For new EH, we normally don't need to give commands back,
  3183. * as they all complete or all time out */
  3184. cmd->scsi_done(cmd);
  3185. }
  3186. }
  3187. if (!list_empty(&dcb->srb_waiting_list))
  3188. dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
  3189. list_size(&dcb->srb_waiting_list), dcb->target_id,
  3190. dcb->target_lun);
  3191. dcb->flag &= ~ABORT_DEV_;
  3192. }
  3193. printk("\n");
  3194. }
  3195. static void reset_scsi_bus(struct AdapterCtlBlk *acb)
  3196. {
  3197. dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
  3198. acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */
  3199. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
  3200. while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
  3201. /* nothing */;
  3202. }
  3203. static void set_basic_config(struct AdapterCtlBlk *acb)
  3204. {
  3205. u8 bval;
  3206. u16 wval;
  3207. DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
  3208. if (acb->config & HCC_PARITY)
  3209. bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
  3210. else
  3211. bval = PHASELATCH | INITIATOR | BLOCKRST;
  3212. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
  3213. /* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */
  3214. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03); /* was 0x13: default */
  3215. /* program Host ID */
  3216. DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
  3217. /* set ansynchronous transfer */
  3218. DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
  3219. /* Turn LED control off */
  3220. wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
  3221. DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
  3222. /* DMA config */
  3223. wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
  3224. wval |=
  3225. DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ;
  3226. DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
  3227. /* Clear pending interrupt status */
  3228. DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
  3229. /* Enable SCSI interrupt */
  3230. DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
  3231. DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
  3232. /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */
  3233. );
  3234. }
/*
 * scsi_reset_detect - handle a SCSI bus reset condition reported by the
 * chip.  Resets the SCSI and DMA modules, reprograms the basic chip
 * configuration and — unless the reset was requested by this driver
 * (RESET_DEV set) — fails all outstanding commands with DID_RESET.
 */
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
	dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
	/* delay half a second */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
	udelay(500);
	/* Maybe we locked up the bus? Then lets wait even longer ... */
	acb->scsi_host->last_reset =
	    jiffies + 5 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);
	/*1.25 */
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */

	if (acb->acb_flag & RESET_DEV) {	/* RESET_DETECT, RESET_DONE, RESET_DEV */
		/* reset was initiated by this driver: just record completion */
		acb->acb_flag |= RESET_DONE;
	} else {
		/* third-party reset: abort everything and restart processing */
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);
		/*DC395x_RecoverSRB( acb ); */
		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}
/*
 * request_sense - auto-issue a REQUEST SENSE for a command that ended
 * with CHECK_CONDITION.  The SRB's first S/G entry is temporarily
 * repointed at cmd->sense_buffer; the original entry and transfer
 * length are stashed (in the last S/G slot and srb->xferred) so that
 * pci_unmap_srb_sense() can restore them on completion.
 */
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (pid#%li) <%02i-%i>\n",
		cmd->pid, cmd->device->id, cmd->device->lun);

	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;
	/* KG: Can this prevent crap sense data ? */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	/* Save some data: the last S/G slot doubles as the save area */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* srb->segment_x : a one entry of S/G list table */
	srb->total_xfer_length = sizeof(cmd->sense_buffer);
	srb->segment_x[0].length = sizeof(cmd->sense_buffer);
	/* Map sense buffer */
	srb->segment_x[0].address =
	    pci_map_single(acb->dev, cmd->sense_buffer,
			   sizeof(cmd->sense_buffer), PCI_DMA_FROMDEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
	       cmd->sense_buffer, srb->segment_x[0].address,
	       sizeof(cmd->sense_buffer));
	srb->sg_count = 1;
	srb->sg_index = 0;

	if (start_scsi(acb, dcb, srb)) {	/* Should only happen, if sb. else grabs the bus */
		/* bus busy: requeue and retry via the waiting timer */
		dprintkl(KERN_DEBUG,
			"request_sense: (pid#%li) failed <%02i-%i>\n",
			srb->cmd->pid, dcb->target_id, dcb->target_lun);
		srb_going_to_waiting_move(dcb, srb);
		waiting_set_timer(acb, HZ / 100);
	}
}
/**
 * device_alloc - Allocate a new device instance. This creates the
 * device instance and sets up all the data items. The adapter
 * instance is required to obtain configuration information for this
 * device. This does *not* add this device to the adapter's device
 * list.
 *
 * @acb: The adapter to obtain configuration information from.
 * @target: The target for the new device.
 * @lun: The lun for the new device.
 *
 * Return the new device if successful or NULL on failure.
 **/
  3315. static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
  3316. u8 target, u8 lun)
  3317. {
  3318. struct NvRamType *eeprom = &acb->eeprom;
  3319. u8 period_index = eeprom->target[target].period & 0x07;
  3320. struct DeviceCtlBlk *dcb;
  3321. dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
  3322. dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
  3323. if (!dcb)
  3324. return NULL;
  3325. dcb->acb = NULL;
  3326. INIT_LIST_HEAD(&dcb->srb_going_list);
  3327. INIT_LIST_HEAD(&dcb->srb_waiting_list);
  3328. dcb->active_srb = NULL;
  3329. dcb->tag_mask = 0;
  3330. dcb->max_command = 1;
  3331. dcb->target_id = target;
  3332. dcb->target_lun = lun;
  3333. #ifndef DC395x_NO_DISCONNECT
  3334. dcb->identify_msg =
  3335. IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
  3336. #else
  3337. dcb->identify_msg = IDENTIFY(0, lun);
  3338. #endif
  3339. dcb->dev_mode = eeprom->target[target].cfg0;
  3340. dcb->inquiry7 = 0;
  3341. dcb->sync_mode = 0;
  3342. dcb->min_nego_period = clock_period[period_index];
  3343. dcb->sync_period = 0;
  3344. dcb->sync_offset = 0;
  3345. dcb->flag = 0;
  3346. #ifndef DC395x_NO_WIDE
  3347. if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
  3348. && (acb->config & HCC_WIDE_CARD))
  3349. dcb->sync_mode |= WIDE_NEGO_ENABLE;
  3350. #endif
  3351. #ifndef DC395x_NO_SYNC
  3352. if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
  3353. if (!(lun) || current_sync_offset)
  3354. dcb->sync_mode |= SYNC_NEGO_ENABLE;
  3355. #endif
  3356. if (dcb->target_lun != 0) {
  3357. /* Copy settings */
  3358. struct DeviceCtlBlk *p;
  3359. list_for_each_entry(p, &acb->dcb_list, list)
  3360. if (p->target_id == dcb->target_id)
  3361. break;
  3362. dprintkdbg(DBG_1,
  3363. "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
  3364. dcb->target_id, dcb->target_lun,
  3365. p->target_id, p->target_lun);
  3366. dcb->sync_mode = p->sync_mode;
  3367. dcb->sync_period = p->sync_period;
  3368. dcb->min_nego_period = p->min_nego_period;
  3369. dcb->sync_offset = p->sync_offset;
  3370. dcb->inquiry7 = p->inquiry7;
  3371. }
  3372. return dcb;
  3373. }
/**
 * adapter_add_device - Adds the device instance to the adapter instance.
 *
 * @acb: The adapter device to be updated
 * @dcb: A newly created and initialised device instance to add.
 **/
  3380. static void adapter_add_device(struct AdapterCtlBlk *acb,
  3381. struct DeviceCtlBlk *dcb)
  3382. {
  3383. /* backpointer to adapter */
  3384. dcb->acb = acb;
  3385. /* set run_robin to this device if it is currently empty */
  3386. if (list_empty(&acb->dcb_list))
  3387. acb->dcb_run_robin = dcb;
  3388. /* add device to list */
  3389. list_add_tail(&dcb->list, &acb->dcb_list);
  3390. /* update device maps */
  3391. acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
  3392. acb->children[dcb->target_id][dcb->target_lun] = dcb;
  3393. }
/**
 * adapter_remove_device - Removes the device instance from the adapter
 * instance. The device instance is not checked in any way or freed by this.
 * The caller is expected to take care of that. This will simply remove the
 * device from the adapter's data structures.
 *
 * @acb: The adapter device to be updated
 * @dcb: A device that has previously been added to the adapter.
 **/
  3403. static void adapter_remove_device(struct AdapterCtlBlk *acb,
  3404. struct DeviceCtlBlk *dcb)
  3405. {
  3406. struct DeviceCtlBlk *i;
  3407. struct DeviceCtlBlk *tmp;
  3408. dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
  3409. dcb->target_id, dcb->target_lun);
  3410. /* fix up any pointers to this device that we have in the adapter */
  3411. if (acb->active_dcb == dcb)
  3412. acb->active_dcb = NULL;
  3413. if (acb->dcb_run_robin == dcb)
  3414. acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
  3415. /* unlink from list */
  3416. list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
  3417. if (dcb == i) {
  3418. list_del(&i->list);
  3419. break;
  3420. }
  3421. /* clear map and children */
  3422. acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
  3423. acb->children[dcb->target_id][dcb->target_lun] = NULL;
  3424. dcb->acb = NULL;
  3425. }
  3426. /**
  3427. * adapter_remove_and_free_device - Removes a single device from the adapter
  3428. * and then frees the device information.
  3429. *
  3430. * @acb: The adapter device to be updated
  3431. * @dcb: A device that has previously been added to the adapter.
  3432. */
  3433. static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
  3434. struct DeviceCtlBlk *dcb)
  3435. {
  3436. if (list_size(&dcb->srb_going_list) > 1) {
  3437. dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
  3438. "Won't remove because of %i active requests.\n",
  3439. dcb->target_id, dcb->target_lun,
  3440. list_size(&dcb->srb_going_list));
  3441. return;
  3442. }
  3443. adapter_remove_device(acb, dcb);
  3444. kfree(dcb);
  3445. }
  3446. /**
  3447. * adapter_remove_and_free_all_devices - Removes and frees all of the
  3448. * devices associated with the specified adapter.
  3449. *
  3450. * @acb: The adapter from which all devices should be removed.
  3451. **/
  3452. static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
  3453. {
  3454. struct DeviceCtlBlk *dcb;
  3455. struct DeviceCtlBlk *tmp;
  3456. dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
  3457. list_size(&acb->dcb_list));
  3458. list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
  3459. adapter_remove_and_free_device(acb, dcb);
  3460. }
  3461. /**
  3462. * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
  3463. * scsi device that we need to deal with. We allocate a new device and then
  3464. * insert that device into the adapters device list.
  3465. *
  3466. * @scsi_device: The new scsi device that we need to handle.
  3467. **/
  3468. static int dc395x_slave_alloc(struct scsi_device *scsi_device)
  3469. {
  3470. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3471. struct DeviceCtlBlk *dcb;
  3472. dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
  3473. if (!dcb)
  3474. return -ENOMEM;
  3475. adapter_add_device(acb, dcb);
  3476. return 0;
  3477. }
  3478. /**
  3479. * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
  3480. * device that is going away.
  3481. *
  3482. * @scsi_device: The new scsi device that we need to handle.
  3483. **/
  3484. static void dc395x_slave_destroy(struct scsi_device *scsi_device)
  3485. {
  3486. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3487. struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
  3488. if (dcb)
  3489. adapter_remove_and_free_device(acb, dcb);
  3490. }
  3491. /**
  3492. * trms1040_wait_30us: wait for 30 us
  3493. *
  3494. * Waits for 30us (using the chip by the looks of it..)
  3495. *
  3496. * @io_port: base I/O address
  3497. **/
  3498. static void __devinit trms1040_wait_30us(unsigned long io_port)
  3499. {
  3500. /* ScsiPortStallExecution(30); wait 30 us */
  3501. outb(5, io_port + TRM_S1040_GEN_TIMER);
  3502. while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
  3503. /* nothing */ ;
  3504. }
  3505. /**
 * trms1040_write_cmd - write the specified command and address to the
 * chip
  3508. *
  3509. * @io_port: base I/O address
  3510. * @cmd: SB + op code (command) to send
  3511. * @addr: address to send
  3512. **/
  3513. static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
  3514. {
  3515. int i;
  3516. u8 send_data;
  3517. /* program SB + OP code */
  3518. for (i = 0; i < 3; i++, cmd <<= 1) {
  3519. send_data = NVR_SELECT;
  3520. if (cmd & 0x04) /* Start from bit 2 */
  3521. send_data |= NVR_BITOUT;
  3522. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3523. trms1040_wait_30us(io_port);
  3524. outb((send_data | NVR_CLOCK),
  3525. io_port + TRM_S1040_GEN_NVRAM);
  3526. trms1040_wait_30us(io_port);
  3527. }
  3528. /* send address */
  3529. for (i = 0; i < 7; i++, addr <<= 1) {
  3530. send_data = NVR_SELECT;
  3531. if (addr & 0x40) /* Start from bit 6 */
  3532. send_data |= NVR_BITOUT;
  3533. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3534. trms1040_wait_30us(io_port);
  3535. outb((send_data | NVR_CLOCK),
  3536. io_port + TRM_S1040_GEN_NVRAM);
  3537. trms1040_wait_30us(io_port);
  3538. }
  3539. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3540. trms1040_wait_30us(io_port);
  3541. }
  3542. /**
  3543. * trms1040_set_data - store a single byte in the eeprom
  3544. *
  3545. * Called from write all to write a single byte into the SSEEPROM
  3546. * Which is done one bit at a time.
  3547. *
  3548. * @io_port: base I/O address
  3549. * @addr: offset into EEPROM
  3550. * @byte: bytes to write
  3551. **/
  3552. static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
  3553. {
  3554. int i;
  3555. u8 send_data;
  3556. /* Send write command & address */
  3557. trms1040_write_cmd(io_port, 0x05, addr);
  3558. /* Write data */
  3559. for (i = 0; i < 8; i++, byte <<= 1) {
  3560. send_data = NVR_SELECT;
  3561. if (byte & 0x80) /* Start from bit 7 */
  3562. send_data |= NVR_BITOUT;
  3563. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3564. trms1040_wait_30us(io_port);
  3565. outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
  3566. trms1040_wait_30us(io_port);
  3567. }
  3568. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3569. trms1040_wait_30us(io_port);
  3570. /* Disable chip select */
  3571. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3572. trms1040_wait_30us(io_port);
  3573. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3574. trms1040_wait_30us(io_port);
  3575. /* Wait for write ready */
  3576. while (1) {
  3577. outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
  3578. trms1040_wait_30us(io_port);
  3579. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3580. trms1040_wait_30us(io_port);
  3581. if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
  3582. break;
  3583. }
  3584. /* Disable chip select */
  3585. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3586. }
  3587. /**
  3588. * trms1040_write_all - write 128 bytes to the eeprom
  3589. *
  3590. * Write the supplied 128 bytes to the chips SEEPROM
  3591. *
  3592. * @eeprom: the data to write
  3593. * @io_port: the base io port
  3594. **/
  3595. static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
  3596. {
  3597. u8 *b_eeprom = (u8 *)eeprom;
  3598. u8 addr;
  3599. /* Enable SEEPROM */
  3600. outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
  3601. io_port + TRM_S1040_GEN_CONTROL);
  3602. /* write enable */
  3603. trms1040_write_cmd(io_port, 0x04, 0xFF);
  3604. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3605. trms1040_wait_30us(io_port);
  3606. /* write */
  3607. for (addr = 0; addr < 128; addr++, b_eeprom++)
  3608. trms1040_set_data(io_port, addr, *b_eeprom);
  3609. /* write disable */
  3610. trms1040_write_cmd(io_port, 0x04, 0x00);
  3611. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3612. trms1040_wait_30us(io_port);
  3613. /* Disable SEEPROM */
  3614. outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
  3615. io_port + TRM_S1040_GEN_CONTROL);
  3616. }
  3617. /**
  3618. * trms1040_get_data - get a single byte from the eeprom
  3619. *
  3620. * Called from read all to read a single byte from the SEEPROM
  3621. * Which is done one bit at a time.
  3622. *
  3623. * @io_port: base I/O address
  3624. * @addr: offset into SEEPROM
  3625. *
  3626. * Returns the byte read.
  3627. **/
static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
{
	int i;
	u8 read_byte;
	u8 result = 0;

	/* Send the READ opcode (0x06) and the target address */
	trms1040_write_cmd(io_port, 0x06, addr);

	/* Clock in 8 bits, MSB first: raise clock, drop it, then
	 * sample the data line */
	for (i = 0; i < 8; i++) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);

		/* Get data bit while falling edge */
		read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
		result <<= 1;
		if (read_byte & NVR_BITIN)
			result |= 1;

		trms1040_wait_30us(io_port);
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	return result;
}
  3651. /**
  3652. * trms1040_read_all - read all bytes from the eeprom
  3653. *
  3654. * Read the 128 bytes from the SEEPROM.
  3655. *
  3656. * @eeprom: where to store the data
  3657. * @io_port: the base io port
  3658. **/
  3659. static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
  3660. {
  3661. u8 *b_eeprom = (u8 *)eeprom;
  3662. u8 addr;
  3663. /* Enable SEEPROM */
  3664. outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
  3665. io_port + TRM_S1040_GEN_CONTROL);
  3666. /* read details */
  3667. for (addr = 0; addr < 128; addr++, b_eeprom++)
  3668. *b_eeprom = trms1040_get_data(io_port, addr);
  3669. /* Disable SEEPROM */
  3670. outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
  3671. io_port + TRM_S1040_GEN_CONTROL);
  3672. }
  3673. /**
  3674. * check_eeprom - get and check contents of the eeprom
  3675. *
  3676. * Read seeprom 128 bytes into the memory provided in eeprom.
  3677. * Checks the checksum and if it's not correct it uses a set of default
  3678. * values.
  3679. *
  3680. * @eeprom: caller allocated structure to read the eeprom data into
  3681. * @io_port: io port to read from
  3682. **/
static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
	u16 *w_eeprom = (u16 *)eeprom;
	u16 w_addr;
	u16 cksum;
	u32 d_addr;
	u32 *d_eeprom;

	trms1040_read_all(eeprom, io_port);	/* read eeprom */

	/* A valid image: all 64 16-bit words sum to the magic 0x1234 */
	cksum = 0;
	for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
	     w_addr++, w_eeprom++)
		cksum += *w_eeprom;
	if (cksum != 0x1234) {
		/*
		 * Checksum is wrong.
		 * Load a set of defaults into the eeprom buffer
		 */
		dprintkl(KERN_WARNING,
			"EEProm checksum error: using default values and options.\n");
		eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->sub_sys_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->sub_class = 0x00;
		eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->device_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->reserved = 0x00;

		/*
		 * Fill the 16 per-target config dwords, then the adapter
		 * dwords, then zero the remainder.
		 * NOTE(review): this walks raw u32s over the struct
		 * layout; assumes eeprom->target is exactly 16 dwords
		 * followed by the adapter settings -- confirm against
		 * struct NvRamType before changing it.
		 */
		for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
		     d_addr < 16; d_addr++, d_eeprom++)
			*d_eeprom = 0x00000077;	/* cfg3,cfg2,period,cfg0 */

		*d_eeprom++ = 0x04000F07;	/* max_tag,delay_time,channel_cfg,scsi_id */
		*d_eeprom++ = 0x00000015;	/* reserved1,boot_lun,boot_target,reserved0 */
		for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
			*d_eeprom = 0x00;

		/* Now load defaults (maybe set by boot/module params) */
		set_safe_settings();
		fix_settings();
		eeprom_override(eeprom);

		/* Recompute the checksum word (the 64th u16) so the image
		 * written back sums to 0x1234 again */
		eeprom->cksum = 0x00;
		for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
		     w_addr < 63; w_addr++, w_eeprom++)
			cksum += *w_eeprom;

		*w_eeprom = 0x1234 - cksum;
		trms1040_write_all(eeprom, io_port);

		eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
	} else {
		/* Image is valid: apply safe settings and any overrides */
		set_safe_settings();
		eeprom_index_to_delay(eeprom);
		eeprom_override(eeprom);
	}
}
  3738. /**
  3739. * print_eeprom_settings - output the eeprom settings
  3740. * to the kernel log so people can see what they were.
  3741. *
  3742. * @eeprom: The eeprom data structure to show details for.
  3743. **/
  3744. static void __devinit print_eeprom_settings(struct NvRamType *eeprom)
  3745. {
  3746. dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
  3747. eeprom->scsi_id,
  3748. eeprom->target[0].period,
  3749. clock_speed[eeprom->target[0].period] / 10,
  3750. clock_speed[eeprom->target[0].period] % 10,
  3751. eeprom->target[0].cfg0);
  3752. dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
  3753. eeprom->channel_cfg, eeprom->max_tag,
  3754. 1 << eeprom->max_tag, eeprom->delay_time);
  3755. }
  3756. /* Free SG tables */
  3757. static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
  3758. {
  3759. int i;
  3760. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3761. for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
  3762. kfree(acb->srb_array[i].segment_x);
  3763. vfree(acb->srb_array[0].virt_map);
  3764. }
  3765. /*
  3766. * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
  3767. * should never cross a page boundary */
static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
{
	const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
	                            *SEGMENTX_LEN;
	int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
	const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
	int srb_idx = 0;
	unsigned i = 0;
	struct SGentry *ptr;
	void **virt_array;

	/* Clear all pointers first so adapter_sg_tables_free() is safe
	 * to call on a partially initialised array in the error paths */
	for (i = 0; i < DC395x_MAX_SRB_CNT; i++) {
		acb->srb_array[i].segment_x = NULL;
		acb->srb_array[i].virt_map = NULL;
	}

	dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
	while (pages--) {
		/* One page holds srbs_per_page SG lists; allocating by
		 * page guarantees an SG list never crosses a page
		 * boundary (required since the lists get pci_mapped) */
		ptr = (struct SGentry *)kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!ptr) {
			adapter_sg_tables_free(acb);
			return 1;
		}
		dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
			PAGE_SIZE, ptr, srb_idx);
		i = 0;
		while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
			acb->srb_array[srb_idx++].segment_x =
			    ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
	}
	/* Leftover room on the last page goes to the temporary SRB */
	if (i < srbs_per_page)
		acb->srb.segment_x =
		    ptr + (i * DC395x_MAX_SG_LISTENTRY);
	else
		dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");

	/* Shadow array of kernel-virtual addresses, one per SG entry */
	virt_array = vmalloc((DC395x_MAX_SRB_CNT + 1) * DC395x_MAX_SG_LISTENTRY * sizeof(void*));

	if (!virt_array) {
		adapter_sg_tables_free(acb);
		return 1;
	}

	/*
	 * NOTE(review): the loop bound is DC395x_MAX_SRB_CNT + 1, so the
	 * last iteration writes srb_array[DC395x_MAX_SRB_CNT].virt_map.
	 * This only works if acb->srb immediately follows srb_array in
	 * struct AdapterCtlBlk (or the array has a spare slot) -- confirm
	 * against the struct definition; otherwise it is out of bounds.
	 */
	for (i = 0; i < DC395x_MAX_SRB_CNT + 1; i++) {
		acb->srb_array[i].virt_map = virt_array;
		virt_array += DC395x_MAX_SG_LISTENTRY;
	}
	return 0;
}
  3812. /**
  3813. * adapter_print_config - print adapter connection and termination
  3814. * config
  3815. *
  3816. * The io port in the adapter needs to have been set before calling
  3817. * this function.
  3818. *
  3819. * @acb: The adapter to print the information for.
  3820. **/
  3821. static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
  3822. {
  3823. u8 bval;
  3824. bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
  3825. dprintkl(KERN_INFO, "%sConnectors: ",
  3826. ((bval & WIDESCSI) ? "(Wide) " : ""));
  3827. if (!(bval & CON5068))
  3828. printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
  3829. if (!(bval & CON68))
  3830. printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
  3831. if (!(bval & CON50))
  3832. printk("int50 ");
  3833. if ((bval & (CON5068 | CON50 | CON68)) ==
  3834. 0 /*(CON5068 | CON50 | CON68) */ )
  3835. printk(" Oops! (All 3?) ");
  3836. bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
  3837. printk(" Termination: ");
  3838. if (bval & DIS_TERM)
  3839. printk("Disabled\n");
  3840. else {
  3841. if (bval & AUTOTERM)
  3842. printk("Auto ");
  3843. if (bval & LOW8TERM)
  3844. printk("Low ");
  3845. if (bval & UP8TERM)
  3846. printk("High ");
  3847. printk("\n");
  3848. }
  3849. }
  3850. /**
  3851. * adapter_init_params - Initialize the various parameters in the
  3852. * adapter structure. Note that the pointer to the scsi_host is set
  3853. * early (when this instance is created) and the io_port and irq
  3854. * values are set later after they have been reserved. This just gets
  3855. * everything set to a good starting position.
  3856. *
  3857. * The eeprom structure in the adapter needs to have been set before
  3858. * calling this function.
  3859. *
  3860. * @acb: The adapter to initialize.
  3861. **/
  3862. static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
  3863. {
  3864. struct NvRamType *eeprom = &acb->eeprom;
  3865. int i;
  3866. /* NOTE: acb->scsi_host is set at scsi_host/acb creation time */
  3867. /* NOTE: acb->io_port_base is set at port registration time */
  3868. /* NOTE: acb->io_port_len is set at port registration time */
  3869. INIT_LIST_HEAD(&acb->dcb_list);
  3870. acb->dcb_run_robin = NULL;
  3871. acb->active_dcb = NULL;
  3872. INIT_LIST_HEAD(&acb->srb_free_list);
  3873. /* temp SRB for Q tag used or abort command used */
  3874. acb->tmp_srb = &acb->srb;
  3875. init_timer(&acb->waiting_timer);
  3876. init_timer(&acb->selto_timer);
  3877. acb->srb_count = DC395x_MAX_SRB_CNT;
  3878. acb->sel_timeout = DC395x_SEL_TIMEOUT; /* timeout=250ms */
  3879. /* NOTE: acb->irq_level is set at IRQ registration time */
  3880. acb->tag_max_num = 1 << eeprom->max_tag;
  3881. if (acb->tag_max_num > 30)
  3882. acb->tag_max_num = 30;
  3883. acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE, RESET_DEV */
  3884. acb->gmode2 = eeprom->channel_cfg;
  3885. acb->config = 0; /* NOTE: actually set in adapter_init_chip */
  3886. if (eeprom->channel_cfg & NAC_SCANLUN)
  3887. acb->lun_chk = 1;
  3888. acb->scan_devices = 1;
  3889. acb->scsi_host->this_id = eeprom->scsi_id;
  3890. acb->hostid_bit = (1 << acb->scsi_host->this_id);
  3891. for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
  3892. acb->dcb_map[i] = 0;
  3893. acb->msg_len = 0;
  3894. /* link static array of srbs into the srb free list */
  3895. for (i = 0; i < acb->srb_count - 1; i++)
  3896. srb_free_insert(acb, &acb->srb_array[i]);
  3897. }
  3898. /**
  3899. * adapter_init_host - Initialize the scsi host instance based on
  3900. * values that we have already stored in the adapter instance. There's
  3901. * some mention that a lot of these are deprecated, so we won't use
  3902. * them (we'll use the ones in the adapter instance) but we'll fill
  3903. * them in in case something else needs them.
  3904. *
  3905. * The eeprom structure, irq and io ports in the adapter need to have
  3906. * been set before calling this function.
  3907. *
  3908. * @host: The scsi host instance to fill in the values for.
  3909. **/
  3910. static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
  3911. {
  3912. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
  3913. struct NvRamType *eeprom = &acb->eeprom;
  3914. host->max_cmd_len = 24;
  3915. host->can_queue = DC395x_MAX_CMD_QUEUE;
  3916. host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
  3917. host->this_id = (int)eeprom->scsi_id;
  3918. host->io_port = acb->io_port_base;
  3919. host->n_io_port = acb->io_port_len;
  3920. host->dma_channel = -1;
  3921. host->unique_id = acb->io_port_base;
  3922. host->irq = acb->irq_level;
  3923. host->last_reset = jiffies;
  3924. host->max_id = 16;
  3925. if (host->max_id - 1 == eeprom->scsi_id)
  3926. host->max_id--;
  3927. #ifdef CONFIG_SCSI_MULTI_LUN
  3928. if (eeprom->channel_cfg & NAC_SCANLUN)
  3929. host->max_lun = 8;
  3930. else
  3931. host->max_lun = 1;
  3932. #else
  3933. host->max_lun = 1;
  3934. #endif
  3935. }
  3936. /**
  3937. * adapter_init_chip - Get the chip into a know state and figure out
  3938. * some of the settings that apply to this adapter.
  3939. *
  3940. * The io port in the adapter needs to have been set before calling
  3941. * this function. The config will be configured correctly on return.
  3942. *
  3943. * @acb: The adapter which we are to init.
  3944. **/
static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;

	/* Mask all the interrupt */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);

	/* Reset SCSI module */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);

	/* Reset PCI/DMA module */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	udelay(20);

	/* program configuration 0: auto-termination and parity always on,
	 * wide and power-on reset taken from chip status / eeprom */
	acb->config = HCC_AUTOTERM | HCC_PARITY;
	if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
		acb->config |= HCC_WIDE_CARD;
	if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
		acb->config |= HCC_SCSI_RESET;

	if (acb->config & HCC_SCSI_RESET) {
		dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
		DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

		/*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
		/*spin_unlock_irq (&io_request_lock); */
		udelay(500);

		/* Push the next-allowed-reset time out past the bus settle
		 * delay configured in the eeprom */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;

		/*spin_lock_irq (&io_request_lock); */
	}
}
  3974. /**
  3975. * init_adapter - Grab the resource for the card, setup the adapter
  3976. * information, set the card into a known state, create the various
  3977. * tables etc etc. This basically gets all adapter information all up
  3978. * to date, initialised and gets the chip in sync with it.
  3979. *
  3980. * @host: This hosts adapter structure
  3981. * @io_port: The base I/O port
  3982. * @irq: IRQ
  3983. *
  3984. * Returns 0 if the initialization succeeds, any other value on
  3985. * failure.
  3986. **/
  3987. static int __devinit adapter_init(struct AdapterCtlBlk *acb,
  3988. unsigned long io_port, u32 io_port_len, unsigned int irq)
  3989. {
  3990. if (!request_region(io_port, io_port_len, DC395X_NAME)) {
  3991. dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
  3992. goto failed;
  3993. }
  3994. /* store port base to indicate we have registered it */
  3995. acb->io_port_base = io_port;
  3996. acb->io_port_len = io_port_len;
  3997. if (request_irq(irq, dc395x_interrupt, SA_SHIRQ, DC395X_NAME, acb)) {
  3998. /* release the region we just claimed */
  3999. dprintkl(KERN_INFO, "Failed to register IRQ\n");
  4000. goto failed;
  4001. }
  4002. /* store irq to indicate we have registered it */
  4003. acb->irq_level = irq;
  4004. /* get eeprom configuration information and command line settings etc */
  4005. check_eeprom(&acb->eeprom, io_port);
  4006. print_eeprom_settings(&acb->eeprom);
  4007. /* setup adapter control block */
  4008. adapter_init_params(acb);
  4009. /* display card connectors/termination settings */
  4010. adapter_print_config(acb);
  4011. if (adapter_sg_tables_alloc(acb)) {
  4012. dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
  4013. goto failed;
  4014. }
  4015. adapter_init_scsi_host(acb->scsi_host);
  4016. adapter_init_chip(acb);
  4017. set_basic_config(acb);
  4018. dprintkdbg(DBG_0,
  4019. "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
  4020. "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
  4021. acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
  4022. sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
  4023. return 0;
  4024. failed:
  4025. if (acb->irq_level)
  4026. free_irq(acb->irq_level, acb);
  4027. if (acb->io_port_base)
  4028. release_region(acb->io_port_base, acb->io_port_len);
  4029. adapter_sg_tables_free(acb);
  4030. return 1;
  4031. }
  4032. /**
  4033. * adapter_uninit_chip - cleanly shut down the scsi controller chip,
  4034. * stopping all operations and disabling interrupt generation on the
  4035. * card.
  4036. *
  4037. * @acb: The adapter which we are to shutdown.
  4038. **/
static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
{
	/* disable interrupts */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);

	/* reset the scsi bus, but only if we performed the power-on
	 * reset ourselves (HCC_SCSI_RESET set in adapter_init_chip) */
	if (acb->config & HCC_SCSI_RESET)
		reset_scsi_bus(acb);

	/* clear any pending interrupt state (read acknowledges it) */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
}
  4050. /**
  4051. * adapter_uninit - Shut down the chip and release any resources that
  4052. * we had allocated. Once this returns the adapter should not be used
  4053. * anymore.
  4054. *
  4055. * @acb: The adapter which we are to un-initialize.
  4056. **/
  4057. static void adapter_uninit(struct AdapterCtlBlk *acb)
  4058. {
  4059. unsigned long flags;
  4060. DC395x_LOCK_IO(acb->scsi_host, flags);
  4061. /* remove timers */
  4062. if (timer_pending(&acb->waiting_timer))
  4063. del_timer(&acb->waiting_timer);
  4064. if (timer_pending(&acb->selto_timer))
  4065. del_timer(&acb->selto_timer);
  4066. adapter_uninit_chip(acb);
  4067. adapter_remove_and_free_all_devices(acb);
  4068. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  4069. if (acb->irq_level)
  4070. free_irq(acb->irq_level, acb);
  4071. if (acb->io_port_base)
  4072. release_region(acb->io_port_base, acb->io_port_len);
  4073. adapter_sg_tables_free(acb);
  4074. }
  4075. #undef SPRINTF
  4076. #define SPRINTF(args...) pos += sprintf(pos, args)
  4077. #undef YESNO
  4078. #define YESNO(YN) \
  4079. if (YN) SPRINTF(" Yes ");\
  4080. else SPRINTF(" No ")
static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
		char **start, off_t offset, int length, int inout)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	char *pos = buffer;	/* SPRINTF appends here */
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	if (inout)		/* Has data been written to the file ? */
		return -EPERM;

	SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
	SPRINTF(" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* Adapter-wide settings */
	SPRINTF("SCSI Host Nr %i, ", host->host_no);
	SPRINTF("DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base);
	SPRINTF("irq_level 0x%04x, ", acb->irq_level);
	SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	SPRINTF("MaxID %i, MaxLUN %i, ", host->max_id, host->max_lun);
	SPRINTF("AdapterID %i\n", host->this_id);

	SPRINTF("tag_max_num %i", acb->tag_max_num);
	/*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
	SPRINTF(", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time);
	/*SPRINTF("\n"); */

	SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	SPRINTF
	    ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
	     acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
	     acb->dcb_map[6], acb->dcb_map[7]);
	SPRINTF
	    (" %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
	     acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
	     acb->dcb_map[14], acb->dcb_map[15]);

	/* Per-device table: one row per DCB */
	SPRINTF
	    ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		SPRINTF("%02i %02i %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		/* period value is stored in 4ns units, hence the << 2 */
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			SPRINTF(" %03i ns ", nego_period);
		else
			SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			/* Derive MHz with one decimal from the period */
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			SPRINTF(" %2i.%1i M %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			SPRINTF(" ");

		/* Add more info ... */
		SPRINTF(" %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		SPRINTF("Waiting queue timer running\n");
	else
		SPRINTF("\n");

	/* Waiting/going command queues per DCB */
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			SPRINTF("DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			SPRINTF(" %li", srb->cmd->pid);
		if (!list_empty(&dcb->srb_going_list))
			SPRINTF("\nDCB (%02i-%i): Going : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			SPRINTF(" %li", srb->cmd->pid);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			SPRINTF("\n");
	}

	if (debug_enabled(DBG_1)) {
		SPRINTF("DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			SPRINTF("%p -> ", dcb);
		}
		SPRINTF("END\n");
	}

	/* Standard /proc read bookkeeping: hand back only the window
	 * [offset, offset+length) of what we formatted */
	*start = buffer + offset;
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	if (pos - buffer < offset)
		return 0;
	else if (pos - buffer - offset < length)
		return pos - buffer - offset;
	else
		return length;
}
/* SCSI mid-layer entry points and default limits for this driver.
 * Several values (can_queue, this_id, cmd_per_lun) are refined
 * per-host in adapter_init_scsi_host() from the eeprom settings. */
static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.proc_info = dc395x_proc_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.bios_param = dc395x_bios_param,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	.unchecked_isa_dma = 0,
	.use_clustering = DISABLE_CLUSTERING,
};
  4205. /**
  4206. * banner_display - Display banner on first instance of driver
  4207. * initialized.
  4208. **/
  4209. static void banner_display(void)
  4210. {
  4211. static int banner_done = 0;
  4212. if (!banner_done)
  4213. {
  4214. dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
  4215. banner_done = 1;
  4216. }
  4217. }
  4218. /**
  4219. * dc395x_init_one - Initialise a single instance of the adapter.
  4220. *
  4221. * The PCI layer will call this once for each instance of the adapter
  4222. * that it finds in the system. The pci_dev structure indicates which
  4223. * instance we are being called from.
  4224. *
  4225. * @dev: The PCI device to initialize.
  4226. * @id: Looks like a pointer to the entry in our pci device table
  4227. * that was actually matched by the PCI subsystem.
  4228. *
  4229. * Returns 0 on success, or an error code (-ve) on failure.
  4230. **/
static int __devinit dc395x_init_one(struct pci_dev *dev,
		const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;

	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	if (pci_enable_device(dev))
	{
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* allocate scsi host information (includes out adapter) */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
	acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
	acb->scsi_host = scsi_host;
	acb->dev = dev;

	/* initialise the adapter and everything we need */
	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		goto fail;
	}

	pci_set_master(dev);

	/* get the scsi mid level to scan for new devices on the bus */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);

	return 0;

fail:
	/* NOTE(review): adapter_uninit() is also reached when
	 * adapter_init() itself failed; it relies on adapter_init
	 * leaving the acb "registered" markers in a consistent state
	 * so resources are not released twice -- verify both paths
	 * together when changing either. */
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}
  4282. /**
  4283. * dc395x_remove_one - Called to remove a single instance of the
  4284. * adapter.
  4285. *
  4286. * @dev: The PCI device to intialize.
  4287. **/
  4288. static void __devexit dc395x_remove_one(struct pci_dev *dev)
  4289. {
  4290. struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
  4291. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
  4292. dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
  4293. scsi_remove_host(scsi_host);
  4294. adapter_uninit(acb);
  4295. pci_disable_device(dev);
  4296. scsi_host_put(scsi_host);
  4297. pci_set_drvdata(dev, NULL);
  4298. }
/* PCI ids this driver binds to: any Tekram TRM-S1040 based board,
 * regardless of subsystem ids */
static struct pci_device_id dc395x_pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TEKRAM,
		.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{}			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
/* PCI driver glue: connects the id table to the probe/remove callbacks */
static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = __devexit_p(dc395x_remove_one),
};
  4315. /**
  4316. * dc395x_module_init - Module initialization function
  4317. *
  4318. * Used by both module and built-in driver to initialise this driver.
  4319. **/
static int __init dc395x_module_init(void)
{
	/* Register the PCI driver; probing happens per-device via
	 * dc395x_init_one() */
	return pci_module_init(&dc395x_driver);
}
  4324. /**
  4325. * dc395x_module_exit - Module cleanup function.
  4326. **/
static void __exit dc395x_module_exit(void)
{
	/* Unregister; the PCI core calls dc395x_remove_one() for each
	 * bound device */
	pci_unregister_driver(&dc395x_driver);
}
  4331. module_init(dc395x_module_init);
  4332. module_exit(dc395x_module_exit);
  4333. MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
  4334. MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
  4335. MODULE_LICENSE("GPL");