aic79xx_osm.c 135 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
7477847794780478147824783478447854786478747884789479047914792479347944795479647974798479948004801480248034804480548064807480848094810481148124813481448154816481748184819482048214822482348244825482648274828482948304831483248334834483548364837483848394840484148424843484448454846484748484849485048514852485348544855485648574858485948604861486248634864486548664867486848694870487148724873487448754876487748784879488048814882488348844885488648874888488948904891489248934894489548964897489848994900490149024903490449054906490749084909491049114912491349144915491649174918491949204921492249234924492549264927492849294930493149324933493449354936493749384939494049414942494349444945494649474948494949504951495249534954495549564957495849594960496149624963496449654966496749684969497049714972497349744975497649774978497949804981498249834984498549864987498849894990499149924993499449954996499749984999500050015002500350045005500650075008500950105011501250135014501550165017
  1. /*
  2. * Adaptec AIC79xx device driver for Linux.
  3. *
  4. * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $
  5. *
  6. * --------------------------------------------------------------------------
  7. * Copyright (c) 1994-2000 Justin T. Gibbs.
  8. * Copyright (c) 1997-1999 Doug Ledford
  9. * Copyright (c) 2000-2003 Adaptec Inc.
  10. * All rights reserved.
  11. *
  12. * Redistribution and use in source and binary forms, with or without
  13. * modification, are permitted provided that the following conditions
  14. * are met:
  15. * 1. Redistributions of source code must retain the above copyright
  16. * notice, this list of conditions, and the following disclaimer,
  17. * without modification.
  18. * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  19. * substantially similar to the "NO WARRANTY" disclaimer below
  20. * ("Disclaimer") and any redistribution must be conditioned upon
  21. * including a substantially similar Disclaimer requirement for further
  22. * binary redistribution.
  23. * 3. Neither the names of the above-listed copyright holders nor the names
  24. * of any contributors may be used to endorse or promote products derived
  25. * from this software without specific prior written permission.
  26. *
  27. * Alternatively, this software may be distributed under the terms of the
  28. * GNU General Public License ("GPL") version 2 as published by the Free
  29. * Software Foundation.
  30. *
  31. * NO WARRANTY
  32. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  33. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  34. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
  35. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  36. * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  37. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  38. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  39. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  40. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  41. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  42. * POSSIBILITY OF SUCH DAMAGES.
  43. */
  44. #include "aic79xx_osm.h"
  45. #include "aic79xx_inline.h"
  46. #include <scsi/scsicam.h>
  47. /*
  48. * Include aiclib.c as part of our
  49. * "module dependencies are hard" work around.
  50. */
  51. #include "aiclib.c"
  52. #include <linux/init.h> /* __setup */
  53. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  54. #include "sd.h" /* For geometry detection */
  55. #endif
  56. #include <linux/mm.h> /* For fetching system memory size */
  57. #include <linux/delay.h> /* For ssleep/msleep */
  58. /*
  59. * Lock protecting manipulation of the ahd softc list.
  60. */
  61. spinlock_t ahd_list_spinlock;
  62. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  63. /* For dynamic sglist size calculation. */
  64. u_int ahd_linux_nseg;
  65. #endif
  66. /*
  67. * Bucket size for counting good commands in between bad ones.
  68. */
  69. #define AHD_LINUX_ERR_THRESH 1000
  70. /*
  71. * Set this to the delay in seconds after SCSI bus reset.
  72. * Note, we honor this only for the initial bus reset.
  73. * The scsi error recovery code performs its own bus settle
  74. * delay handling for error recovery actions.
  75. */
  76. #ifdef CONFIG_AIC79XX_RESET_DELAY_MS
  77. #define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS
  78. #else
  79. #define AIC79XX_RESET_DELAY 5000
  80. #endif
  81. /*
  82. * To change the default number of tagged transactions allowed per-device,
  83. * add a line to the lilo.conf file like:
  84. * append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
  85. * which will result in the first four devices on the first two
  86. * controllers being set to a tagged queue depth of 32.
  87. *
  88. * The tag_commands is an array of 16 to allow for wide and twin adapters.
  89. * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
  90. * for channel 1.
  91. */
  92. typedef struct {
  93. uint16_t tag_commands[16]; /* Allow for wide/twin adapters. */
  94. } adapter_tag_info_t;
  95. /*
  96. * Modify this as you see fit for your system.
  97. *
  98. * 0 tagged queuing disabled
  99. * 1 <= n <= 253 n == max tags ever dispatched.
  100. *
  101. * The driver will throttle the number of commands dispatched to a
  102. * device if it returns queue full. For devices with a fixed maximum
  103. * queue depth, the driver will eventually determine this depth and
  104. * lock it in (a console message is printed to indicate that a lock
  105. * has occurred). On some devices, queue full is returned for a temporary
  106. * resource shortage. These devices will return queue full at varying
  107. * depths. The driver will throttle back when the queue fulls occur and
  108. * attempt to slowly increase the depth over time as the device recovers
  109. * from the resource shortage.
  110. *
  111. * In this example, the first line will disable tagged queueing for all
  112. * the devices on the first probed aic79xx adapter.
  113. *
  114. * The second line enables tagged queueing with 4 commands/LUN for IDs
  115. * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
  116. * driver to attempt to use up to 64 tags for ID 1.
  117. *
  118. * The third line is the same as the first line.
  119. *
  120. * The fourth line disables tagged queueing for devices 0 and 3. It
  121. * enables tagged queueing for the other IDs, with 16 commands/LUN
  122. * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
  123. * IDs 2, 5-7, and 9-15.
  124. */
  125. /*
  126. * NOTE: The below structure is for reference only, the actual structure
  127. * to modify in order to change things is just below this comment block.
  128. adapter_tag_info_t aic79xx_tag_info[] =
  129. {
  130. {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
  131. {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
  132. {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
  133. {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
  134. };
  135. */
  136. #ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE
  137. #define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE
  138. #else
  139. #define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE
  140. #endif
  141. #define AIC79XX_CONFIGED_TAG_COMMANDS { \
  142. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
  143. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
  144. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
  145. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
  146. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
  147. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
  148. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
  149. AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE \
  150. }
  151. /*
  152. * By default, use the number of commands specified by
  153. * the users kernel configuration.
  154. */
  155. static adapter_tag_info_t aic79xx_tag_info[] =
  156. {
  157. {AIC79XX_CONFIGED_TAG_COMMANDS},
  158. {AIC79XX_CONFIGED_TAG_COMMANDS},
  159. {AIC79XX_CONFIGED_TAG_COMMANDS},
  160. {AIC79XX_CONFIGED_TAG_COMMANDS},
  161. {AIC79XX_CONFIGED_TAG_COMMANDS},
  162. {AIC79XX_CONFIGED_TAG_COMMANDS},
  163. {AIC79XX_CONFIGED_TAG_COMMANDS},
  164. {AIC79XX_CONFIGED_TAG_COMMANDS},
  165. {AIC79XX_CONFIGED_TAG_COMMANDS},
  166. {AIC79XX_CONFIGED_TAG_COMMANDS},
  167. {AIC79XX_CONFIGED_TAG_COMMANDS},
  168. {AIC79XX_CONFIGED_TAG_COMMANDS},
  169. {AIC79XX_CONFIGED_TAG_COMMANDS},
  170. {AIC79XX_CONFIGED_TAG_COMMANDS},
  171. {AIC79XX_CONFIGED_TAG_COMMANDS},
  172. {AIC79XX_CONFIGED_TAG_COMMANDS}
  173. };
  174. /*
  175. * By default, read streaming is disabled. In theory,
  176. * read streaming should enhance performance, but early
  177. * U320 drive firmware actually performs slower with
  178. * read streaming enabled.
  179. */
  180. #ifdef CONFIG_AIC79XX_ENABLE_RD_STRM
  181. #define AIC79XX_CONFIGED_RD_STRM 0xFFFF
  182. #else
  183. #define AIC79XX_CONFIGED_RD_STRM 0
  184. #endif
  185. static uint16_t aic79xx_rd_strm_info[] =
  186. {
  187. AIC79XX_CONFIGED_RD_STRM,
  188. AIC79XX_CONFIGED_RD_STRM,
  189. AIC79XX_CONFIGED_RD_STRM,
  190. AIC79XX_CONFIGED_RD_STRM,
  191. AIC79XX_CONFIGED_RD_STRM,
  192. AIC79XX_CONFIGED_RD_STRM,
  193. AIC79XX_CONFIGED_RD_STRM,
  194. AIC79XX_CONFIGED_RD_STRM,
  195. AIC79XX_CONFIGED_RD_STRM,
  196. AIC79XX_CONFIGED_RD_STRM,
  197. AIC79XX_CONFIGED_RD_STRM,
  198. AIC79XX_CONFIGED_RD_STRM,
  199. AIC79XX_CONFIGED_RD_STRM,
  200. AIC79XX_CONFIGED_RD_STRM,
  201. AIC79XX_CONFIGED_RD_STRM,
  202. AIC79XX_CONFIGED_RD_STRM
  203. };
  204. /*
  205. * DV option:
  206. *
  207. * positive value = DV Enabled
  208. * zero = DV Disabled
  209. * negative value = DV Default for adapter type/seeprom
  210. */
  211. #ifdef CONFIG_AIC79XX_DV_SETTING
  212. #define AIC79XX_CONFIGED_DV CONFIG_AIC79XX_DV_SETTING
  213. #else
  214. #define AIC79XX_CONFIGED_DV -1
  215. #endif
  216. static int8_t aic79xx_dv_settings[] =
  217. {
  218. AIC79XX_CONFIGED_DV,
  219. AIC79XX_CONFIGED_DV,
  220. AIC79XX_CONFIGED_DV,
  221. AIC79XX_CONFIGED_DV,
  222. AIC79XX_CONFIGED_DV,
  223. AIC79XX_CONFIGED_DV,
  224. AIC79XX_CONFIGED_DV,
  225. AIC79XX_CONFIGED_DV,
  226. AIC79XX_CONFIGED_DV,
  227. AIC79XX_CONFIGED_DV,
  228. AIC79XX_CONFIGED_DV,
  229. AIC79XX_CONFIGED_DV,
  230. AIC79XX_CONFIGED_DV,
  231. AIC79XX_CONFIGED_DV,
  232. AIC79XX_CONFIGED_DV,
  233. AIC79XX_CONFIGED_DV
  234. };
  235. /*
  236. * The I/O cell on the chip is very configurable in respect to its analog
  237. * characteristics. Set the defaults here; they can be overriden with
  238. * the proper insmod parameters.
  239. */
  240. struct ahd_linux_iocell_opts
  241. {
  242. uint8_t precomp;
  243. uint8_t slewrate;
  244. uint8_t amplitude;
  245. };
  246. #define AIC79XX_DEFAULT_PRECOMP 0xFF
  247. #define AIC79XX_DEFAULT_SLEWRATE 0xFF
  248. #define AIC79XX_DEFAULT_AMPLITUDE 0xFF
  249. #define AIC79XX_DEFAULT_IOOPTS \
  250. { \
  251. AIC79XX_DEFAULT_PRECOMP, \
  252. AIC79XX_DEFAULT_SLEWRATE, \
  253. AIC79XX_DEFAULT_AMPLITUDE \
  254. }
  255. #define AIC79XX_PRECOMP_INDEX 0
  256. #define AIC79XX_SLEWRATE_INDEX 1
  257. #define AIC79XX_AMPLITUDE_INDEX 2
  258. static struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
  259. {
  260. AIC79XX_DEFAULT_IOOPTS,
  261. AIC79XX_DEFAULT_IOOPTS,
  262. AIC79XX_DEFAULT_IOOPTS,
  263. AIC79XX_DEFAULT_IOOPTS,
  264. AIC79XX_DEFAULT_IOOPTS,
  265. AIC79XX_DEFAULT_IOOPTS,
  266. AIC79XX_DEFAULT_IOOPTS,
  267. AIC79XX_DEFAULT_IOOPTS,
  268. AIC79XX_DEFAULT_IOOPTS,
  269. AIC79XX_DEFAULT_IOOPTS,
  270. AIC79XX_DEFAULT_IOOPTS,
  271. AIC79XX_DEFAULT_IOOPTS,
  272. AIC79XX_DEFAULT_IOOPTS,
  273. AIC79XX_DEFAULT_IOOPTS,
  274. AIC79XX_DEFAULT_IOOPTS,
  275. AIC79XX_DEFAULT_IOOPTS
  276. };
  277. /*
  278. * There should be a specific return value for this in scsi.h, but
  279. * it seems that most drivers ignore it.
  280. */
  281. #define DID_UNDERFLOW DID_ERROR
  282. void
  283. ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
  284. {
  285. printk("(scsi%d:%c:%d:%d): ",
  286. ahd->platform_data->host->host_no,
  287. scb != NULL ? SCB_GET_CHANNEL(ahd, scb) : 'X',
  288. scb != NULL ? SCB_GET_TARGET(ahd, scb) : -1,
  289. scb != NULL ? SCB_GET_LUN(scb) : -1);
  290. }
/*
 * XXX - these options apply unilaterally to _all_ adapters
 *       cards in the system.  This should be fixed.  Exceptions to this
 *       rule are noted in the comments.
 */

/*
 * Skip the scsi bus reset.  Non 0 make us skip the reset at startup.  This
 * has no effect on any later resets that might occur due to things like
 * SCSI bus timeouts.
 */
static uint32_t aic79xx_no_reset;

/*
 * Certain PCI motherboards will scan PCI devices from highest to lowest,
 * others scan from lowest to highest, and they tend to do all kinds of
 * strange things when they come into contact with PCI bridge chips.  The
 * net result of all this is that the PCI card that is actually used to boot
 * the machine is very hard to detect.  Most motherboards go from lowest
 * PCI slot number to highest, and the first SCSI controller found is the
 * one you boot from.  The only exceptions to this are when a controller
 * has its BIOS disabled.  So, we by default sort all of our SCSI controllers
 * from lowest PCI slot number to highest PCI slot number.  We also force
 * all controllers with their BIOS disabled to the end of the list.  This
 * works on *almost* all computers.  Where it doesn't work, we have this
 * option.  Setting this option to non-0 will reverse the order of the sort
 * to highest first, then lowest, but will still leave cards with their BIOS
 * disabled at the very end.  That should fix everyone up unless there are
 * really strange circumstances.
 */
static uint32_t aic79xx_reverse_scan;

/*
 * Should we force EXTENDED translation on a controller.
 *     0 == Use whatever is in the SEEPROM or default to off
 *     1 == Use whatever is in the SEEPROM or default to on
 */
static uint32_t aic79xx_extended;

/*
 * PCI bus parity checking of the Adaptec controllers.  This is somewhat
 * dubious at best.  To my knowledge, this option has never actually
 * solved a PCI parity problem, but on certain machines with broken PCI
 * chipset configurations, it can generate tons of false error messages.
 * It's included in the driver for completeness.
 *   0	   = Shut off PCI parity check
 *   non-0 = Enable PCI parity check
 *
 * NOTE: you can't actually pass -1 on the lilo prompt.  So, to set this
 * variable to -1 you would actually want to simply pass the variable
 * name without a number.  That will invert the 0 which will result in
 * -1.
 */
static uint32_t aic79xx_pci_parity = ~0;

/*
 * There are lots of broken chipsets in the world.  Some of them will
 * violate the PCI spec when we issue byte sized memory writes to our
 * controller.  I/O mapped register access, if allowed by the given
 * platform, will work in almost all cases.
 */
uint32_t aic79xx_allow_memio = ~0;

/*
 * aic79xx_detect() has been run, so register all device arrivals
 * immediately with the system rather than deferring to the sorted
 * attachment performed by aic79xx_detect().
 */
int aic79xx_detect_complete;

/*
 * So that we can set how long each device is given as a selection timeout.
 * The table of values goes like this:
 *   0 - 256ms
 *   1 - 128ms
 *   2 - 64ms
 *   3 - 32ms
 * We default to 256ms because some older devices need a longer time
 * to respond to initial selection.
 */
static uint32_t aic79xx_seltime;

/*
 * Certain devices do not perform any aging on commands.  Should the
 * device be saturated by commands in one portion of the disk, it is
 * possible for transactions on far away sectors to never be serviced.
 * To handle these devices, we can periodically send an ordered tag to
 * force all outstanding transactions to be serviced prior to a new
 * transaction.
 */
uint32_t aic79xx_periodic_otag;
  374. /*
  375. * Module information and settable options.
  376. */
  377. static char *aic79xx = NULL;
  378. MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
  379. MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver");
  380. MODULE_LICENSE("Dual BSD/GPL");
  381. MODULE_VERSION(AIC79XX_DRIVER_VERSION);
  382. module_param(aic79xx, charp, 0);
  383. MODULE_PARM_DESC(aic79xx,
  384. "period delimited, options string.\n"
  385. " verbose Enable verbose/diagnostic logging\n"
  386. " allow_memio Allow device registers to be memory mapped\n"
  387. " debug Bitmask of debug values to enable\n"
  388. " no_reset Supress initial bus resets\n"
  389. " extended Enable extended geometry on all controllers\n"
  390. " periodic_otag Send an ordered tagged transaction\n"
  391. " periodically to prevent tag starvation.\n"
  392. " This may be required by some older disk\n"
  393. " or drives/RAID arrays.\n"
  394. " reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
  395. " tag_info:<tag_str> Set per-target tag depth\n"
  396. " global_tag_depth:<int> Global tag depth for all targets on all buses\n"
  397. " rd_strm:<rd_strm_masks> Set per-target read streaming setting.\n"
  398. " dv:<dv_settings> Set per-controller Domain Validation Setting.\n"
  399. " slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
  400. " precomp:<pcomp_list> Set the signal precompensation (0-7).\n"
  401. " amplitude:<int> Set the signal amplitude (0-7).\n"
  402. " seltime:<int> Selection Timeout:\n"
  403. " (0/256ms,1/128ms,2/64ms,3/32ms)\n"
  404. "\n"
  405. " Sample /etc/modprobe.conf line:\n"
  406. " Enable verbose logging\n"
  407. " Set tag depth on Controller 2/Target 2 to 10 tags\n"
  408. " Shorten the selection timeout to 128ms\n"
  409. "\n"
  410. " options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
  411. "\n"
  412. " Sample /etc/modprobe.conf line:\n"
  413. " Change Read Streaming for Controller's 2 and 3\n"
  414. "\n"
  415. " options aic79xx 'aic79xx=rd_strm:{..0xFFF0.0xC0F0}'");
/* Command completion and device bookkeeping helpers. */
static void ahd_linux_handle_scsi_status(struct ahd_softc *,
					 struct ahd_linux_device *,
					 struct scb *);
static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
					 Scsi_Cmnd *cmd);
static void ahd_linux_filter_inquiry(struct ahd_softc *ahd,
				     struct ahd_devinfo *devinfo);
static void ahd_linux_dev_timed_unfreeze(u_long arg);
static void ahd_linux_sem_timeout(u_long arg);
static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
static void ahd_linux_size_nseg(void);
static void ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd);

/* Domain Validation (DV) state machine and probe-command helpers. */
static void ahd_linux_start_dv(struct ahd_softc *ahd);
static void ahd_linux_dv_timeout(struct scsi_cmnd *cmd);
static int  ahd_linux_dv_thread(void *data);
static void ahd_linux_kill_dv_thread(struct ahd_softc *ahd);
static void ahd_linux_dv_target(struct ahd_softc *ahd, u_int target);
static void ahd_linux_dv_transition(struct ahd_softc *ahd,
				    struct scsi_cmnd *cmd,
				    struct ahd_devinfo *devinfo,
				    struct ahd_linux_target *targ);
static void ahd_linux_dv_fill_cmd(struct ahd_softc *ahd,
				  struct scsi_cmnd *cmd,
				  struct ahd_devinfo *devinfo);
static void ahd_linux_dv_inq(struct ahd_softc *ahd,
			     struct scsi_cmnd *cmd,
			     struct ahd_devinfo *devinfo,
			     struct ahd_linux_target *targ,
			     u_int request_length);
static void ahd_linux_dv_tur(struct ahd_softc *ahd,
			     struct scsi_cmnd *cmd,
			     struct ahd_devinfo *devinfo);
static void ahd_linux_dv_rebd(struct ahd_softc *ahd,
			      struct scsi_cmnd *cmd,
			      struct ahd_devinfo *devinfo,
			      struct ahd_linux_target *targ);
static void ahd_linux_dv_web(struct ahd_softc *ahd,
			     struct scsi_cmnd *cmd,
			     struct ahd_devinfo *devinfo,
			     struct ahd_linux_target *targ);
static void ahd_linux_dv_reb(struct ahd_softc *ahd,
			     struct scsi_cmnd *cmd,
			     struct ahd_devinfo *devinfo,
			     struct ahd_linux_target *targ);
static void ahd_linux_dv_su(struct ahd_softc *ahd,
			    struct scsi_cmnd *cmd,
			    struct ahd_devinfo *devinfo,
			    struct ahd_linux_target *targ);
static int  ahd_linux_fallback(struct ahd_softc *ahd,
			       struct ahd_devinfo *devinfo);
static __inline int ahd_linux_dv_fallback(struct ahd_softc *ahd,
					  struct ahd_devinfo *devinfo);
static void ahd_linux_dv_complete(Scsi_Cmnd *cmd);
static void ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ);

/* User option parsing and per-target/device allocation helpers. */
static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
				     struct ahd_devinfo *devinfo);
static u_int ahd_linux_user_dv_setting(struct ahd_softc *ahd);
static void ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd);
static void ahd_linux_device_queue_depth(struct ahd_softc *ahd,
					 struct ahd_linux_device *dev);
static struct ahd_linux_target*	ahd_linux_alloc_target(struct ahd_softc*,
						       u_int, u_int);
static void			ahd_linux_free_target(struct ahd_softc*,
						      struct ahd_linux_target*);
static struct ahd_linux_device*	ahd_linux_alloc_device(struct ahd_softc*,
						       struct ahd_linux_target*,
						       u_int);
static void			ahd_linux_free_device(struct ahd_softc*,
						      struct ahd_linux_device*);
static void ahd_linux_run_device_queue(struct ahd_softc*,
				       struct ahd_linux_device*);
static void ahd_linux_setup_tag_info_global(char *p);
static aic_option_callback_t ahd_linux_setup_tag_info;
static aic_option_callback_t ahd_linux_setup_rd_strm_info;
static aic_option_callback_t ahd_linux_setup_dv;
static aic_option_callback_t ahd_linux_setup_iocell_info;
static int ahd_linux_next_unit(void);
static void ahd_runq_tasklet(unsigned long data);
static int aic79xx_setup(char *c);

/****************************** Inlines ***************************************/
static __inline void ahd_schedule_completeq(struct ahd_softc *ahd);
static __inline void ahd_schedule_runq(struct ahd_softc *ahd);
static __inline void ahd_setup_runq_tasklet(struct ahd_softc *ahd);
static __inline void ahd_teardown_runq_tasklet(struct ahd_softc *ahd);
static __inline struct ahd_linux_device*
		     ahd_linux_get_device(struct ahd_softc *ahd, u_int channel,
					  u_int target, u_int lun, int alloc);
static struct ahd_cmd *ahd_linux_run_complete_queue(struct ahd_softc *ahd);
static __inline void ahd_linux_check_device_queue(struct ahd_softc *ahd,
						  struct ahd_linux_device *dev);
static __inline struct ahd_linux_device *
		     ahd_linux_next_device_to_run(struct ahd_softc *ahd);
static __inline void ahd_linux_run_device_queues(struct ahd_softc *ahd);
static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
  510. static __inline void
  511. ahd_schedule_completeq(struct ahd_softc *ahd)
  512. {
  513. if ((ahd->platform_data->flags & AHD_RUN_CMPLT_Q_TIMER) == 0) {
  514. ahd->platform_data->flags |= AHD_RUN_CMPLT_Q_TIMER;
  515. ahd->platform_data->completeq_timer.expires = jiffies;
  516. add_timer(&ahd->platform_data->completeq_timer);
  517. }
  518. }
/*
 * Schedule the run-queue tasklet to service queued device work.
 * Must be called with our lock held.
 */
static __inline void
ahd_schedule_runq(struct ahd_softc *ahd)
{
	tasklet_schedule(&ahd->platform_data->runq_tasklet);
}
/* Bind the run-queue tasklet to ahd_runq_tasklet() with this softc. */
static __inline
void ahd_setup_runq_tasklet(struct ahd_softc *ahd)
{
	tasklet_init(&ahd->platform_data->runq_tasklet, ahd_runq_tasklet,
		     (unsigned long)ahd);
}
/* Kill the run-queue tasklet, waiting for any in-flight run to finish. */
static __inline void
ahd_teardown_runq_tasklet(struct ahd_softc *ahd)
{
	tasklet_kill(&ahd->platform_data->runq_tasklet);
}
  538. static __inline struct ahd_linux_device*
  539. ahd_linux_get_device(struct ahd_softc *ahd, u_int channel, u_int target,
  540. u_int lun, int alloc)
  541. {
  542. struct ahd_linux_target *targ;
  543. struct ahd_linux_device *dev;
  544. u_int target_offset;
  545. target_offset = target;
  546. if (channel != 0)
  547. target_offset += 8;
  548. targ = ahd->platform_data->targets[target_offset];
  549. if (targ == NULL) {
  550. if (alloc != 0) {
  551. targ = ahd_linux_alloc_target(ahd, channel, target);
  552. if (targ == NULL)
  553. return (NULL);
  554. } else
  555. return (NULL);
  556. }
  557. dev = targ->devices[lun];
  558. if (dev == NULL && alloc != 0)
  559. dev = ahd_linux_alloc_device(ahd, targ, lun);
  560. return (dev);
  561. }
  562. #define AHD_LINUX_MAX_RETURNED_ERRORS 4
  563. static struct ahd_cmd *
  564. ahd_linux_run_complete_queue(struct ahd_softc *ahd)
  565. {
  566. struct ahd_cmd *acmd;
  567. u_long done_flags;
  568. int with_errors;
  569. with_errors = 0;
  570. ahd_done_lock(ahd, &done_flags);
  571. while ((acmd = TAILQ_FIRST(&ahd->platform_data->completeq)) != NULL) {
  572. Scsi_Cmnd *cmd;
  573. if (with_errors > AHD_LINUX_MAX_RETURNED_ERRORS) {
  574. /*
  575. * Linux uses stack recursion to requeue
  576. * commands that need to be retried. Avoid
  577. * blowing out the stack by "spoon feeding"
  578. * commands that completed with error back
  579. * the operating system in case they are going
  580. * to be retried. "ick"
  581. */
  582. ahd_schedule_completeq(ahd);
  583. break;
  584. }
  585. TAILQ_REMOVE(&ahd->platform_data->completeq,
  586. acmd, acmd_links.tqe);
  587. cmd = &acmd_scsi_cmd(acmd);
  588. cmd->host_scribble = NULL;
  589. if (ahd_cmd_get_transaction_status(cmd) != DID_OK
  590. || (cmd->result & 0xFF) != SCSI_STATUS_OK)
  591. with_errors++;
  592. cmd->scsi_done(cmd);
  593. }
  594. ahd_done_unlock(ahd, &done_flags);
  595. return (acmd);
  596. }
  597. static __inline void
  598. ahd_linux_check_device_queue(struct ahd_softc *ahd,
  599. struct ahd_linux_device *dev)
  600. {
  601. if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) != 0
  602. && dev->active == 0) {
  603. dev->flags &= ~AHD_DEV_FREEZE_TIL_EMPTY;
  604. dev->qfrozen--;
  605. }
  606. if (TAILQ_FIRST(&dev->busyq) == NULL
  607. || dev->openings == 0 || dev->qfrozen != 0)
  608. return;
  609. ahd_linux_run_device_queue(ahd, dev);
  610. }
  611. static __inline struct ahd_linux_device *
  612. ahd_linux_next_device_to_run(struct ahd_softc *ahd)
  613. {
  614. if ((ahd->flags & AHD_RESOURCE_SHORTAGE) != 0
  615. || (ahd->platform_data->qfrozen != 0
  616. && AHD_DV_SIMQ_FROZEN(ahd) == 0))
  617. return (NULL);
  618. return (TAILQ_FIRST(&ahd->platform_data->device_runq));
  619. }
  620. static __inline void
  621. ahd_linux_run_device_queues(struct ahd_softc *ahd)
  622. {
  623. struct ahd_linux_device *dev;
  624. while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
  625. TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
  626. dev->flags &= ~AHD_DEV_ON_RUN_LIST;
  627. ahd_linux_check_device_queue(ahd, dev);
  628. }
  629. }
  630. static __inline void
  631. ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
  632. {
  633. Scsi_Cmnd *cmd;
  634. int direction;
  635. cmd = scb->io_ctx;
  636. direction = cmd->sc_data_direction;
  637. ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
  638. if (cmd->use_sg != 0) {
  639. struct scatterlist *sg;
  640. sg = (struct scatterlist *)cmd->request_buffer;
  641. pci_unmap_sg(ahd->dev_softc, sg, cmd->use_sg, direction);
  642. } else if (cmd->request_bufflen != 0) {
  643. pci_unmap_single(ahd->dev_softc,
  644. scb->platform_data->buf_busaddr,
  645. cmd->request_bufflen, direction);
  646. }
  647. }
/******************************** Macros **************************************/
/* Compose the sequencer SCSIID byte from the command's target and our ID. */
#define BUILD_SCSIID(ahd, cmd)						\
	((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id)

/************************  Host template entry points *************************/
static int	   ahd_linux_detect(Scsi_Host_Template *);
static const char *ahd_linux_info(struct Scsi_Host *);
static int	   ahd_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
/* 2.5+ midlayer: per-device lifecycle hooks replace select_queue_depth. */
static int	   ahd_linux_slave_alloc(Scsi_Device *);
static int	   ahd_linux_slave_configure(Scsi_Device *);
static void	   ahd_linux_slave_destroy(Scsi_Device *);
#if defined(__i386__)
static int	   ahd_linux_biosparam(struct scsi_device*,
				       struct block_device*, sector_t, int[]);
#endif
#else
static int	   ahd_linux_release(struct Scsi_Host *);
static void	   ahd_linux_select_queue_depth(struct Scsi_Host *host,
						Scsi_Device *scsi_devs);
#if defined(__i386__)
static int	   ahd_linux_biosparam(Disk *, kdev_t, int[]);
#endif
#endif
static int	   ahd_linux_bus_reset(Scsi_Cmnd *);
static int	   ahd_linux_dev_reset(Scsi_Cmnd *);
static int	   ahd_linux_abort(Scsi_Cmnd *);
/*
 * Calculate a safe value for AHD_NSEG (as expressed through ahd_linux_nseg).
 *
 * In pre-2.5.X...
 * The midlayer allocates an S/G array dynamically when a command is issued
 * using SCSI malloc.  This array, which is in an OS dependent format that
 * must later be copied to our private S/G list, is sized to house just the
 * number of segments needed for the current transfer.  Since the code that
 * sizes the SCSI malloc pool does not take into consideration fragmentation
 * of the pool, executing transactions numbering just a fraction of our
 * concurrent transaction limit with SG list lengths approaching AHC_NSEG will
 * quickly deplete the SCSI malloc pool of usable space.  Unfortunately, the
 * mid-layer does not properly handle these scsi malloc failures for the S/G
 * array and the result can be a lockup of the I/O subsystem.  We try to size
 * our S/G list so that it satisfies our drivers allocation requirements in
 * addition to avoiding fragmentation of the SCSI malloc pool.
 */
static void
ahd_linux_size_nseg(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	u_int cur_size;
	u_int best_size;

	/*
	 * The SCSI allocator rounds to the nearest 512 bytes
	 * and cannot allocate across a page boundary.  Our algorithm
	 * is to start at 1K of scsi malloc space per-command and
	 * loop through all factors of the PAGE_SIZE and pick the best.
	 */
	best_size = 0;
	for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
		u_int nseg;

		nseg = cur_size / sizeof(struct scatterlist);
		if (nseg < AHD_LINUX_MIN_NSEG)
			continue;

		if (best_size == 0) {
			/* First candidate meeting the minimum wins by default. */
			best_size = cur_size;
			ahd_linux_nseg = nseg;
		} else {
			u_int best_rem;
			u_int cur_rem;

			/*
			 * Compare the traits of the current "best_size"
			 * with the current size to determine if the
			 * current size is a better size.  Smaller remainder
			 * means less wasted space per allocation.
			 */
			best_rem = best_size % sizeof(struct scatterlist);
			cur_rem = cur_size % sizeof(struct scatterlist);
			if (cur_rem < best_rem) {
				best_size = cur_size;
				ahd_linux_nseg = nseg;
			}
		}
	}
#endif
}
  730. /*
  731. * Try to detect an Adaptec 79XX controller.
  732. */
  733. static int
  734. ahd_linux_detect(Scsi_Host_Template *template)
  735. {
  736. struct ahd_softc *ahd;
  737. int found;
  738. int error = 0;
  739. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  740. /*
  741. * It is a bug that the upper layer takes
  742. * this lock just prior to calling us.
  743. */
  744. spin_unlock_irq(&io_request_lock);
  745. #endif
  746. /*
  747. * Sanity checking of Linux SCSI data structures so
  748. * that some of our hacks^H^H^H^H^Hassumptions aren't
  749. * violated.
  750. */
  751. if (offsetof(struct ahd_cmd_internal, end)
  752. > offsetof(struct scsi_cmnd, host_scribble)) {
  753. printf("ahd_linux_detect: SCSI data structures changed.\n");
  754. printf("ahd_linux_detect: Unable to attach\n");
  755. return (0);
  756. }
  757. /*
  758. * Determine an appropriate size for our Scatter Gatther lists.
  759. */
  760. ahd_linux_size_nseg();
  761. #ifdef MODULE
  762. /*
  763. * If we've been passed any parameters, process them now.
  764. */
  765. if (aic79xx)
  766. aic79xx_setup(aic79xx);
  767. #endif
  768. template->proc_name = "aic79xx";
  769. /*
  770. * Initialize our softc list lock prior to
  771. * probing for any adapters.
  772. */
  773. ahd_list_lockinit();
  774. #ifdef CONFIG_PCI
  775. error = ahd_linux_pci_init();
  776. if (error)
  777. return error;
  778. #endif
  779. /*
  780. * Register with the SCSI layer all
  781. * controllers we've found.
  782. */
  783. found = 0;
  784. TAILQ_FOREACH(ahd, &ahd_tailq, links) {
  785. if (ahd_linux_register_host(ahd, template) == 0)
  786. found++;
  787. }
  788. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  789. spin_lock_irq(&io_request_lock);
  790. #endif
  791. aic79xx_detect_complete++;
  792. return 0;
  793. }
  794. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  795. /*
  796. * Free the passed in Scsi_Host memory structures prior to unloading the
  797. * module.
  798. */
  799. static int
  800. ahd_linux_release(struct Scsi_Host * host)
  801. {
  802. struct ahd_softc *ahd;
  803. u_long l;
  804. ahd_list_lock(&l);
  805. if (host != NULL) {
  806. /*
  807. * We should be able to just perform
  808. * the free directly, but check our
  809. * list for extra sanity.
  810. */
  811. ahd = ahd_find_softc(*(struct ahd_softc **)host->hostdata);
  812. if (ahd != NULL) {
  813. u_long s;
  814. ahd_lock(ahd, &s);
  815. ahd_intr_enable(ahd, FALSE);
  816. ahd_unlock(ahd, &s);
  817. ahd_free(ahd);
  818. }
  819. }
  820. ahd_list_unlock(&l);
  821. return (0);
  822. }
  823. #endif
  824. /*
  825. * Return a string describing the driver.
  826. */
  827. static const char *
  828. ahd_linux_info(struct Scsi_Host *host)
  829. {
  830. static char buffer[512];
  831. char ahd_info[256];
  832. char *bp;
  833. struct ahd_softc *ahd;
  834. bp = &buffer[0];
  835. ahd = *(struct ahd_softc **)host->hostdata;
  836. memset(bp, 0, sizeof(buffer));
  837. strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev ");
  838. strcat(bp, AIC79XX_DRIVER_VERSION);
  839. strcat(bp, "\n");
  840. strcat(bp, " <");
  841. strcat(bp, ahd->description);
  842. strcat(bp, ">\n");
  843. strcat(bp, " ");
  844. ahd_controller_info(ahd, ahd_info);
  845. strcat(bp, ahd_info);
  846. strcat(bp, "\n");
  847. return (bp);
  848. }
  849. /*
  850. * Queue an SCB to the controller.
  851. */
  852. static int
  853. ahd_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
  854. {
  855. struct ahd_softc *ahd;
  856. struct ahd_linux_device *dev;
  857. u_long flags;
  858. ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
  859. /*
  860. * Save the callback on completion function.
  861. */
  862. cmd->scsi_done = scsi_done;
  863. ahd_midlayer_entrypoint_lock(ahd, &flags);
  864. /*
  865. * Close the race of a command that was in the process of
  866. * being queued to us just as our simq was frozen. Let
  867. * DV commands through so long as we are only frozen to
  868. * perform DV.
  869. */
  870. if (ahd->platform_data->qfrozen != 0
  871. && AHD_DV_CMD(cmd) == 0) {
  872. ahd_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
  873. ahd_linux_queue_cmd_complete(ahd, cmd);
  874. ahd_schedule_completeq(ahd);
  875. ahd_midlayer_entrypoint_unlock(ahd, &flags);
  876. return (0);
  877. }
  878. dev = ahd_linux_get_device(ahd, cmd->device->channel,
  879. cmd->device->id, cmd->device->lun,
  880. /*alloc*/TRUE);
  881. if (dev == NULL) {
  882. ahd_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
  883. ahd_linux_queue_cmd_complete(ahd, cmd);
  884. ahd_schedule_completeq(ahd);
  885. ahd_midlayer_entrypoint_unlock(ahd, &flags);
  886. printf("%s: aic79xx_linux_queue - Unable to allocate device!\n",
  887. ahd_name(ahd));
  888. return (0);
  889. }
  890. if (cmd->cmd_len > MAX_CDB_LEN)
  891. return (-EINVAL);
  892. cmd->result = CAM_REQ_INPROG << 16;
  893. TAILQ_INSERT_TAIL(&dev->busyq, (struct ahd_cmd *)cmd, acmd_links.tqe);
  894. if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
  895. TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
  896. dev->flags |= AHD_DEV_ON_RUN_LIST;
  897. ahd_linux_run_device_queues(ahd);
  898. }
  899. ahd_midlayer_entrypoint_unlock(ahd, &flags);
  900. return (0);
  901. }
  902. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  903. static int
  904. ahd_linux_slave_alloc(Scsi_Device *device)
  905. {
  906. struct ahd_softc *ahd;
  907. ahd = *((struct ahd_softc **)device->host->hostdata);
  908. if (bootverbose)
  909. printf("%s: Slave Alloc %d\n", ahd_name(ahd), device->id);
  910. return (0);
  911. }
  912. static int
  913. ahd_linux_slave_configure(Scsi_Device *device)
  914. {
  915. struct ahd_softc *ahd;
  916. struct ahd_linux_device *dev;
  917. u_long flags;
  918. ahd = *((struct ahd_softc **)device->host->hostdata);
  919. if (bootverbose)
  920. printf("%s: Slave Configure %d\n", ahd_name(ahd), device->id);
  921. ahd_midlayer_entrypoint_lock(ahd, &flags);
  922. /*
  923. * Since Linux has attached to the device, configure
  924. * it so we don't free and allocate the device
  925. * structure on every command.
  926. */
  927. dev = ahd_linux_get_device(ahd, device->channel,
  928. device->id, device->lun,
  929. /*alloc*/TRUE);
  930. if (dev != NULL) {
  931. dev->flags &= ~AHD_DEV_UNCONFIGURED;
  932. dev->flags |= AHD_DEV_SLAVE_CONFIGURED;
  933. dev->scsi_device = device;
  934. ahd_linux_device_queue_depth(ahd, dev);
  935. }
  936. ahd_midlayer_entrypoint_unlock(ahd, &flags);
  937. return (0);
  938. }
  939. static void
  940. ahd_linux_slave_destroy(Scsi_Device *device)
  941. {
  942. struct ahd_softc *ahd;
  943. struct ahd_linux_device *dev;
  944. u_long flags;
  945. ahd = *((struct ahd_softc **)device->host->hostdata);
  946. if (bootverbose)
  947. printf("%s: Slave Destroy %d\n", ahd_name(ahd), device->id);
  948. ahd_midlayer_entrypoint_lock(ahd, &flags);
  949. dev = ahd_linux_get_device(ahd, device->channel,
  950. device->id, device->lun,
  951. /*alloc*/FALSE);
  952. /*
  953. * Filter out "silly" deletions of real devices by only
  954. * deleting devices that have had slave_configure()
  955. * called on them. All other devices that have not
  956. * been configured will automatically be deleted by
  957. * the refcounting process.
  958. */
  959. if (dev != NULL
  960. && (dev->flags & AHD_DEV_SLAVE_CONFIGURED) != 0) {
  961. dev->flags |= AHD_DEV_UNCONFIGURED;
  962. if (TAILQ_EMPTY(&dev->busyq)
  963. && dev->active == 0
  964. && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
  965. ahd_linux_free_device(ahd, dev);
  966. }
  967. ahd_midlayer_entrypoint_unlock(ahd, &flags);
  968. }
  969. #else
/*
 * Sets the queue depth for each SCSI device hanging
 * off the input host adapter.
 *
 * Pre-2.5 midlayer entry point.  The midlayer hands us the full linked
 * list of scanned devices; we must filter out duplicates and devices
 * belonging to other hosts ourselves.
 */
static void
ahd_linux_select_queue_depth(struct Scsi_Host * host,
			     Scsi_Device * scsi_devs)
{
	Scsi_Device *device;
	Scsi_Device *ldev;
	struct ahd_softc *ahd;
	u_long flags;

	ahd = *((struct ahd_softc **)host->hostdata);
	ahd_lock(ahd, &flags);
	for (device = scsi_devs; device != NULL; device = device->next) {

		/*
		 * Watch out for duplicate devices.  This works around
		 * some quirks in how the SCSI scanning code does its
		 * device management.  The scan is O(n^2), comparing each
		 * device against all earlier list entries by identity.
		 */
		for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
			if (ldev->host == device->host
			 && ldev->channel == device->channel
			 && ldev->id == device->id
			 && ldev->lun == device->lun)
				break;
		}
		/* Skip duplicate. */
		if (ldev != device)
			continue;

		if (device->host == host) {
			struct ahd_linux_device *dev;

			/*
			 * Since Linux has attached to the device, configure
			 * it so we don't free and allocate the device
			 * structure on every command.
			 */
			dev = ahd_linux_get_device(ahd, device->channel,
						   device->id, device->lun,
						   /*alloc*/TRUE);
			if (dev != NULL) {
				dev->flags &= ~AHD_DEV_UNCONFIGURED;
				dev->scsi_device = device;
				ahd_linux_device_queue_depth(ahd, dev);
				device->queue_depth = dev->openings
						    + dev->active;
				if ((dev->flags & (AHD_DEV_Q_BASIC
						 | AHD_DEV_Q_TAGGED)) == 0) {
					/*
					 * We allow the OS to queue 2 untagged
					 * transactions to us at any time even
					 * though we can only execute them
					 * serially on the controller/device.
					 * This should remove some latency.
					 */
					device->queue_depth = 2;
				}
			}
		}
	}
	ahd_unlock(ahd, &flags);
}
  1032. #endif
#if defined(__i386__)
/*
 * Return the disk geometry for the given SCSI device.
 *
 * Reads the partition table first; if it yields a usable geometry that
 * is returned verbatim.  Otherwise fall back to 64 heads / 32 sectors,
 * switching to 255/63 for disks >= 1024 cylinders when extended
 * translation is enabled (via the module option or the adapter flag).
 * The #if maze selects the partition-table access API for the running
 * kernel version; do not reorder across the preprocessor branches.
 */
static int
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		    sector_t capacity, int geom[])
{
	uint8_t *bh;
#else
ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
{
	struct	scsi_device *sdev = disk->device;
	u_long	capacity = disk->capacity;
	struct	buffer_head *bh;
#endif
	int	 heads;
	int	 sectors;
	int	 cylinders;
	int	 ret;
	int	 extended;
	struct	 ahd_softc *ahd;

	ahd = *((struct ahd_softc **)sdev->host->hostdata);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	bh = scsi_bios_ptable(bdev);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
#else
	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
#endif

	if (bh) {
		/* geom[] order is heads, sectors, cylinders. */
		ret = scsi_partsize(bh, capacity,
				    &geom[2], &geom[0], &geom[1]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		kfree(bh);
#else
		brelse(bh);
#endif
		if (ret != -1)
			return (ret);
	}
	heads = 64;
	sectors = 32;
	cylinders = aic_sector_div(capacity, heads, sectors);

	if (aic79xx_extended != 0)
		extended = 1;
	else
		extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0;
	if (extended && cylinders >= 1024) {
		heads = 255;
		sectors = 63;
		cylinders = aic_sector_div(capacity, heads, sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return (0);
}
#endif
  1093. /*
  1094. * Abort the current SCSI command(s).
  1095. */
  1096. static int
  1097. ahd_linux_abort(Scsi_Cmnd *cmd)
  1098. {
  1099. struct ahd_softc *ahd;
  1100. struct ahd_cmd *acmd;
  1101. struct ahd_cmd *list_acmd;
  1102. struct ahd_linux_device *dev;
  1103. struct scb *pending_scb;
  1104. u_long s;
  1105. u_int saved_scbptr;
  1106. u_int active_scbptr;
  1107. u_int last_phase;
  1108. u_int cdb_byte;
  1109. int retval;
  1110. int was_paused;
  1111. int paused;
  1112. int wait;
  1113. int disconnected;
  1114. ahd_mode_state saved_modes;
  1115. pending_scb = NULL;
  1116. paused = FALSE;
  1117. wait = FALSE;
  1118. ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
  1119. acmd = (struct ahd_cmd *)cmd;
  1120. printf("%s:%d:%d:%d: Attempting to abort cmd %p:",
  1121. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  1122. cmd->device->lun, cmd);
  1123. for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
  1124. printf(" 0x%x", cmd->cmnd[cdb_byte]);
  1125. printf("\n");
  1126. /*
  1127. * In all versions of Linux, we have to work around
  1128. * a major flaw in how the mid-layer is locked down
  1129. * if we are to sleep successfully in our error handler
  1130. * while allowing our interrupt handler to run. Since
  1131. * the midlayer acquires either the io_request_lock or
  1132. * our lock prior to calling us, we must use the
  1133. * spin_unlock_irq() method for unlocking our lock.
  1134. * This will force interrupts to be enabled on the
  1135. * current CPU. Since the EH thread should not have
  1136. * been running with CPU interrupts disabled other than
  1137. * by acquiring either the io_request_lock or our own
  1138. * lock, this *should* be safe.
  1139. */
  1140. ahd_midlayer_entrypoint_lock(ahd, &s);
  1141. /*
  1142. * First determine if we currently own this command.
  1143. * Start by searching the device queue. If not found
  1144. * there, check the pending_scb list. If not found
  1145. * at all, and the system wanted us to just abort the
  1146. * command, return success.
  1147. */
  1148. dev = ahd_linux_get_device(ahd, cmd->device->channel,
  1149. cmd->device->id, cmd->device->lun,
  1150. /*alloc*/FALSE);
  1151. if (dev == NULL) {
  1152. /*
  1153. * No target device for this command exists,
  1154. * so we must not still own the command.
  1155. */
  1156. printf("%s:%d:%d:%d: Is not an active device\n",
  1157. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  1158. cmd->device->lun);
  1159. retval = SUCCESS;
  1160. goto no_cmd;
  1161. }
  1162. TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
  1163. if (list_acmd == acmd)
  1164. break;
  1165. }
  1166. if (list_acmd != NULL) {
  1167. printf("%s:%d:%d:%d: Command found on device queue\n",
  1168. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  1169. cmd->device->lun);
  1170. TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
  1171. cmd->result = DID_ABORT << 16;
  1172. ahd_linux_queue_cmd_complete(ahd, cmd);
  1173. retval = SUCCESS;
  1174. goto done;
  1175. }
  1176. /*
  1177. * See if we can find a matching cmd in the pending list.
  1178. */
  1179. LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
  1180. if (pending_scb->io_ctx == cmd)
  1181. break;
  1182. }
  1183. if (pending_scb == NULL) {
  1184. printf("%s:%d:%d:%d: Command not found\n",
  1185. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  1186. cmd->device->lun);
  1187. goto no_cmd;
  1188. }
  1189. if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
  1190. /*
  1191. * We can't queue two recovery actions using the same SCB
  1192. */
  1193. retval = FAILED;
  1194. goto done;
  1195. }
  1196. /*
  1197. * Ensure that the card doesn't do anything
  1198. * behind our back. Also make sure that we
  1199. * didn't "just" miss an interrupt that would
  1200. * affect this cmd.
  1201. */
  1202. was_paused = ahd_is_paused(ahd);
  1203. ahd_pause_and_flushwork(ahd);
  1204. paused = TRUE;
  1205. if ((pending_scb->flags & SCB_ACTIVE) == 0) {
  1206. printf("%s:%d:%d:%d: Command already completed\n",
  1207. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  1208. cmd->device->lun);
  1209. goto no_cmd;
  1210. }
  1211. printf("%s: At time of recovery, card was %spaused\n",
  1212. ahd_name(ahd), was_paused ? "" : "not ");
  1213. ahd_dump_card_state(ahd);
  1214. disconnected = TRUE;
  1215. if (ahd_search_qinfifo(ahd, cmd->device->id, cmd->device->channel + 'A',
  1216. cmd->device->lun, SCB_GET_TAG(pending_scb),
  1217. ROLE_INITIATOR, CAM_REQ_ABORTED,
  1218. SEARCH_COMPLETE) > 0) {
  1219. printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
  1220. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  1221. cmd->device->lun);
  1222. retval = SUCCESS;
  1223. goto done;
  1224. }
  1225. saved_modes = ahd_save_modes(ahd);
  1226. ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
  1227. last_phase = ahd_inb(ahd, LASTPHASE);
  1228. saved_scbptr = ahd_get_scbptr(ahd);
  1229. active_scbptr = saved_scbptr;
  1230. if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
  1231. struct scb *bus_scb;
  1232. bus_scb = ahd_lookup_scb(ahd, active_scbptr);
  1233. if (bus_scb == pending_scb)
  1234. disconnected = FALSE;
  1235. }
  1236. /*
  1237. * At this point, pending_scb is the scb associated with the
  1238. * passed in command. That command is currently active on the
  1239. * bus or is in the disconnected state.
  1240. */
  1241. if (last_phase != P_BUSFREE
  1242. && SCB_GET_TAG(pending_scb) == active_scbptr) {
  1243. /*
  1244. * We're active on the bus, so assert ATN
  1245. * and hope that the target responds.
  1246. */
  1247. pending_scb = ahd_lookup_scb(ahd, active_scbptr);
  1248. pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
  1249. ahd_outb(ahd, MSG_OUT, HOST_MSG);
  1250. ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
  1251. printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
  1252. ahd_name(ahd), cmd->device->channel,
  1253. cmd->device->id, cmd->device->lun);
  1254. wait = TRUE;
  1255. } else if (disconnected) {
  1256. /*
  1257. * Actually re-queue this SCB in an attempt
  1258. * to select the device before it reconnects.
  1259. */
  1260. pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
  1261. ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
  1262. pending_scb->hscb->cdb_len = 0;
  1263. pending_scb->hscb->task_attribute = 0;
  1264. pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
  1265. if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
  1266. /*
  1267. * Mark the SCB has having an outstanding
  1268. * task management function. Should the command
  1269. * complete normally before the task management
  1270. * function can be sent, the host will be notified
  1271. * to abort our requeued SCB.
  1272. */
  1273. ahd_outb(ahd, SCB_TASK_MANAGEMENT,
  1274. pending_scb->hscb->task_management);
  1275. } else {
  1276. /*
  1277. * If non-packetized, set the MK_MESSAGE control
  1278. * bit indicating that we desire to send a message.
  1279. * We also set the disconnected flag since there is
  1280. * no guarantee that our SCB control byte matches
  1281. * the version on the card. We don't want the
  1282. * sequencer to abort the command thinking an
  1283. * unsolicited reselection occurred.
  1284. */
  1285. pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
  1286. /*
  1287. * The sequencer will never re-reference the
  1288. * in-core SCB. To make sure we are notified
  1289. * during reslection, set the MK_MESSAGE flag in
  1290. * the card's copy of the SCB.
  1291. */
  1292. ahd_outb(ahd, SCB_CONTROL,
  1293. ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
  1294. }
  1295. /*
  1296. * Clear out any entries in the QINFIFO first
  1297. * so we are the next SCB for this target
  1298. * to run.
  1299. */
  1300. ahd_search_qinfifo(ahd, cmd->device->id,
  1301. cmd->device->channel + 'A', cmd->device->lun,
  1302. SCB_LIST_NULL, ROLE_INITIATOR,
  1303. CAM_REQUEUE_REQ, SEARCH_COMPLETE);
  1304. ahd_qinfifo_requeue_tail(ahd, pending_scb);
  1305. ahd_set_scbptr(ahd, saved_scbptr);
  1306. ahd_print_path(ahd, pending_scb);
  1307. printf("Device is disconnected, re-queuing SCB\n");
  1308. wait = TRUE;
  1309. } else {
  1310. printf("%s:%d:%d:%d: Unable to deliver message\n",
  1311. ahd_name(ahd), cmd->device->channel,
  1312. cmd->device->id, cmd->device->lun);
  1313. retval = FAILED;
  1314. goto done;
  1315. }
  1316. no_cmd:
  1317. /*
  1318. * Our assumption is that if we don't have the command, no
  1319. * recovery action was required, so we return success. Again,
  1320. * the semantics of the mid-layer recovery engine are not
  1321. * well defined, so this may change in time.
  1322. */
  1323. retval = SUCCESS;
  1324. done:
  1325. if (paused)
  1326. ahd_unpause(ahd);
  1327. if (wait) {
  1328. struct timer_list timer;
  1329. int ret;
  1330. pending_scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
  1331. spin_unlock_irq(&ahd->platform_data->spin_lock);
  1332. init_timer(&timer);
  1333. timer.data = (u_long)pending_scb;
  1334. timer.expires = jiffies + (5 * HZ);
  1335. timer.function = ahd_linux_sem_timeout;
  1336. add_timer(&timer);
  1337. printf("Recovery code sleeping\n");
  1338. down(&ahd->platform_data->eh_sem);
  1339. printf("Recovery code awake\n");
  1340. ret = del_timer_sync(&timer);
  1341. if (ret == 0) {
  1342. printf("Timer Expired\n");
  1343. retval = FAILED;
  1344. }
  1345. spin_lock_irq(&ahd->platform_data->spin_lock);
  1346. }
  1347. ahd_schedule_runq(ahd);
  1348. ahd_linux_run_complete_queue(ahd);
  1349. ahd_midlayer_entrypoint_unlock(ahd, &s);
  1350. return (retval);
  1351. }
/*
 * Completion handler for the private recovery command built by
 * ahd_linux_dev_reset().  The command was allocated with the driver's
 * malloc() wrapper, so release it with the matching free() wrapper.
 */
static void
ahd_linux_dev_reset_complete(Scsi_Cmnd *cmd)
{
	free(cmd, M_DEVBUF);
}
  1357. /*
  1358. * Attempt to send a target reset message to the device that timed out.
  1359. */
  1360. static int
  1361. ahd_linux_dev_reset(Scsi_Cmnd *cmd)
  1362. {
  1363. struct ahd_softc *ahd;
  1364. struct scsi_cmnd *recovery_cmd;
  1365. struct ahd_linux_device *dev;
  1366. struct ahd_initiator_tinfo *tinfo;
  1367. struct ahd_tmode_tstate *tstate;
  1368. struct scb *scb;
  1369. struct hardware_scb *hscb;
  1370. u_long s;
  1371. struct timer_list timer;
  1372. int retval;
  1373. ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
  1374. recovery_cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
  1375. if (!recovery_cmd)
  1376. return (FAILED);
  1377. memset(recovery_cmd, 0, sizeof(struct scsi_cmnd));
  1378. recovery_cmd->device = cmd->device;
  1379. recovery_cmd->scsi_done = ahd_linux_dev_reset_complete;
  1380. #if AHD_DEBUG
  1381. if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
  1382. printf("%s:%d:%d:%d: Device reset called for cmd %p\n",
  1383. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  1384. cmd->device->lun, cmd);
  1385. #endif
  1386. ahd_lock(ahd, &s);
  1387. dev = ahd_linux_get_device(ahd, cmd->device->channel, cmd->device->id,
  1388. cmd->device->lun, /*alloc*/FALSE);
  1389. if (dev == NULL) {
  1390. ahd_unlock(ahd, &s);
  1391. kfree(recovery_cmd);
  1392. return (FAILED);
  1393. }
  1394. if ((scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX)) == NULL) {
  1395. ahd_unlock(ahd, &s);
  1396. kfree(recovery_cmd);
  1397. return (FAILED);
  1398. }
  1399. tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
  1400. cmd->device->id, &tstate);
  1401. recovery_cmd->result = CAM_REQ_INPROG << 16;
  1402. recovery_cmd->host_scribble = (char *)scb;
  1403. scb->io_ctx = recovery_cmd;
  1404. scb->platform_data->dev = dev;
  1405. scb->sg_count = 0;
  1406. ahd_set_residual(scb, 0);
  1407. ahd_set_sense_residual(scb, 0);
  1408. hscb = scb->hscb;
  1409. hscb->control = 0;
  1410. hscb->scsiid = BUILD_SCSIID(ahd, cmd);
  1411. hscb->lun = cmd->device->lun;
  1412. hscb->cdb_len = 0;
  1413. hscb->task_management = SIU_TASKMGMT_LUN_RESET;
  1414. scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
  1415. if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
  1416. scb->flags |= SCB_PACKETIZED;
  1417. } else {
  1418. hscb->control |= MK_MESSAGE;
  1419. }
  1420. dev->openings--;
  1421. dev->active++;
  1422. dev->commands_issued++;
  1423. LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
  1424. ahd_queue_scb(ahd, scb);
  1425. scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
  1426. spin_unlock_irq(&ahd->platform_data->spin_lock);
  1427. init_timer(&timer);
  1428. timer.data = (u_long)scb;
  1429. timer.expires = jiffies + (5 * HZ);
  1430. timer.function = ahd_linux_sem_timeout;
  1431. add_timer(&timer);
  1432. printf("Recovery code sleeping\n");
  1433. down(&ahd->platform_data->eh_sem);
  1434. printf("Recovery code awake\n");
  1435. retval = SUCCESS;
  1436. if (del_timer_sync(&timer) == 0) {
  1437. printf("Timer Expired\n");
  1438. retval = FAILED;
  1439. }
  1440. spin_lock_irq(&ahd->platform_data->spin_lock);
  1441. ahd_schedule_runq(ahd);
  1442. ahd_linux_run_complete_queue(ahd);
  1443. ahd_unlock(ahd, &s);
  1444. printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
  1445. return (retval);
  1446. }
  1447. /*
  1448. * Reset the SCSI bus.
  1449. */
  1450. static int
  1451. ahd_linux_bus_reset(Scsi_Cmnd *cmd)
  1452. {
  1453. struct ahd_softc *ahd;
  1454. u_long s;
  1455. int found;
  1456. ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
  1457. #ifdef AHD_DEBUG
  1458. if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
  1459. printf("%s: Bus reset called for cmd %p\n",
  1460. ahd_name(ahd), cmd);
  1461. #endif
  1462. ahd_lock(ahd, &s);
  1463. found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
  1464. /*initiate reset*/TRUE);
  1465. ahd_linux_run_complete_queue(ahd);
  1466. ahd_unlock(ahd, &s);
  1467. if (bootverbose)
  1468. printf("%s: SCSI bus reset delivered. "
  1469. "%d SCBs aborted.\n", ahd_name(ahd), found);
  1470. return (SUCCESS);
  1471. }
/*
 * SCSI host template describing this driver to the Linux SCSI midlayer.
 * The eh_* entry points hook into the recovery routines defined above.
 */
Scsi_Host_Template aic79xx_driver_template = {
	.module			= THIS_MODULE,
	.name			= "aic79xx",
	.proc_info		= ahd_linux_proc_info,
	.info			= ahd_linux_info,
	.queuecommand		= ahd_linux_queue,
	.eh_abort_handler	= ahd_linux_abort,
	.eh_device_reset_handler = ahd_linux_dev_reset,
	.eh_bus_reset_handler	= ahd_linux_bus_reset,
#if defined(__i386__)
	/* BIOS geometry translation is only meaningful on x86. */
	.bios_param		= ahd_linux_biosparam,
#endif
	.can_queue		= AHD_MAX_QUEUE,
	.this_id		= -1,
	.cmd_per_lun		= 2,
	.use_clustering		= ENABLE_CLUSTERING,
	.slave_alloc		= ahd_linux_slave_alloc,
	.slave_configure	= ahd_linux_slave_configure,
	.slave_destroy		= ahd_linux_slave_destroy,
};
/**************************** Tasklet Handler *********************************/
/*
 * In 2.4.X and above, this routine is called from a tasklet,
 * so we must re-acquire our lock prior to executing this code.
 * In all prior kernels, ahd_schedule_runq() calls this routine
 * directly and ahd_schedule_runq() is called with our lock held.
 */
static void
ahd_runq_tasklet(unsigned long data)
{
	struct ahd_softc* ahd;
	struct ahd_linux_device *dev;
	u_long flags;

	ahd = (struct ahd_softc *)data;
	ahd_lock(ahd, &flags);
	/* Drain the device run queue, one device per lock hold. */
	while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
		TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
		dev->flags &= ~AHD_DEV_ON_RUN_LIST;
		ahd_linux_check_device_queue(ahd, dev);
		/* Yield to our interrupt handler between devices. */
		ahd_unlock(ahd, &flags);
		ahd_lock(ahd, &flags);
	}
	ahd_unlock(ahd, &flags);
}
  1517. /******************************** Bus DMA *************************************/
  1518. int
  1519. ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
  1520. bus_size_t alignment, bus_size_t boundary,
  1521. dma_addr_t lowaddr, dma_addr_t highaddr,
  1522. bus_dma_filter_t *filter, void *filterarg,
  1523. bus_size_t maxsize, int nsegments,
  1524. bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
  1525. {
  1526. bus_dma_tag_t dmat;
  1527. dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
  1528. if (dmat == NULL)
  1529. return (ENOMEM);
  1530. /*
  1531. * Linux is very simplistic about DMA memory. For now don't
  1532. * maintain all specification information. Once Linux supplies
  1533. * better facilities for doing these operations, or the
  1534. * needs of this particular driver change, we might need to do
  1535. * more here.
  1536. */
  1537. dmat->alignment = alignment;
  1538. dmat->boundary = boundary;
  1539. dmat->maxsize = maxsize;
  1540. *ret_tag = dmat;
  1541. return (0);
  1542. }
void
ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
{
	/* Release a tag previously created by ahd_dma_tag_create(). */
	free(dmat, M_DEVBUF);
}
  1548. int
  1549. ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
  1550. int flags, bus_dmamap_t *mapp)
  1551. {
  1552. bus_dmamap_t map;
  1553. map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
  1554. if (map == NULL)
  1555. return (ENOMEM);
  1556. /*
  1557. * Although we can dma data above 4GB, our
  1558. * "consistent" memory is below 4GB for
  1559. * space efficiency reasons (only need a 4byte
  1560. * address). For this reason, we have to reset
  1561. * our dma mask when doing allocations.
  1562. */
  1563. if (ahd->dev_softc != NULL)
  1564. if (pci_set_dma_mask(ahd->dev_softc, 0xFFFFFFFF)) {
  1565. printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
  1566. kfree(map);
  1567. return (ENODEV);
  1568. }
  1569. *vaddr = pci_alloc_consistent(ahd->dev_softc,
  1570. dmat->maxsize, &map->bus_addr);
  1571. if (ahd->dev_softc != NULL)
  1572. if (pci_set_dma_mask(ahd->dev_softc,
  1573. ahd->platform_data->hw_dma_mask)) {
  1574. printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
  1575. kfree(map);
  1576. return (ENODEV);
  1577. }
  1578. if (*vaddr == NULL)
  1579. return (ENOMEM);
  1580. *mapp = map;
  1581. return(0);
  1582. }
void
ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
	/*
	 * Return a buffer obtained from ahd_dmamem_alloc() to the PCI
	 * layer.  The map itself is released by ahd_dmamap_destroy().
	 */
	pci_free_consistent(ahd->dev_softc, dmat->maxsize,
			    vaddr, map->bus_addr);
}
  1590. int
  1591. ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
  1592. void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
  1593. void *cb_arg, int flags)
  1594. {
  1595. /*
  1596. * Assume for now that this will only be used during
  1597. * initialization and not for per-transaction buffer mapping.
  1598. */
  1599. bus_dma_segment_t stack_sg;
  1600. stack_sg.ds_addr = map->bus_addr;
  1601. stack_sg.ds_len = dmat->maxsize;
  1602. cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
  1603. return (0);
  1604. }
void
ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/*
	 * The map may be NULL in our < 2.3.X implementation.
	 */
	if (map != NULL)
		free(map, M_DEVBUF);
}
int
ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do: ahd_dmamap_load() keeps no per-mapping state. */
	return (0);
}
  1620. /********************* Platform Dependent Functions ***************************/
  1621. /*
  1622. * Compare "left hand" softc with "right hand" softc, returning:
  1623. * < 0 - lahd has a lower priority than rahd
  1624. * 0 - Softcs are equal
  1625. * > 0 - lahd has a higher priority than rahd
  1626. */
  1627. int
  1628. ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
  1629. {
  1630. int value;
  1631. /*
  1632. * Under Linux, cards are ordered as follows:
  1633. * 1) PCI devices that are marked as the boot controller.
  1634. * 2) PCI devices with BIOS enabled sorted by bus/slot/func.
  1635. * 3) All remaining PCI devices sorted by bus/slot/func.
  1636. */
  1637. #if 0
  1638. value = (lahd->flags & AHD_BOOT_CHANNEL)
  1639. - (rahd->flags & AHD_BOOT_CHANNEL);
  1640. if (value != 0)
  1641. /* Controllers set for boot have a *higher* priority */
  1642. return (value);
  1643. #endif
  1644. value = (lahd->flags & AHD_BIOS_ENABLED)
  1645. - (rahd->flags & AHD_BIOS_ENABLED);
  1646. if (value != 0)
  1647. /* Controllers with BIOS enabled have a *higher* priority */
  1648. return (value);
  1649. /* Still equal. Sort by bus/slot/func. */
  1650. if (aic79xx_reverse_scan != 0)
  1651. value = ahd_get_pci_bus(lahd->dev_softc)
  1652. - ahd_get_pci_bus(rahd->dev_softc);
  1653. else
  1654. value = ahd_get_pci_bus(rahd->dev_softc)
  1655. - ahd_get_pci_bus(lahd->dev_softc);
  1656. if (value != 0)
  1657. return (value);
  1658. if (aic79xx_reverse_scan != 0)
  1659. value = ahd_get_pci_slot(lahd->dev_softc)
  1660. - ahd_get_pci_slot(rahd->dev_softc);
  1661. else
  1662. value = ahd_get_pci_slot(rahd->dev_softc)
  1663. - ahd_get_pci_slot(lahd->dev_softc);
  1664. if (value != 0)
  1665. return (value);
  1666. value = rahd->channel - lahd->channel;
  1667. return (value);
  1668. }
  1669. static void
  1670. ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
  1671. {
  1672. if ((instance >= 0) && (targ >= 0)
  1673. && (instance < NUM_ELEMENTS(aic79xx_tag_info))
  1674. && (targ < AHD_NUM_TARGETS)) {
  1675. aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
  1676. if (bootverbose)
  1677. printf("tag_info[%d:%d] = %d\n", instance, targ, value);
  1678. }
  1679. }
  1680. static void
  1681. ahd_linux_setup_rd_strm_info(u_long arg, int instance, int targ, int32_t value)
  1682. {
  1683. if ((instance >= 0)
  1684. && (instance < NUM_ELEMENTS(aic79xx_rd_strm_info))) {
  1685. aic79xx_rd_strm_info[instance] = value & 0xFFFF;
  1686. if (bootverbose)
  1687. printf("rd_strm[%d] = 0x%x\n", instance, value);
  1688. }
  1689. }
  1690. static void
  1691. ahd_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
  1692. {
  1693. if ((instance >= 0)
  1694. && (instance < NUM_ELEMENTS(aic79xx_dv_settings))) {
  1695. aic79xx_dv_settings[instance] = value;
  1696. if (bootverbose)
  1697. printf("dv[%d] = %d\n", instance, value);
  1698. }
  1699. }
  1700. static void
  1701. ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
  1702. {
  1703. if ((instance >= 0)
  1704. && (instance < NUM_ELEMENTS(aic79xx_iocell_info))) {
  1705. uint8_t *iocell_info;
  1706. iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
  1707. iocell_info[index] = value & 0xFFFF;
  1708. if (bootverbose)
  1709. printf("iocell[%d:%ld] = %d\n", instance, index, value);
  1710. }
  1711. }
  1712. static void
  1713. ahd_linux_setup_tag_info_global(char *p)
  1714. {
  1715. int tags, i, j;
  1716. tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
  1717. printf("Setting Global Tags= %d\n", tags);
  1718. for (i = 0; i < NUM_ELEMENTS(aic79xx_tag_info); i++) {
  1719. for (j = 0; j < AHD_NUM_TARGETS; j++) {
  1720. aic79xx_tag_info[i].tag_commands[j] = tags;
  1721. }
  1722. }
  1723. }
/*
 * Handle Linux boot parameters. This routine allows for assigning a value
 * to a parameter with a ':' between the parameter and the value.
 * ie. aic79xx=stpwlev:1,extended
 */
static int
aic79xx_setup(char *s)
{
	int i, n;
	char *p;
	char *end;

	/*
	 * Option table.  Entries with a NULL flag pointer are dispatched
	 * to dedicated brace-option parsers below; the rest are plain
	 * uint32_t settings written directly.
	 */
	static struct {
		const char *name;
		uint32_t *flag;
	} options[] = {
		{ "extended", &aic79xx_extended },
		{ "no_reset", &aic79xx_no_reset },
		{ "verbose", &aic79xx_verbose },
		{ "allow_memio", &aic79xx_allow_memio},
#ifdef AHD_DEBUG
		{ "debug", &ahd_debug },
#endif
		{ "reverse_scan", &aic79xx_reverse_scan },
		{ "periodic_otag", &aic79xx_periodic_otag },
		{ "pci_parity", &aic79xx_pci_parity },
		{ "seltime", &aic79xx_seltime },
		{ "tag_info", NULL },
		{ "global_tag_depth", NULL},
		{ "rd_strm", NULL },
		{ "dv", NULL },
		{ "slewrate", NULL },
		{ "precomp", NULL },
		{ "amplitude", NULL },
	};

	end = strchr(s, '\0');

	/*
	 * XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
	 * will never be 0 in this case.
	 */
	n = 0;

	/* Options are separated by ',' or '.'. */
	while ((p = strsep(&s, ",.")) != NULL) {
		if (*p == '\0')
			continue;
		/* Match by prefix; n is the matched option name length. */
		for (i = 0; i < NUM_ELEMENTS(options); i++) {
			n = strlen(options[i].name);
			if (strncmp(options[i].name, p, n) == 0)
				break;
		}
		if (i == NUM_ELEMENTS(options))
			continue;

		if (strncmp(p, "global_tag_depth", n) == 0) {
			ahd_linux_setup_tag_info_global(p + n);
		} else if (strncmp(p, "tag_info", n) == 0) {
			s = aic_parse_brace_option("tag_info", p + n, end,
			    2, ahd_linux_setup_tag_info, 0);
		} else if (strncmp(p, "rd_strm", n) == 0) {
			s = aic_parse_brace_option("rd_strm", p + n, end,
			    1, ahd_linux_setup_rd_strm_info, 0);
		} else if (strncmp(p, "dv", n) == 0) {
			s = aic_parse_brace_option("dv", p + n, end, 1,
			    ahd_linux_setup_dv, 0);
		} else if (strncmp(p, "slewrate", n) == 0) {
			s = aic_parse_brace_option("slewrate",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_SLEWRATE_INDEX);
		} else if (strncmp(p, "precomp", n) == 0) {
			s = aic_parse_brace_option("precomp",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_PRECOMP_INDEX);
		} else if (strncmp(p, "amplitude", n) == 0) {
			s = aic_parse_brace_option("amplitude",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_AMPLITUDE_INDEX);
		} else if (p[n] == ':') {
			/* "name:value" form. */
			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
		} else if (!strncmp(p, "verbose", n)) {
			/* Bare "verbose" simply enables the flag. */
			*(options[i].flag) = 1;
		} else {
			/* Any other bare flag name toggles every bit. */
			*(options[i].flag) ^= 0xFFFFFFFF;
		}
	}
	return 1;
}

__setup("aic79xx=", aic79xx_setup);

uint32_t aic79xx_verbose;
  1809. int
  1810. ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
  1811. {
  1812. char buf[80];
  1813. struct Scsi_Host *host;
  1814. char *new_name;
  1815. u_long s;
  1816. u_long target;
  1817. template->name = ahd->description;
  1818. host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
  1819. if (host == NULL)
  1820. return (ENOMEM);
  1821. *((struct ahd_softc **)host->hostdata) = ahd;
  1822. ahd_lock(ahd, &s);
  1823. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  1824. scsi_assign_lock(host, &ahd->platform_data->spin_lock);
  1825. #elif AHD_SCSI_HAS_HOST_LOCK != 0
  1826. host->lock = &ahd->platform_data->spin_lock;
  1827. #endif
  1828. ahd->platform_data->host = host;
  1829. host->can_queue = AHD_MAX_QUEUE;
  1830. host->cmd_per_lun = 2;
  1831. host->sg_tablesize = AHD_NSEG;
  1832. host->this_id = ahd->our_id;
  1833. host->irq = ahd->platform_data->irq;
  1834. host->max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
  1835. host->max_lun = AHD_NUM_LUNS;
  1836. host->max_channel = 0;
  1837. host->sg_tablesize = AHD_NSEG;
  1838. ahd_set_unit(ahd, ahd_linux_next_unit());
  1839. sprintf(buf, "scsi%d", host->host_no);
  1840. new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
  1841. if (new_name != NULL) {
  1842. strcpy(new_name, buf);
  1843. ahd_set_name(ahd, new_name);
  1844. }
  1845. host->unique_id = ahd->unit;
  1846. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
  1847. scsi_set_pci_device(host, ahd->dev_softc);
  1848. #endif
  1849. ahd_linux_setup_user_rd_strm_settings(ahd);
  1850. ahd_linux_initialize_scsi_bus(ahd);
  1851. ahd_unlock(ahd, &s);
  1852. ahd->platform_data->dv_pid = kernel_thread(ahd_linux_dv_thread, ahd, 0);
  1853. ahd_lock(ahd, &s);
  1854. if (ahd->platform_data->dv_pid < 0) {
  1855. printf("%s: Failed to create DV thread, error= %d\n",
  1856. ahd_name(ahd), ahd->platform_data->dv_pid);
  1857. return (-ahd->platform_data->dv_pid);
  1858. }
  1859. /*
  1860. * Initially allocate *all* of our linux target objects
  1861. * so that the DV thread will scan them all in parallel
  1862. * just after driver initialization. Any device that
  1863. * does not exist will have its target object destroyed
  1864. * by the selection timeout handler. In the case of a
  1865. * device that appears after the initial DV scan, async
  1866. * negotiation will occur for the first command, and DV
  1867. * will comence should that first command be successful.
  1868. */
  1869. for (target = 0; target < host->max_id; target++) {
  1870. /*
  1871. * Skip our own ID. Some Compaq/HP storage devices
  1872. * have enclosure management devices that respond to
  1873. * single bit selection (i.e. selecting ourselves).
  1874. * It is expected that either an external application
  1875. * or a modified kernel will be used to probe this
  1876. * ID if it is appropriate. To accommodate these
  1877. * installations, ahc_linux_alloc_target() will allocate
  1878. * for our ID if asked to do so.
  1879. */
  1880. if (target == ahd->our_id)
  1881. continue;
  1882. ahd_linux_alloc_target(ahd, 0, target);
  1883. }
  1884. ahd_intr_enable(ahd, TRUE);
  1885. ahd_linux_start_dv(ahd);
  1886. ahd_unlock(ahd, &s);
  1887. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  1888. scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */
  1889. scsi_scan_host(host);
  1890. #endif
  1891. return (0);
  1892. }
uint64_t
ahd_linux_get_memsize(void)
{
	/* Report total system RAM in bytes via the kernel's sysinfo. */
	struct sysinfo si;

	si_meminfo(&si);
	/* totalram is in pages; scale by the page size. */
	return ((uint64_t)si.totalram << PAGE_SHIFT);
}
  1900. /*
  1901. * Find the smallest available unit number to use
  1902. * for a new device. We don't just use a static
  1903. * count to handle the "repeated hot-(un)plug"
  1904. * scenario.
  1905. */
  1906. static int
  1907. ahd_linux_next_unit(void)
  1908. {
  1909. struct ahd_softc *ahd;
  1910. int unit;
  1911. unit = 0;
  1912. retry:
  1913. TAILQ_FOREACH(ahd, &ahd_tailq, links) {
  1914. if (ahd->unit == unit) {
  1915. unit++;
  1916. goto retry;
  1917. }
  1918. }
  1919. return (unit);
  1920. }
/*
 * Place the SCSI bus into a known state by either resetting it,
 * or forcing transfer negotiations on the next command to any
 * target.
 */
static void
ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
{
	u_int target_id;
	u_int numtarg;

	target_id = 0;
	numtarg = 0;

	/* The no_reset boot option overrides the reset-at-boot flag. */
	if (aic79xx_no_reset != 0)
		ahd->flags &= ~AHD_RESET_BUS_A;

	if ((ahd->flags & AHD_RESET_BUS_A) != 0)
		ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE);
	else
		/* No reset: renegotiate with every possible target. */
		numtarg = (ahd->features & AHD_WIDE) ? 16 : 8;

	/*
	 * Force negotiation to async for all targets that
	 * will not see an initial bus reset.
	 */
	for (; target_id < numtarg; target_id++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    target_id, &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
				    CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
		ahd_update_neg_request(ahd, &devinfo, tstate,
				       tinfo, AHD_NEG_ALWAYS);
	}
	/* Give the bus some time to recover */
	if ((ahd->flags & AHD_RESET_BUS_A) != 0) {
		/*
		 * Freeze the queue; the timer releases it after
		 * AIC79XX_RESET_DELAY milliseconds.
		 */
		ahd_freeze_simq(ahd);
		init_timer(&ahd->platform_data->reset_timer);
		ahd->platform_data->reset_timer.data = (u_long)ahd;
		ahd->platform_data->reset_timer.expires =
		    jiffies + (AIC79XX_RESET_DELAY * HZ)/1000;
		ahd->platform_data->reset_timer.function =
		    (ahd_linux_callback_t *)ahd_release_simq;
		add_timer(&ahd->platform_data->reset_timer);
	}
}
/*
 * Allocate and initialize the Linux-specific per-controller state.
 * Returns ENOMEM on allocation failure, 0 otherwise.
 */
int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data =
	    malloc(sizeof(struct ahd_platform_data), M_DEVBUF, M_NOWAIT);
	if (ahd->platform_data == NULL)
		return (ENOMEM);
	memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
	TAILQ_INIT(&ahd->platform_data->completeq);
	TAILQ_INIT(&ahd->platform_data->device_runq);
	ahd->platform_data->irq = AHD_LINUX_NOIRQ;
	ahd->platform_data->hw_dma_mask = 0xFFFFFFFF;
	ahd_lockinit(ahd);
	ahd_done_lockinit(ahd);
	/* Timer used to run the completion queue from thread context. */
	init_timer(&ahd->platform_data->completeq_timer);
	ahd->platform_data->completeq_timer.data = (u_long)ahd;
	ahd->platform_data->completeq_timer.function =
	    (ahd_linux_callback_t *)ahd_linux_thread_run_complete_queue;
	/* Semaphores start locked; completions "up" them later. */
	init_MUTEX_LOCKED(&ahd->platform_data->eh_sem);
	init_MUTEX_LOCKED(&ahd->platform_data->dv_sem);
	init_MUTEX_LOCKED(&ahd->platform_data->dv_cmd_sem);
	ahd_setup_runq_tasklet(ahd);
	/* Selection timeout: low two bits of the boot option, shifted
	 * into the hardware's field position. */
	ahd->seltime = (aic79xx_seltime & 0x3) << 4;
	return (0);
}
/*
 * Tear down all Linux-specific controller state: deferred work,
 * the DV thread, the SCSI host, target/device objects, the IRQ,
 * and any claimed I/O or memory regions.
 */
void
ahd_platform_free(struct ahd_softc *ahd)
{
	struct ahd_linux_target *targ;
	struct ahd_linux_device *dev;
	int i, j;

	if (ahd->platform_data != NULL) {
		/* Quiesce deferred work before freeing anything. */
		del_timer_sync(&ahd->platform_data->completeq_timer);
		ahd_linux_kill_dv_thread(ahd);
		ahd_teardown_runq_tasklet(ahd);
		if (ahd->platform_data->host != NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			scsi_remove_host(ahd->platform_data->host);
#endif
			scsi_host_put(ahd->platform_data->host);
		}

		/* destroy all of the device and target objects */
		for (i = 0; i < AHD_NUM_TARGETS; i++) {
			targ = ahd->platform_data->targets[i];
			if (targ != NULL) {
				/* Keep target around through the loop. */
				targ->refcount++;
				for (j = 0; j < AHD_NUM_LUNS; j++) {
					if (targ->devices[j] == NULL)
						continue;
					dev = targ->devices[j];
					ahd_linux_free_device(ahd, dev);
				}
				/*
				 * Forcibly free the target now that
				 * all devices are gone.
				 */
				ahd_linux_free_target(ahd, targ);
			}
		}
		/* Release the IRQ, then any PIO/MMIO regions we claimed. */
		if (ahd->platform_data->irq != AHD_LINUX_NOIRQ)
			free_irq(ahd->platform_data->irq, ahd);
		if (ahd->tags[0] == BUS_SPACE_PIO
		 && ahd->bshs[0].ioport != 0)
			release_region(ahd->bshs[0].ioport, 256);
		if (ahd->tags[1] == BUS_SPACE_PIO
		 && ahd->bshs[1].ioport != 0)
			release_region(ahd->bshs[1].ioport, 256);
		if (ahd->tags[0] == BUS_SPACE_MEMIO
		 && ahd->bshs[0].maddr != NULL) {
			iounmap(ahd->bshs[0].maddr);
			release_mem_region(ahd->platform_data->mem_busaddr,
					   0x1000);
		}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		/*
		 * In 2.4 we detach from the scsi midlayer before the PCI
		 * layer invokes our remove callback.  No per-instance
		 * detach is provided, so we must reach inside the PCI
		 * subsystem's internals and detach our driver manually.
		 */
		if (ahd->dev_softc != NULL)
			ahd->dev_softc->driver = NULL;
#endif
		free(ahd->platform_data, M_DEVBUF);
	}
}
  2053. void
  2054. ahd_platform_init(struct ahd_softc *ahd)
  2055. {
  2056. /*
  2057. * Lookup and commit any modified IO Cell options.
  2058. */
  2059. if (ahd->unit < NUM_ELEMENTS(aic79xx_iocell_info)) {
  2060. struct ahd_linux_iocell_opts *iocell_opts;
  2061. iocell_opts = &aic79xx_iocell_info[ahd->unit];
  2062. if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
  2063. AHD_SET_PRECOMP(ahd, iocell_opts->precomp);
  2064. if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE)
  2065. AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate);
  2066. if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE)
  2067. AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude);
  2068. }
  2069. }
  2070. void
  2071. ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
  2072. {
  2073. ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
  2074. SCB_GET_CHANNEL(ahd, scb),
  2075. SCB_GET_LUN(scb), SCB_LIST_NULL,
  2076. ROLE_UNKNOWN, CAM_REQUEUE_REQ);
  2077. }
  2078. void
  2079. ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
  2080. ahd_queue_alg alg)
  2081. {
  2082. struct ahd_linux_device *dev;
  2083. int was_queuing;
  2084. int now_queuing;
  2085. dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
  2086. devinfo->target,
  2087. devinfo->lun, /*alloc*/FALSE);
  2088. if (dev == NULL)
  2089. return;
  2090. was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
  2091. switch (alg) {
  2092. default:
  2093. case AHD_QUEUE_NONE:
  2094. now_queuing = 0;
  2095. break;
  2096. case AHD_QUEUE_BASIC:
  2097. now_queuing = AHD_DEV_Q_BASIC;
  2098. break;
  2099. case AHD_QUEUE_TAGGED:
  2100. now_queuing = AHD_DEV_Q_TAGGED;
  2101. break;
  2102. }
  2103. if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0
  2104. && (was_queuing != now_queuing)
  2105. && (dev->active != 0)) {
  2106. dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY;
  2107. dev->qfrozen++;
  2108. }
  2109. dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG);
  2110. if (now_queuing) {
  2111. u_int usertags;
  2112. usertags = ahd_linux_user_tagdepth(ahd, devinfo);
  2113. if (!was_queuing) {
  2114. /*
  2115. * Start out agressively and allow our
  2116. * dynamic queue depth algorithm to take
  2117. * care of the rest.
  2118. */
  2119. dev->maxtags = usertags;
  2120. dev->openings = dev->maxtags - dev->active;
  2121. }
  2122. if (dev->maxtags == 0) {
  2123. /*
  2124. * Queueing is disabled by the user.
  2125. */
  2126. dev->openings = 1;
  2127. } else if (alg == AHD_QUEUE_TAGGED) {
  2128. dev->flags |= AHD_DEV_Q_TAGGED;
  2129. if (aic79xx_periodic_otag != 0)
  2130. dev->flags |= AHD_DEV_PERIODIC_OTAG;
  2131. } else
  2132. dev->flags |= AHD_DEV_Q_BASIC;
  2133. } else {
  2134. /* We can only have one opening. */
  2135. dev->maxtags = 0;
  2136. dev->openings = 1 - dev->active;
  2137. }
  2138. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
  2139. if (dev->scsi_device != NULL) {
  2140. switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
  2141. case AHD_DEV_Q_BASIC:
  2142. scsi_adjust_queue_depth(dev->scsi_device,
  2143. MSG_SIMPLE_TASK,
  2144. dev->openings + dev->active);
  2145. break;
  2146. case AHD_DEV_Q_TAGGED:
  2147. scsi_adjust_queue_depth(dev->scsi_device,
  2148. MSG_ORDERED_TASK,
  2149. dev->openings + dev->active);
  2150. break;
  2151. default:
  2152. /*
  2153. * We allow the OS to queue 2 untagged transactions to
  2154. * us at any time even though we can only execute them
  2155. * serially on the controller/device. This should
  2156. * remove some latency.
  2157. */
  2158. scsi_adjust_queue_depth(dev->scsi_device,
  2159. /*NON-TAGGED*/0,
  2160. /*queue depth*/2);
  2161. break;
  2162. }
  2163. }
  2164. #endif
  2165. }
  2166. int
  2167. ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
  2168. int lun, u_int tag, role_t role, uint32_t status)
  2169. {
  2170. int targ;
  2171. int maxtarg;
  2172. int maxlun;
  2173. int clun;
  2174. int count;
  2175. if (tag != SCB_LIST_NULL)
  2176. return (0);
  2177. targ = 0;
  2178. if (target != CAM_TARGET_WILDCARD) {
  2179. targ = target;
  2180. maxtarg = targ + 1;
  2181. } else {
  2182. maxtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
  2183. }
  2184. clun = 0;
  2185. if (lun != CAM_LUN_WILDCARD) {
  2186. clun = lun;
  2187. maxlun = clun + 1;
  2188. } else {
  2189. maxlun = AHD_NUM_LUNS;
  2190. }
  2191. count = 0;
  2192. for (; targ < maxtarg; targ++) {
  2193. for (; clun < maxlun; clun++) {
  2194. struct ahd_linux_device *dev;
  2195. struct ahd_busyq *busyq;
  2196. struct ahd_cmd *acmd;
  2197. dev = ahd_linux_get_device(ahd, /*chan*/0, targ,
  2198. clun, /*alloc*/FALSE);
  2199. if (dev == NULL)
  2200. continue;
  2201. busyq = &dev->busyq;
  2202. while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
  2203. Scsi_Cmnd *cmd;
  2204. cmd = &acmd_scsi_cmd(acmd);
  2205. TAILQ_REMOVE(busyq, acmd,
  2206. acmd_links.tqe);
  2207. count++;
  2208. cmd->result = status << 16;
  2209. ahd_linux_queue_cmd_complete(ahd, cmd);
  2210. }
  2211. }
  2212. }
  2213. return (count);
  2214. }
  2215. static void
  2216. ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd)
  2217. {
  2218. u_long flags;
  2219. ahd_lock(ahd, &flags);
  2220. del_timer(&ahd->platform_data->completeq_timer);
  2221. ahd->platform_data->flags &= ~AHD_RUN_CMPLT_Q_TIMER;
  2222. ahd_linux_run_complete_queue(ahd);
  2223. ahd_unlock(ahd, &flags);
  2224. }
  2225. static void
  2226. ahd_linux_start_dv(struct ahd_softc *ahd)
  2227. {
  2228. /*
  2229. * Freeze the simq and signal ahd_linux_queue to not let any
  2230. * more commands through
  2231. */
  2232. if ((ahd->platform_data->flags & AHD_DV_ACTIVE) == 0) {
  2233. #ifdef AHD_DEBUG
  2234. if (ahd_debug & AHD_SHOW_DV)
  2235. printf("%s: Starting DV\n", ahd_name(ahd));
  2236. #endif
  2237. ahd->platform_data->flags |= AHD_DV_ACTIVE;
  2238. ahd_freeze_simq(ahd);
  2239. /* Wake up the DV kthread */
  2240. up(&ahd->platform_data->dv_sem);
  2241. }
  2242. }
/*
 * Kernel thread that performs Domain Validation.  It sleeps on dv_sem
 * until ahd_linux_start_dv() wakes it, waits for outstanding commands
 * to drain and for the SIMQ to be frozen solely for DV, runs DV on
 * every target, then releases the SIMQ.  Exits when AHD_DV_SHUTDOWN
 * is set, signalling eh_sem on the way out.
 */
static int
ahd_linux_dv_thread(void *data)
{
	struct ahd_softc *ahd;
	int target;
	u_long s;

	ahd = (struct ahd_softc *)data;

#ifdef AHD_DEBUG
	if (ahd_debug & AHD_SHOW_DV)
		printf("In DV Thread\n");
#endif

	/*
	 * Complete thread creation.
	 */
	lock_kernel();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
	/*
	 * Don't care about any signals.
	 */
	siginitsetinv(&current->blocked, 0);

	daemonize();
	sprintf(current->comm, "ahd_dv_%d", ahd->unit);
#else
	daemonize("ahd_dv_%d", ahd->unit);
	current->flags |= PF_NOFREEZE;
#endif
	unlock_kernel();

	while (1) {
		/*
		 * Use down_interruptible() rather than down() to
		 * avoid inclusion in the load average.
		 */
		down_interruptible(&ahd->platform_data->dv_sem);

		/* Check to see if we've been signaled to exit */
		ahd_lock(ahd, &s);
		if ((ahd->platform_data->flags & AHD_DV_SHUTDOWN) != 0) {
			ahd_unlock(ahd, &s);
			break;
		}
		ahd_unlock(ahd, &s);

#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_DV)
			printf("%s: Beginning Domain Validation\n",
			       ahd_name(ahd));
#endif

		/*
		 * Wait for any pending commands to drain before proceeding.
		 * The lock is dropped while we sleep; whoever empties the
		 * queue wakes us via dv_sem.
		 */
		ahd_lock(ahd, &s);
		while (LIST_FIRST(&ahd->pending_scbs) != NULL) {
			ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_EMPTY;
			ahd_unlock(ahd, &s);
			down_interruptible(&ahd->platform_data->dv_sem);
			ahd_lock(ahd, &s);
		}

		/*
		 * Wait for the SIMQ to be released so that DV is the
		 * only reason the queue is frozen.
		 */
		while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
			ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
			ahd_unlock(ahd, &s);
			down_interruptible(&ahd->platform_data->dv_sem);
			ahd_lock(ahd, &s);
		}
		ahd_unlock(ahd, &s);

		/* Run DV against each possible target in turn. */
		for (target = 0; target < AHD_NUM_TARGETS; target++)
			ahd_linux_dv_target(ahd, target);

		ahd_lock(ahd, &s);
		ahd->platform_data->flags &= ~AHD_DV_ACTIVE;
		ahd_unlock(ahd, &s);

		/*
		 * Release the SIMQ so that normal commands are
		 * allowed to continue on the bus.
		 */
		ahd_release_simq(ahd);
	}
	/* Tell ahd_linux_kill_dv_thread() that we are on our way out. */
	up(&ahd->platform_data->eh_sem);
	return (0);
}
/*
 * Request that the DV kthread exit and wait for its acknowledgement.
 * A dv_pid of zero means the thread was never started or has already
 * been killed, in which case this is a no-op.
 */
static void
ahd_linux_kill_dv_thread(struct ahd_softc *ahd)
{
	u_long s;

	ahd_lock(ahd, &s);
	if (ahd->platform_data->dv_pid != 0) {
		ahd->platform_data->flags |= AHD_DV_SHUTDOWN;
		ahd_unlock(ahd, &s);
		/* Wake the thread so it can notice the shutdown flag. */
		up(&ahd->platform_data->dv_sem);

		/*
		 * Use the eh_sem as an indicator that the
		 * dv thread is exiting.  Note that the dv
		 * thread must still return after performing
		 * the up on our semaphore before it has
		 * completely exited this module.  Unfortunately,
		 * there seems to be no easy way to wait for the
		 * exit of a thread for which you are not the
		 * parent (dv threads are parented by init).
		 * Cross your fingers...
		 */
		down(&ahd->platform_data->eh_sem);

		/*
		 * Mark the dv thread as already dead.  This
		 * avoids attempting to kill it a second time.
		 * This is necessary because we must kill the
		 * DV thread before calling ahd_free() in the
		 * module shutdown case to avoid bogus locking
		 * in the SCSI mid-layer, but we ahd_free() is
		 * called without killing the DV thread in the
		 * instance detach case, so ahd_platform_free()
		 * calls us again to verify that the DV thread
		 * is dead.
		 */
		ahd->platform_data->dv_pid = 0;
	} else {
		ahd_unlock(ahd, &s);
	}
}
/* Transfer lengths for the short and full DV INQUIRY commands. */
#define AHD_LINUX_DV_INQ_SHORT_LEN	36
#define AHD_LINUX_DV_INQ_LEN		256
/* Default timeout for a single DV command, in jiffies. */
#define AHD_LINUX_DV_TIMEOUT		(HZ / 4)

/* Record the caller's line number so DV state changes can be traced. */
#define AHD_SET_DV_STATE(ahd, targ, newstate) \
	ahd_set_dv_state(ahd, targ, newstate, __LINE__)

/*
 * Move a target's DV state machine to @newstate.  Re-entering the
 * current state counts as a retry (dv_state_retry++); a genuine
 * transition resets the retry counter.
 */
static __inline void
ahd_set_dv_state(struct ahd_softc *ahd, struct ahd_linux_target *targ,
		 ahd_dv_state newstate, u_int line)
{
	ahd_dv_state oldstate;

	oldstate = targ->dv_state;
#ifdef AHD_DEBUG
	if (ahd_debug & AHD_SHOW_DV)
		printf("%s:%d: Going from state %d to state %d\n",
		       ahd_name(ahd), line, oldstate, newstate);
#endif

	if (oldstate == newstate)
		targ->dv_state_retry++;
	else
		targ->dv_state_retry = 0;
	targ->dv_state = newstate;
}
  2383. static void
  2384. ahd_linux_dv_target(struct ahd_softc *ahd, u_int target_offset)
  2385. {
  2386. struct ahd_devinfo devinfo;
  2387. struct ahd_linux_target *targ;
  2388. struct scsi_cmnd *cmd;
  2389. struct scsi_device *scsi_dev;
  2390. struct scsi_sense_data *sense;
  2391. uint8_t *buffer;
  2392. u_long s;
  2393. u_int timeout;
  2394. int echo_size;
  2395. sense = NULL;
  2396. buffer = NULL;
  2397. echo_size = 0;
  2398. ahd_lock(ahd, &s);
  2399. targ = ahd->platform_data->targets[target_offset];
  2400. if (targ == NULL || (targ->flags & AHD_DV_REQUIRED) == 0) {
  2401. ahd_unlock(ahd, &s);
  2402. return;
  2403. }
  2404. ahd_compile_devinfo(&devinfo, ahd->our_id, targ->target, /*lun*/0,
  2405. targ->channel + 'A', ROLE_INITIATOR);
  2406. #ifdef AHD_DEBUG
  2407. if (ahd_debug & AHD_SHOW_DV) {
  2408. ahd_print_devinfo(ahd, &devinfo);
  2409. printf("Performing DV\n");
  2410. }
  2411. #endif
  2412. ahd_unlock(ahd, &s);
  2413. cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
  2414. scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
  2415. scsi_dev->host = ahd->platform_data->host;
  2416. scsi_dev->id = devinfo.target;
  2417. scsi_dev->lun = devinfo.lun;
  2418. scsi_dev->channel = devinfo.channel - 'A';
  2419. ahd->platform_data->dv_scsi_dev = scsi_dev;
  2420. AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_SHORT_ASYNC);
  2421. while (targ->dv_state != AHD_DV_STATE_EXIT) {
  2422. timeout = AHD_LINUX_DV_TIMEOUT;
  2423. switch (targ->dv_state) {
  2424. case AHD_DV_STATE_INQ_SHORT_ASYNC:
  2425. case AHD_DV_STATE_INQ_ASYNC:
  2426. case AHD_DV_STATE_INQ_ASYNC_VERIFY:
  2427. /*
  2428. * Set things to async narrow to reduce the
  2429. * chance that the INQ will fail.
  2430. */
  2431. ahd_lock(ahd, &s);
  2432. ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
  2433. AHD_TRANS_GOAL, /*paused*/FALSE);
  2434. ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
  2435. AHD_TRANS_GOAL, /*paused*/FALSE);
  2436. ahd_unlock(ahd, &s);
  2437. timeout = 10 * HZ;
  2438. targ->flags &= ~AHD_INQ_VALID;
  2439. /* FALLTHROUGH */
  2440. case AHD_DV_STATE_INQ_VERIFY:
  2441. {
  2442. u_int inq_len;
  2443. if (targ->dv_state == AHD_DV_STATE_INQ_SHORT_ASYNC)
  2444. inq_len = AHD_LINUX_DV_INQ_SHORT_LEN;
  2445. else
  2446. inq_len = targ->inq_data->additional_length + 5;
  2447. ahd_linux_dv_inq(ahd, cmd, &devinfo, targ, inq_len);
  2448. break;
  2449. }
  2450. case AHD_DV_STATE_TUR:
  2451. case AHD_DV_STATE_BUSY:
  2452. timeout = 5 * HZ;
  2453. ahd_linux_dv_tur(ahd, cmd, &devinfo);
  2454. break;
  2455. case AHD_DV_STATE_REBD:
  2456. ahd_linux_dv_rebd(ahd, cmd, &devinfo, targ);
  2457. break;
  2458. case AHD_DV_STATE_WEB:
  2459. ahd_linux_dv_web(ahd, cmd, &devinfo, targ);
  2460. break;
  2461. case AHD_DV_STATE_REB:
  2462. ahd_linux_dv_reb(ahd, cmd, &devinfo, targ);
  2463. break;
  2464. case AHD_DV_STATE_SU:
  2465. ahd_linux_dv_su(ahd, cmd, &devinfo, targ);
  2466. timeout = 50 * HZ;
  2467. break;
  2468. default:
  2469. ahd_print_devinfo(ahd, &devinfo);
  2470. printf("Unknown DV state %d\n", targ->dv_state);
  2471. goto out;
  2472. }
  2473. /* Queue the command and wait for it to complete */
  2474. /* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
  2475. init_timer(&cmd->eh_timeout);
  2476. #ifdef AHD_DEBUG
  2477. if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
  2478. /*
  2479. * All of the printfs during negotiation
  2480. * really slow down the negotiation.
  2481. * Add a bit of time just to be safe.
  2482. */
  2483. timeout += HZ;
  2484. #endif
  2485. scsi_add_timer(cmd, timeout, ahd_linux_dv_timeout);
  2486. /*
  2487. * In 2.5.X, it is assumed that all calls from the
  2488. * "midlayer" (which we are emulating) will have the
  2489. * ahd host lock held. For other kernels, the
  2490. * io_request_lock must be held.
  2491. */
  2492. #if AHD_SCSI_HAS_HOST_LOCK != 0
  2493. ahd_lock(ahd, &s);
  2494. #else
  2495. spin_lock_irqsave(&io_request_lock, s);
  2496. #endif
  2497. ahd_linux_queue(cmd, ahd_linux_dv_complete);
  2498. #if AHD_SCSI_HAS_HOST_LOCK != 0
  2499. ahd_unlock(ahd, &s);
  2500. #else
  2501. spin_unlock_irqrestore(&io_request_lock, s);
  2502. #endif
  2503. down_interruptible(&ahd->platform_data->dv_cmd_sem);
  2504. /*
  2505. * Wait for the SIMQ to be released so that DV is the
  2506. * only reason the queue is frozen.
  2507. */
  2508. ahd_lock(ahd, &s);
  2509. while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
  2510. ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
  2511. ahd_unlock(ahd, &s);
  2512. down_interruptible(&ahd->platform_data->dv_sem);
  2513. ahd_lock(ahd, &s);
  2514. }
  2515. ahd_unlock(ahd, &s);
  2516. ahd_linux_dv_transition(ahd, cmd, &devinfo, targ);
  2517. }
  2518. out:
  2519. if ((targ->flags & AHD_INQ_VALID) != 0
  2520. && ahd_linux_get_device(ahd, devinfo.channel - 'A',
  2521. devinfo.target, devinfo.lun,
  2522. /*alloc*/FALSE) == NULL) {
  2523. /*
  2524. * The DV state machine failed to configure this device.
  2525. * This is normal if DV is disabled. Since we have inquiry
  2526. * data, filter it and use the "optimistic" negotiation
  2527. * parameters found in the inquiry string.
  2528. */
  2529. ahd_linux_filter_inquiry(ahd, &devinfo);
  2530. if ((targ->flags & (AHD_BASIC_DV|AHD_ENHANCED_DV)) != 0) {
  2531. ahd_print_devinfo(ahd, &devinfo);
  2532. printf("DV failed to configure device. "
  2533. "Please file a bug report against "
  2534. "this driver.\n");
  2535. }
  2536. }
  2537. if (cmd != NULL)
  2538. free(cmd, M_DEVBUF);
  2539. if (ahd->platform_data->dv_scsi_dev != NULL) {
  2540. free(ahd->platform_data->dv_scsi_dev, M_DEVBUF);
  2541. ahd->platform_data->dv_scsi_dev = NULL;
  2542. }
  2543. ahd_lock(ahd, &s);
  2544. if (targ->dv_buffer != NULL) {
  2545. free(targ->dv_buffer, M_DEVBUF);
  2546. targ->dv_buffer = NULL;
  2547. }
  2548. if (targ->dv_buffer1 != NULL) {
  2549. free(targ->dv_buffer1, M_DEVBUF);
  2550. targ->dv_buffer1 = NULL;
  2551. }
  2552. targ->flags &= ~AHD_DV_REQUIRED;
  2553. if (targ->refcount == 0)
  2554. ahd_linux_free_target(ahd, targ);
  2555. ahd_unlock(ahd, &s);
  2556. }
  2557. static __inline int
  2558. ahd_linux_dv_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
  2559. {
  2560. u_long s;
  2561. int retval;
  2562. ahd_lock(ahd, &s);
  2563. retval = ahd_linux_fallback(ahd, devinfo);
  2564. ahd_unlock(ahd, &s);
  2565. return (retval);
  2566. }
/*
 * Advance the per-target Domain Validation state machine after a DV
 * command completes.  aic_error_action() condenses the command's
 * transaction and SCSI status into a suggested action (@status); the
 * outer switch dispatches on the current state and the inner switches
 * on that suggested action.  Most retry paths give up and jump to
 * AHD_DV_STATE_EXIT after ~10 attempts (60 for BUSY).
 */
static void
ahd_linux_dv_transition(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
			struct ahd_devinfo *devinfo,
			struct ahd_linux_target *targ)
{
	u_int32_t status;

	status = aic_error_action(cmd, targ->inq_data,
				  ahd_cmd_get_transaction_status(cmd),
				  ahd_cmd_get_scsi_status(cmd));

#ifdef AHD_DEBUG
	if (ahd_debug & AHD_SHOW_DV) {
		ahd_print_devinfo(ahd, devinfo);
		printf("Entering ahd_linux_dv_transition, state= %d, "
		       "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
		       status, cmd->result);
	}
#endif

	switch (targ->dv_state) {
	case AHD_DV_STATE_INQ_SHORT_ASYNC:
	case AHD_DV_STATE_INQ_ASYNC:
		switch (status & SS_MASK) {
		case SS_NOP:
		{
			/* Success: move to the next INQ state in sequence. */
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
			break;
		}
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		case SS_TUR:
		case SS_RETRY:
			/* Re-enter the same state; requeues don't count. */
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ)
				targ->dv_state_retry--;
			if ((status & SS_ERRMASK) == EBUSY)
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
			if (targ->dv_state_retry < 10)
				break;
			/* FALLTHROUGH */
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				ahd_print_devinfo(ahd, devinfo);
				printf("Failed DV inquiry, skipping\n");
			}
#endif
			break;
		}
		break;
	case AHD_DV_STATE_INQ_ASYNC_VERIFY:
		switch (status & SS_MASK) {
		case SS_NOP:
		{
			u_int xportflags;
			u_int spi3data;

			if (memcmp(targ->inq_data, targ->dv_buffer,
				   AHD_LINUX_DV_INQ_LEN) != 0) {
				/*
				 * Inquiry data must have changed.
				 * Try from the top again.
				 */
				AHD_SET_DV_STATE(ahd, targ,
						 AHD_DV_STATE_INQ_SHORT_ASYNC);
				break;
			}

			AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
			targ->flags |= AHD_INQ_VALID;
			if (ahd_linux_user_dv_setting(ahd) == 0)
				break;

			/*
			 * Use the INQUIRY transport flags to decide how
			 * much DV the device can support.
			 */
			xportflags = targ->inq_data->flags;
			if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
				break;

			spi3data = targ->inq_data->spi3data;
			switch (spi3data & SID_SPI_CLOCK_DT_ST) {
			default:
			case SID_SPI_CLOCK_ST:
				/* Assume only basic DV is supported. */
				targ->flags |= AHD_BASIC_DV;
				break;
			case SID_SPI_CLOCK_DT:
			case SID_SPI_CLOCK_DT_ST:
				targ->flags |= AHD_ENHANCED_DV;
				break;
			}
			break;
		}
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		case SS_TUR:
		case SS_RETRY:
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ)
				targ->dv_state_retry--;

			if ((status & SS_ERRMASK) == EBUSY)
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
			if (targ->dv_state_retry < 10)
				break;
			/* FALLTHROUGH */
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				ahd_print_devinfo(ahd, devinfo);
				printf("Failed DV inquiry, skipping\n");
			}
#endif
			break;
		}
		break;
	case AHD_DV_STATE_INQ_VERIFY:
		switch (status & SS_MASK) {
		case SS_NOP:
		{

			if (memcmp(targ->inq_data, targ->dv_buffer,
				   AHD_LINUX_DV_INQ_LEN) == 0) {
				/* Data matched at full speed: DV done. */
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
				break;
			}

#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				int i;

				ahd_print_devinfo(ahd, devinfo);
				printf("Inquiry buffer mismatch:");
				for (i = 0; i < AHD_LINUX_DV_INQ_LEN; i++) {
					if ((i & 0xF) == 0)
						printf("\n        ");
					printf("0x%x:0x0%x ",
					       ((uint8_t *)targ->inq_data)[i],
					       targ->dv_buffer[i]);
				}
				printf("\n");
			}
#endif

			if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
				break;
			}
			/*
			 * Do not count "falling back"
			 * against our retries.
			 */
			targ->dv_state_retry = 0;
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			break;
		}
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		case SS_TUR:
		case SS_RETRY:
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ) {
				targ->dv_state_retry--;
			} else if ((status & SSQ_FALLBACK) != 0) {
				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
					AHD_SET_DV_STATE(ahd, targ,
							 AHD_DV_STATE_EXIT);
					break;
				}
				/*
				 * Do not count "falling back"
				 * against our retries.
				 */
				targ->dv_state_retry = 0;
			} else if ((status & SS_ERRMASK) == EBUSY)
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
			if (targ->dv_state_retry < 10)
				break;
			/* FALLTHROUGH */
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				ahd_print_devinfo(ahd, devinfo);
				printf("Failed DV inquiry, skipping\n");
			}
#endif
			break;
		}
		break;

	case AHD_DV_STATE_TUR:
		switch (status & SS_MASK) {
		case SS_NOP:
			/*
			 * Unit ready: pick the next phase based on the DV
			 * level the INQUIRY data advertised.
			 */
			if ((targ->flags & AHD_BASIC_DV) != 0) {
				ahd_linux_filter_inquiry(ahd, devinfo);
				AHD_SET_DV_STATE(ahd, targ,
						 AHD_DV_STATE_INQ_VERIFY);
			} else if ((targ->flags & AHD_ENHANCED_DV) != 0) {
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REBD);
			} else {
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			}
			break;
		case SS_RETRY:
		case SS_TUR:
			if ((status & SS_ERRMASK) == EBUSY) {
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
				break;
			}
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ) {
				targ->dv_state_retry--;
			} else if ((status & SSQ_FALLBACK) != 0) {
				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
					AHD_SET_DV_STATE(ahd, targ,
							 AHD_DV_STATE_EXIT);
					break;
				}
				/*
				 * Do not count "falling back"
				 * against our retries.
				 */
				targ->dv_state_retry = 0;
			}
			if (targ->dv_state_retry >= 10) {
#ifdef AHD_DEBUG
				if (ahd_debug & AHD_SHOW_DV) {
					ahd_print_devinfo(ahd, devinfo);
					printf("DV TUR reties exhausted\n");
				}
#endif
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
				break;
			}
			if (status & SSQ_DELAY)
				ssleep(1);

			break;
		case SS_START:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_SU);
			break;
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			break;
		}
		break;

	case AHD_DV_STATE_REBD:
		switch (status & SS_MASK) {
		case SS_NOP:
		{
			uint32_t echo_size;

			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
			/* Echo buffer size is a 3-byte field, capped to 8KiB. */
			echo_size = scsi_3btoul(&targ->dv_buffer[1]);
			echo_size &= 0x1FFF;
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				ahd_print_devinfo(ahd, devinfo);
				printf("Echo buffer size= %d\n", echo_size);
			}
#endif
			if (echo_size == 0) {
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
				break;
			}

			/* Generate the buffer pattern */
			targ->dv_echo_size = echo_size;
			ahd_linux_generate_dv_pattern(targ);
			/*
			 * Setup initial negotiation values.
			 */
			ahd_linux_filter_inquiry(ahd, devinfo);
			break;
		}
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		case SS_RETRY:
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ)
				targ->dv_state_retry--;
			if (targ->dv_state_retry <= 10)
				break;
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				ahd_print_devinfo(ahd, devinfo);
				printf("DV REBD reties exhausted\n");
			}
#endif
			/* FALLTHROUGH */
		case SS_FATAL:
		default:
			/*
			 * Setup initial negotiation values
			 * and try level 1 DV.
			 */
			ahd_linux_filter_inquiry(ahd, devinfo);
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_VERIFY);
			targ->dv_echo_size = 0;
			break;
		}
		break;

	case AHD_DV_STATE_WEB:
		switch (status & SS_MASK) {
		case SS_NOP:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REB);
			break;
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		case SS_RETRY:
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ) {
				targ->dv_state_retry--;
			} else if ((status & SSQ_FALLBACK) != 0) {
				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
					AHD_SET_DV_STATE(ahd, targ,
							 AHD_DV_STATE_EXIT);
					break;
				}
				/*
				 * Do not count "falling back"
				 * against our retries.
				 */
				targ->dv_state_retry = 0;
			}
			if (targ->dv_state_retry <= 10)
				break;
			/* FALLTHROUGH */
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				ahd_print_devinfo(ahd, devinfo);
				printf("DV WEB reties exhausted\n");
			}
#endif
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			break;
		}
		break;

	case AHD_DV_STATE_REB:
		switch (status & SS_MASK) {
		case SS_NOP:
			/* Compare what we wrote with what we read back. */
			if (memcmp(targ->dv_buffer, targ->dv_buffer1,
				   targ->dv_echo_size) != 0) {
				if (ahd_linux_dv_fallback(ahd, devinfo) != 0)
					AHD_SET_DV_STATE(ahd, targ,
							 AHD_DV_STATE_EXIT);
				else
					AHD_SET_DV_STATE(ahd, targ,
							 AHD_DV_STATE_WEB);
				break;
			}

			if (targ->dv_buffer != NULL) {
				free(targ->dv_buffer, M_DEVBUF);
				targ->dv_buffer = NULL;
			}
			if (targ->dv_buffer1 != NULL) {
				free(targ->dv_buffer1, M_DEVBUF);
				targ->dv_buffer1 = NULL;
			}
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			break;
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		case SS_RETRY:
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ) {
				targ->dv_state_retry--;
			} else if ((status & SSQ_FALLBACK) != 0) {
				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
					AHD_SET_DV_STATE(ahd, targ,
							 AHD_DV_STATE_EXIT);
					break;
				}
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
			}
			if (targ->dv_state_retry <= 10) {
				if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
					msleep(ahd->our_id*1000/10);
				break;
			}
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_DV) {
				ahd_print_devinfo(ahd, devinfo);
				printf("DV REB reties exhausted\n");
			}
#endif
			/* FALLTHROUGH */
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			break;
		}
		break;

	case AHD_DV_STATE_SU:
		switch (status & SS_MASK) {
		case SS_NOP:
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			break;
		}
		break;

	case AHD_DV_STATE_BUSY:
		switch (status & SS_MASK) {
		case SS_NOP:
		case SS_INQ_REFRESH:
			AHD_SET_DV_STATE(ahd, targ,
					 AHD_DV_STATE_INQ_SHORT_ASYNC);
			break;
		case SS_TUR:
		case SS_RETRY:
			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
			if (ahd_cmd_get_transaction_status(cmd)
			 == CAM_REQUEUE_REQ) {
				targ->dv_state_retry--;
			} else if (targ->dv_state_retry < 60) {
				if ((status & SSQ_DELAY) != 0)
					ssleep(1);
			} else {
#ifdef AHD_DEBUG
				if (ahd_debug & AHD_SHOW_DV) {
					ahd_print_devinfo(ahd, devinfo);
					printf("DV BUSY reties exhausted\n");
				}
#endif
				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			}
			break;
		default:
			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
			break;
		}
		break;

	default:
		printf("%s: Invalid DV completion state %d\n", ahd_name(ahd),
		       targ->dv_state);
		AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
		break;
	}
}
  3019. static void
  3020. ahd_linux_dv_fill_cmd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
  3021. struct ahd_devinfo *devinfo)
  3022. {
  3023. memset(cmd, 0, sizeof(struct scsi_cmnd));
  3024. cmd->device = ahd->platform_data->dv_scsi_dev;
  3025. cmd->scsi_done = ahd_linux_dv_complete;
  3026. }
  3027. /*
  3028. * Synthesize an inquiry command. On the return trip, it'll be
  3029. * sniffed and the device transfer settings set for us.
  3030. */
  3031. static void
  3032. ahd_linux_dv_inq(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
  3033. struct ahd_devinfo *devinfo, struct ahd_linux_target *targ,
  3034. u_int request_length)
  3035. {
  3036. #ifdef AHD_DEBUG
  3037. if (ahd_debug & AHD_SHOW_DV) {
  3038. ahd_print_devinfo(ahd, devinfo);
  3039. printf("Sending INQ\n");
  3040. }
  3041. #endif
  3042. if (targ->inq_data == NULL)
  3043. targ->inq_data = malloc(AHD_LINUX_DV_INQ_LEN,
  3044. M_DEVBUF, M_WAITOK);
  3045. if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC) {
  3046. if (targ->dv_buffer != NULL)
  3047. free(targ->dv_buffer, M_DEVBUF);
  3048. targ->dv_buffer = malloc(AHD_LINUX_DV_INQ_LEN,
  3049. M_DEVBUF, M_WAITOK);
  3050. }
  3051. ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
  3052. cmd->sc_data_direction = DMA_FROM_DEVICE;
  3053. cmd->cmd_len = 6;
  3054. cmd->cmnd[0] = INQUIRY;
  3055. cmd->cmnd[4] = request_length;
  3056. cmd->request_bufflen = request_length;
  3057. if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC)
  3058. cmd->request_buffer = targ->dv_buffer;
  3059. else
  3060. cmd->request_buffer = targ->inq_data;
  3061. memset(cmd->request_buffer, 0, AHD_LINUX_DV_INQ_LEN);
  3062. }
  3063. static void
  3064. ahd_linux_dv_tur(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
  3065. struct ahd_devinfo *devinfo)
  3066. {
  3067. #ifdef AHD_DEBUG
  3068. if (ahd_debug & AHD_SHOW_DV) {
  3069. ahd_print_devinfo(ahd, devinfo);
  3070. printf("Sending TUR\n");
  3071. }
  3072. #endif
  3073. /* Do a TUR to clear out any non-fatal transitional state */
  3074. ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
  3075. cmd->sc_data_direction = DMA_NONE;
  3076. cmd->cmd_len = 6;
  3077. cmd->cmnd[0] = TEST_UNIT_READY;
  3078. }
  3079. #define AHD_REBD_LEN 4
  3080. static void
  3081. ahd_linux_dv_rebd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
  3082. struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
  3083. {
  3084. #ifdef AHD_DEBUG
  3085. if (ahd_debug & AHD_SHOW_DV) {
  3086. ahd_print_devinfo(ahd, devinfo);
  3087. printf("Sending REBD\n");
  3088. }
  3089. #endif
  3090. if (targ->dv_buffer != NULL)
  3091. free(targ->dv_buffer, M_DEVBUF);
  3092. targ->dv_buffer = malloc(AHD_REBD_LEN, M_DEVBUF, M_WAITOK);
  3093. ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
  3094. cmd->sc_data_direction = DMA_FROM_DEVICE;
  3095. cmd->cmd_len = 10;
  3096. cmd->cmnd[0] = READ_BUFFER;
  3097. cmd->cmnd[1] = 0x0b;
  3098. scsi_ulto3b(AHD_REBD_LEN, &cmd->cmnd[6]);
  3099. cmd->request_bufflen = AHD_REBD_LEN;
  3100. cmd->underflow = cmd->request_bufflen;
  3101. cmd->request_buffer = targ->dv_buffer;
  3102. }
  3103. static void
  3104. ahd_linux_dv_web(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
  3105. struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
  3106. {
  3107. #ifdef AHD_DEBUG
  3108. if (ahd_debug & AHD_SHOW_DV) {
  3109. ahd_print_devinfo(ahd, devinfo);
  3110. printf("Sending WEB\n");
  3111. }
  3112. #endif
  3113. ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
  3114. cmd->sc_data_direction = DMA_TO_DEVICE;
  3115. cmd->cmd_len = 10;
  3116. cmd->cmnd[0] = WRITE_BUFFER;
  3117. cmd->cmnd[1] = 0x0a;
  3118. scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
  3119. cmd->request_bufflen = targ->dv_echo_size;
  3120. cmd->underflow = cmd->request_bufflen;
  3121. cmd->request_buffer = targ->dv_buffer;
  3122. }
  3123. static void
  3124. ahd_linux_dv_reb(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
  3125. struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
  3126. {
  3127. #ifdef AHD_DEBUG
  3128. if (ahd_debug & AHD_SHOW_DV) {
  3129. ahd_print_devinfo(ahd, devinfo);
  3130. printf("Sending REB\n");
  3131. }
  3132. #endif
  3133. ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
  3134. cmd->sc_data_direction = DMA_FROM_DEVICE;
  3135. cmd->cmd_len = 10;
  3136. cmd->cmnd[0] = READ_BUFFER;
  3137. cmd->cmnd[1] = 0x0a;
  3138. scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
  3139. cmd->request_bufflen = targ->dv_echo_size;
  3140. cmd->underflow = cmd->request_bufflen;
  3141. cmd->request_buffer = targ->dv_buffer1;
  3142. }
  3143. static void
  3144. ahd_linux_dv_su(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
  3145. struct ahd_devinfo *devinfo,
  3146. struct ahd_linux_target *targ)
  3147. {
  3148. u_int le;
  3149. le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
  3150. #ifdef AHD_DEBUG
  3151. if (ahd_debug & AHD_SHOW_DV) {
  3152. ahd_print_devinfo(ahd, devinfo);
  3153. printf("Sending SU\n");
  3154. }
  3155. #endif
  3156. ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
  3157. cmd->sc_data_direction = DMA_NONE;
  3158. cmd->cmd_len = 6;
  3159. cmd->cmnd[0] = START_STOP_UNIT;
  3160. cmd->cmnd[4] = le | SSS_START;
  3161. }
  3162. static int
  3163. ahd_linux_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
  3164. {
  3165. struct ahd_linux_target *targ;
  3166. struct ahd_initiator_tinfo *tinfo;
  3167. struct ahd_transinfo *goal;
  3168. struct ahd_tmode_tstate *tstate;
  3169. u_int width;
  3170. u_int period;
  3171. u_int offset;
  3172. u_int ppr_options;
  3173. u_int cur_speed;
  3174. u_int wide_speed;
  3175. u_int narrow_speed;
  3176. u_int fallback_speed;
  3177. #ifdef AHD_DEBUG
  3178. if (ahd_debug & AHD_SHOW_DV) {
  3179. ahd_print_devinfo(ahd, devinfo);
  3180. printf("Trying to fallback\n");
  3181. }
  3182. #endif
  3183. targ = ahd->platform_data->targets[devinfo->target_offset];
  3184. tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
  3185. devinfo->our_scsiid,
  3186. devinfo->target, &tstate);
  3187. goal = &tinfo->goal;
  3188. width = goal->width;
  3189. period = goal->period;
  3190. offset = goal->offset;
  3191. ppr_options = goal->ppr_options;
  3192. if (offset == 0)
  3193. period = AHD_ASYNC_XFER_PERIOD;
  3194. if (targ->dv_next_narrow_period == 0)
  3195. targ->dv_next_narrow_period = MAX(period, AHD_SYNCRATE_ULTRA2);
  3196. if (targ->dv_next_wide_period == 0)
  3197. targ->dv_next_wide_period = period;
  3198. if (targ->dv_max_width == 0)
  3199. targ->dv_max_width = width;
  3200. if (targ->dv_max_ppr_options == 0)
  3201. targ->dv_max_ppr_options = ppr_options;
  3202. if (targ->dv_last_ppr_options == 0)
  3203. targ->dv_last_ppr_options = ppr_options;
  3204. cur_speed = aic_calc_speed(width, period, offset, AHD_SYNCRATE_MIN);
  3205. wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
  3206. targ->dv_next_wide_period,
  3207. MAX_OFFSET, AHD_SYNCRATE_MIN);
  3208. narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
  3209. targ->dv_next_narrow_period,
  3210. MAX_OFFSET, AHD_SYNCRATE_MIN);
  3211. fallback_speed = aic_calc_speed(width, period+1, offset,
  3212. AHD_SYNCRATE_MIN);
  3213. #ifdef AHD_DEBUG
  3214. if (ahd_debug & AHD_SHOW_DV) {
  3215. printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
  3216. "fallback_speed= %d\n", cur_speed, wide_speed,
  3217. narrow_speed, fallback_speed);
  3218. }
  3219. #endif
  3220. if (cur_speed > 160000) {
  3221. /*
  3222. * Paced/DT/IU_REQ only transfer speeds. All we
  3223. * can do is fallback in terms of syncrate.
  3224. */
  3225. period++;
  3226. } else if (cur_speed > 80000) {
  3227. if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
  3228. /*
  3229. * Try without IU_REQ as it may be confusing
  3230. * an expander.
  3231. */
  3232. ppr_options &= ~MSG_EXT_PPR_IU_REQ;
  3233. } else {
  3234. /*
  3235. * Paced/DT only transfer speeds. All we
  3236. * can do is fallback in terms of syncrate.
  3237. */
  3238. period++;
  3239. ppr_options = targ->dv_max_ppr_options;
  3240. }
  3241. } else if (cur_speed > 3300) {
  3242. /*
  3243. * In this range we the following
  3244. * options ordered from highest to
  3245. * lowest desireability:
  3246. *
  3247. * o Wide/DT
  3248. * o Wide/non-DT
  3249. * o Narrow at a potentally higher sync rate.
  3250. *
  3251. * All modes are tested with and without IU_REQ
  3252. * set since using IUs may confuse an expander.
  3253. */
  3254. if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
  3255. ppr_options &= ~MSG_EXT_PPR_IU_REQ;
  3256. } else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
  3257. /*
  3258. * Try going non-DT.
  3259. */
  3260. ppr_options = targ->dv_max_ppr_options;
  3261. ppr_options &= ~MSG_EXT_PPR_DT_REQ;
  3262. } else if (targ->dv_last_ppr_options != 0) {
  3263. /*
  3264. * Try without QAS or any other PPR options.
  3265. * We may need a non-PPR message to work with
  3266. * an expander. We look at the "last PPR options"
  3267. * so we will perform this fallback even if the
  3268. * target responded to our PPR negotiation with
  3269. * no option bits set.
  3270. */
  3271. ppr_options = 0;
  3272. } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
  3273. /*
  3274. * If the next narrow speed is greater than
  3275. * the next wide speed, fallback to narrow.
  3276. * Otherwise fallback to the next DT/Wide setting.
  3277. * The narrow async speed will always be smaller
  3278. * than the wide async speed, so handle this case
  3279. * specifically.
  3280. */
  3281. ppr_options = targ->dv_max_ppr_options;
  3282. if (narrow_speed > fallback_speed
  3283. || period >= AHD_ASYNC_XFER_PERIOD) {
  3284. targ->dv_next_wide_period = period+1;
  3285. width = MSG_EXT_WDTR_BUS_8_BIT;
  3286. period = targ->dv_next_narrow_period;
  3287. } else {
  3288. period++;
  3289. }
  3290. } else if ((ahd->features & AHD_WIDE) != 0
  3291. && targ->dv_max_width != 0
  3292. && wide_speed >= fallback_speed
  3293. && (targ->dv_next_wide_period <= AHD_ASYNC_XFER_PERIOD
  3294. || period >= AHD_ASYNC_XFER_PERIOD)) {
  3295. /*
  3296. * We are narrow. Try falling back
  3297. * to the next wide speed with
  3298. * all supported ppr options set.
  3299. */
  3300. targ->dv_next_narrow_period = period+1;
  3301. width = MSG_EXT_WDTR_BUS_16_BIT;
  3302. period = targ->dv_next_wide_period;
  3303. ppr_options = targ->dv_max_ppr_options;
  3304. } else {
  3305. /* Only narrow fallback is allowed. */
  3306. period++;
  3307. ppr_options = targ->dv_max_ppr_options;
  3308. }
  3309. } else {
  3310. return (-1);
  3311. }
  3312. offset = MAX_OFFSET;
  3313. ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_PACED);
  3314. ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, FALSE);
  3315. if (period == 0) {
  3316. period = 0;
  3317. offset = 0;
  3318. ppr_options = 0;
  3319. if (width == MSG_EXT_WDTR_BUS_8_BIT)
  3320. targ->dv_next_narrow_period = AHD_ASYNC_XFER_PERIOD;
  3321. else
  3322. targ->dv_next_wide_period = AHD_ASYNC_XFER_PERIOD;
  3323. }
  3324. ahd_set_syncrate(ahd, devinfo, period, offset,
  3325. ppr_options, AHD_TRANS_GOAL, FALSE);
  3326. targ->dv_last_ppr_options = ppr_options;
  3327. return (0);
  3328. }
/*
 * Watchdog for an outstanding DV command.  If the command has not
 * completed, mark its SCB with a failing status and reset the bus,
 * then freeze the SIMQ briefly so slow devices can settle.
 */
static void
ahd_linux_dv_timeout(struct scsi_cmnd *cmd)
{
	struct ahd_softc *ahd;
	struct scb *scb;
	u_long flags;

	ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
	ahd_lock(ahd, &flags);

#ifdef AHD_DEBUG
	if (ahd_debug & AHD_SHOW_DV) {
		printf("%s: Timeout while doing DV command %x.\n",
		       ahd_name(ahd), cmd->cmnd[0]);
		ahd_dump_card_state(ahd);
	}
#endif

	/*
	 * Guard against "done race". No action is
	 * required if we just completed.
	 */
	if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
		ahd_unlock(ahd, &flags);
		return;
	}

	/*
	 * Command has not completed. Mark this
	 * SCB as having failing status prior to
	 * resetting the bus, so we get the correct
	 * error code.
	 */
	if ((scb->flags & SCB_SENSE) != 0)
		ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
	else
		ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
	ahd_reset_channel(ahd, cmd->device->channel + 'A', /*initiate*/TRUE);

	/*
	 * Add a minimal bus settle delay for devices that are slow to
	 * respond after bus resets.
	 */
	/* SIMQ is released by the one-shot timer (HZ/2 ~ 500ms) below. */
	ahd_freeze_simq(ahd);
	init_timer(&ahd->platform_data->reset_timer);
	ahd->platform_data->reset_timer.data = (u_long)ahd;
	ahd->platform_data->reset_timer.expires = jiffies + HZ / 2;
	ahd->platform_data->reset_timer.function =
	    (ahd_linux_callback_t *)ahd_release_simq;
	add_timer(&ahd->platform_data->reset_timer);
	if (ahd_linux_next_device_to_run(ahd) != NULL)
		ahd_schedule_runq(ahd);
	ahd_linux_run_complete_queue(ahd);
	ahd_unlock(ahd, &flags);
}
  3379. static void
  3380. ahd_linux_dv_complete(struct scsi_cmnd *cmd)
  3381. {
  3382. struct ahd_softc *ahd;
  3383. ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
  3384. /* Delete the DV timer before it goes off! */
  3385. scsi_delete_timer(cmd);
  3386. #ifdef AHD_DEBUG
  3387. if (ahd_debug & AHD_SHOW_DV)
  3388. printf("%s:%c:%d: Command completed, status= 0x%x\n",
  3389. ahd_name(ahd), cmd->device->channel, cmd->device->id,
  3390. cmd->result);
  3391. #endif
  3392. /* Wake up the state machine */
  3393. up(&ahd->platform_data->dv_cmd_sem);
  3394. }
  3395. static void
  3396. ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ)
  3397. {
  3398. uint16_t b;
  3399. u_int i;
  3400. u_int j;
  3401. if (targ->dv_buffer != NULL)
  3402. free(targ->dv_buffer, M_DEVBUF);
  3403. targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
  3404. if (targ->dv_buffer1 != NULL)
  3405. free(targ->dv_buffer1, M_DEVBUF);
  3406. targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
  3407. i = 0;
  3408. b = 0x0001;
  3409. for (j = 0 ; i < targ->dv_echo_size; j++) {
  3410. if (j < 32) {
  3411. /*
  3412. * 32bytes of sequential numbers.
  3413. */
  3414. targ->dv_buffer[i++] = j & 0xff;
  3415. } else if (j < 48) {
  3416. /*
  3417. * 32bytes of repeating 0x0000, 0xffff.
  3418. */
  3419. targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
  3420. } else if (j < 64) {
  3421. /*
  3422. * 32bytes of repeating 0x5555, 0xaaaa.
  3423. */
  3424. targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
  3425. } else {
  3426. /*
  3427. * Remaining buffer is filled with a repeating
  3428. * patter of:
  3429. *
  3430. * 0xffff
  3431. * ~0x0001 << shifted once in each loop.
  3432. */
  3433. if (j & 0x02) {
  3434. if (j & 0x01) {
  3435. targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
  3436. b <<= 1;
  3437. if (b == 0x0000)
  3438. b = 0x0001;
  3439. } else {
  3440. targ->dv_buffer[i++] = (~b & 0xff);
  3441. }
  3442. } else {
  3443. targ->dv_buffer[i++] = 0xff;
  3444. }
  3445. }
  3446. }
  3447. }
  3448. static u_int
  3449. ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
  3450. {
  3451. static int warned_user;
  3452. u_int tags;
  3453. tags = 0;
  3454. if ((ahd->user_discenable & devinfo->target_mask) != 0) {
  3455. if (ahd->unit >= NUM_ELEMENTS(aic79xx_tag_info)) {
  3456. if (warned_user == 0) {
  3457. printf(KERN_WARNING
  3458. "aic79xx: WARNING: Insufficient tag_info instances\n"
  3459. "aic79xx: for installed controllers. Using defaults\n"
  3460. "aic79xx: Please update the aic79xx_tag_info array in\n"
  3461. "aic79xx: the aic79xx_osm.c source file.\n");
  3462. warned_user++;
  3463. }
  3464. tags = AHD_MAX_QUEUE;
  3465. } else {
  3466. adapter_tag_info_t *tag_info;
  3467. tag_info = &aic79xx_tag_info[ahd->unit];
  3468. tags = tag_info->tag_commands[devinfo->target_offset];
  3469. if (tags > AHD_MAX_QUEUE)
  3470. tags = AHD_MAX_QUEUE;
  3471. }
  3472. }
  3473. return (tags);
  3474. }
  3475. static u_int
  3476. ahd_linux_user_dv_setting(struct ahd_softc *ahd)
  3477. {
  3478. static int warned_user;
  3479. int dv;
  3480. if (ahd->unit >= NUM_ELEMENTS(aic79xx_dv_settings)) {
  3481. if (warned_user == 0) {
  3482. printf(KERN_WARNING
  3483. "aic79xx: WARNING: Insufficient dv settings instances\n"
  3484. "aic79xx: for installed controllers. Using defaults\n"
  3485. "aic79xx: Please update the aic79xx_dv_settings array in"
  3486. "aic79xx: the aic79xx_osm.c source file.\n");
  3487. warned_user++;
  3488. }
  3489. dv = -1;
  3490. } else {
  3491. dv = aic79xx_dv_settings[ahd->unit];
  3492. }
  3493. if (dv < 0) {
  3494. /*
  3495. * Apply the default.
  3496. */
  3497. dv = 1;
  3498. if (ahd->seep_config != 0)
  3499. dv = (ahd->seep_config->bios_control & CFENABLEDV);
  3500. }
  3501. return (dv);
  3502. }
  3503. static void
  3504. ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd)
  3505. {
  3506. static int warned_user;
  3507. u_int rd_strm_mask;
  3508. u_int target_id;
  3509. /*
  3510. * If we have specific read streaming info for this controller,
  3511. * apply it. Otherwise use the defaults.
  3512. */
  3513. if (ahd->unit >= NUM_ELEMENTS(aic79xx_rd_strm_info)) {
  3514. if (warned_user == 0) {
  3515. printf(KERN_WARNING
  3516. "aic79xx: WARNING: Insufficient rd_strm instances\n"
  3517. "aic79xx: for installed controllers. Using defaults\n"
  3518. "aic79xx: Please update the aic79xx_rd_strm_info array\n"
  3519. "aic79xx: in the aic79xx_osm.c source file.\n");
  3520. warned_user++;
  3521. }
  3522. rd_strm_mask = AIC79XX_CONFIGED_RD_STRM;
  3523. } else {
  3524. rd_strm_mask = aic79xx_rd_strm_info[ahd->unit];
  3525. }
  3526. for (target_id = 0; target_id < 16; target_id++) {
  3527. struct ahd_devinfo devinfo;
  3528. struct ahd_initiator_tinfo *tinfo;
  3529. struct ahd_tmode_tstate *tstate;
  3530. tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
  3531. target_id, &tstate);
  3532. ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
  3533. CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
  3534. tinfo->user.ppr_options &= ~MSG_EXT_PPR_RD_STRM;
  3535. if ((rd_strm_mask & devinfo.target_mask) != 0)
  3536. tinfo->user.ppr_options |= MSG_EXT_PPR_RD_STRM;
  3537. }
  3538. }
  3539. /*
  3540. * Determines the queue depth for a given device.
  3541. */
  3542. static void
  3543. ahd_linux_device_queue_depth(struct ahd_softc *ahd,
  3544. struct ahd_linux_device *dev)
  3545. {
  3546. struct ahd_devinfo devinfo;
  3547. u_int tags;
  3548. ahd_compile_devinfo(&devinfo,
  3549. ahd->our_id,
  3550. dev->target->target, dev->lun,
  3551. dev->target->channel == 0 ? 'A' : 'B',
  3552. ROLE_INITIATOR);
  3553. tags = ahd_linux_user_tagdepth(ahd, &devinfo);
  3554. if (tags != 0
  3555. && dev->scsi_device != NULL
  3556. && dev->scsi_device->tagged_supported != 0) {
  3557. ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED);
  3558. ahd_print_devinfo(ahd, &devinfo);
  3559. printf("Tagged Queuing enabled. Depth %d\n", tags);
  3560. } else {
  3561. ahd_set_tags(ahd, &devinfo, AHD_QUEUE_NONE);
  3562. }
  3563. }
/*
 * Drain a device's busy queue: for each queued command, obtain an SCB,
 * fill in the hardware SCB (scsiid, lun, tag messages, negotiation
 * flags), map the data buffer(s) for DMA, and hand the SCB to the
 * controller.  Stops early (requeueing the device on the run queue)
 * when the controller queue is frozen or SCBs run out.
 */
static void
ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
{
	struct ahd_cmd *acmd;
	struct scsi_cmnd *cmd;
	struct scb *scb;
	struct hardware_scb *hscb;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int col_idx;
	uint16_t mask;

	if ((dev->flags & AHD_DEV_ON_RUN_LIST) != 0)
		panic("running device on run list");

	while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
	    && dev->openings > 0 && dev->qfrozen == 0) {

		/*
		 * Schedule us to run later. The only reason we are not
		 * running is because the whole controller Q is frozen.
		 */
		if (ahd->platform_data->qfrozen != 0
		 && AHD_DV_SIMQ_FROZEN(ahd) == 0) {
			TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHD_DEV_ON_RUN_LIST;
			return;
		}

		cmd = &acmd_scsi_cmd(acmd);

		/*
		 * Get an scb to use.
		 */
		/*
		 * Packetized or untagged devices never share an SCB
		 * collision column; others collide on target+lun.
		 */
		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    cmd->device->id, &tstate);
		if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
			col_idx = AHD_NEVER_COL_IDX;
		} else {
			col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
						    cmd->device->lun);
		}
		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			/* Out of SCBs; retry this device when one frees. */
			TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHD_DEV_ON_RUN_LIST;
			ahd->flags |= AHD_RESOURCE_SHORTAGE;
			return;
		}
		TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
		scb->io_ctx = cmd;
		scb->platform_data->dev = dev;
		hscb = scb->hscb;
		cmd->host_scribble = (char *)scb;

		/*
		 * Fill out basics of the HSCB.
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, cmd);
		hscb->lun = cmd->device->lun;
		scb->hscb->task_management = 0;
		mask = SCB_GET_TARGET_MASK(ahd, scb);

		if ((ahd->user_discenable & mask) != 0)
			hscb->control |= DISCENB;
		if (AHD_DV_CMD(cmd) != 0)
			scb->flags |= SCB_SILENT;
		if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
			scb->flags |= SCB_PACKETIZED;
		if ((tstate->auto_negotiate & mask) != 0) {
			scb->flags |= SCB_AUTO_NEGOTIATE;
			scb->hscb->control |= MK_MESSAGE;
		}
		if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			int msg_bytes;
			uint8_t tag_msgs[2];

			/* Let the mid-layer pick the tag message (2.5+). */
			msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
			if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
				hscb->control |= tag_msgs[0];
				if (tag_msgs[0] == MSG_ORDERED_TASK)
					dev->commands_since_idle_or_otag = 0;
			} else
#endif
			/* Periodically issue an ordered tag to bound reordering. */
			if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
			 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
				hscb->control |= MSG_ORDERED_TASK;
				dev->commands_since_idle_or_otag = 0;
			} else {
				hscb->control |= MSG_SIMPLE_TASK;
			}
		}

		hscb->cdb_len = cmd->cmd_len;
		memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);

		scb->sg_count = 0;
		ahd_set_residual(scb, 0);
		ahd_set_sense_residual(scb, 0);
		if (cmd->use_sg != 0) {
			/* Scatter/gather request: map the whole list. */
			void *sg;
			struct scatterlist *cur_seg;
			u_int nseg;
			int dir;

			cur_seg = (struct scatterlist *)cmd->request_buffer;
			dir = cmd->sc_data_direction;
			nseg = pci_map_sg(ahd->dev_softc, cur_seg,
					  cmd->use_sg, dir);
			scb->platform_data->xfer_len = 0;
			for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
				dma_addr_t addr;
				bus_size_t len;

				addr = sg_dma_address(cur_seg);
				len = sg_dma_len(cur_seg);
				scb->platform_data->xfer_len += len;
				sg = ahd_sg_setup(ahd, scb, sg, addr, len,
						  /*last*/nseg == 1);
			}
		} else if (cmd->request_bufflen != 0) {
			/* Single linear buffer: one DMA mapping, one SG entry. */
			void *sg;
			dma_addr_t addr;
			int dir;

			sg = scb->sg_list;
			dir = cmd->sc_data_direction;
			addr = pci_map_single(ahd->dev_softc,
					      cmd->request_buffer,
					      cmd->request_bufflen, dir);
			scb->platform_data->xfer_len = cmd->request_bufflen;
			scb->platform_data->buf_busaddr = addr;
			sg = ahd_sg_setup(ahd, scb, sg, addr,
					  cmd->request_bufflen, /*last*/TRUE);
		}

		LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
		dev->openings--;
		dev->active++;
		dev->commands_issued++;

		/* Update the error counting bucket and dump if needed */
		if (dev->target->cmds_since_error) {
			dev->target->cmds_since_error++;
			if (dev->target->cmds_since_error >
			    AHD_LINUX_ERR_THRESH)
				dev->target->cmds_since_error = 0;
		}

		if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
			dev->commands_since_idle_or_otag++;
		scb->flags |= SCB_ACTIVE;
		ahd_queue_scb(ahd, scb);
	}
}
  3707. /*
  3708. * SCSI controller interrupt handler.
  3709. */
  3710. irqreturn_t
  3711. ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
  3712. {
  3713. struct ahd_softc *ahd;
  3714. u_long flags;
  3715. int ours;
  3716. ahd = (struct ahd_softc *) dev_id;
  3717. ahd_lock(ahd, &flags);
  3718. ours = ahd_intr(ahd);
  3719. if (ahd_linux_next_device_to_run(ahd) != NULL)
  3720. ahd_schedule_runq(ahd);
  3721. ahd_linux_run_complete_queue(ahd);
  3722. ahd_unlock(ahd, &flags);
  3723. return IRQ_RETVAL(ours);
  3724. }
  3725. void
  3726. ahd_platform_flushwork(struct ahd_softc *ahd)
  3727. {
  3728. while (ahd_linux_run_complete_queue(ahd) != NULL)
  3729. ;
  3730. }
  3731. static struct ahd_linux_target*
  3732. ahd_linux_alloc_target(struct ahd_softc *ahd, u_int channel, u_int target)
  3733. {
  3734. struct ahd_linux_target *targ;
  3735. targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
  3736. if (targ == NULL)
  3737. return (NULL);
  3738. memset(targ, 0, sizeof(*targ));
  3739. targ->channel = channel;
  3740. targ->target = target;
  3741. targ->ahd = ahd;
  3742. targ->flags = AHD_DV_REQUIRED;
  3743. ahd->platform_data->targets[target] = targ;
  3744. return (targ);
  3745. }
/*
 * Tear down a target: force future negotiation back to async/narrow,
 * unhook the target from the controller's table, then release the
 * inquiry/DV buffers and the target structure itself.
 */
static void
ahd_linux_free_target(struct ahd_softc *ahd, struct ahd_linux_target *targ)
{
	struct ahd_devinfo devinfo;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int our_id;
	u_int target_offset;
	char channel;

	/*
	 * Force a negotiation to async/narrow on any
	 * future command to this device unless a bus
	 * reset occurs between now and that command.
	 */
	channel = 'A' + targ->channel;
	our_id = ahd->our_id;
	target_offset = targ->target;
	tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
				    targ->target, &tstate);
	ahd_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
			    channel, ROLE_INITIATOR);
	ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
			 AHD_TRANS_GOAL, /*paused*/FALSE);
	ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHD_TRANS_GOAL, /*paused*/FALSE);
	ahd_update_neg_request(ahd, &devinfo, tstate, tinfo, AHD_NEG_ALWAYS);
	/* Unpublish the target before freeing so lookups cannot find it. */
	ahd->platform_data->targets[target_offset] = NULL;
	if (targ->inq_data != NULL)
		free(targ->inq_data, M_DEVBUF);
	if (targ->dv_buffer != NULL)
		free(targ->dv_buffer, M_DEVBUF);
	if (targ->dv_buffer1 != NULL)
		free(targ->dv_buffer1, M_DEVBUF);
	free(targ, M_DEVBUF);
}
  3781. static struct ahd_linux_device*
  3782. ahd_linux_alloc_device(struct ahd_softc *ahd,
  3783. struct ahd_linux_target *targ, u_int lun)
  3784. {
  3785. struct ahd_linux_device *dev;
  3786. dev = malloc(sizeof(*dev), M_DEVBUG, M_NOWAIT);
  3787. if (dev == NULL)
  3788. return (NULL);
  3789. memset(dev, 0, sizeof(*dev));
  3790. init_timer(&dev->timer);
  3791. TAILQ_INIT(&dev->busyq);
  3792. dev->flags = AHD_DEV_UNCONFIGURED;
  3793. dev->lun = lun;
  3794. dev->target = targ;
  3795. /*
  3796. * We start out life using untagged
  3797. * transactions of which we allow one.
  3798. */
  3799. dev->openings = 1;
  3800. /*
  3801. * Set maxtags to 0. This will be changed if we
  3802. * later determine that we are dealing with
  3803. * a tagged queuing capable device.
  3804. */
  3805. dev->maxtags = 0;
  3806. targ->refcount++;
  3807. targ->devices[lun] = dev;
  3808. return (dev);
  3809. }
  3810. static void
  3811. ahd_linux_free_device(struct ahd_softc *ahd, struct ahd_linux_device *dev)
  3812. {
  3813. struct ahd_linux_target *targ;
  3814. del_timer(&dev->timer);
  3815. targ = dev->target;
  3816. targ->devices[dev->lun] = NULL;
  3817. free(dev, M_DEVBUF);
  3818. targ->refcount--;
  3819. if (targ->refcount == 0
  3820. && (targ->flags & AHD_DV_REQUIRED) == 0)
  3821. ahd_linux_free_target(ahd, targ);
  3822. }
/*
 * Core-to-OSM asynchronous event callback.  Routes negotiation
 * changes, bus-device resets, and bus resets to the appropriate Linux
 * SCSI mid-layer notification or console report.
 */
void
ahd_send_async(struct ahd_softc *ahd, char channel,
	       u_int target, u_int lun, ac_code code, void *arg)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char buf[80];
		struct ahd_linux_target *targ;
		struct info_str info;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;

		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		targ = ahd->platform_data->targets[target];
		if (targ == NULL)
			break;
		if (tinfo->curr.period == targ->last_tinfo.period
		 && tinfo->curr.width == targ->last_tinfo.width
		 && tinfo->curr.offset == targ->last_tinfo.offset
		 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
			if (bootverbose == 0)
				break;

		/* Remember what we report for the duplicate check above. */
		targ->last_tinfo.period = tinfo->curr.period;
		targ->last_tinfo.width = tinfo->curr.width;
		targ->last_tinfo.offset = tinfo->curr.offset;
		targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;

		printf("(%s:%c:", ahd_name(ahd), channel);
		if (target == CAM_TARGET_WILDCARD)
			printf("*): ");
		else
			printf("%d): ", target);
		ahd_format_transinfo(&info, &tinfo->curr);
		/*
		 * NOTE(review): assumes ahd_format_transinfo advances
		 * info.buffer as it writes — confirm against its
		 * definition before changing this termination logic.
		 */
		if (info.pos < info.length)
			*info.buffer = '\0';
		else
			buf[info.length - 1] = '\0';
		printf("%s", buf);
		break;
	}
	case AC_SENT_BDR:
	{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahd->platform_data->host,
					 channel - 'A', target);
#else
		Scsi_Device *scsi_dev;

		/*
		 * Find the SCSI device associated with this
		 * request and indicate that a UA is expected.
		 */
		for (scsi_dev = ahd->platform_data->host->host_queue;
		     scsi_dev != NULL; scsi_dev = scsi_dev->next) {
			if (channel - 'A' == scsi_dev->channel
			 && target == scsi_dev->id
			 && (lun == CAM_LUN_WILDCARD
			  || lun == scsi_dev->lun)) {
				scsi_dev->was_reset = 1;
				scsi_dev->expecting_cc_ua = 1;
			}
		}
#endif
		break;
	}
	case AC_BUS_RESET:
		if (ahd->platform_data->host != NULL) {
			scsi_report_bus_reset(ahd->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
}
/*
 * Calls the higher level scsi done function and frees the scb.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	Scsi_Cmnd *cmd;
	struct ahd_linux_device *dev;

	/*
	 * Completing an SCB that is no longer active indicates driver
	 * state corruption; halt rather than risk further damage.
	 */
	if ((scb->flags & SCB_ACTIVE) == 0) {
		printf("SCB %d done'd twice\n", SCB_GET_TAG(scb));
		ahd_dump_card_state(ahd);
		panic("Stopping for safety");
	}
	LIST_REMOVE(scb, pending_links);
	cmd = scb->io_ctx;
	dev = scb->platform_data->dev;
	dev->active--;
	dev->openings++;
	/* Release the device-queue freeze this command accounted for. */
	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
		cmd->result &= ~(CAM_DEV_QFRZN << 16);
		dev->qfrozen--;
	}
	ahd_linux_unmap_scb(ahd, scb);

	/*
	 * Guard against stale sense data.
	 * The Linux mid-layer assumes that sense
	 * was retrieved anytime the first byte of
	 * the sense buffer looks "sane".
	 */
	cmd->sense_buffer[0] = 0;
	if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
		uint32_t amount_xferred;

		amount_xferred =
		    ahd_get_transfer_length(scb) - ahd_get_residual(scb);
		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MISC) != 0) {
				ahd_print_path(ahd, scb);
				printf("Set CAM_UNCOR_PARITY\n");
			}
#endif
			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
#ifdef AHD_REPORT_UNDERFLOWS
		/*
		 * This code is disabled by default as some
		 * clients of the SCSI system do not properly
		 * initialize the underflow parameter.  This
		 * results in spurious termination of commands
		 * that complete as expected (e.g. underflow is
		 * allowed as command can return variable amounts
		 * of data.
		 */
		} else if (amount_xferred < scb->io_ctx->underflow) {
			u_int i;

			ahd_print_path(ahd, scb);
			printf("CDB:");
			for (i = 0; i < scb->io_ctx->cmd_len; i++)
				printf(" 0x%x", scb->io_ctx->cmnd[i]);
			printf("\n");
			ahd_print_path(ahd, scb);
			printf("Saw underflow (%ld of %ld bytes). "
			       "Treated as error\n",
				ahd_get_residual(scb),
				ahd_get_transfer_length(scb));
			ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
#endif
		} else {
			ahd_set_transaction_status(scb, CAM_REQ_CMP);
		}
	} else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
		ahd_linux_handle_scsi_status(ahd, dev, scb);
	} else if (ahd_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
		/* Selection timeout: mark the device unconfigured. */
		dev->flags |= AHD_DEV_UNCONFIGURED;
		if (AHD_DV_CMD(cmd) == FALSE)
			dev->target->flags &= ~AHD_DV_REQUIRED;
	}

	/*
	 * Start DV for devices that require it assuming the first command
	 * sent does not result in a selection timeout.
	 */
	if (ahd_get_transaction_status(scb) != CAM_SEL_TIMEOUT
	 && (dev->target->flags & AHD_DV_REQUIRED) != 0)
		ahd_linux_start_dv(ahd);

	if (dev->openings == 1
	 && ahd_get_transaction_status(scb) == CAM_REQ_CMP
	 && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
		dev->tag_success_count++;
	/*
	 * Some devices deal with temporary internal resource
	 * shortages by returning queue full.  When the queue
	 * full occurs, we throttle back.  Slowly try to get
	 * back to our previous queue depth.
	 */
	if ((dev->openings + dev->active) < dev->maxtags
	 && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
		dev->tag_success_count = 0;
		dev->openings++;
	}

	if (dev->active == 0)
		dev->commands_since_idle_or_otag = 0;

	if (TAILQ_EMPTY(&dev->busyq)) {
		/* Idle, unconfigured devices with no pending timer
		 * can be reclaimed now. */
		if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
		 && dev->active == 0
		 && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
			ahd_linux_free_device(ahd, dev);
	} else if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
		/* More work is queued; schedule the device to run. */
		TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
		dev->flags |= AHD_DEV_ON_RUN_LIST;
	}

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		printf("Recovery SCB completes\n");
		if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		/* Wake the error-recovery thread waiting on this SCB. */
		if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
			scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
			up(&ahd->platform_data->eh_sem);
		}
	}

	ahd_free_scb(ahd, scb);
	ahd_linux_queue_cmd_complete(ahd, cmd);

	/* Notify the DV thread once the last pending SCB drains. */
	if ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_EMPTY) != 0
	 && LIST_FIRST(&ahd->pending_scbs) == NULL) {
		ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_EMPTY;
		up(&ahd->platform_data->dv_sem);
	}
}
/*
 * Interpret a non-OK SCSI status for a completed SCB: copy back sense
 * data for check conditions and adjust per-device queue-depth
 * throttling for queue-full/busy conditions.
 */
static void
ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
			     struct ahd_linux_device *dev, struct scb *scb)
{
	struct ahd_devinfo devinfo;

	ahd_compile_devinfo(&devinfo,
			    ahd->our_id,
			    dev->target->target, dev->lun,
			    dev->target->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);

	/*
	 * We don't currently trust the mid-layer to
	 * properly deal with queue full or busy.  So,
	 * when one occurs, we tell the mid-layer to
	 * unconditionally requeue the command to us
	 * so that we can retry it ourselves.  We also
	 * implement our own throttling mechanism so
	 * we don't clobber the device with too many
	 * commands.
	 */
	switch (ahd_get_scsi_status(scb)) {
	default:
		break;
	case SCSI_STATUS_CHECK_COND:
	case SCSI_STATUS_CMD_TERMINATED:
	{
		Scsi_Cmnd *cmd;

		/*
		 * Copy sense information to the OS's cmd
		 * structure if it is available.
		 */
		cmd = scb->io_ctx;
		if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) {
			struct scsi_status_iu_header *siu;
			u_int sense_size;
			u_int sense_offset;

			if (scb->flags & SCB_SENSE) {
				/* Traditional autosense: data starts at
				 * the head of the sense buffer. */
				sense_size = MIN(sizeof(struct scsi_sense_data)
					       - ahd_get_sense_residual(scb),
						 sizeof(cmd->sense_buffer));
				sense_offset = 0;
			} else {
				/*
				 * Copy only the sense data into the provided
				 * buffer.
				 */
				siu = (struct scsi_status_iu_header *)
				    scb->sense_data;
				sense_size = MIN(scsi_4btoul(siu->sense_length),
						 sizeof(cmd->sense_buffer));
				sense_offset = SIU_SENSE_OFFSET(siu);
			}
			memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
			memcpy(cmd->sense_buffer,
			       ahd_get_sense_buf(ahd, scb)
			       + sense_offset, sense_size);
			cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_SENSE) {
				int i;

				printf("Copied %d bytes of sense data at %d:",
				       sense_size, sense_offset);
				for (i = 0; i < sense_size; i++) {
					if ((i & 0xF) == 0)
						printf("\n");
					printf("0x%x ", cmd->sense_buffer[i]);
				}
				printf("\n");
			}
#endif
		}
		break;
	}
	case SCSI_STATUS_QUEUE_FULL:
	{
		/*
		 * By the time the core driver has returned this
		 * command, all other commands that were queued
		 * to us but not the device have been returned.
		 * This ensures that dev->active is equal to
		 * the number of commands actually queued to
		 * the device.
		 */
		dev->tag_success_count = 0;
		if (dev->active != 0) {
			/*
			 * Drop our opening count to the number
			 * of commands currently outstanding.
			 */
			dev->openings = 0;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
				ahd_print_path(ahd, scb);
				printf("Dropping tag count to %d\n",
				       dev->active);
			}
#endif
			if (dev->active == dev->tags_on_last_queuefull) {
				dev->last_queuefull_same_count++;
				/*
				 * If we repeatedly see a queue full
				 * at the same queue depth, this
				 * device has a fixed number of tag
				 * slots.  Lock in this tag depth
				 * so we stop seeing queue fulls from
				 * this device.
				 */
				if (dev->last_queuefull_same_count
				 == AHD_LOCK_TAGS_COUNT) {
					dev->maxtags = dev->active;
					ahd_print_path(ahd, scb);
					printf("Locking max tag count at %d\n",
					       dev->active);
				}
			} else {
				dev->tags_on_last_queuefull = dev->active;
				dev->last_queuefull_same_count = 0;
			}
			/* Requeue this command and retry it ourselves. */
			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahd_set_scsi_status(scb, SCSI_STATUS_OK);
			ahd_platform_set_tags(ahd, &devinfo,
					      (dev->flags & AHD_DEV_Q_BASIC)
					    ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
			break;
		}
		/*
		 * Drop down to a single opening, and treat this
		 * as if the target returned BUSY SCSI status.
		 */
		dev->openings = 1;
		ahd_platform_set_tags(ahd, &devinfo,
				      (dev->flags & AHD_DEV_Q_BASIC)
				    ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
		ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Set a short timer to defer sending commands for
		 * a bit since Linux will not delay in this case.
		 */
		if ((dev->flags & AHD_DEV_TIMER_ACTIVE) != 0) {
			printf("%s:%c:%d: Device Timer still active during "
			       "busy processing\n", ahd_name(ahd),
			       dev->target->channel, dev->target->target);
			break;
		}
		/* Freeze the device queue for HZ/2 before retrying. */
		dev->flags |= AHD_DEV_TIMER_ACTIVE;
		dev->qfrozen++;
		init_timer(&dev->timer);
		dev->timer.data = (u_long)dev;
		dev->timer.expires = jiffies + (HZ/2);
		dev->timer.function = ahd_linux_dev_timed_unfreeze;
		add_timer(&dev->timer);
		break;
	}
}
/*
 * Translate a completed command's CAM status into a Linux result code
 * (unless it is a DV command) and insert it, sorted by serial number,
 * into the completion queue drained by ahd_linux_run_complete_queue.
 */
static void
ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, Scsi_Cmnd *cmd)
{
	/*
	 * Typically, the complete queue has very few entries
	 * queued to it before the queue is emptied by
	 * ahd_linux_run_complete_queue, so sorting the entries
	 * by generation number should be inexpensive.
	 * We perform the sort so that commands that complete
	 * with an error are returned in the order originally
	 * queued to the controller so that any subsequent retries
	 * are performed in order.  The underlying ahd routines do
	 * not guarantee the order that aborted commands will be
	 * returned to us.
	 */
	struct ahd_completeq *completeq;
	struct ahd_cmd *list_cmd;
	struct ahd_cmd *acmd;

	/*
	 * Map CAM error codes into Linux Error codes.  We
	 * avoid the conversion so that the DV code has the
	 * full error information available when making
	 * state change decisions.
	 */
	if (AHD_DV_CMD(cmd) == FALSE) {
		uint32_t status;
		u_int new_status;

		status = ahd_cmd_get_transaction_status(cmd);
		if (status != CAM_REQ_CMP) {
			struct ahd_linux_device *dev;
			struct ahd_devinfo devinfo;
			cam_status cam_status;
			uint32_t action;
			u_int scsi_status;

			/* Look up the device to decide whether this error
			 * warrants falling back to a slower transfer mode. */
			dev = ahd_linux_get_device(ahd, cmd->device->channel,
						   cmd->device->id,
						   cmd->device->lun,
						   /*alloc*/FALSE);

			if (dev == NULL)
				goto no_fallback;

			ahd_compile_devinfo(&devinfo,
					    ahd->our_id,
					    dev->target->target, dev->lun,
					    dev->target->channel == 0 ? 'A':'B',
					    ROLE_INITIATOR);
	
			scsi_status = ahd_cmd_get_scsi_status(cmd);
			cam_status = ahd_cmd_get_transaction_status(cmd);
			action = aic_error_action(cmd, dev->target->inq_data,
						  cam_status, scsi_status);
			if ((action & SSQ_FALLBACK) != 0) {

				/* Update stats */
				dev->target->errors_detected++;
				if (dev->target->cmds_since_error == 0)
					dev->target->cmds_since_error++;
				else {
					dev->target->cmds_since_error = 0;
					ahd_linux_fallback(ahd, &devinfo);
				}
			}
		}
no_fallback:
		switch (status) {
		case CAM_REQ_INPROG:
		case CAM_REQ_CMP:
		case CAM_SCSI_STATUS_ERROR:
			new_status = DID_OK;
			break;
		case CAM_REQ_ABORTED:
			new_status = DID_ABORT;
			break;
		case CAM_BUSY:
			new_status = DID_BUS_BUSY;
			break;
		case CAM_REQ_INVALID:
		case CAM_PATH_INVALID:
			new_status = DID_BAD_TARGET;
			break;
		case CAM_SEL_TIMEOUT:
			new_status = DID_NO_CONNECT;
			break;
		case CAM_SCSI_BUS_RESET:
		case CAM_BDR_SENT:
			new_status = DID_RESET;
			break;
		case CAM_UNCOR_PARITY:
			new_status = DID_PARITY;
			break;
		case CAM_CMD_TIMEOUT:
			new_status = DID_TIME_OUT;
			break;
		case CAM_UA_ABORT:
		case CAM_REQ_CMP_ERR:
		case CAM_AUTOSENSE_FAIL:
		case CAM_NO_HBA:
		case CAM_DATA_RUN_ERR:
		case CAM_UNEXP_BUSFREE:
		case CAM_SEQUENCE_FAIL:
		case CAM_CCB_LEN_ERR:
		case CAM_PROVIDE_FAIL:
		case CAM_REQ_TERMIO:
		case CAM_UNREC_HBA_ERROR:
		case CAM_REQ_TOO_BIG:
			new_status = DID_ERROR;
			break;
		case CAM_REQUEUE_REQ:
			/*
			 * If we want the request requeued, make sure there
			 * are sufficient retries.  In the old scsi error code,
			 * we used to be able to specify a result code that
			 * bypassed the retry count.  Now we must use this
			 * hack.  We also "fake" a check condition with
			 * a sense code of ABORTED COMMAND.  This seems to
			 * evoke a retry even if this command is being sent
			 * via the eh thread.  Ick!  Ick!  Ick!
			 */
			if (cmd->retries > 0)
				cmd->retries--;
			new_status = DID_OK;
			ahd_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
			cmd->result |= (DRIVER_SENSE << 24);
			memset(cmd->sense_buffer, 0,
			       sizeof(cmd->sense_buffer));
			cmd->sense_buffer[0] = SSD_ERRCODE_VALID
					     | SSD_CURRENT_ERROR;
			cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
			break;
		default:
			/* We should never get here */
			new_status = DID_ERROR;
			break;
		}

		ahd_cmd_set_transaction_status(cmd, new_status);
	}

	/* Insert into the completion queue, sorted by serial number so
	 * retries happen in original submission order. */
	completeq = &ahd->platform_data->completeq;
	list_cmd = TAILQ_FIRST(completeq);
	acmd = (struct ahd_cmd *)cmd;
	while (list_cmd != NULL
	    && acmd_scsi_cmd(list_cmd).serial_number
	     < acmd_scsi_cmd(acmd).serial_number)
		list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
	if (list_cmd != NULL)
		TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
	else
		TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
}
  4345. static void
  4346. ahd_linux_filter_inquiry(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
  4347. {
  4348. struct scsi_inquiry_data *sid;
  4349. struct ahd_initiator_tinfo *tinfo;
  4350. struct ahd_transinfo *user;
  4351. struct ahd_transinfo *goal;
  4352. struct ahd_transinfo *curr;
  4353. struct ahd_tmode_tstate *tstate;
  4354. struct ahd_linux_device *dev;
  4355. u_int width;
  4356. u_int period;
  4357. u_int offset;
  4358. u_int ppr_options;
  4359. u_int trans_version;
  4360. u_int prot_version;
  4361. /*
  4362. * Determine if this lun actually exists. If so,
  4363. * hold on to its corresponding device structure.
  4364. * If not, make sure we release the device and
  4365. * don't bother processing the rest of this inquiry
  4366. * command.
  4367. */
  4368. dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
  4369. devinfo->target, devinfo->lun,
  4370. /*alloc*/TRUE);
  4371. sid = (struct scsi_inquiry_data *)dev->target->inq_data;
  4372. if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
  4373. dev->flags &= ~AHD_DEV_UNCONFIGURED;
  4374. } else {
  4375. dev->flags |= AHD_DEV_UNCONFIGURED;
  4376. return;
  4377. }
  4378. /*
  4379. * Update our notion of this device's transfer
  4380. * negotiation capabilities.
  4381. */
  4382. tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
  4383. devinfo->our_scsiid,
  4384. devinfo->target, &tstate);
  4385. user = &tinfo->user;
  4386. goal = &tinfo->goal;
  4387. curr = &tinfo->curr;
  4388. width = user->width;
  4389. period = user->period;
  4390. offset = user->offset;
  4391. ppr_options = user->ppr_options;
  4392. trans_version = user->transport_version;
  4393. prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
  4394. /*
  4395. * Only attempt SPI3/4 once we've verified that
  4396. * the device claims to support SPI3/4 features.
  4397. */
  4398. if (prot_version < SCSI_REV_2)
  4399. trans_version = SID_ANSI_REV(sid);
  4400. else
  4401. trans_version = SCSI_REV_2;
  4402. if ((sid->flags & SID_WBus16) == 0)
  4403. width = MSG_EXT_WDTR_BUS_8_BIT;
  4404. if ((sid->flags & SID_Sync) == 0) {
  4405. period = 0;
  4406. offset = 0;
  4407. ppr_options = 0;
  4408. }
  4409. if ((sid->spi3data & SID_SPI_QAS) == 0)
  4410. ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
  4411. if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
  4412. ppr_options &= MSG_EXT_PPR_QAS_REQ;
  4413. if ((sid->spi3data & SID_SPI_IUS) == 0)
  4414. ppr_options &= (MSG_EXT_PPR_DT_REQ
  4415. | MSG_EXT_PPR_QAS_REQ);
  4416. if (prot_version > SCSI_REV_2
  4417. && ppr_options != 0)
  4418. trans_version = user->transport_version;
  4419. ahd_validate_width(ahd, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
  4420. ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
  4421. ahd_validate_offset(ahd, /*tinfo limit*/NULL, period,
  4422. &offset, width, ROLE_UNKNOWN);
  4423. if (offset == 0 || period == 0) {
  4424. period = 0;
  4425. offset = 0;
  4426. ppr_options = 0;
  4427. }
  4428. /* Apply our filtered user settings. */
  4429. curr->transport_version = trans_version;
  4430. curr->protocol_version = prot_version;
  4431. ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, /*paused*/FALSE);
  4432. ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options,
  4433. AHD_TRANS_GOAL, /*paused*/FALSE);
  4434. }
  4435. void
  4436. ahd_freeze_simq(struct ahd_softc *ahd)
  4437. {
  4438. ahd->platform_data->qfrozen++;
  4439. if (ahd->platform_data->qfrozen == 1) {
  4440. scsi_block_requests(ahd->platform_data->host);
  4441. ahd_platform_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
  4442. CAM_LUN_WILDCARD, SCB_LIST_NULL,
  4443. ROLE_INITIATOR, CAM_REQUEUE_REQ);
  4444. }
  4445. }
  4446. void
  4447. ahd_release_simq(struct ahd_softc *ahd)
  4448. {
  4449. u_long s;
  4450. int unblock_reqs;
  4451. unblock_reqs = 0;
  4452. ahd_lock(ahd, &s);
  4453. if (ahd->platform_data->qfrozen > 0)
  4454. ahd->platform_data->qfrozen--;
  4455. if (ahd->platform_data->qfrozen == 0) {
  4456. unblock_reqs = 1;
  4457. }
  4458. if (AHD_DV_SIMQ_FROZEN(ahd)
  4459. && ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_RELEASE) != 0)) {
  4460. ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_RELEASE;
  4461. up(&ahd->platform_data->dv_sem);
  4462. }
  4463. ahd_schedule_runq(ahd);
  4464. ahd_unlock(ahd, &s);
  4465. /*
  4466. * There is still a race here. The mid-layer
  4467. * should keep its own freeze count and use
  4468. * a bottom half handler to run the queues
  4469. * so we can unblock with our own lock held.
  4470. */
  4471. if (unblock_reqs)
  4472. scsi_unblock_requests(ahd->platform_data->host);
  4473. }
  4474. static void
  4475. ahd_linux_sem_timeout(u_long arg)
  4476. {
  4477. struct scb *scb;
  4478. struct ahd_softc *ahd;
  4479. u_long s;
  4480. scb = (struct scb *)arg;
  4481. ahd = scb->ahd_softc;
  4482. ahd_lock(ahd, &s);
  4483. if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
  4484. scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
  4485. up(&ahd->platform_data->eh_sem);
  4486. }
  4487. ahd_unlock(ahd, &s);
  4488. }
  4489. static void
  4490. ahd_linux_dev_timed_unfreeze(u_long arg)
  4491. {
  4492. struct ahd_linux_device *dev;
  4493. struct ahd_softc *ahd;
  4494. u_long s;
  4495. dev = (struct ahd_linux_device *)arg;
  4496. ahd = dev->target->ahd;
  4497. ahd_lock(ahd, &s);
  4498. dev->flags &= ~AHD_DEV_TIMER_ACTIVE;
  4499. if (dev->qfrozen > 0)
  4500. dev->qfrozen--;
  4501. if (dev->qfrozen == 0
  4502. && (dev->flags & AHD_DEV_ON_RUN_LIST) == 0)
  4503. ahd_linux_run_device_queue(ahd, dev);
  4504. if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
  4505. && dev->active == 0)
  4506. ahd_linux_free_device(ahd, dev);
  4507. ahd_unlock(ahd, &s);
  4508. }
  4509. void
  4510. ahd_platform_dump_card_state(struct ahd_softc *ahd)
  4511. {
  4512. struct ahd_linux_device *dev;
  4513. int target;
  4514. int maxtarget;
  4515. int lun;
  4516. int i;
  4517. maxtarget = (ahd->features & AHD_WIDE) ? 15 : 7;
  4518. for (target = 0; target <=maxtarget; target++) {
  4519. for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
  4520. struct ahd_cmd *acmd;
  4521. dev = ahd_linux_get_device(ahd, 0, target,
  4522. lun, /*alloc*/FALSE);
  4523. if (dev == NULL)
  4524. continue;
  4525. printf("DevQ(%d:%d:%d): ", 0, target, lun);
  4526. i = 0;
  4527. TAILQ_FOREACH(acmd, &dev->busyq, acmd_links.tqe) {
  4528. if (i++ > AHD_SCB_MAX)
  4529. break;
  4530. }
  4531. printf("%d waiting\n", i);
  4532. }
  4533. }
  4534. }
/*
 * Module entry point: register the driver with the SCSI mid-layer.
 * Returns 0 on success or a negative errno when no controllers attach.
 */
static int __init
ahd_linux_init(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	/* 2.6-style registration; detect performs the full probe. */
	return ahd_linux_detect(&aic79xx_driver_template);
#else
	scsi_register_module(MODULE_SCSI_HA, &aic79xx_driver_template);
	/* No host adapters attached: back the registration out. */
	if (aic79xx_driver_template.present == 0) {
		scsi_unregister_module(MODULE_SCSI_HA,
				       &aic79xx_driver_template);
		return (-ENODEV);
	}

	return (0);
#endif
}
/*
 * Module exit: stop per-controller DV threads first, detach from the
 * SCSI mid-layer (2.4 only), then unregister from the PCI core.
 */
static void __exit
ahd_linux_exit(void)
{
	struct ahd_softc *ahd;

	/*
	 * Shutdown DV threads before going into the SCSI mid-layer.
	 * This avoids situations where the mid-layer locks the entire
	 * kernel so that waiting for our DV threads to exit leads
	 * to deadlock.
	 */
	TAILQ_FOREACH(ahd, &ahd_tailq, links) {

		ahd_linux_kill_dv_thread(ahd);
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/*
	 * In 2.4 we have to unregister from the PCI core _after_
	 * unregistering from the scsi midlayer to avoid dangling
	 * references.
	 */
	scsi_unregister_module(MODULE_SCSI_HA, &aic79xx_driver_template);
#endif
	ahd_linux_pci_exit();
}
/* Register the standard module entry/exit hooks. */
module_init(ahd_linux_init);
module_exit(ahd_linux_exit);